Graphviz 14.1.3~dev.20260207.0611
mincross.c
1/*************************************************************************
2 * Copyright (c) 2011 AT&T Intellectual Property
3 * All rights reserved. This program and the accompanying materials
4 * are made available under the terms of the Eclipse Public License v1.0
5 * which accompanies this distribution, and is available at
6 * https://www.eclipse.org/legal/epl-v10.html
7 *
8 * Contributors: Details at https://graphviz.org
9 *************************************************************************/
10
11
12/*
13 * dot_mincross(g) takes a ranked graph and finds an ordering
14 * that avoids edge crossings. Clusters are expanded.
15 * N.B. the rank structure is global (not allocated per cluster)
16 * because mincross may compare nodes in different clusters.
17 */
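/*
 * Rough flow of dot_mincross() below: init_mincross() builds the global
 * rank structure, mincross() is run on each connected component,
 * merge2() splices the components into complete per-rank node lists,
 * mincross_clust() then recurses into every cluster, an optional final
 * mincross() pass re-runs over the whole graph (controlled by the
 * "remincross" attribute), and cleanup2() releases temporary data.
 */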
18
19#include "config.h"
20
21#include <assert.h>
22#include <cgraph/cgraph.h>
23#include <dotgen/dot.h>
24#include <inttypes.h>
25#include <limits.h>
26#include <stdbool.h>
27#include <stdint.h>
28#include <stdlib.h>
29#include <string.h>
30#include <util/alloc.h>
31#include <util/bitarray.h>
32#include <util/exit.h>
33#include <util/gv_math.h>
34#include <util/itos.h>
35#include <util/list.h>
36#include <util/streq.h>
37
38struct adjmatrix_t {
39 size_t nrows; ///< how many rows have been allocated?
40 size_t ncols; ///< how many columns have been allocated?
41 uint8_t *data; ///< bit-packed backing memory
42};
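/*
 * The matrix stores one bit per cell in row-major order. For example, with
 * ncols == 10, cell (row 2, col 3) maps to bit index 2*10 + 3 == 23, i.e.
 * bit 7 of data[2], matching the arithmetic in matrix_get() and
 * matrix_set() below.
 */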
43
50static bool matrix_get(adjmatrix_t *me, size_t row, size_t col) {
51 assert(me != NULL);
52
53 // if this index is beyond anything allocated, infer it as unset
54 if (row >= me->nrows) {
55 return false;
56 }
57 if (col >= me->ncols) {
58 return false;
59 }
60
61 const size_t index = row * me->ncols + col;
62 const size_t byte_index = index / 8;
63 const size_t bit_index = index % 8;
64 return (me->data[byte_index] >> bit_index) & 1;
65}
66
72static void matrix_set(adjmatrix_t *me, size_t row, size_t col) {
73 assert(me != NULL);
74
75 // if we are updating beyond allocated space, expand the backing store
76 if (row >= me->nrows || col >= me->ncols) {
77 // allocate an enlarged space
78 const size_t nrows = zmax(me->nrows, row + 1);
79 const size_t ncols = zmax(me->ncols, col + 1);
80 const size_t bits = nrows * ncols;
81 const size_t bytes = bits / 8 + (bits % 8 == 0 ? 0 : 1);
82 uint8_t *const data = gv_alloc(bytes);
83
84 // replicate set bits
85 for (size_t r = 0; r < me->nrows; ++r) {
86 for (size_t c = 0; c < me->ncols; ++c) {
87 if (!matrix_get(me, r, c)) {
88 continue;
89 }
90 const size_t index = r * ncols + c;
91 const size_t byte_index = index / 8;
92 const size_t bit_index = index % 8;
93 data[byte_index] |= (uint8_t)(UINT8_C(1) << bit_index);
94 }
95 }
96
97 // replace old matrix with newly expanded one
98 free(me->data);
99 *me = (adjmatrix_t){.nrows = nrows, .ncols = ncols, .data = data};
100 }
101
102 assert(row < me->nrows);
103 assert(col < me->ncols);
104
105 const size_t index = row * me->ncols + col;
106 const size_t byte_index = index / 8;
107 const size_t bit_index = index % 8;
108 me->data[byte_index] |= (uint8_t)(UINT8_C(1) << bit_index);
109}
110
111/* #define DEBUG */
112#define MARK(v) (ND_mark(v))
113#define saveorder(v) (ND_coord(v)).x
114#define flatindex(v) ((size_t)ND_low(v))
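/*
 * These macros reuse existing node fields as scratch storage during
 * crossing minimization: saveorder() keeps the best ordering found so far
 * in ND_coord(v).x (see save_best()/restore_best()), and flatindex()
 * reuses ND_low(v) as a node's index into the per-rank flat adjacency
 * matrix.
 */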
115
116 /* forward declarations */
117static bool medians(graph_t * g, int r0, int r1);
118static int nodeposcmpf(const void *, const void *);
119static int edgeidcmpf(const void *, const void *);
120static void flat_breakcycles(graph_t * g);
121static void flat_reorder(graph_t * g);
122static void flat_search(graph_t * g, node_t * v);
123static void init_mincross(graph_t * g);
124static void merge2(graph_t * g);
125static void init_mccomp(graph_t *g, size_t c);
126static void cleanup2(graph_t *g, int64_t nc);
128static int64_t mincross_clust(graph_t *g);
130static int64_t mincross(graph_t *g, int startpass);
131static void mincross_step(graph_t * g, int pass);
132static void mincross_options(graph_t * g);
133static void save_best(graph_t * g);
134static void restore_best(graph_t * g);
135
144static adjmatrix_t *new_matrix(size_t initial_rows, size_t initial_columns);
145
146static void free_matrix(adjmatrix_t * p);
147static int ordercmpf(const void *, const void *);
148static int64_t ncross(void);
149#ifdef DEBUG
150void check_order(void);
151void check_vlists(graph_t * g);
152void node_in_root_vlist(node_t * n);
153#endif
154
155
156 /* mincross parameters */
157static int MinQuit;
158static const double Convergence = .995;
159
160static graph_t *Root;
161static int GlobalMinRank, GlobalMaxRank;
162static edge_t **TE_list;
163static int *TI_list;
164static bool ReMincross;
165
166typedef struct {
167 Agrec_t h;
168 int x, lo, hi;
169 Agnode_t* np;
170} info_t;
171
172#define ND_x(n) (((info_t*)AGDATA(n))->x)
173#define ND_lo(n) (((info_t*)AGDATA(n))->lo)
174#define ND_hi(n) (((info_t*)AGDATA(n))->hi)
175#define ND_np(n) (((info_t*)AGDATA(n))->np)
176#define ND_idx(n) (ND_order(ND_np(n)))
177
178static void
179emptyComp (graph_t* sg)
180{
181 Agnode_t* n;
182 Agnode_t* nxt;
183
184 for (n = agfstnode(sg); n; n = nxt) {
185 nxt = agnxtnode (sg, n);
186 agdelnode(sg,n);
187 }
188}
189
190#define isBackedge(e) (ND_idx(aghead(e)) > ND_idx(agtail(e)))
191
192static Agnode_t*
193findSource (Agraph_t* g, Agraph_t* sg)
194{
195 Agnode_t* n;
196
197 for (n = agfstnode(sg); n; n = agnxtnode(sg, n))
198 if (agdegree(g,n,1,0) == 0) return n;
199 return NULL;
200}
201
202static int
203topsort (Agraph_t* g, Agraph_t* sg, Agnode_t** arr)
204{
205 Agnode_t* n;
206 Agedge_t* e;
207 Agedge_t* nxte;
208 int cnt = 0;
209
210 while ((n = findSource(g, sg))) {
211 arr[cnt++] = ND_np(n);
212 agdelnode(sg, n);
213 for (e = agfstout(g, n); e; e = nxte) {
214 nxte = agnxtout(g, e);
215 agdeledge(g, e);
216 }
217 }
218 return cnt;
219}
220
221static int
222getComp (graph_t* g, node_t* n, graph_t* comp, int* indices)
223{
224 int backedge = 0;
225 Agedge_t* e;
226
227 ND_x(n) = 1;
228 indices[agnnodes(comp)] = ND_idx(n);
229 agsubnode(comp, n, 1);
230 for (e = agfstout(g,n); e; e = agnxtout(g,e)) {
231 if (isBackedge(e)) backedge++;
232 if (!ND_x(aghead(e)))
233 backedge += getComp(g, aghead(e), comp, indices);
234 }
235 for (e = agfstin(g,n); e; e = agnxtin(g,e)) {
236 if (isBackedge(e)) backedge++;
237 if (!ND_x(agtail(e)))
238 backedge += getComp(g, agtail(e), comp, indices);
239 }
240 return backedge;
241}
242
243/// for each pair of nodes (labels), we add an edge
244static void
245fixLabelOrder (graph_t* g, rank_t* rk)
246{
247 int cnt;
248 bool haveBackedge = false;
249 Agraph_t* sg;
250 Agnode_t* n;
251 Agnode_t* nxtp;
252 Agnode_t* v;
253
254 for (n = agfstnode(g); n; n = nxtp) {
255 v = nxtp = agnxtnode(g, n);
256 for (; v; v = agnxtnode(g, v)) {
257 if (ND_hi(v) <= ND_lo(n)) {
258 haveBackedge = true;
259 agedge(g, v, n, NULL, 1);
260 }
261 else if (ND_hi(n) <= ND_lo(v)) {
262 agedge(g, n, v, NULL, 1);
263 }
264 }
265 }
266 if (!haveBackedge) return;
267
268 sg = agsubg(g, "comp", 1);
269 Agnode_t **arr = gv_calloc(agnnodes(g), sizeof(Agnode_t*));
270 int *indices = gv_calloc(agnnodes(g), sizeof(int));
271
272 for (n = agfstnode(g); n; n = agnxtnode(g,n)) {
273 if (ND_x(n) || agdegree(g,n,1,1) == 0) continue;
274 if (getComp(g, n, sg, indices)) {
275 int i, sz = agnnodes(sg);
276 cnt = topsort (g, sg, arr);
277 assert (cnt == sz);
278 qsort(indices, cnt, sizeof(int), ordercmpf);
279 for (i = 0; i < sz; i++) {
280 ND_order(arr[i]) = indices[i];
281 rk->v[indices[i]] = arr[i];
282 }
283 }
284 emptyComp(sg);
285 }
286 free(indices);
287 free (arr);
288}
289
290/* Check that the ordering of labels for flat edges is consistent.
291 * This is necessary because dot_position will attempt to force the label
292 * to be between the edge's vertices. This can lead to an infeasible problem.
293 *
294 * We check each rank for any flat edge labels (as dummy nodes) and create a
295 * graph with a node for each label. If the graph contains more than 1 node, we
296 * call fixLabelOrder to see if there really is a problem and, if so, fix it.
297 */
298void
299checkLabelOrder (graph_t* g)
300{
301 graph_t* lg = NULL;
302
303 for (int r = GD_minrank(g); r <= GD_maxrank(g); r++) {
304 rank_t *const rk = GD_rank(g)+r;
305 for (int j = 0; j < rk->n; j++) {
306 Agnode_t *const u = rk->v[j];
307 if (ND_alg(u)) {
308 if (!lg) lg = agopen ("lg", Agstrictdirected, 0);
309 Agnode_t *const n = agnode(lg, ITOS(j), 1);
310 agbindrec(n, "info", sizeof(info_t), true);
311 int lo = ND_order(aghead(ND_out(u).list[0]));
312 int hi = ND_order(aghead(ND_out(u).list[1]));
313 if (lo > hi) {
314 SWAP(&lo, &hi);
315 }
316 ND_lo(n) = lo;
317 ND_hi(n) = hi;
318 ND_np(n) = u;
319 }
320 }
321 if (lg) {
322 if (agnnodes(lg) > 1) fixLabelOrder (lg, rk);
323 agclose(lg);
324 lg = NULL;
325 }
326 }
327}
328
329/* Minimize edge crossings
330 * Note that nodes are not placed into GD_rank(g) until mincross()
331 * is called.
332 */
333int dot_mincross(graph_t *g) {
334 int64_t nc;
335 char *s;
336
337 /* check whether malformed input has led to empty cluster that the crossing
338 * functions will not anticipate
339 */
340 {
341 size_t i;
342 for (i = 1; i <= (size_t)GD_n_cluster(g); ) {
343 if (agfstnode(GD_clust(g)[i]) == NULL) {
344 agwarningf("removing empty cluster\n");
345 memmove(&GD_clust(g)[i], &GD_clust(g)[i + 1],
346 ((size_t)GD_n_cluster(g) - i) * sizeof(GD_clust(g)[0]));
347 --GD_n_cluster(g);
348 } else {
349 ++i;
350 }
351 }
352 }
353
354 init_mincross(g);
355
356 size_t comp;
357 for (nc = 0, comp = 0; comp < GD_comp(g).size; comp++) {
358 init_mccomp(g, comp);
359 const int64_t mc = mincross(g, 0);
360 if (mc < 0) {
361 return -1;
362 }
363 nc += mc;
364 }
365
366 merge2(g);
367
368 /* run mincross on contents of each cluster */
369 for (int c = 1; c <= GD_n_cluster(g); c++) {
370 const int64_t mc = mincross_clust(GD_clust(g)[c]);
371 if (mc < 0) {
372 return -1;
373 }
374 nc += mc;
375#ifdef DEBUG
376 check_vlists(GD_clust(g)[c]);
377 check_order();
378#endif
379 }
380
381 if (GD_n_cluster(g) > 0 && (!(s = agget(g, "remincross")) || mapbool(s))) {
382 mark_lowclusters(g);
383 ReMincross = true;
384 const int64_t mc = mincross(g, 2);
385 if (mc < 0) {
386 return -1;
387 }
388 nc = mc;
389#ifdef DEBUG
390 for (int c = 1; c <= GD_n_cluster(g); c++)
391 check_vlists(GD_clust(g)[c]);
392#endif
393 }
394 cleanup2(g, nc);
395 return 0;
396}
397
398static adjmatrix_t *new_matrix(size_t initial_rows, size_t initial_columns) {
399 adjmatrix_t *rv = gv_alloc(sizeof(adjmatrix_t));
400 const size_t bits = initial_rows * initial_columns;
401 const size_t bytes = bits / 8 + (bits % 8 == 0 ? 0 : 1);
402 uint8_t *const data = gv_alloc(bytes);
403 *rv = (adjmatrix_t){.nrows = initial_rows, .ncols = initial_columns, .data = data};
404 return rv;
405}
406
407static void free_matrix(adjmatrix_t * p)
408{
409 if (p) {
410 free(p->data);
411 free(p);
412 }
413}
414
415static void init_mccomp(graph_t *g, size_t c) {
416 int r;
417
418 GD_nlist(g) = GD_comp(g).list[c];
419 if (c > 0) {
420 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
421 GD_rank(g)[r].v = GD_rank(g)[r].v + GD_rank(g)[r].n;
422 GD_rank(g)[r].n = 0;
423 }
424 }
425}
426
427static int betweenclust(edge_t * e)
428{
429 while (ED_to_orig(e))
430 e = ED_to_orig(e);
431 return ND_clust(agtail(e)) != ND_clust(aghead(e));
432}
433
434static void do_ordering_node(graph_t *g, node_t *n, bool outflag) {
435 int i, ne;
436 node_t *u, *v;
437 edge_t *e, *f, *fe;
438 edge_t **sortlist = TE_list;
439
440 if (ND_clust(n))
441 return;
442 if (outflag) {
443 for (i = ne = 0; (e = ND_out(n).list[i]); i++)
444 if (!betweenclust(e))
445 sortlist[ne++] = e;
446 } else {
447 for (i = ne = 0; (e = ND_in(n).list[i]); i++)
448 if (!betweenclust(e))
449 sortlist[ne++] = e;
450 }
451 if (ne <= 1)
452 return;
453 // Write null terminator at end of list. Requires +1 in TE_list allocation.
454 sortlist[ne] = 0;
455 qsort(sortlist, ne, sizeof(sortlist[0]), edgeidcmpf);
456 for (ne = 1; (f = sortlist[ne]); ne++) {
457 e = sortlist[ne - 1];
458 if (outflag) {
459 u = aghead(e);
460 v = aghead(f);
461 } else {
462 u = agtail(e);
463 v = agtail(f);
464 }
465 if (find_flat_edge(u, v))
466 return;
467 fe = new_virtual_edge(u, v, NULL);
468 ED_edge_type(fe) = FLATORDER;
469 flat_edge(g, fe);
470 }
471}
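/*
 * do_ordering_node() sorts the selected non-cluster edges by creation
 * order (edgeidcmpf compares AGSEQ) and chains their endpoints on the
 * adjacent rank with FLATORDER virtual edges, so those endpoints keep
 * their input order. This is what implements ordering=out / ordering=in;
 * e.g. with "digraph { ordering=out; a -> b; a -> c; }" the flat edge
 * b -> c keeps b to the left of c in a top-to-bottom layout.
 */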
472
473static void do_ordering(graph_t *g, bool outflag) {
474 /* Order all nodes in graph */
475 node_t *n;
476
477 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
478 do_ordering_node (g, n, outflag);
479 }
480}
481
482static void do_ordering_for_nodes(graph_t * g)
483{
484 /* Order nodes which have the "ordered" attribute */
485 node_t *n;
486 const char *ordering;
487
488 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
489 if ((ordering = late_string(n, N_ordering, NULL))) {
490 if (streq(ordering, "out"))
491 do_ordering_node(g, n, true);
492 else if (streq(ordering, "in"))
493 do_ordering_node(g, n, false);
494 else if (ordering[0])
495 agerrorf("ordering '%s' not recognized for node '%s'.\n", ordering, agnameof(n));
496 }
497 }
498}
499
500/* handle case where graph specifies edge ordering
501 * If the graph does not have an ordering attribute, we then
502 * check for nodes having the attribute.
503 * Note that, in this implementation, the value of G_ordering
504 * dominates the value of N_ordering.
505 */
506static void ordered_edges(graph_t * g)
507{
508 char *ordering;
509
510 if (!G_ordering && !N_ordering)
511 return;
512 if ((ordering = late_string(g, G_ordering, NULL))) {
513 if (streq(ordering, "out"))
514 do_ordering(g, true);
515 else if (streq(ordering, "in"))
516 do_ordering(g, false);
517 else if (ordering[0])
518 agerrorf("ordering '%s' not recognized.\n", ordering);
519 }
520 else
521 {
522 graph_t *subg;
523
524 for (subg = agfstsubg(g); subg; subg = agnxtsubg(subg)) {
525 /* clusters are processed by separate calls to ordered_edges */
526 if (!is_cluster(subg))
527 ordered_edges(subg);
528 }
529 if (N_ordering) do_ordering_for_nodes (g);
530 }
531}
532
533static int64_t mincross_clust(graph_t *g) {
534 int c;
535
536 if (expand_cluster(g) != 0) {
537 return -1;
538 }
539 ordered_edges(g);
540 flat_breakcycles(g);
541 flat_reorder(g);
542 int64_t nc = mincross(g, 2);
543 if (nc < 0) {
544 return nc;
545 }
546
547 for (c = 1; c <= GD_n_cluster(g); c++) {
548 const int64_t mc = mincross_clust(GD_clust(g)[c]);
549 if (mc < 0) {
550 return mc;
551 }
552 nc += mc;
553 }
554
555 save_vlist(g);
556 return nc;
557}
558
559static bool left2right(graph_t *g, node_t *v, node_t *w) {
560 /* CLUSTER indicates orig nodes of clusters, and vnodes of skeletons */
561 if (!ReMincross) {
562 if (ND_clust(v) != ND_clust(w) && ND_clust(v) && ND_clust(w)) {
563 /* the following allows cluster skeletons to be swapped */
564 if (ND_ranktype(v) == CLUSTER && ND_node_type(v) == VIRTUAL)
565 return false;
566 if (ND_ranktype(w) == CLUSTER && ND_node_type(w) == VIRTUAL)
567 return false;
568 return true;
569 }
570 } else {
571 if (ND_clust(v) != ND_clust(w))
572 return true;
573 }
574 adjmatrix_t *const M = GD_rank(g)[ND_rank(v)].flat;
575 if (M == NULL)
576 return false;
577 if (GD_flip(g)) {
578 SWAP(&v, &w);
579 }
580 return matrix_get(M, (size_t)flatindex(v), (size_t)flatindex(w));
581}
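/*
 * left2right(g, v, w) answers: must v stay to the left of w? It returns
 * true when v and w belong to different clusters (cluster contents may not
 * interleave) or when a flat-edge constraint recorded in the rank's
 * adjacency matrix orders them; such pairs are never exchanged by
 * transpose_step() or reorder().
 */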
582
583static int64_t in_cross(node_t *v, node_t *w) {
584 edge_t **e1, **e2;
585 int inv, t;
586 int64_t cross = 0;
587
588 for (e2 = ND_in(w).list; *e2; e2++) {
589 int cnt = ED_xpenalty(*e2);
590
591 inv = ND_order(agtail(*e2));
592
593 for (e1 = ND_in(v).list; *e1; e1++) {
594 t = ND_order(agtail(*e1)) - inv;
595 if (t > 0 || (t == 0 && ED_tail_port(*e1).p.x > ED_tail_port(*e2).p.x))
596 cross += ED_xpenalty(*e1) * cnt;
597 }
598 }
599 return cross;
600}
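/*
 * Crossing rule used above: with v immediately to the left of w, an
 * in-edge (t1,v) crosses an in-edge (t2,w) exactly when t1 comes after t2
 * on the previous rank, i.e. ND_order(t1) > ND_order(t2), with ports
 * breaking ties. Each such pair contributes ED_xpenalty(e1) *
 * ED_xpenalty(e2); out_cross() below is the mirror image for the next
 * rank.
 */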
601
602static int out_cross(node_t * v, node_t * w)
603{
604 edge_t **e1, **e2;
605 int inv, cross = 0, t;
606
607 for (e2 = ND_out(w).list; *e2; e2++) {
608 int cnt = ED_xpenalty(*e2);
609 inv = ND_order(aghead(*e2));
610
611 for (e1 = ND_out(v).list; *e1; e1++) {
612 t = ND_order(aghead(*e1)) - inv;
613 if (t > 0 || (t == 0 && ED_head_port(*e1).p.x > ED_head_port(*e2).p.x))
614 cross += ED_xpenalty(*e1) * cnt;
615 }
616 }
617 return cross;
618
619}
620
621static void exchange(node_t * v, node_t * w)
622{
623 int vi, wi, r;
624
625 r = ND_rank(v);
626 vi = ND_order(v);
627 wi = ND_order(w);
628 ND_order(v) = wi;
629 GD_rank(Root)[r].v[wi] = v;
630 ND_order(w) = vi;
631 GD_rank(Root)[r].v[vi] = w;
632}
633
634static int64_t transpose_step(graph_t *g, int r, bool reverse) {
635 int i;
636 node_t *v, *w;
637
638 int64_t rv = 0;
639 GD_rank(g)[r].candidate = false;
640 for (i = 0; i < GD_rank(g)[r].n - 1; i++) {
641 v = GD_rank(g)[r].v[i];
642 w = GD_rank(g)[r].v[i + 1];
643 assert(ND_order(v) < ND_order(w));
644 if (left2right(g, v, w))
645 continue;
646 int64_t c0 = 0;
647 int64_t c1 = 0;
648 if (r > 0) {
649 c0 += in_cross(v, w);
650 c1 += in_cross(w, v);
651 }
652 if (GD_rank(g)[r + 1].n > 0) {
653 c0 += out_cross(v, w);
654 c1 += out_cross(w, v);
655 }
656 if (c1 < c0 || (c0 > 0 && reverse && c1 == c0)) {
657 exchange(v, w);
658 rv += c0 - c1;
659 GD_rank(Root)[r].valid = false;
660 GD_rank(g)[r].candidate = true;
661
662 if (r > GD_minrank(g)) {
663 GD_rank(Root)[r - 1].valid = false;
664 GD_rank(g)[r - 1].candidate = true;
665 }
666 if (r < GD_maxrank(g)) {
667 GD_rank(Root)[r + 1].valid = false;
668 GD_rank(g)[r + 1].candidate = true;
669 }
670 }
671 }
672 return rv;
673}
674
675static void transpose(graph_t * g, bool reverse)
676{
677 int r;
678
679 for (r = GD_minrank(g); r <= GD_maxrank(g); r++)
680 GD_rank(g)[r].candidate = true;
681 int64_t delta;
682 do {
683 delta = 0;
684 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
685 if (GD_rank(g)[r].candidate) {
686 delta += transpose_step(g, r, reverse);
687 }
688 }
689 } while (delta >= 1);
690}
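/*
 * transpose() is the adjacent-exchange pass: for every rank it repeatedly
 * swaps neighboring nodes v, w whenever the swap strictly reduces
 * crossings with the ranks above and below (transpose_step), and with
 * reverse set it also accepts cost-neutral swaps to break ties. Ranks are
 * revisited (candidate flags) until a full sweep yields no improvement.
 */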
691
692static int64_t mincross(graph_t *g, int startpass) {
693 const int endpass = 2;
694 int maxthispass = 0, iter, trying, pass;
695 int64_t cur_cross, best_cross;
696
697 if (startpass > 1) {
698 cur_cross = best_cross = ncross();
699 save_best(g);
700 } else
701 cur_cross = best_cross = INT64_MAX;
702 for (pass = startpass; pass <= endpass; pass++) {
703 if (pass <= 1) {
704 maxthispass = MIN(4, MaxIter);
705 if (g == dot_root(g))
706 if (build_ranks(g, pass) != 0) {
707 return -1;
708 }
709 if (pass == 0)
710 flat_breakcycles(g);
711 flat_reorder(g);
712
713 if ((cur_cross = ncross()) <= best_cross) {
714 save_best(g);
715 best_cross = cur_cross;
716 }
717 } else {
718 maxthispass = MaxIter;
719 if (cur_cross > best_cross)
720 restore_best(g);
721 cur_cross = best_cross;
722 }
723 trying = 0;
724 for (iter = 0; iter < maxthispass; iter++) {
725 if (Verbose)
726 fprintf(stderr,
727 "mincross: pass %d iter %d trying %d cur_cross %" PRId64 " best_cross %"
728 PRId64 "\n",
729 pass, iter, trying, cur_cross, best_cross);
730 if (trying++ >= MinQuit)
731 break;
732 if (cur_cross == 0)
733 break;
734 mincross_step(g, iter);
735 if ((cur_cross = ncross()) <= best_cross) {
736 save_best(g);
737 if (cur_cross < Convergence * (double)best_cross)
738 trying = 0;
739 best_cross = cur_cross;
740 }
741 }
742 if (cur_cross == 0)
743 break;
744 }
745 if (cur_cross > best_cross)
746 restore_best(g);
747 if (best_cross > 0) {
748 transpose(g, false);
749 best_cross = ncross();
750 }
751
752 return best_cross;
753}
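/*
 * Pass structure: for pass 0 and 1, build_ranks() creates a fresh initial
 * ordering (BFS from in-edge-free or out-edge-free nodes respectively);
 * pass 2 keeps the incumbent order. Each pass then runs up to maxthispass
 * iterations of mincross_step() (median sort plus transposition), keeping
 * the best ordering seen. Iteration stops early after MinQuit successive
 * iterations without sufficient improvement (the Convergence factor) or
 * when the crossing count reaches zero.
 */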
754
755static void restore_best(graph_t * g)
756{
757 node_t *n;
758 int i, r;
759
760 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
761 for (i = 0; i < GD_rank(g)[r].n; i++) {
762 n = GD_rank(g)[r].v[i];
763 ND_order(n) = saveorder(n);
764 }
765 }
766 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
767 GD_rank(Root)[r].valid = false;
768 qsort(GD_rank(g)[r].v, GD_rank(g)[r].n, sizeof(GD_rank(g)[0].v[0]),
769 nodeposcmpf);
770 }
771}
772
773static void save_best(graph_t * g)
774{
775 node_t *n;
776 int i, r;
777 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
778 for (i = 0; i < GD_rank(g)[r].n; i++) {
779 n = GD_rank(g)[r].v[i];
780 saveorder(n) = ND_order(n);
781 }
782 }
783}
784
785/* merges the connected components of g */
786static void merge_components(graph_t * g)
787{
788 node_t *u, *v;
789
790 if (GD_comp(g).size <= 1)
791 return;
792 u = NULL;
793 for (size_t c = 0; c < GD_comp(g).size; c++) {
794 v = GD_comp(g).list[c];
795 if (u)
796 ND_next(u) = v;
797 ND_prev(v) = u;
798 while (ND_next(v)) {
799 v = ND_next(v);
800 }
801 u = v;
802 }
803 GD_comp(g).size = 1;
804 GD_nlist(g) = GD_comp(g).list[0];
805 GD_minrank(g) = GlobalMinRank;
806 GD_maxrank(g) = GlobalMaxRank;
807}
808
809/* merge connected components, create globally consistent rank lists */
810static void merge2(graph_t * g)
811{
812 int i, r;
813 node_t *v;
814
815 /* merge the components and rank limits */
816 merge_components(g);
817
818 /* install complete ranks */
819 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
820 GD_rank(g)[r].n = GD_rank(g)[r].an;
821 GD_rank(g)[r].v = GD_rank(g)[r].av;
822 for (i = 0; i < GD_rank(g)[r].n; i++) {
823 v = GD_rank(g)[r].v[i];
824 if (v == NULL) {
825 if (Verbose)
826 fprintf(stderr,
827 "merge2: graph %s, rank %d has only %d < %d nodes\n",
828 agnameof(g), r, i, GD_rank(g)[r].n);
829 GD_rank(g)[r].n = i;
830 break;
831 }
832 ND_order(v) = i;
833 }
834 }
835}
836
837static void cleanup2(graph_t *g, int64_t nc) {
838 int i, j, r, c;
839 node_t *v;
840 edge_t *e;
841
842 if (TI_list) {
843 free(TI_list);
844 TI_list = NULL;
845 }
846 if (TE_list) {
847 free(TE_list);
848 TE_list = NULL;
849 }
850 /* fix vlists of clusters */
851 for (c = 1; c <= GD_n_cluster(g); c++)
852 rec_reset_vlists(GD_clust(g)[c]);
853
854 /* remove node temporary edges for ordering nodes */
855 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
856 for (i = 0; i < GD_rank(g)[r].n; i++) {
857 v = GD_rank(g)[r].v[i];
858 ND_order(v) = i;
859 if (ND_flat_out(v).list) {
860 for (j = 0; (e = ND_flat_out(v).list[j]); j++)
861 if (ED_edge_type(e) == FLATORDER) {
862 delete_flat_edge(e);
863 free(e->base.data);
864 free(e);
865 j--;
866 }
867 }
868 }
869 free_matrix(GD_rank(g)[r].flat);
870 }
871 if (Verbose)
872 fprintf(stderr, "mincross %s: %" PRId64 " crossings, %.2f secs.\n",
873 agnameof(g), nc, elapsed_sec());
874}
875
876static node_t *neighbor(node_t * v, int dir)
877{
878 node_t *rv = NULL;
879assert(v);
880 if (dir < 0) {
881 if (ND_order(v) > 0)
882 rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) - 1];
883 } else
884 rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) + 1];
885assert(rv == 0 || (ND_order(rv)-ND_order(v))*dir > 0);
886 return rv;
887}
888
889static bool is_a_normal_node_of(graph_t *g, node_t *v) {
890 return ND_node_type(v) == NORMAL && agcontains(g, v);
891}
892
893static bool is_a_vnode_of_an_edge_of(graph_t *g, node_t *v) {
894 if (ND_node_type(v) == VIRTUAL
895 && ND_in(v).size == 1 && ND_out(v).size == 1) {
896 edge_t *e = ND_out(v).list[0];
897 while (ED_edge_type(e) != NORMAL)
898 e = ED_to_orig(e);
899 if (agcontains(g, e))
900 return true;
901 }
902 return false;
903}
904
905static bool inside_cluster(graph_t *g, node_t *v) {
906 return is_a_normal_node_of(g, v) || is_a_vnode_of_an_edge_of(g, v);
907}
908
909static node_t *furthestnode(graph_t * g, node_t * v, int dir)
910{
911 node_t *rv = v;
912 for (node_t *u = v; (u = neighbor(u, dir)); ) {
913 if (is_a_normal_node_of(g, u))
914 rv = u;
915 else if (is_a_vnode_of_an_edge_of(g, u))
916 rv = u;
917 }
918 return rv;
919}
920
921void save_vlist(graph_t * g)
922{
923 int r;
924
925 if (GD_rankleader(g))
926 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
927 GD_rankleader(g)[r] = GD_rank(g)[r].v[0];
928 }
929}
930
931void rec_save_vlists(graph_t * g)
932{
933 int c;
934
935 save_vlist(g);
936 for (c = 1; c <= GD_n_cluster(g); c++)
937 rec_save_vlists(GD_clust(g)[c]);
938}
939
940
941void rec_reset_vlists(graph_t * g)
942{
943 // fix vlists of sub-clusters
944 for (int c = 1; c <= GD_n_cluster(g); c++)
945 rec_reset_vlists(GD_clust(g)[c]);
946
947 if (GD_rankleader(g))
948 for (int r = GD_minrank(g); r <= GD_maxrank(g); r++) {
949 node_t *const v = GD_rankleader(g)[r];
950 if (v == NULL) {
951 continue;
952 }
953#ifdef DEBUG
954 node_in_root_vlist(v);
955#endif
956 node_t *const u = furthestnode(g, v, -1);
957 node_t *const w = furthestnode(g, v, 1);
958 GD_rankleader(g)[r] = u;
959#ifdef DEBUG
960 assert(GD_rank(dot_root(g))[r].v[ND_order(u)] == u);
961#endif
962 GD_rank(g)[r].v = GD_rank(dot_root(g))[r].v + ND_order(u);
963 GD_rank(g)[r].n = ND_order(w) - ND_order(u) + 1;
964 }
965}
966
967/* The structures in crossing minimization and positioning require
968 * that clusters have some node on each rank. This function recursively
969 * guarantees this property. It takes into account nodes and edges in
970 * a cluster, the latter causing dummy nodes for intervening ranks.
971 * For any rank without node, we create a real node of small size. This
972 * is stored in the subgraph sg, for easy removal later.
973 *
974 * I believe it is not necessary to do this for the root graph, as these
975 * are laid out one component at a time and these will necessarily have a
976 * node on each rank from source to sink levels.
977 */
978static Agraph_t *realFillRanks(Agraph_t *g, bitarray_t *ranks, Agraph_t *sg) {
979 int i, c;
980 Agedge_t* e;
981 Agnode_t* n;
982
983 for (c = 1; c <= GD_n_cluster(g); c++)
984 sg = realFillRanks(GD_clust(g)[c], ranks, sg);
985
986 if (dot_root(g) == g)
987 return sg;
988 bitarray_clear(ranks);
989 for (n = agfstnode(g); n; n = agnxtnode(g,n)) {
990 bitarray_set(ranks, ND_rank(n), true);
991 for (e = agfstout(g,n); e; e = agnxtout(g,e)) {
992 for (i = ND_rank(n)+1; i <= ND_rank(aghead(e)); i++)
993 bitarray_set(ranks, i, true);
994 }
995 }
996 for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
997 if (!bitarray_get(*ranks, i)) {
998 if (!sg) {
999 sg = agsubg (dot_root(g), "_new_rank", 1);
1000 }
1001 n = agnode (sg, NULL, 1);
1002 agbindrec(n, "Agnodeinfo_t", sizeof(Agnodeinfo_t), true);
1003 ND_rank(n) = i;
1004 ND_lw(n) = ND_rw(n) = 0.5;
1005 ND_ht(n) = 1;
1006 ND_UF_size(n) = 1;
1007 alloc_elist(4, ND_in(n));
1008 alloc_elist(4, ND_out(n));
1009 agsubnode (g, n, 1);
1010 }
1011 }
1012 return sg;
1013}
1014
1015static void
1016fillRanks (Agraph_t* g)
1017{
1018 int rnks_sz = GD_maxrank(g) + 2;
1019 bitarray_t rnks = bitarray_new(rnks_sz);
1020 realFillRanks(g, &rnks, NULL);
1021 bitarray_reset(&rnks);
1022}
1023
1024static void init_mincross(graph_t * g)
1025{
1026 int size;
1027
1028 if (Verbose)
1029 start_timer();
1030
1031 ReMincross = false;
1032 Root = g;
1033 /* alloc +1 for the null terminator usage in do_ordering() */
1034 size = agnedges(dot_root(g)) + 1;
1035 TE_list = gv_calloc(size, sizeof(edge_t*));
1036 TI_list = gv_calloc(size, sizeof(int));
1037 mincross_options(g);
1038 if (GD_flags(g) & NEW_RANK)
1039 fillRanks (g);
1040 class2(g);
1041 decompose(g, 1);
1042 allocate_ranks(g);
1043 ordered_edges(g);
1044 GlobalMinRank = GD_minrank(g);
1045 GlobalMaxRank = GD_maxrank(g);
1046}
1047
1048static void flat_rev(Agraph_t * g, Agedge_t * e)
1049{
1050 int j;
1051 Agedge_t *rev;
1052
1053 if (!ND_flat_out(aghead(e)).list)
1054 rev = NULL;
1055 else
1056 for (j = 0; (rev = ND_flat_out(aghead(e)).list[j]); j++)
1057 if (aghead(rev) == agtail(e))
1058 break;
1059 if (rev) {
1060 merge_oneway(e, rev);
1061 if (ED_edge_type(rev) == FLATORDER && ED_to_orig(rev) == 0)
1062 ED_to_orig(rev) = e;
1063 elist_append(e, ND_other(agtail(e)));
1064 } else {
1065 rev = new_virtual_edge(aghead(e), agtail(e), e);
1066 if (ED_edge_type(e) == FLATORDER)
1067 ED_edge_type(rev) = FLATORDER;
1068 else
1069 ED_edge_type(rev) = REVERSED;
1070 ED_label(rev) = ED_label(e);
1071 flat_edge(g, rev);
1072 }
1073}
1074
1075static void flat_search(graph_t * g, node_t * v)
1076{
1077 int i;
1078 bool hascl;
1079 edge_t *e;
1080 adjmatrix_t *M = GD_rank(g)[ND_rank(v)].flat;
1081
1082 ND_mark(v) = true;
1083 ND_onstack(v) = true;
1084 hascl = GD_n_cluster(dot_root(g)) > 0;
1085 if (ND_flat_out(v).list)
1086 for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
1087 if (hascl && !(agcontains(g, agtail(e)) && agcontains(g, aghead(e))))
1088 continue;
1089 if (ED_weight(e) == 0)
1090 continue;
1091 if (ND_onstack(aghead(e))) {
1092 matrix_set(M, (size_t)flatindex(aghead(e)), (size_t)flatindex(agtail(e)));
1093 delete_flat_edge(e);
1094 i--;
1095 if (ED_edge_type(e) == FLATORDER)
1096 continue;
1097 flat_rev(g, e);
1098 } else {
1099 matrix_set(M, (size_t)flatindex(agtail(e)), (size_t)flatindex(aghead(e)));
1100 if (!ND_mark(aghead(e)))
1101 flat_search(g, aghead(e));
1102 }
1103 }
1104 ND_onstack(v) = false;
1105}
1106
1107static void flat_breakcycles(graph_t * g)
1108{
1109 int i, r;
1110 node_t *v;
1111
1112 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1113 bool flat = false;
1114 for (i = 0; i < GD_rank(g)[r].n; i++) {
1115 v = GD_rank(g)[r].v[i];
1116 ND_mark(v) = false;
1117 ND_onstack(v) = false;
1118 ND_low(v) = i;
1119 if (ND_flat_out(v).size > 0 && !flat) {
1120 GD_rank(g)[r].flat =
1121 new_matrix((size_t)GD_rank(g)[r].n, (size_t)GD_rank(g)[r].n);
1122 flat = true;
1123 }
1124 }
1125 if (flat) {
1126 for (i = 0; i < GD_rank(g)[r].n; i++) {
1127 v = GD_rank(g)[r].v[i];
1128 if (!ND_mark(v))
1129 flat_search(g, v);
1130 }
1131 }
1132 }
1133}
1134
1135/* Allocate rank structure, determining number of nodes per rank.
1136 * Note that no nodes are put into the structure yet.
1137 */
1138void allocate_ranks(graph_t * g)
1139{
1140 int r, low, high;
1141 node_t *n;
1142 edge_t *e;
1143
1144 int *cn = gv_calloc(GD_maxrank(g) + 2, sizeof(int)); // must be 0 based, not GD_minrank
1145 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
1146 cn[ND_rank(n)]++;
1147 for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
1148 low = ND_rank(agtail(e));
1149 high = ND_rank(aghead(e));
1150 if (low > high) {
1151 SWAP(&low, &high);
1152 }
1153 for (r = low + 1; r < high; r++)
1154 cn[r]++;
1155 }
1156 }
1157 GD_rank(g) = gv_calloc(GD_maxrank(g) + 2, sizeof(rank_t));
1158 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1159 GD_rank(g)[r].an = GD_rank(g)[r].n = cn[r] + 1;
1160 GD_rank(g)[r].av = GD_rank(g)[r].v = gv_calloc(cn[r] + 1, sizeof(node_t*));
1161 }
1162 free(cn);
1163}
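/*
 * Sizing example: a node contributes one slot on its own rank, and an edge
 * from rank 1 to rank 4 contributes a slot on ranks 2 and 3 for the chain
 * of virtual nodes that will later be routed through them; one extra slot
 * per rank is reserved on top of that count.
 */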
1164
1165/* install a node at the current right end of its rank */
1166int install_in_rank(graph_t *g, node_t *n) {
1167 int i, r;
1168
1169 r = ND_rank(n);
1170 i = GD_rank(g)[r].n;
1171 if (GD_rank(g)[r].an <= 0) {
1172 agerrorf("install_in_rank, line %d: %s %s rank %d i = %d an = 0\n",
1173 __LINE__, agnameof(g), agnameof(n), r, i);
1174 return -1;
1175 }
1176
1177 GD_rank(g)[r].v[i] = n;
1178 ND_order(n) = i;
1179 GD_rank(g)[r].n++;
1180 assert(GD_rank(g)[r].n <= GD_rank(g)[r].an);
1181#ifdef DEBUG
1182 {
1183 node_t *v;
1184
1185 for (v = GD_nlist(g); v; v = ND_next(v))
1186 if (v == n)
1187 break;
1188 assert(v != NULL);
1189 }
1190#endif
1191 if (ND_order(n) > GD_rank(Root)[r].an) {
1192 agerrorf("install_in_rank, line %d: ND_order(%s) [%d] > GD_rank(Root)[%d].an [%d]\n",
1193 __LINE__, agnameof(n), ND_order(n), r, GD_rank(Root)[r].an);
1194 return -1;
1195 }
1196 if (r < GD_minrank(g) || r > GD_maxrank(g)) {
1197 agerrorf("install_in_rank, line %d: rank %d not in rank range [%d,%d]\n",
1198 __LINE__, r, GD_minrank(g), GD_maxrank(g));
1199 return -1;
1200 }
1201 if (GD_rank(g)[r].v + ND_order(n) >
1202 GD_rank(g)[r].av + GD_rank(Root)[r].an) {
1203 agerrorf("install_in_rank, line %d: GD_rank(g)[%d].v + ND_order(%s) [%d] > GD_rank(g)[%d].av + GD_rank(Root)[%d].an [%d]\n",
1204 __LINE__, r, agnameof(n),ND_order(n), r, r, GD_rank(Root)[r].an);
1205 return -1;
1206 }
1207 return 0;
1208}
1209
1210/* install nodes in ranks. the initial ordering ensures that series-parallel
1211 * graphs such as trees are drawn with no crossings. it tries searching
1212 * in- and out-edges and takes the better of the two initial orderings.
1213 */
1214int build_ranks(graph_t *g, int pass) {
1215 int i, j;
1216 node_t *n, *ns;
1217 edge_t **otheredges;
1218 node_queue_t q = {0};
1219 for (n = GD_nlist(g); n; n = ND_next(n))
1220 MARK(n) = false;
1221
1222#ifdef DEBUG
1223 {
1224 edge_t *e;
1225 for (n = GD_nlist(g); n; n = ND_next(n)) {
1226 for (i = 0; (e = ND_out(n).list[i]); i++)
1227 assert(!MARK(aghead(e)));
1228 for (i = 0; (e = ND_in(n).list[i]); i++)
1229 assert(!MARK(agtail(e)));
1230 }
1231 }
1232#endif
1233
1234 for (i = GD_minrank(g); i <= GD_maxrank(g); i++)
1235 GD_rank(g)[i].n = 0;
1236
1237 const bool walkbackwards = g != agroot(g); // if this is a cluster, need to
1238 // walk GD_nlist backward to
1239 // preserve input node order
1240 if (walkbackwards) {
1241 for (ns = GD_nlist(g); ND_next(ns); ns = ND_next(ns)) {
1242 ;
1243 }
1244 } else {
1245 ns = GD_nlist(g);
1246 }
1247 for (n = ns; n; n = walkbackwards ? ND_prev(n) : ND_next(n)) {
1248 otheredges = pass == 0 ? ND_in(n).list : ND_out(n).list;
1249 if (otheredges[0] != NULL)
1250 continue;
1251 if (!MARK(n)) {
1252 MARK(n) = true;
1253 LIST_PUSH_BACK(&q, n);
1254 while (!LIST_IS_EMPTY(&q)) {
1255 node_t *n0 = LIST_POP_FRONT(&q);
1256 if (ND_ranktype(n0) != CLUSTER) {
1257 if (install_in_rank(g, n0) != 0) {
1258 LIST_FREE(&q);
1259 return -1;
1260 }
1261 enqueue_neighbors(&q, n0, pass);
1262 } else {
1263 const int rc = install_cluster(g, n0, pass, &q);
1264 if (rc != 0) {
1265 LIST_FREE(&q);
1266 return rc;
1267 }
1268 }
1269 }
1270 }
1271 }
1272 assert(LIST_IS_EMPTY(&q));
1273 for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
1274 GD_rank(Root)[i].valid = false;
1275 if (GD_flip(g) && GD_rank(g)[i].n > 0) {
1276 node_t **vlist = GD_rank(g)[i].v;
1277 int num_nodes_1 = GD_rank(g)[i].n - 1;
1278 int half_num_nodes_1 = num_nodes_1 / 2;
1279 for (j = 0; j <= half_num_nodes_1; j++)
1280 exchange(vlist[j], vlist[num_nodes_1 - j]);
1281 }
1282 }
1283
1284 if (g == dot_root(g) && ncross() > 0)
1285 transpose(g, false);
1286 LIST_FREE(&q);
1287 return 0;
1288}
1289
1290void enqueue_neighbors(node_queue_t *q, node_t *n0, int pass) {
1291 edge_t *e;
1292
1293 if (pass == 0) {
1294 for (size_t i = 0; i < ND_out(n0).size; i++) {
1295 e = ND_out(n0).list[i];
1296 if (!MARK(aghead(e))) {
1297 MARK(aghead(e)) = true;
1298 LIST_PUSH_BACK(q, aghead(e));
1299 }
1300 }
1301 } else {
1302 for (size_t i = 0; i < ND_in(n0).size; i++) {
1303 e = ND_in(n0).list[i];
1304 if (!MARK(agtail(e))) {
1305 MARK(agtail(e)) = true;
1306 LIST_PUSH_BACK(q, agtail(e));
1307 }
1308 }
1309 }
1310}
1311
1312static bool constraining_flat_edge(Agraph_t *g, Agedge_t *e) {
1313 if (ED_weight(e) == 0)
1314 return false;
1315 if (!inside_cluster(g, agtail(e)))
1316 return false;
1317 if (!inside_cluster(g, aghead(e)))
1318 return false;
1319 return true;
1320}
1321
1322typedef LIST(node_t *) nodes_t;
1323
1324/* construct nodes reachable from 'here' in post-order.
1325* This is the same as doing a topological sort in reverse order.
1326*/
1327static void postorder(graph_t *g, node_t *v, nodes_t *list, int r) {
1328 edge_t *e;
1329 int i;
1330
1331 MARK(v) = true;
1332 if (ND_flat_out(v).size > 0) {
1333 for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
1334 if (!constraining_flat_edge(g, e)) continue;
1335 if (!MARK(aghead(e)))
1336 postorder(g, aghead(e), list, r);
1337 }
1338 }
1339 assert(ND_rank(v) == r);
1340 LIST_APPEND(list, v);
1341}
1342
1343static void flat_reorder(graph_t * g)
1344{
1345 int i, r, local_in_cnt, local_out_cnt, base_order;
1346 node_t *v;
1347 nodes_t temprank = {0};
1348 edge_t *flat_e, *e;
1349
1350 if (!GD_has_flat_edges(g))
1351 return;
1352 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1353 if (GD_rank(g)[r].n == 0) continue;
1354 base_order = ND_order(GD_rank(g)[r].v[0]);
1355 for (i = 0; i < GD_rank(g)[r].n; i++)
1356 MARK(GD_rank(g)[r].v[i]) = false;
1357 LIST_CLEAR(&temprank);
1358
1359 /* construct reverse topological sort order in temprank */
1360 for (i = 0; i < GD_rank(g)[r].n; i++) {
1361 if (GD_flip(g)) v = GD_rank(g)[r].v[i];
1362 else v = GD_rank(g)[r].v[GD_rank(g)[r].n - i - 1];
1363
1364 local_in_cnt = local_out_cnt = 0;
1365 for (size_t j = 0; j < ND_flat_in(v).size; j++) {
1366 flat_e = ND_flat_in(v).list[j];
1367 if (constraining_flat_edge(g, flat_e)) local_in_cnt++;
1368 }
1369 for (size_t j = 0; j < ND_flat_out(v).size; j++) {
1370 flat_e = ND_flat_out(v).list[j];
1371 if (constraining_flat_edge(g, flat_e)) local_out_cnt++;
1372 }
1373 if (local_in_cnt == 0 && local_out_cnt == 0)
1374 LIST_APPEND(&temprank, v);
1375 else {
1376 if (!MARK(v) && local_in_cnt == 0) {
1377 postorder(g, v, &temprank, r);
1378 }
1379 }
1380 }
1381
1382 if (!LIST_IS_EMPTY(&temprank)) {
1383 if (!GD_flip(g)) {
1384 LIST_REVERSE(&temprank);
1385 }
1386 for (i = 0; i < GD_rank(g)[r].n; i++) {
1387 v = GD_rank(g)[r].v[i] = LIST_GET(&temprank, (size_t)i);
1388 ND_order(v) = i + base_order;
1389 }
1390
1391 /* nonconstraint flat edges must be made LR */
1392 for (i = 0; i < GD_rank(g)[r].n; i++) {
1393 v = GD_rank(g)[r].v[i];
1394 if (ND_flat_out(v).list) {
1395 for (size_t j = 0; (e = ND_flat_out(v).list[j]); j++) {
1396 if ((!GD_flip(g) && ND_order(aghead(e)) < ND_order(agtail(e))) ||
1397 (GD_flip(g) && ND_order(aghead(e)) > ND_order(agtail(e)))) {
1398 assert(!constraining_flat_edge(g, e));
1399 delete_flat_edge(e);
1400 j--;
1401 flat_rev(g, e);
1402 }
1403 }
1404 }
1405 }
1406 /* postprocess to restore intended order */
1407 }
1408 /* else do no harm! */
1409 GD_rank(Root)[r].valid = false;
1410 }
1411 LIST_FREE(&temprank);
1412}
1413
1414static void reorder(graph_t * g, int r, bool reverse, bool hasfixed)
1415{
1416 int changed = 0, nelt;
1417 node_t **vlist = GD_rank(g)[r].v;
1418 node_t **lp, **rp, **ep = vlist + GD_rank(g)[r].n;
1419
1420 for (nelt = GD_rank(g)[r].n - 1; nelt >= 0; nelt--) {
1421 lp = vlist;
1422 while (lp < ep) {
1423 /* find leftmost node that can be compared */
1424 while (lp < ep && ND_mval(*lp) < 0)
1425 lp++;
1426 if (lp >= ep)
1427 break;
1428 /* find the node that can be compared */
1429 bool sawclust = false;
1430 bool muststay = false;
1431 for (rp = lp + 1; rp < ep; rp++) {
1432 if (sawclust && ND_clust(*rp))
1433 continue; /* ### */
1434 if (left2right(g, *lp, *rp)) {
1435 muststay = true;
1436 break;
1437 }
1438 if (ND_mval(*rp) >= 0)
1439 break;
1440 if (ND_clust(*rp))
1441 sawclust = true; /* ### */
1442 }
1443 if (rp >= ep)
1444 break;
1445 if (!muststay) {
1446 const double p1 = ND_mval(*lp);
1447 const double p2 = ND_mval(*rp);
1448 if (p1 > p2 || (p1 >= p2 && reverse)) {
1449 exchange(*lp, *rp);
1450 changed++;
1451 }
1452 }
1453 lp = rp;
1454 }
1455 if (!hasfixed && !reverse)
1456 ep--;
1457 }
1458
1459 if (changed) {
1460 GD_rank(Root)[r].valid = false;
1461 if (r > 0)
1462 GD_rank(Root)[r - 1].valid = false;
1463 }
1464}
1465
1466static void mincross_step(graph_t * g, int pass)
1467{
1468 int r, other, first, last, dir;
1469
1470 bool reverse = pass % 4 < 2;
1471
1472 if (pass % 2 == 0) { /* down pass */
1473 first = GD_minrank(g) + 1;
1474 if (GD_minrank(g) > GD_minrank(Root))
1475 first--;
1476 last = GD_maxrank(g);
1477 dir = 1;
1478 } else { /* up pass */
1479 first = GD_maxrank(g) - 1;
1480 last = GD_minrank(g);
1481 if (GD_maxrank(g) < GD_maxrank(Root))
1482 first++;
1483 dir = -1;
1484 }
1485
1486 for (r = first; r != last + dir; r += dir) {
1487 other = r - dir;
1488 bool hasfixed = medians(g, r, other);
1489 reorder(g, r, reverse, hasfixed);
1490 }
1491 transpose(g, !reverse);
1492}
1493
1494static int local_cross(elist l, int dir)
1495{
1496 int i, j;
1497 int cross = 0;
1498 edge_t *e, *f;
1499 bool is_out = dir > 0;
1500 for (i = 0; (e = l.list[i]); i++) {
1501 if (is_out)
1502 for (j = i + 1; (f = l.list[j]); j++) {
1503 if ((ND_order(aghead(f)) - ND_order(aghead(e)))
1504 * (ED_tail_port(f).p.x - ED_tail_port(e).p.x) < 0)
1505 cross += ED_xpenalty(e) * ED_xpenalty(f);
1506 } else
1507 for (j = i + 1; (f = l.list[j]); j++) {
1508 if ((ND_order(agtail(f)) - ND_order(agtail(e)))
1509 * (ED_head_port(f).p.x - ED_head_port(e).p.x) < 0)
1510 cross += ED_xpenalty(e) * ED_xpenalty(f);
1511 }
1512 }
1513 return cross;
1514}
1515
1516static int64_t rcross(graph_t *g, int r) {
1517 int top, bot, max, i, k;
1518 node_t **rtop, *v;
1519
1520 int64_t cross = 0;
1521 max = 0;
1522 rtop = GD_rank(g)[r].v;
1523
1524 int *Count = gv_calloc(GD_rank(Root)[r + 1].n + 1, sizeof(int));
1525
1526 for (top = 0; top < GD_rank(g)[r].n; top++) {
1527 edge_t *e;
1528 if (max > 0) {
1529 for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
1530 for (k = ND_order(aghead(e)) + 1; k <= max; k++)
1531 cross += Count[k] * ED_xpenalty(e);
1532 }
1533 }
1534 for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
1535 int inv = ND_order(aghead(e));
1536 if (inv > max)
1537 max = inv;
1538 Count[inv] += ED_xpenalty(e);
1539 }
1540 }
1541 for (top = 0; top < GD_rank(g)[r].n; top++) {
1542 v = GD_rank(g)[r].v[top];
1543 if (ND_has_port(v))
1544 cross += local_cross(ND_out(v), 1);
1545 }
1546 for (bot = 0; bot < GD_rank(g)[r + 1].n; bot++) {
1547 v = GD_rank(g)[r + 1].v[bot];
1548 if (ND_has_port(v))
1549 cross += local_cross(ND_in(v), -1);
1550 }
1551 free(Count);
1552 return cross;
1553}
1554
1555static int64_t ncross(void) {
1556 int r;
1557
1558 graph_t *g = Root;
1559 int64_t count = 0;
1560 for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
1561 if (GD_rank(g)[r].valid)
1562 count += GD_rank(g)[r].cache_nc;
1563 else {
1564 const int64_t nc = GD_rank(g)[r].cache_nc = rcross(g, r);
1565 count += nc;
1566 GD_rank(g)[r].valid = true;
1567 }
1568 }
1569 return count;
1570}
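/*
 * ncross() counts crossings between every pair of adjacent ranks via
 * rcross() and caches the per-rank result in cache_nc; the routines that
 * change a rank's order (transpose_step, reorder, restore_best,
 * build_ranks) clear its valid flag, so only ranks that actually changed
 * are recounted on the next call.
 */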
1571
1572static int ordercmpf(const void *x, const void *y) {
1573 const int *i0 = x;
1574 const int *i1 = y;
1575 if (*i0 < *i1) {
1576 return -1;
1577 }
1578 if (*i0 > *i1) {
1579 return 1;
1580 }
1581 return 0;
1582}
1583
1584/* Calculate a mval for nodes with no in or out non-flat edges.
1585 * Assume (ND_out(n).size == 0) && (ND_in(n).size == 0)
1586 * Find flat edge a->n where a has the largest order and set
1587 * n.mval = a.mval+1, assuming a.mval is defined (>=0).
1588 * If there are no flat in edges, find flat edge n->a where a
1589 * has the smallest order and set n.mval = a.mval-1, assuming
1590 * a.mval is > 0.
1591 * Return true if n.mval is left -1, indicating a fixed node for sorting.
1592 */
1593static bool flat_mval(node_t * n)
1594{
1595 int i;
1596 edge_t *e, **fl;
1597 node_t *nn;
1598
1599 if (ND_flat_in(n).size > 0) {
1600 fl = ND_flat_in(n).list;
1601 nn = agtail(fl[0]);
1602 for (i = 1; (e = fl[i]); i++)
1603 if (ND_order(agtail(e)) > ND_order(nn))
1604 nn = agtail(e);
1605 if (ND_mval(nn) >= 0) {
1606 ND_mval(n) = ND_mval(nn) + 1;
1607 return false;
1608 }
1609 } else if (ND_flat_out(n).size > 0) {
1610 fl = ND_flat_out(n).list;
1611 nn = aghead(fl[0]);
1612 for (i = 1; (e = fl[i]); i++)
1613 if (ND_order(aghead(e)) < ND_order(nn))
1614 nn = aghead(e);
1615 if (ND_mval(nn) > 0) {
1616 ND_mval(n) = ND_mval(nn) - 1;
1617 return false;
1618 }
1619 }
1620 return true;
1621}
1622
1623#define VAL(node,port) (MC_SCALE * ND_order(node) + (port).order)
1624
1625static bool medians(graph_t * g, int r0, int r1)
1626{
1627 int i, j0, lspan, rspan, *list;
1628 node_t *n, **v;
1629 edge_t *e;
1630 bool hasfixed = false;
1631
1632 list = TI_list;
1633 v = GD_rank(g)[r0].v;
1634 for (i = 0; i < GD_rank(g)[r0].n; i++) {
1635 n = v[i];
1636 size_t j = 0;
1637 if (r1 > r0)
1638 for (j0 = 0; (e = ND_out(n).list[j0]); j0++) {
1639 if (ED_xpenalty(e) > 0)
1640 list[j++] = VAL(aghead(e), ED_head_port(e));
1641 } else
1642 for (j0 = 0; (e = ND_in(n).list[j0]); j0++) {
1643 if (ED_xpenalty(e) > 0)
1644 list[j++] = VAL(agtail(e), ED_tail_port(e));
1645 }
1646 switch (j) {
1647 case 0:
1648 ND_mval(n) = -1;
1649 break;
1650 case 1:
1651 ND_mval(n) = list[0];
1652 break;
1653 case 2:
1654 ND_mval(n) = (list[0] + list[1]) / 2;
1655 break;
1656 default:
1657 qsort(list, j, sizeof(int), ordercmpf);
1658 if (j % 2)
1659 ND_mval(n) = list[j / 2];
1660 else {
1661 /* weighted median */
1662 size_t rm = j / 2;
1663 size_t lm = rm - 1;
1664 rspan = list[j - 1] - list[rm];
1665 lspan = list[lm] - list[0];
1666 if (lspan == rspan)
1667 ND_mval(n) = (list[lm] + list[rm]) / 2;
1668 else {
1669 double w = list[lm] * (double)rspan + list[rm] * (double)lspan;
1670 ND_mval(n) = w / (lspan + rspan);
1671 }
1672 }
1673 }
1674 }
1675 for (i = 0; i < GD_rank(g)[r0].n; i++) {
1676 n = v[i];
1677 if (ND_out(n).size == 0 && ND_in(n).size == 0)
1678 hasfixed |= flat_mval(n);
1679 }
1680 return hasfixed;
1681}
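/*
 * Weighted median example for the even case above: with sorted adjacent
 * positions {0, 5, 6, 7}, lm = 1, rm = 2, lspan = 5 - 0 = 5,
 * rspan = 7 - 6 = 1, so mval = (5*1 + 6*5) / (5 + 1) = 35/6, i.e. the
 * median is pulled toward the more tightly packed right pair.
 */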
1682
1683static int nodeposcmpf(const void *x, const void *y) {
1684 node_t *const *const n0 = x;
1685 node_t *const *const n1 = y;
1686 if (ND_order(*n0) < ND_order(*n1)) {
1687 return -1;
1688 }
1689 if (ND_order(*n0) > ND_order(*n1)) {
1690 return 1;
1691 }
1692 return 0;
1693}
1694
1695static int edgeidcmpf(const void *x, const void *y) {
1696 edge_t *const *const e0 = x;
1697 edge_t *const *const e1 = y;
1698 if (AGSEQ(*e0) < AGSEQ(*e1)) {
1699 return -1;
1700 }
1701 if (AGSEQ(*e0) > AGSEQ(*e1)) {
1702 return 1;
1703 }
1704 return 0;
1705}
1706
1707/* following code deals with weights of edges of "virtual" nodes */
1708#define ORDINARY 0
1709#define SINGLETON 1
1710#define VIRTUALNODE 2
1711#define NTYPES 3
1712
1713#define C_EE 1
1714#define C_VS 2
1715#define C_SS 2
1716#define C_VV 4
1717
1718static const int table[NTYPES][NTYPES] = {
1719 /* ordinary */ {C_EE, C_EE, C_EE},
1720 /* singleton */ {C_EE, C_SS, C_VS},
1721 /* virtual */ {C_EE, C_VS, C_VV}
1722};
1723
1724static int endpoint_class(node_t * n)
1725{
1726 if (ND_node_type(n) == VIRTUAL)
1727 return VIRTUALNODE;
1728 if (ND_weight_class(n) <= 1)
1729 return SINGLETON;
1730 return ORDINARY;
1731}
1732
1733void virtual_weight(edge_t * e)
1734{
1735 int t;
1736 t = table[endpoint_class(agtail(e))][endpoint_class(aghead(e))];
1737
1738 /* check whether the upcoming computation will overflow */
1739 assert(t >= 0);
1740 if (INT_MAX / t < ED_weight(e)) {
1741 agerrorf("overflow when calculating virtual weight of edge\n");
1742 graphviz_exit(EXIT_FAILURE);
1743 }
1744
1745 ED_weight(e) *= t;
1746}
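/*
 * Effect of the table above: an edge whose endpoints are both virtual
 * nodes (the interior of a long, rank-spanning edge) has its weight
 * multiplied by C_VV (4), mixed virtual/singleton pairs by 2, and edges
 * touching an ordinary node by 1, so later positioning tends to keep
 * chains of virtual nodes straight.
 */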
1747
1748#ifdef DEBUG
1749void check_order(void)
1750{
1751 int i, r;
1752 node_t *v;
1753 graph_t *g = Root;
1754
1755 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1756 assert(GD_rank(g)[r].v[GD_rank(g)[r].n] == NULL);
1757 for (i = 0; (v = GD_rank(g)[r].v[i]); i++) {
1758 assert(ND_rank(v) == r);
1759 assert(ND_order(v) == i);
1760 }
1761 }
1762}
1763#endif
1764
1765static void mincross_options(graph_t * g)
1766{
1767 char *p;
1768 double f;
1769
1770 /* set default values */
1771 MinQuit = 8;
1772 MaxIter = 24;
1773
1774 p = agget(g, "mclimit");
1775 if (p && (f = atof(p)) > 0.0) {
1776 MinQuit = MAX(1, scale_clamp(MinQuit, f));
1777 MaxIter = MAX(1, scale_clamp(MaxIter, f));
1778 }
1779}
1780
1781#ifdef DEBUG
1782void check_vlists(graph_t * g)
1783{
1784 int c, i, j, r;
1785 node_t *u;
1786
1787 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1788 for (i = 0; i < GD_rank(g)[r].n; i++) {
1789 u = GD_rank(g)[r].v[i];
1790 j = ND_order(u);
1791 assert(GD_rank(Root)[r].v[j] == u);
1792 }
1793 if (GD_rankleader(g)) {
1794 u = GD_rankleader(g)[r];
1795 j = ND_order(u);
1796 assert(GD_rank(Root)[r].v[j] == u);
1797 }
1798 }
1799 for (c = 1; c <= GD_n_cluster(g); c++)
1800 check_vlists(GD_clust(g)[c]);
1801}
1802
1803void node_in_root_vlist(node_t * n)
1804{
1805 node_t **vptr;
1806
1807 for (vptr = GD_rank(Root)[ND_rank(n)].v; *vptr; vptr++)
1808 if (*vptr == n)
1809 break;
1810 if (*vptr == 0)
1811 abort();
1812}
1813#endif /* DEBUG code */