Graphviz 13.1.2~dev.20250807.2324
mincross.c
1/*************************************************************************
2 * Copyright (c) 2011 AT&T Intellectual Property
3 * All rights reserved. This program and the accompanying materials
4 * are made available under the terms of the Eclipse Public License v1.0
5 * which accompanies this distribution, and is available at
6 * https://www.eclipse.org/legal/epl-v10.html
7 *
8 * Contributors: Details at https://graphviz.org
9 *************************************************************************/
10
11
12/*
13 * dot_mincross(g) takes a ranked graph and finds an ordering
14 * that avoids edge crossings. Clusters are expanded.
15 * N.B. the rank structure is global (not allocated per cluster)
16 * because mincross may compare nodes in different clusters.
17 */
18
19#include <assert.h>
20#include <cgraph/cgraph.h>
21#include <dotgen/dot.h>
22#include <inttypes.h>
23#include <limits.h>
24#include <stdbool.h>
25#include <stdint.h>
26#include <stdlib.h>
27#include <string.h>
28#include <util/alloc.h>
29#include <util/bitarray.h>
30#include <util/exit.h>
31#include <util/gv_math.h>
32#include <util/itos.h>
33#include <util/list.h>
34#include <util/streq.h>
35
36struct adjmatrix_t {
37 size_t nrows;
38 size_t ncols;
39 uint8_t *data; ///< bit-packed backing memory
40 size_t allocated; ///< how many bytes have been allocated backing data?
41};
42
49static bool matrix_get(adjmatrix_t *me, size_t row, size_t col) {
50 assert(me != NULL);
51
52 // if this index is beyond anything set, infer it as unset
53 const size_t index = row * me->ncols + col;
54 const size_t byte_index = index / 8;
55 const size_t bit_index = index % 8;
56 if (byte_index >= me->allocated) {
57 return false;
58 }
59
60 return (me->data[byte_index] >> bit_index) & 1;
61}
62
68static void matrix_set(adjmatrix_t *me, size_t row, size_t col) {
69 assert(me != NULL);
70
71 // if we are updating beyond allocated space, expand the backing store
72 const size_t index = row * me->ncols + col;
73 const size_t byte_index = index / 8;
74 const size_t bit_index = index % 8;
75 if (byte_index >= me->allocated) {
76 me->data = gv_recalloc(me->data, me->allocated, byte_index + 1,
77 sizeof(me->data[0]));
78 me->allocated = byte_index + 1;
79 }
80
81 me->data[byte_index] |= (uint8_t)(UINT8_C(1) << bit_index);
82}
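/* A note on the bit matrix above: cells are stored row-major, one bit per
 * cell. With ncols == 10, for example, cell (row 2, col 3) maps to bit index
 * 2*10 + 3 = 23, i.e. bit 7 of byte 2 of data. matrix_set grows the backing
 * store lazily, so matrix_get can simply treat any cell that lies beyond the
 * allocated bytes as unset.
 */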
83
84/* #define DEBUG */
85#define MARK(v) (ND_mark(v))
86#define saveorder(v) (ND_coord(v)).x
87#define flatindex(v) ((size_t)ND_low(v))
88
89 /* forward declarations */
90static bool medians(graph_t * g, int r0, int r1);
91static int nodeposcmpf(const void *, const void *);
92static int edgeidcmpf(const void *, const void *);
93static void flat_breakcycles(graph_t * g);
94static void flat_reorder(graph_t * g);
95static void flat_search(graph_t * g, node_t * v);
96static void init_mincross(graph_t * g);
97static void merge2(graph_t * g);
98static void init_mccomp(graph_t *g, size_t c);
99static void cleanup2(graph_t *g, int64_t nc);
101static int64_t mincross_clust(graph_t *g);
103static int64_t mincross(graph_t *g, int startpass);
104static void mincross_step(graph_t * g, int pass);
105static void mincross_options(graph_t * g);
106static void save_best(graph_t * g);
107static void restore_best(graph_t * g);
108static adjmatrix_t *new_matrix(size_t i, size_t j);
109static void free_matrix(adjmatrix_t * p);
110static int ordercmpf(const void *, const void *);
111static int64_t ncross(void);
112#ifdef DEBUG
113void check_rs(graph_t * g, int null_ok);
114void check_order(void);
115void check_vlists(graph_t * g);
116void node_in_root_vlist(node_t * n);
117#endif
118
119
120 /* mincross parameters */
121static int MinQuit;
122static const double Convergence = .995;
123
124static graph_t *Root;
125static int GlobalMinRank, GlobalMaxRank;
126static edge_t **TE_list;
127static int *TI_list;
128static bool ReMincross;
129
130#if defined(DEBUG) && DEBUG > 1
131static void indent(graph_t* g)
132{
133 if (g->parent) {
134 fprintf (stderr, " ");
135 indent(g->parent);
136 }
137}
138
140static void nname(node_t *v, FILE *stream) {
141 if (ND_node_type(v)) {
142 if (ND_ranktype(v) == CLUSTER)
143 fprintf(stream, "v%s_%p", agnameof(ND_clust(v)), v);
144 else
145 fprintf(stream, "v_%p", v);
146 } else
147 fputs(agnameof(v), stream);
148}
149static void dumpg (graph_t* g)
150{
151 int j, i, r;
152 node_t* v;
153 edge_t* e;
154
155 fprintf (stderr, "digraph A {\n");
156 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
157 fprintf (stderr, " subgraph {rank=same ");
158 for (i = 0; i < GD_rank(g)[r].n; i++) {
159 v = GD_rank(g)[r].v[i];
160 if (i > 0)
161 fputs(" -> ", stderr);
162 nname(v, stderr);
163 }
164 if (i > 1) fprintf (stderr, " [style=invis]}\n");
165 else fprintf (stderr, " }\n");
166 }
167 for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
168 for (i = 0; i < GD_rank(g)[r].n; i++) {
169 v = GD_rank(g)[r].v[i];
170 for (j = 0; (e = ND_out(v).list[j]); j++) {
171 nname(v, stderr);
172 fputs(" -> ", stderr);
173 nname(aghead(e), stderr);
174 fputc('\n', stderr);
175 }
176 }
177 }
178 fprintf (stderr, "}\n");
179}
180static void dumpr (graph_t* g, int edges)
181{
182 int j, i, r;
183 node_t* v;
184 edge_t* e;
185
186 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
187 fprintf (stderr, "[%d] ", r);
188 for (i = 0; i < GD_rank(g)[r].n; i++) {
189 v = GD_rank(g)[r].v[i];
190 nname(v, stderr);
191 fprintf(stderr, "(%.02f,%d) ", saveorder(v),ND_order(v));
192 }
193 fprintf (stderr, "\n");
194 }
195 if (edges == 0) return;
196 for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
197 for (i = 0; i < GD_rank(g)[r].n; i++) {
198 v = GD_rank(g)[r].v[i];
199 for (j = 0; (e = ND_out(v).list[j]); j++) {
200 nname(v, stderr);
201 fputs(" -> ", stderr);
202 nname(aghead(e), stderr);
203 fputc('\n', stderr);
204 }
205 }
206 }
207}
208#endif
209
210typedef struct {
211 Agrec_t h;
212 int x, lo, hi;
213 Agnode_t* np;
214} info_t;
215
216#define ND_x(n) (((info_t*)AGDATA(n))->x)
217#define ND_lo(n) (((info_t*)AGDATA(n))->lo)
218#define ND_hi(n) (((info_t*)AGDATA(n))->hi)
219#define ND_np(n) (((info_t*)AGDATA(n))->np)
220#define ND_idx(n) (ND_order(ND_np(n)))
221
222static void
223emptyComp (graph_t* sg)
224{
225 Agnode_t* n;
226 Agnode_t* nxt;
227
228 for (n = agfstnode(sg); n; n = nxt) {
229 nxt = agnxtnode (sg, n);
230 agdelnode(sg,n);
231 }
232}
233
234#define isBackedge(e) (ND_idx(aghead(e)) > ND_idx(agtail(e)))
235
236static Agnode_t*
237findSource (Agraph_t* g, Agraph_t* sg)
238{
239 Agnode_t* n;
240
241 for (n = agfstnode(sg); n; n = agnxtnode(sg, n))
242 if (agdegree(g,n,1,0) == 0) return n;
243 return NULL;
244}
245
246static int
247topsort (Agraph_t* g, Agraph_t* sg, Agnode_t** arr)
248{
249 Agnode_t* n;
250 Agedge_t* e;
251 Agedge_t* nxte;
252 int cnt = 0;
253
254 while ((n = findSource(g, sg))) {
255 arr[cnt++] = ND_np(n);
256 agdelnode(sg, n);
257 for (e = agfstout(g, n); e; e = nxte) {
258 nxte = agnxtout(g, e);
259 agdeledge(g, e);
260 }
261 }
262 return cnt;
263}
264
265static int
266getComp (graph_t* g, node_t* n, graph_t* comp, int* indices)
267{
268 int backedge = 0;
269 Agedge_t* e;
270
271 ND_x(n) = 1;
272 indices[agnnodes(comp)] = ND_idx(n);
273 agsubnode(comp, n, 1);
274 for (e = agfstout(g,n); e; e = agnxtout(g,e)) {
275 if (isBackedge(e)) backedge++;
276 if (!ND_x(aghead(e)))
277 backedge += getComp(g, aghead(e), comp, indices);
278 }
279 for (e = agfstin(g,n); e; e = agnxtin(g,e)) {
280 if (isBackedge(e)) backedge++;
281 if (!ND_x(agtail(e)))
282 backedge += getComp(g, agtail(e), comp, indices);
283 }
284 return backedge;
285}
286
288static void
289fixLabelOrder (graph_t* g, rank_t* rk)
290{
291 int cnt;
292 bool haveBackedge = false;
293 Agraph_t* sg;
294 Agnode_t* n;
295 Agnode_t* nxtp;
296 Agnode_t* v;
297
298 for (n = agfstnode(g); n; n = nxtp) {
299 v = nxtp = agnxtnode(g, n);
300 for (; v; v = agnxtnode(g, v)) {
301 if (ND_hi(v) <= ND_lo(n)) {
302 haveBackedge = true;
303 agedge(g, v, n, NULL, 1);
304 }
305 else if (ND_hi(n) <= ND_lo(v)) {
306 agedge(g, n, v, NULL, 1);
307 }
308 }
309 }
310 if (!haveBackedge) return;
311
312 sg = agsubg(g, "comp", 1);
313 Agnode_t **arr = gv_calloc(agnnodes(g), sizeof(Agnode_t*));
314 int *indices = gv_calloc(agnnodes(g), sizeof(int));
315
316 for (n = agfstnode(g); n; n = agnxtnode(g,n)) {
317 if (ND_x(n) || agdegree(g,n,1,1) == 0) continue;
318 if (getComp(g, n, sg, indices)) {
319 int i, sz = agnnodes(sg);
320 cnt = topsort (g, sg, arr);
321 assert (cnt == sz);
322 qsort(indices, cnt, sizeof(int), ordercmpf);
323 for (i = 0; i < sz; i++) {
324 ND_order(arr[i]) = indices[i];
325 rk->v[indices[i]] = arr[i];
326 }
327 }
328 emptyComp(sg);
329 }
330 free(indices);
331 free (arr);
332}
333
334/* Check that the ordering of labels for flat edges is consistent.
335 * This is necessary because dot_position will attempt to force the label
336 * to be between the edge's vertices. This can lead to an infeasible problem.
337 *
338 * We check each rank for any flat edge labels (as dummy nodes) and create a
339 * graph with a node for each label. If the graph contains more than 1 node, we
340 * call fixLabelOrder to see if there really is a problem and, if so, fix it.
341 */
342void
343checkLabelOrder (graph_t* g)
344{
345 int j, r, lo, hi;
346 graph_t* lg = NULL;
347 rank_t* rk;
348 Agnode_t* u;
349 Agnode_t* n;
350 Agedge_t* e;
351
352 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
353 rk = GD_rank(g)+r;
354 for (j = 0; j < rk->n; j++) {
355 u = rk->v[j];
356 if ((e = ND_alg(u))) {
357 if (!lg) lg = agopen ("lg", Agstrictdirected, 0);
358 n = agnode(lg, ITOS(j), 1);
359 agbindrec(n, "info", sizeof(info_t), true);
360 lo = ND_order(aghead(ND_out(u).list[0]));
361 hi = ND_order(aghead(ND_out(u).list[1]));
362 if (lo > hi) {
363 SWAP(&lo, &hi);
364 }
365 ND_lo(n) = lo;
366 ND_hi(n) = hi;
367 ND_np(n) = u;
368 }
369 }
370 if (lg) {
371 if (agnnodes(lg) > 1) fixLabelOrder (lg, rk);
372 agclose(lg);
373 lg = NULL;
374 }
375 }
376}
377
378/* Minimize edge crossings
379 * Note that nodes are not placed into GD_rank(g) until mincross()
380 * is called.
381 */
382int dot_mincross(graph_t *g) {
383 int64_t nc;
384 char *s;
385
386 /* check whether malformed input has led to empty cluster that the crossing
387 * functions will not anticipate
388 */
389 {
390 size_t i;
391 for (i = 1; i <= (size_t)GD_n_cluster(g); ) {
392 if (agfstnode(GD_clust(g)[i]) == NULL) {
393 agwarningf("removing empty cluster\n");
394 memmove(&GD_clust(g)[i], &GD_clust(g)[i + 1],
395 ((size_t)GD_n_cluster(g) - i) * sizeof(GD_clust(g)[0]));
396 --GD_n_cluster(g);
397 } else {
398 ++i;
399 }
400 }
401 }
402
403 init_mincross(g);
404
405 size_t comp;
406 for (nc = 0, comp = 0; comp < GD_comp(g).size; comp++) {
407 init_mccomp(g, comp);
408 const int64_t mc = mincross(g, 0);
409 if (mc < 0) {
410 return -1;
411 }
412 nc += mc;
413 }
414
415 merge2(g);
416
417 /* run mincross on contents of each cluster */
418 for (int c = 1; c <= GD_n_cluster(g); c++) {
419 const int64_t mc = mincross_clust(GD_clust(g)[c]);
420 if (mc < 0) {
421 return -1;
422 }
423 nc += mc;
424#ifdef DEBUG
425 check_vlists(GD_clust(g)[c]);
426 check_order();
427#endif
428 }
429
430 if (GD_n_cluster(g) > 0 && (!(s = agget(g, "remincross")) || mapbool(s))) {
431 mark_lowclusters(g);
432 ReMincross = true;
433 const int64_t mc = mincross(g, 2);
434 if (mc < 0) {
435 return -1;
436 }
437 nc = mc;
438#ifdef DEBUG
439 for (int c = 1; c <= GD_n_cluster(g); c++)
440 check_vlists(GD_clust(g)[c]);
441#endif
442 }
443 cleanup2(g, nc);
444 return 0;
445}
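/* Overall flow of dot_mincross, as implemented above: prune empty clusters,
 * set up the global rank structure (init_mincross), run mincross once per
 * connected component, splice the components back into a single node list
 * with complete ranks (merge2), recursively reorder the contents of each
 * cluster (mincross_clust), run a final whole-graph pass when clusters are
 * present and remincross is not disabled, and release temporary state
 * (cleanup2).
 */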
446
447static adjmatrix_t *new_matrix(size_t i, size_t j) {
448 adjmatrix_t *rv = gv_alloc(sizeof(adjmatrix_t));
449 rv->nrows = i;
450 rv->ncols = j;
451 return rv;
452}
453
454static void free_matrix(adjmatrix_t * p)
455{
456 if (p) {
457 free(p->data);
458 free(p);
459 }
460}
461
462static void init_mccomp(graph_t *g, size_t c) {
463 int r;
464
465 GD_nlist(g) = GD_comp(g).list[c];
466 if (c > 0) {
467 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
468 GD_rank(g)[r].v = GD_rank(g)[r].v + GD_rank(g)[r].n;
469 GD_rank(g)[r].n = 0;
470 }
471 }
472}
473
474static int betweenclust(edge_t * e)
475{
476 while (ED_to_orig(e))
477 e = ED_to_orig(e);
478 return (ND_clust(agtail(e)) != ND_clust(aghead(e)));
479}
480
481static void do_ordering_node(graph_t *g, node_t *n, bool outflag) {
482 int i, ne;
483 node_t *u, *v;
484 edge_t *e, *f, *fe;
485 edge_t **sortlist = TE_list;
486
487 if (ND_clust(n))
488 return;
489 if (outflag) {
490 for (i = ne = 0; (e = ND_out(n).list[i]); i++)
491 if (!betweenclust(e))
492 sortlist[ne++] = e;
493 } else {
494 for (i = ne = 0; (e = ND_in(n).list[i]); i++)
495 if (!betweenclust(e))
496 sortlist[ne++] = e;
497 }
498 if (ne <= 1)
499 return;
500 /* write null terminator at end of list.
501 requires +1 in TE_list allocation */
502 sortlist[ne] = 0;
503 qsort(sortlist, ne, sizeof(sortlist[0]), edgeidcmpf);
504 for (ne = 1; (f = sortlist[ne]); ne++) {
505 e = sortlist[ne - 1];
506 if (outflag) {
507 u = aghead(e);
508 v = aghead(f);
509 } else {
510 u = agtail(e);
511 v = agtail(f);
512 }
513 if (find_flat_edge(u, v))
514 return;
515 fe = new_virtual_edge(u, v, NULL);
516 ED_edge_type(fe) = FLATORDER;
517 flat_edge(g, fe);
518 }
519}
520
521static void do_ordering(graph_t *g, bool outflag) {
522 /* Order all nodes in graph */
523 node_t *n;
524
525 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
526 do_ordering_node (g, n, outflag);
527 }
528}
529
530static void do_ordering_for_nodes(graph_t * g)
531{
532 /* Order nodes which have the "ordered" attribute */
533 node_t *n;
534 const char *ordering;
535
536 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
537 if ((ordering = late_string(n, N_ordering, NULL))) {
538 if (streq(ordering, "out"))
539 do_ordering_node(g, n, true);
540 else if (streq(ordering, "in"))
541 do_ordering_node(g, n, false);
542 else if (ordering[0])
543 agerrorf("ordering '%s' not recognized for node '%s'.\n", ordering, agnameof(n));
544 }
545 }
546}
547
548/* handle case where graph specifies edge ordering
549 * If the graph does not have an ordering attribute, we then
550 * check for nodes having the attribute.
551 * Note that, in this implementation, the value of G_ordering
552 * dominates the value of N_ordering.
553 */
554static void ordered_edges(graph_t * g)
555{
556 char *ordering;
557
558 if (!G_ordering && !N_ordering)
559 return;
560 if ((ordering = late_string(g, G_ordering, NULL))) {
561 if (streq(ordering, "out"))
562 do_ordering(g, true);
563 else if (streq(ordering, "in"))
564 do_ordering(g, false);
565 else if (ordering[0])
566 agerrorf("ordering '%s' not recognized.\n", ordering);
567 }
568 else
569 {
570 graph_t *subg;
571
572 for (subg = agfstsubg(g); subg; subg = agnxtsubg(subg)) {
573 /* clusters are processed by separate calls to ordered_edges */
574 if (!is_cluster(subg))
575 ordered_edges(subg);
576 }
577 if (N_ordering) do_ordering_for_nodes (g);
578 }
579}
580
581static int64_t mincross_clust(graph_t *g) {
582 int c;
583
584 if (expand_cluster(g) != 0) {
585 return -1;
586 }
587 ordered_edges(g);
588 flat_breakcycles(g);
589 flat_reorder(g);
590 int64_t nc = mincross(g, 2);
591 if (nc < 0) {
592 return nc;
593 }
594
595 for (c = 1; c <= GD_n_cluster(g); c++) {
596 const int64_t mc = mincross_clust(GD_clust(g)[c]);
597 if (mc < 0) {
598 return mc;
599 }
600 nc += mc;
601 }
602
603 save_vlist(g);
604 return nc;
605}
606
607static bool left2right(graph_t *g, node_t *v, node_t *w) {
608 /* CLUSTER indicates orig nodes of clusters, and vnodes of skeletons */
609 if (!ReMincross) {
610 if (ND_clust(v) != ND_clust(w) && ND_clust(v) && ND_clust(w)) {
611 /* the following allows cluster skeletons to be swapped */
612 if (ND_ranktype(v) == CLUSTER && ND_node_type(v) == VIRTUAL)
613 return false;
614 if (ND_ranktype(w) == CLUSTER && ND_node_type(w) == VIRTUAL)
615 return false;
616 return true;
617 }
618 } else {
619 if (ND_clust(v) != ND_clust(w))
620 return true;
621 }
622 adjmatrix_t *const M = GD_rank(g)[ND_rank(v)].flat;
623 if (M == NULL)
624 return false;
625 if (GD_flip(g)) {
626 SWAP(&v, &w);
627 }
628 return matrix_get(M, (size_t)flatindex(v), (size_t)flatindex(w));
629}
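/* left2right reports whether v is constrained to stay to the left of w:
 * either the two nodes belong to different clusters (whose nodes may not be
 * interleaved), or the rank's flat-edge adjacency matrix records an ordering
 * constraint between them. transpose_step and reorder consult it before
 * attempting an exchange.
 */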
630
631static int64_t in_cross(node_t *v, node_t *w) {
632 edge_t **e1, **e2;
633 int inv, t;
634 int64_t cross = 0;
635
636 for (e2 = ND_in(w).list; *e2; e2++) {
637 int cnt = ED_xpenalty(*e2);
638
639 inv = ND_order(agtail(*e2));
640
641 for (e1 = ND_in(v).list; *e1; e1++) {
642 t = ND_order(agtail(*e1)) - inv;
643 if (t > 0 || (t == 0 && ED_tail_port(*e1).p.x > ED_tail_port(*e2).p.x))
644 cross += ED_xpenalty(*e1) * cnt;
645 }
646 }
647 return cross;
648}
649
650static int out_cross(node_t * v, node_t * w)
651{
652 edge_t **e1, **e2;
653 int inv, cross = 0, t;
654
655 for (e2 = ND_out(w).list; *e2; e2++) {
656 int cnt = ED_xpenalty(*e2);
657 inv = ND_order(aghead(*e2));
658
659 for (e1 = ND_out(v).list; *e1; e1++) {
660 t = ND_order(aghead(*e1)) - inv;
661 if (t > 0 || (t == 0 && (ED_head_port(*e1)).p.x > (ED_head_port(*e2)).p.x))
662 cross += ED_xpenalty(*e1) * cnt;
663 }
664 }
665 return cross;
666
667}
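/* in_cross and out_cross count, assuming v is placed immediately to the left
 * of w on the same rank, the crossings among their in-edges (resp. out-edges),
 * weighted by ED_xpenalty. Two edges cross when v's edge attaches to a
 * neighbor ordered after (or at the same node but a port to the right of) the
 * neighbor of w's edge. For example, with unit penalties, if v's in-edges come
 * from positions {1,4} and w's from {2,3}, the pairs (4,2) and (4,3) cross,
 * giving 2.
 */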
668
669static void exchange(node_t * v, node_t * w)
670{
671 int vi, wi, r;
672
673 r = ND_rank(v);
674 vi = ND_order(v);
675 wi = ND_order(w);
676 ND_order(v) = wi;
677 GD_rank(Root)[r].v[wi] = v;
678 ND_order(w) = vi;
679 GD_rank(Root)[r].v[vi] = w;
680}
681
682static int64_t transpose_step(graph_t *g, int r, bool reverse) {
683 int i;
684 node_t *v, *w;
685
686 int64_t rv = 0;
687 GD_rank(g)[r].candidate = false;
688 for (i = 0; i < GD_rank(g)[r].n - 1; i++) {
689 v = GD_rank(g)[r].v[i];
690 w = GD_rank(g)[r].v[i + 1];
691 assert(ND_order(v) < ND_order(w));
692 if (left2right(g, v, w))
693 continue;
694 int64_t c0 = 0;
695 int64_t c1 = 0;
696 if (r > 0) {
697 c0 += in_cross(v, w);
698 c1 += in_cross(w, v);
699 }
700 if (GD_rank(g)[r + 1].n > 0) {
701 c0 += out_cross(v, w);
702 c1 += out_cross(w, v);
703 }
704 if (c1 < c0 || (c0 > 0 && reverse && c1 == c0)) {
705 exchange(v, w);
706 rv += c0 - c1;
707 GD_rank(Root)[r].valid = false;
708 GD_rank(g)[r].candidate = true;
709
710 if (r > GD_minrank(g)) {
711 GD_rank(Root)[r - 1].valid = false;
712 GD_rank(g)[r - 1].candidate = true;
713 }
714 if (r < GD_maxrank(g)) {
715 GD_rank(Root)[r + 1].valid = false;
716 GD_rank(g)[r + 1].candidate = true;
717 }
718 }
719 }
720 return rv;
721}
722
723static void transpose(graph_t * g, bool reverse)
724{
725 int r;
726
727 for (r = GD_minrank(g); r <= GD_maxrank(g); r++)
728 GD_rank(g)[r].candidate = true;
729 int64_t delta;
730 do {
731 delta = 0;
732 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
733 if (GD_rank(g)[r].candidate) {
734 delta += transpose_step(g, r, reverse);
735 }
736 }
737 } while (delta >= 1);
738}
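/* transpose is the greedy adjacent-exchange heuristic: each candidate rank is
 * swept left to right, and neighboring nodes are swapped whenever that lowers
 * the local crossing count (or, on a reverse pass, merely leaves a nonzero
 * count unchanged, to escape ties). A swap re-marks the rank and its
 * neighbors as candidates, and sweeping repeats until a full pass yields no
 * improvement.
 */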
739
740static int64_t mincross(graph_t *g, int startpass) {
741 const int endpass = 2;
742 int maxthispass = 0, iter, trying, pass;
743 int64_t cur_cross, best_cross;
744
745 if (startpass > 1) {
746 cur_cross = best_cross = ncross();
747 save_best(g);
748 } else
749 cur_cross = best_cross = INT64_MAX;
750 for (pass = startpass; pass <= endpass; pass++) {
751 if (pass <= 1) {
752 maxthispass = MIN(4, MaxIter);
753 if (g == dot_root(g))
754 if (build_ranks(g, pass) != 0) {
755 return -1;
756 }
757 if (pass == 0)
758 flat_breakcycles(g);
759 flat_reorder(g);
760
761 if ((cur_cross = ncross()) <= best_cross) {
762 save_best(g);
763 best_cross = cur_cross;
764 }
765 } else {
766 maxthispass = MaxIter;
767 if (cur_cross > best_cross)
768 restore_best(g);
769 cur_cross = best_cross;
770 }
771 trying = 0;
772 for (iter = 0; iter < maxthispass; iter++) {
773 if (Verbose)
774 fprintf(stderr,
775 "mincross: pass %d iter %d trying %d cur_cross %" PRId64 " best_cross %"
776 PRId64 "\n",
777 pass, iter, trying, cur_cross, best_cross);
778 if (trying++ >= MinQuit)
779 break;
780 if (cur_cross == 0)
781 break;
782 mincross_step(g, iter);
783 if ((cur_cross = ncross()) <= best_cross) {
784 save_best(g);
785 if (cur_cross < Convergence * (double)best_cross)
786 trying = 0;
787 best_cross = cur_cross;
788 }
789 }
790 if (cur_cross == 0)
791 break;
792 }
793 if (cur_cross > best_cross)
794 restore_best(g);
795 if (best_cross > 0) {
796 transpose(g, false);
797 best_cross = ncross();
798 }
799
800 return best_cross;
801}
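/* mincross alternates median-based reordering (mincross_step) with the
 * adjacent-exchange passes above, remembering the best ordering seen so far
 * via save_best/saveorder. An iteration resets the "trying" counter only when
 * it improves the best crossing count by more than the Convergence factor
 * (0.995); after MinQuit unproductive iterations, or once zero crossings are
 * reached, the pass stops. The best ordering is restored and given a final
 * transpose before its crossing count is returned.
 */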
802
803static void restore_best(graph_t * g)
804{
805 node_t *n;
806 int i, r;
807
808 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
809 for (i = 0; i < GD_rank(g)[r].n; i++) {
810 n = GD_rank(g)[r].v[i];
811 ND_order(n) = saveorder(n);
812 }
813 }
814 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
815 GD_rank(Root)[r].valid = false;
816 qsort(GD_rank(g)[r].v, GD_rank(g)[r].n, sizeof(GD_rank(g)[0].v[0]),
817 nodeposcmpf);
818 }
819}
820
821static void save_best(graph_t * g)
822{
823 node_t *n;
824 int i, r;
825 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
826 for (i = 0; i < GD_rank(g)[r].n; i++) {
827 n = GD_rank(g)[r].v[i];
828 saveorder(n) = ND_order(n);
829 }
830 }
831}
832
833/* merges the connected components of g */
834static void merge_components(graph_t * g)
835{
836 node_t *u, *v;
837
838 if (GD_comp(g).size <= 1)
839 return;
840 u = NULL;
841 for (size_t c = 0; c < GD_comp(g).size; c++) {
842 v = GD_comp(g).list[c];
843 if (u)
844 ND_next(u) = v;
845 ND_prev(v) = u;
846 while (ND_next(v)) {
847 v = ND_next(v);
848 }
849 u = v;
850 }
851 GD_comp(g).size = 1;
852 GD_nlist(g) = GD_comp(g).list[0];
853 GD_minrank(g) = GlobalMinRank;
854 GD_maxrank(g) = GlobalMaxRank;
855}
856
857/* merge connected components, create globally consistent rank lists */
858static void merge2(graph_t * g)
859{
860 int i, r;
861 node_t *v;
862
863 /* merge the components and rank limits */
864 merge_components(g);
865
866 /* install complete ranks */
867 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
868 GD_rank(g)[r].n = GD_rank(g)[r].an;
869 GD_rank(g)[r].v = GD_rank(g)[r].av;
870 for (i = 0; i < GD_rank(g)[r].n; i++) {
871 v = GD_rank(g)[r].v[i];
872 if (v == NULL) {
873 if (Verbose)
874 fprintf(stderr,
875 "merge2: graph %s, rank %d has only %d < %d nodes\n",
876 agnameof(g), r, i, GD_rank(g)[r].n);
877 GD_rank(g)[r].n = i;
878 break;
879 }
880 ND_order(v) = i;
881 }
882 }
883}
884
885static void cleanup2(graph_t *g, int64_t nc) {
886 int i, j, r, c;
887 node_t *v;
888 edge_t *e;
889
890 if (TI_list) {
891 free(TI_list);
892 TI_list = NULL;
893 }
894 if (TE_list) {
895 free(TE_list);
896 TE_list = NULL;
897 }
898 /* fix vlists of clusters */
899 for (c = 1; c <= GD_n_cluster(g); c++)
900 rec_reset_vlists(GD_clust(g)[c]);
901
902 /* remove temporary edges used for ordering nodes */
903 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
904 for (i = 0; i < GD_rank(g)[r].n; i++) {
905 v = GD_rank(g)[r].v[i];
906 ND_order(v) = i;
907 if (ND_flat_out(v).list) {
908 for (j = 0; (e = ND_flat_out(v).list[j]); j++)
909 if (ED_edge_type(e) == FLATORDER) {
910 delete_flat_edge(e);
911 free(e->base.data);
912 free(e);
913 j--;
914 }
915 }
916 }
917 free_matrix(GD_rank(g)[r].flat);
918 }
919 if (Verbose)
920 fprintf(stderr, "mincross %s: %" PRId64 " crossings, %.2f secs.\n",
921 agnameof(g), nc, elapsed_sec());
922}
923
924static node_t *neighbor(node_t * v, int dir)
925{
926 node_t *rv = NULL;
927 assert(v);
928 if (dir < 0) {
929 if (ND_order(v) > 0)
930 rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) - 1];
931 } else
932 rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) + 1];
933 assert(rv == 0 || (ND_order(rv)-ND_order(v))*dir > 0);
934 return rv;
935}
936
937static bool is_a_normal_node_of(graph_t *g, node_t *v) {
938 return ND_node_type(v) == NORMAL && agcontains(g, v);
939}
940
941static bool is_a_vnode_of_an_edge_of(graph_t *g, node_t *v) {
942 if (ND_node_type(v) == VIRTUAL
943 && ND_in(v).size == 1 && ND_out(v).size == 1) {
944 edge_t *e = ND_out(v).list[0];
945 while (ED_edge_type(e) != NORMAL)
946 e = ED_to_orig(e);
947 if (agcontains(g, e))
948 return true;
949 }
950 return false;
951}
952
953static bool inside_cluster(graph_t *g, node_t *v) {
954 return is_a_normal_node_of(g, v) || is_a_vnode_of_an_edge_of(g, v);
955}
956
957static node_t *furthestnode(graph_t * g, node_t * v, int dir)
958{
959 node_t *rv = v;
960 for (node_t *u = v; (u = neighbor(u, dir)); ) {
961 if (is_a_normal_node_of(g, u))
962 rv = u;
963 else if (is_a_vnode_of_an_edge_of(g, u))
964 rv = u;
965 }
966 return rv;
967}
968
969void save_vlist(graph_t* g)
970{
971 int r;
972
973 if (GD_rankleader(g))
974 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
975 GD_rankleader(g)[r] = GD_rank(g)[r].v[0];
976 }
977}
978
979void rec_save_vlists(graph_t* g)
980{
981 int c;
982
983 save_vlist(g);
984 for (c = 1; c <= GD_n_cluster(g); c++)
985 rec_save_vlists(GD_clust(g)[c]);
986}
987
988
989void rec_reset_vlists(graph_t* g)
990{
991 // fix vlists of sub-clusters
992 for (int c = 1; c <= GD_n_cluster(g); c++)
993 rec_reset_vlists(GD_clust(g)[c]);
994
995 if (GD_rankleader(g))
996 for (int r = GD_minrank(g); r <= GD_maxrank(g); r++) {
997 node_t *const v = GD_rankleader(g)[r];
998 if (v == NULL) {
999 continue;
1000 }
1001#ifdef DEBUG
1002 node_in_root_vlist(v);
1003#endif
1004 node_t *const u = furthestnode(g, v, -1);
1005 node_t *const w = furthestnode(g, v, 1);
1006 GD_rankleader(g)[r] = u;
1007#ifdef DEBUG
1008 assert(GD_rank(dot_root(g))[r].v[ND_order(u)] == u);
1009#endif
1010 GD_rank(g)[r].v = GD_rank(dot_root(g))[r].v + ND_order(u);
1011 GD_rank(g)[r].n = ND_order(w) - ND_order(u) + 1;
1012 }
1013}
1014
1015/* The structures in crossing minimization and positioning require
1016 * that clusters have some node on each rank. This function recursively
1017 * guarantees this property. It takes into account nodes and edges in
1018 * a cluster, the latter causing dummy nodes for intervening ranks.
1019 * For any rank without a node, we create a real node of small size. This
1020 * is stored in the subgraph sg, for easy removal later.
1021 *
1022 * I believe it is not necessary to do this for the root graph, as these
1023 * are laid out one component at a time and these will necessarily have a
1024 * node on each rank from source to sink levels.
1025 */
1026static Agraph_t *realFillRanks(Agraph_t *g, bitarray_t *ranks, Agraph_t *sg) {
1027 int i, c;
1028 Agedge_t* e;
1029 Agnode_t* n;
1030
1031 for (c = 1; c <= GD_n_cluster(g); c++)
1032 sg = realFillRanks(GD_clust(g)[c], ranks, sg);
1033
1034 if (dot_root(g) == g)
1035 return sg;
1036 bitarray_clear(ranks);
1037 for (n = agfstnode(g); n; n = agnxtnode(g,n)) {
1038 bitarray_set(ranks, ND_rank(n), true);
1039 for (e = agfstout(g,n); e; e = agnxtout(g,e)) {
1040 for (i = ND_rank(n)+1; i <= ND_rank(aghead(e)); i++)
1041 bitarray_set(ranks, i, true);
1042 }
1043 }
1044 for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
1045 if (!bitarray_get(*ranks, i)) {
1046 if (!sg) {
1047 sg = agsubg (dot_root(g), "_new_rank", 1);
1048 }
1049 n = agnode (sg, NULL, 1);
1050 agbindrec(n, "Agnodeinfo_t", sizeof(Agnodeinfo_t), true);
1051 ND_rank(n) = i;
1052 ND_lw(n) = ND_rw(n) = 0.5;
1053 ND_ht(n) = 1;
1054 ND_UF_size(n) = 1;
1055 alloc_elist(4, ND_in(n));
1056 alloc_elist(4, ND_out(n));
1057 agsubnode (g, n, 1);
1058 }
1059 }
1060 return sg;
1061}
1062
1063static void
1064fillRanks (Agraph_t* g)
1065{
1066 int rnks_sz = GD_maxrank(g) + 2;
1067 bitarray_t rnks = bitarray_new(rnks_sz);
1068 realFillRanks(g, &rnks, NULL);
1069 bitarray_reset(&rnks);
1070}
1071
1072static void init_mincross(graph_t * g)
1073{
1074 int size;
1075
1076 if (Verbose)
1077 start_timer();
1078
1079 ReMincross = false;
1080 Root = g;
1081 /* alloc +1 for the null terminator usage in do_ordering() */
1082 size = agnedges(dot_root(g)) + 1;
1083 TE_list = gv_calloc(size, sizeof(edge_t*));
1084 TI_list = gv_calloc(size, sizeof(int));
1085 mincross_options(g);
1086 if (GD_flags(g) & NEW_RANK)
1087 fillRanks (g);
1088 class2(g);
1089 decompose(g, 1);
1090 allocate_ranks(g);
1091 ordered_edges(g);
1092 GlobalMinRank = GD_minrank(g);
1093 GlobalMaxRank = GD_maxrank(g);
1094}
1095
1096static void flat_rev(Agraph_t * g, Agedge_t * e)
1097{
1098 int j;
1099 Agedge_t *rev;
1100
1101 if (!ND_flat_out(aghead(e)).list)
1102 rev = NULL;
1103 else
1104 for (j = 0; (rev = ND_flat_out(aghead(e)).list[j]); j++)
1105 if (aghead(rev) == agtail(e))
1106 break;
1107 if (rev) {
1108 merge_oneway(e, rev);
1109 if (ED_edge_type(rev) == FLATORDER && ED_to_orig(rev) == 0)
1110 ED_to_orig(rev) = e;
1111 elist_append(e, ND_other(agtail(e)));
1112 } else {
1113 rev = new_virtual_edge(aghead(e), agtail(e), e);
1114 if (ED_edge_type(e) == FLATORDER)
1115 ED_edge_type(rev) = FLATORDER;
1116 else
1117 ED_edge_type(rev) = REVERSED;
1118 ED_label(rev) = ED_label(e);
1119 flat_edge(g, rev);
1120 }
1121}
1122
1123static void flat_search(graph_t * g, node_t * v)
1124{
1125 int i;
1126 bool hascl;
1127 edge_t *e;
1128 adjmatrix_t *M = GD_rank(g)[ND_rank(v)].flat;
1129
1130 ND_mark(v) = true;
1131 ND_onstack(v) = true;
1132 hascl = GD_n_cluster(dot_root(g)) > 0;
1133 if (ND_flat_out(v).list)
1134 for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
1135 if (hascl && !(agcontains(g, agtail(e)) && agcontains(g, aghead(e))))
1136 continue;
1137 if (ED_weight(e) == 0)
1138 continue;
1139 if (ND_onstack(aghead(e))) {
1140 assert(flatindex(aghead(e)) < M->nrows);
1141 assert(flatindex(agtail(e)) < M->ncols);
1142 matrix_set(M, (size_t)flatindex(aghead(e)), (size_t)flatindex(agtail(e)));
1143 delete_flat_edge(e);
1144 i--;
1145 if (ED_edge_type(e) == FLATORDER)
1146 continue;
1147 flat_rev(g, e);
1148 } else {
1149 assert(flatindex(aghead(e)) < M->nrows);
1150 assert(flatindex(agtail(e)) < M->ncols);
1151 matrix_set(M, (size_t)flatindex(agtail(e)), (size_t)flatindex(aghead(e)));
1152 if (!ND_mark(aghead(e)))
1153 flat_search(g, aghead(e));
1154 }
1155 }
1156 ND_onstack(v) = false;
1157}
1158
1159static void flat_breakcycles(graph_t * g)
1160{
1161 int i, r;
1162 node_t *v;
1163
1164 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1165 bool flat = false;
1166 for (i = 0; i < GD_rank(g)[r].n; i++) {
1167 v = GD_rank(g)[r].v[i];
1168 ND_mark(v) = false;
1169 ND_onstack(v) = false;
1170 ND_low(v) = i;
1171 if (ND_flat_out(v).size > 0 && !flat) {
1172 GD_rank(g)[r].flat =
1173 new_matrix((size_t)GD_rank(g)[r].n, (size_t)GD_rank(g)[r].n);
1174 flat = true;
1175 }
1176 }
1177 if (flat) {
1178 for (i = 0; i < GD_rank(g)[r].n; i++) {
1179 v = GD_rank(g)[r].v[i];
1180 if (!ND_mark(v))
1181 flat_search(g, v);
1182 }
1183 }
1184 }
1185}
1186
1187/* Allocate rank structure, determining number of nodes per rank.
1188 * Note that no nodes are put into the structure yet.
1189 */
1190void allocate_ranks(graph_t * g)
1191{
1192 int r, low, high;
1193 node_t *n;
1194 edge_t *e;
1195
1196 int *cn = gv_calloc(GD_maxrank(g) + 2, sizeof(int)); // must be 0 based, not GD_minrank
1197 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
1198 cn[ND_rank(n)]++;
1199 for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
1200 low = ND_rank(agtail(e));
1201 high = ND_rank(aghead(e));
1202 if (low > high) {
1203 SWAP(&low, &high);
1204 }
1205 for (r = low + 1; r < high; r++)
1206 cn[r]++;
1207 }
1208 }
1209 GD_rank(g) = gv_calloc(GD_maxrank(g) + 2, sizeof(rank_t));
1210 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1211 GD_rank(g)[r].an = GD_rank(g)[r].n = cn[r] + 1;
1212 GD_rank(g)[r].av = GD_rank(g)[r].v = gv_calloc(cn[r] + 1, sizeof(node_t*));
1213 }
1214 free(cn);
1215}
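/* The per-rank slot counts above include one entry for every node on the rank
 * plus one for every edge that spans it, since each such edge later
 * contributes a virtual node there: an edge from rank 1 to rank 4 adds a slot
 * on ranks 2 and 3. The extra "+ 1" element appears to leave slack for a
 * trailing sentinel in each rank's node array.
 */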
1216
1217/* install a node at the current right end of its rank */
1218int install_in_rank(graph_t *g, node_t *n) {
1219 int i, r;
1220
1221 r = ND_rank(n);
1222 i = GD_rank(g)[r].n;
1223 if (GD_rank(g)[r].an <= 0) {
1224 agerrorf("install_in_rank, line %d: %s %s rank %d i = %d an = 0\n",
1225 __LINE__, agnameof(g), agnameof(n), r, i);
1226 return -1;
1227 }
1228
1229 GD_rank(g)[r].v[i] = n;
1230 ND_order(n) = i;
1231 GD_rank(g)[r].n++;
1232 assert(GD_rank(g)[r].n <= GD_rank(g)[r].an);
1233#ifdef DEBUG
1234 {
1235 node_t *v;
1236
1237 for (v = GD_nlist(g); v; v = ND_next(v))
1238 if (v == n)
1239 break;
1240 assert(v != NULL);
1241 }
1242#endif
1243 if (ND_order(n) > GD_rank(Root)[r].an) {
1244 agerrorf("install_in_rank, line %d: ND_order(%s) [%d] > GD_rank(Root)[%d].an [%d]\n",
1245 __LINE__, agnameof(n), ND_order(n), r, GD_rank(Root)[r].an);
1246 return -1;
1247 }
1248 if (r < GD_minrank(g) || r > GD_maxrank(g)) {
1249 agerrorf("install_in_rank, line %d: rank %d not in rank range [%d,%d]\n",
1250 __LINE__, r, GD_minrank(g), GD_maxrank(g));
1251 return -1;
1252 }
1253 if (GD_rank(g)[r].v + ND_order(n) >
1254 GD_rank(g)[r].av + GD_rank(Root)[r].an) {
1255 agerrorf("install_in_rank, line %d: GD_rank(g)[%d].v + ND_order(%s) [%d] > GD_rank(g)[%d].av + GD_rank(Root)[%d].an [%d]\n",
1256 __LINE__, r, agnameof(n),ND_order(n), r, r, GD_rank(Root)[r].an);
1257 return -1;
1258 }
1259 return 0;
1260}
1261
1262/* Install nodes in ranks. The initial ordering ensures that series-parallel
1263 * graphs such as trees are drawn with no crossings. It tries searching
1264 * in- and out-edges and takes the better of the two initial orderings.
1265 */
1266int build_ranks(graph_t *g, int pass) {
1267 int i, j;
1268 node_t *n, *ns;
1269 edge_t **otheredges;
1270 node_queue_t q = {0};
1271 for (n = GD_nlist(g); n; n = ND_next(n))
1272 MARK(n) = false;
1273
1274#ifdef DEBUG
1275 {
1276 edge_t *e;
1277 for (n = GD_nlist(g); n; n = ND_next(n)) {
1278 for (i = 0; (e = ND_out(n).list[i]); i++)
1279 assert(!MARK(aghead(e)));
1280 for (i = 0; (e = ND_in(n).list[i]); i++)
1281 assert(!MARK(agtail(e)));
1282 }
1283 }
1284#endif
1285
1286 for (i = GD_minrank(g); i <= GD_maxrank(g); i++)
1287 GD_rank(g)[i].n = 0;
1288
1289 const bool walkbackwards = g != agroot(g); // if this is a cluster, need to
1290 // walk GD_nlist backward to
1291 // preserve input node order
1292 if (walkbackwards) {
1293 for (ns = GD_nlist(g); ND_next(ns); ns = ND_next(ns)) {
1294 ;
1295 }
1296 } else {
1297 ns = GD_nlist(g);
1298 }
1299 for (n = ns; n; n = walkbackwards ? ND_prev(n) : ND_next(n)) {
1300 otheredges = pass == 0 ? ND_in(n).list : ND_out(n).list;
1301 if (otheredges[0] != NULL)
1302 continue;
1303 if (!MARK(n)) {
1304 MARK(n) = true;
1305 node_queue_push_back(&q, n);
1306 while (!node_queue_is_empty(&q)) {
1307 node_t *n0 = node_queue_pop_front(&q);
1308 if (ND_ranktype(n0) != CLUSTER) {
1309 if (install_in_rank(g, n0) != 0) {
1310 node_queue_free(&q);
1311 return -1;
1312 }
1313 enqueue_neighbors(&q, n0, pass);
1314 } else {
1315 const int rc = install_cluster(g, n0, pass, &q);
1316 if (rc != 0) {
1317 node_queue_free(&q);
1318 return rc;
1319 }
1320 }
1321 }
1322 }
1323 }
1324 assert(node_queue_is_empty(&q));
1325 for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
1326 GD_rank(Root)[i].valid = false;
1327 if (GD_flip(g) && GD_rank(g)[i].n > 0) {
1328 node_t **vlist = GD_rank(g)[i].v;
1329 int num_nodes_1 = GD_rank(g)[i].n - 1;
1330 int half_num_nodes_1 = num_nodes_1 / 2;
1331 for (j = 0; j <= half_num_nodes_1; j++)
1332 exchange(vlist[j], vlist[num_nodes_1 - j]);
1333 }
1334 }
1335
1336 if (g == dot_root(g) && ncross() > 0)
1337 transpose(g, false);
1338 node_queue_free(&q);
1339 return 0;
1340}
1341
1342void enqueue_neighbors(node_queue_t *q, node_t *n0, int pass) {
1343 edge_t *e;
1344
1345 if (pass == 0) {
1346 for (size_t i = 0; i < ND_out(n0).size; i++) {
1347 e = ND_out(n0).list[i];
1348 if (!MARK(aghead(e))) {
1349 MARK(aghead(e)) = true;
1350 node_queue_push_back(q, aghead(e));
1351 }
1352 }
1353 } else {
1354 for (size_t i = 0; i < ND_in(n0).size; i++) {
1355 e = ND_in(n0).list[i];
1356 if (!MARK(agtail(e))) {
1357 MARK(agtail(e)) = true;
1358 node_queue_push_back(q, agtail(e));
1359 }
1360 }
1361 }
1362}
1363
1364static bool constraining_flat_edge(Agraph_t *g, Agedge_t *e) {
1365 if (ED_weight(e) == 0)
1366 return false;
1367 if (!inside_cluster(g, agtail(e)))
1368 return false;
1369 if (!inside_cluster(g, aghead(e)))
1370 return false;
1371 return true;
1372}
1373
1374DEFINE_LIST(nodes, node_t *)
1375
1376/* construct nodes reachable from v in post-order.
1377 * This is the same as doing a topological sort in reverse order.
1378 */
1379static void postorder(graph_t *g, node_t *v, nodes_t *list, int r) {
1380 edge_t *e;
1381 int i;
1382
1383 MARK(v) = true;
1384 if (ND_flat_out(v).size > 0) {
1385 for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
1386 if (!constraining_flat_edge(g, e)) continue;
1387 if (!MARK(aghead(e)))
1388 postorder(g, aghead(e), list, r);
1389 }
1390 }
1391 assert(ND_rank(v) == r);
1392 nodes_append(list, v);
1393}
1394
1395static void flat_reorder(graph_t * g)
1396{
1397 int i, r, local_in_cnt, local_out_cnt, base_order;
1398 node_t *v;
1399 nodes_t temprank = {0};
1400 edge_t *flat_e, *e;
1401
1402 if (!GD_has_flat_edges(g))
1403 return;
1404 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1405 if (GD_rank(g)[r].n == 0) continue;
1406 base_order = ND_order(GD_rank(g)[r].v[0]);
1407 for (i = 0; i < GD_rank(g)[r].n; i++)
1408 MARK(GD_rank(g)[r].v[i]) = false;
1409 nodes_clear(&temprank);
1410
1411 /* construct reverse topological sort order in temprank */
1412 for (i = 0; i < GD_rank(g)[r].n; i++) {
1413 if (GD_flip(g)) v = GD_rank(g)[r].v[i];
1414 else v = GD_rank(g)[r].v[GD_rank(g)[r].n - i - 1];
1415
1416 local_in_cnt = local_out_cnt = 0;
1417 for (size_t j = 0; j < ND_flat_in(v).size; j++) {
1418 flat_e = ND_flat_in(v).list[j];
1419 if (constraining_flat_edge(g, flat_e)) local_in_cnt++;
1420 }
1421 for (size_t j = 0; j < ND_flat_out(v).size; j++) {
1422 flat_e = ND_flat_out(v).list[j];
1423 if (constraining_flat_edge(g, flat_e)) local_out_cnt++;
1424 }
1425 if (local_in_cnt == 0 && local_out_cnt == 0)
1426 nodes_append(&temprank, v);
1427 else {
1428 if (!MARK(v) && local_in_cnt == 0) {
1429 postorder(g, v, &temprank, r);
1430 }
1431 }
1432 }
1433
1434 if (nodes_size(&temprank) > 0) {
1435 if (!GD_flip(g)) {
1436 nodes_reverse(&temprank);
1437 }
1438 for (i = 0; i < GD_rank(g)[r].n; i++) {
1439 v = GD_rank(g)[r].v[i] = nodes_get(&temprank, (size_t)i);
1440 ND_order(v) = i + base_order;
1441 }
1442
1443 /* nonconstraint flat edges must be made LR */
1444 for (i = 0; i < GD_rank(g)[r].n; i++) {
1445 v = GD_rank(g)[r].v[i];
1446 if (ND_flat_out(v).list) {
1447 for (size_t j = 0; (e = ND_flat_out(v).list[j]); j++) {
1448 if ((!GD_flip(g) && ND_order(aghead(e)) < ND_order(agtail(e))) ||
1449 (GD_flip(g) && ND_order(aghead(e)) > ND_order(agtail(e)))) {
1450 assert(!constraining_flat_edge(g, e));
1451 delete_flat_edge(e);
1452 j--;
1453 flat_rev(g, e);
1454 }
1455 }
1456 }
1457 }
1458 /* postprocess to restore intended order */
1459 }
1460 /* else do no harm! */
1461 GD_rank(Root)[r].valid = false;
1462 }
1463 nodes_free(&temprank);
1464}
1465
1466static void reorder(graph_t * g, int r, bool reverse, bool hasfixed)
1467{
1468 int changed = 0, nelt;
1469 node_t **vlist = GD_rank(g)[r].v;
1470 node_t **lp, **rp, **ep = vlist + GD_rank(g)[r].n;
1471
1472 for (nelt = GD_rank(g)[r].n - 1; nelt >= 0; nelt--) {
1473 lp = vlist;
1474 while (lp < ep) {
1475 /* find leftmost node that can be compared */
1476 while (lp < ep && ND_mval(*lp) < 0)
1477 lp++;
1478 if (lp >= ep)
1479 break;
1480 /* find the node that can be compared */
1481 bool sawclust = false;
1482 bool muststay = false;
1483 for (rp = lp + 1; rp < ep; rp++) {
1484 if (sawclust && ND_clust(*rp))
1485 continue; /* ### */
1486 if (left2right(g, *lp, *rp)) {
1487 muststay = true;
1488 break;
1489 }
1490 if (ND_mval(*rp) >= 0)
1491 break;
1492 if (ND_clust(*rp))
1493 sawclust = true; /* ### */
1494 }
1495 if (rp >= ep)
1496 break;
1497 if (!muststay) {
1498 const double p1 = ND_mval(*lp);
1499 const double p2 = ND_mval(*rp);
1500 if (p1 > p2 || (p1 >= p2 && reverse)) {
1501 exchange(*lp, *rp);
1502 changed++;
1503 }
1504 }
1505 lp = rp;
1506 }
1507 if (!hasfixed && !reverse)
1508 ep--;
1509 }
1510
1511 if (changed) {
1512 GD_rank(Root)[r].valid = false;
1513 if (r > 0)
1514 GD_rank(Root)[r - 1].valid = false;
1515 }
1516}
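/* reorder is a single constrained bubble-sort-style sweep over rank r: nodes
 * with a negative median (ND_mval) are treated as fixed, left2right blocks
 * exchanges that would violate cluster or flat-edge constraints, and
 * otherwise two comparable neighbors are swapped when their medians are out
 * of order (ties are swapped only on reverse passes). Any change invalidates
 * the cached crossing counts of the affected ranks.
 */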
1517
1518static void mincross_step(graph_t * g, int pass)
1519{
1520 int r, other, first, last, dir;
1521
1522 bool reverse = pass % 4 < 2;
1523
1524 if (pass % 2 == 0) { /* down pass */
1525 first = GD_minrank(g) + 1;
1526 if (GD_minrank(g) > GD_minrank(Root))
1527 first--;
1528 last = GD_maxrank(g);
1529 dir = 1;
1530 } else { /* up pass */
1531 first = GD_maxrank(g) - 1;
1532 last = GD_minrank(g);
1533 if (GD_maxrank(g) < GD_maxrank(Root))
1534 first++;
1535 dir = -1;
1536 }
1537
1538 for (r = first; r != last + dir; r += dir) {
1539 other = r - dir;
1540 bool hasfixed = medians(g, r, other);
1541 reorder(g, r, reverse, hasfixed);
1542 }
1543 transpose(g, !reverse);
1544}
1545
1546static int local_cross(elist l, int dir)
1547{
1548 int i, j;
1549 int cross = 0;
1550 edge_t *e, *f;
1551 bool is_out = dir > 0;
1552 for (i = 0; (e = l.list[i]); i++) {
1553 if (is_out)
1554 for (j = i + 1; (f = l.list[j]); j++) {
1555 if ((ND_order(aghead(f)) - ND_order(aghead(e)))
1556 * (ED_tail_port(f).p.x - ED_tail_port(e).p.x) < 0)
1557 cross += ED_xpenalty(e) * ED_xpenalty(f);
1558 } else
1559 for (j = i + 1; (f = l.list[j]); j++) {
1560 if ((ND_order(agtail(f)) - ND_order(agtail(e)))
1561 * (ED_head_port(f).p.x - ED_head_port(e).p.x) < 0)
1562 cross += ED_xpenalty(e) * ED_xpenalty(f);
1563 }
1564 }
1565 return cross;
1566}
1567
1568static int64_t rcross(graph_t *g, int r) {
1569 int top, bot, max, i, k;
1570 node_t **rtop, *v;
1571
1572 int64_t cross = 0;
1573 max = 0;
1574 rtop = GD_rank(g)[r].v;
1575
1576 int *Count = gv_calloc(GD_rank(Root)[r + 1].n + 1, sizeof(int));
1577
1578 for (top = 0; top < GD_rank(g)[r].n; top++) {
1579 edge_t *e;
1580 if (max > 0) {
1581 for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
1582 for (k = ND_order(aghead(e)) + 1; k <= max; k++)
1583 cross += Count[k] * ED_xpenalty(e);
1584 }
1585 }
1586 for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
1587 int inv = ND_order(aghead(e));
1588 if (inv > max)
1589 max = inv;
1590 Count[inv] += ED_xpenalty(e);
1591 }
1592 }
1593 for (top = 0; top < GD_rank(g)[r].n; top++) {
1594 v = GD_rank(g)[r].v[top];
1595 if (ND_has_port(v))
1596 cross += local_cross(ND_out(v), 1);
1597 }
1598 for (bot = 0; bot < GD_rank(g)[r + 1].n; bot++) {
1599 v = GD_rank(g)[r + 1].v[bot];
1600 if (ND_has_port(v))
1601 cross += local_cross(ND_in(v), -1);
1602 }
1603 free(Count);
1604 return cross;
1605}
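/* rcross counts crossings between ranks r and r+1 in one left-to-right sweep.
 * Count[k] accumulates the penalty of edges already seen that end at position
 * k on the lower rank; when a new edge ending at position p is processed,
 * every earlier edge ending strictly to the right of p crosses it, so the
 * penalties in Count[p+1..max] are added. local_cross then adds crossings
 * induced by ports among edges sharing an endpoint.
 */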
1606
1607static int64_t ncross(void) {
1608 int r;
1609
1610 graph_t *g = Root;
1611 int64_t count = 0;
1612 for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
1613 if (GD_rank(g)[r].valid)
1614 count += GD_rank(g)[r].cache_nc;
1615 else {
1616 const int64_t nc = GD_rank(g)[r].cache_nc = rcross(g, r);
1617 count += nc;
1618 GD_rank(g)[r].valid = true;
1619 }
1620 }
1621 return count;
1622}
1623
1624static int ordercmpf(const void *x, const void *y) {
1625 const int *i0 = x;
1626 const int *i1 = y;
1627 if (*i0 < *i1) {
1628 return -1;
1629 }
1630 if (*i0 > *i1) {
1631 return 1;
1632 }
1633 return 0;
1634}
1635
1636/* Calculate a mval for nodes with no in or out non-flat edges.
1637 * Assume (ND_out(n).size == 0) && (ND_in(n).size == 0)
1638 * Find flat edge a->n where a has the largest order and set
1639 * n.mval = a.mval+1, assuming a.mval is defined (>=0).
1640 * If there are no flat in edges, find flat edge n->a where a
1642 * has the smallest order and set n.mval = a.mval-1, assuming
1642 * a.mval is > 0.
1643 * Return true if n.mval is left -1, indicating a fixed node for sorting.
1644 */
1645static bool flat_mval(node_t * n)
1646{
1647 int i;
1648 edge_t *e, **fl;
1649 node_t *nn;
1650
1651 if (ND_flat_in(n).size > 0) {
1652 fl = ND_flat_in(n).list;
1653 nn = agtail(fl[0]);
1654 for (i = 1; (e = fl[i]); i++)
1655 if (ND_order(agtail(e)) > ND_order(nn))
1656 nn = agtail(e);
1657 if (ND_mval(nn) >= 0) {
1658 ND_mval(n) = ND_mval(nn) + 1;
1659 return false;
1660 }
1661 } else if (ND_flat_out(n).size > 0) {
1662 fl = ND_flat_out(n).list;
1663 nn = aghead(fl[0]);
1664 for (i = 1; (e = fl[i]); i++)
1665 if (ND_order(aghead(e)) < ND_order(nn))
1666 nn = aghead(e);
1667 if (ND_mval(nn) > 0) {
1668 ND_mval(n) = ND_mval(nn) - 1;
1669 return false;
1670 }
1671 }
1672 return true;
1673}
1674
1675#define VAL(node,port) (MC_SCALE * ND_order(node) + (port).order)
1676
1677static bool medians(graph_t * g, int r0, int r1)
1678{
1679 int i, j0, lspan, rspan, *list;
1680 node_t *n, **v;
1681 edge_t *e;
1682 bool hasfixed = false;
1683
1684 list = TI_list;
1685 v = GD_rank(g)[r0].v;
1686 for (i = 0; i < GD_rank(g)[r0].n; i++) {
1687 n = v[i];
1688 size_t j = 0;
1689 if (r1 > r0)
1690 for (j0 = 0; (e = ND_out(n).list[j0]); j0++) {
1691 if (ED_xpenalty(e) > 0)
1692 list[j++] = VAL(aghead(e), ED_head_port(e));
1693 } else
1694 for (j0 = 0; (e = ND_in(n).list[j0]); j0++) {
1695 if (ED_xpenalty(e) > 0)
1696 list[j++] = VAL(agtail(e), ED_tail_port(e));
1697 }
1698 switch (j) {
1699 case 0:
1700 ND_mval(n) = -1;
1701 break;
1702 case 1:
1703 ND_mval(n) = list[0];
1704 break;
1705 case 2:
1706 ND_mval(n) = (list[0] + list[1]) / 2;
1707 break;
1708 default:
1709 qsort(list, j, sizeof(int), ordercmpf);
1710 if (j % 2)
1711 ND_mval(n) = list[j / 2];
1712 else {
1713 /* weighted median */
1714 size_t rm = j / 2;
1715 size_t lm = rm - 1;
1716 rspan = list[j - 1] - list[rm];
1717 lspan = list[lm] - list[0];
1718 if (lspan == rspan)
1719 ND_mval(n) = (list[lm] + list[rm]) / 2;
1720 else {
1721 double w = list[lm] * (double)rspan + list[rm] * (double)lspan;
1722 ND_mval(n) = w / (lspan + rspan);
1723 }
1724 }
1725 }
1726 }
1727 for (i = 0; i < GD_rank(g)[r0].n; i++) {
1728 n = v[i];
1729 if (ND_out(n).size == 0 && ND_in(n).size == 0)
1730 hasfixed |= flat_mval(n);
1731 }
1732 return hasfixed;
1733}
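/* medians implements the median heuristic: each node on rank r0 gets, as
 * ND_mval, the median position of its neighbors on rank r1 (VAL folds the
 * port order into the position so distinct ports stay distinct). Zero
 * neighbors leaves the node unconstrained (mval = -1); one or two neighbors
 * use the value or the midpoint; for larger even sets the weighted median
 * leans toward the side whose neighbor positions are packed more tightly.
 */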
1734
1735static int nodeposcmpf(const void *x, const void *y) {
1736// Suppress Clang/GCC -Wcast-qual warning. Casting away const here is acceptable
1737// as the later usage is const. We need the cast because the macros use
1738// non-const pointers for genericity.
1739#ifdef __GNUC__
1740#pragma GCC diagnostic push
1741#pragma GCC diagnostic ignored "-Wcast-qual"
1742#endif
1743 node_t **n0 = (node_t **)x;
1744 node_t **n1 = (node_t **)y;
1745#ifdef __GNUC__
1746#pragma GCC diagnostic pop
1747#endif
1748 if (ND_order(*n0) < ND_order(*n1)) {
1749 return -1;
1750 }
1751 if (ND_order(*n0) > ND_order(*n1)) {
1752 return 1;
1753 }
1754 return 0;
1755}
1756
1757static int edgeidcmpf(const void *x, const void *y) {
1758// Suppress Clang/GCC -Wcast-qual warning. Casting away const here is acceptable
1759// as the later usage is const. We need the cast because the macros use
1760// non-const pointers for genericity.
1761#ifdef __GNUC__
1762#pragma GCC diagnostic push
1763#pragma GCC diagnostic ignored "-Wcast-qual"
1764#endif
1765 edge_t **e0 = (edge_t **)x;
1766 edge_t **e1 = (edge_t **)y;
1767#ifdef __GNUC__
1768#pragma GCC diagnostic pop
1769#endif
1770 if (AGSEQ(*e0) < AGSEQ(*e1)) {
1771 return -1;
1772 }
1773 if (AGSEQ(*e0) > AGSEQ(*e1)) {
1774 return 1;
1775 }
1776 return 0;
1777}
1778
1779/* following code deals with weights of edges of "virtual" nodes */
1780#define ORDINARY 0
1781#define SINGLETON 1
1782#define VIRTUALNODE 2
1783#define NTYPES 3
1784
1785#define C_EE 1
1786#define C_VS 2
1787#define C_SS 2
1788#define C_VV 4
1789
1790static int table[NTYPES][NTYPES] = {
1791 /* ordinary */ {C_EE, C_EE, C_EE},
1792 /* singleton */ {C_EE, C_SS, C_VS},
1793 /* virtual */ {C_EE, C_VS, C_VV}
1794};
1795
1796static int endpoint_class(node_t * n)
1797{
1798 if (ND_node_type(n) == VIRTUAL)
1799 return VIRTUALNODE;
1800 if (ND_weight_class(n) <= 1)
1801 return SINGLETON;
1802 return ORDINARY;
1803}
1804
1805void virtual_weight(edge_t * e)
1806{
1807 int t;
1808 t = table[endpoint_class(agtail(e))][endpoint_class(aghead(e))];
1809
1810 /* check whether the upcoming computation will overflow */
1811 assert(t >= 0);
1812 if (INT_MAX / t < ED_weight(e)) {
1813 agerrorf("overflow when calculating virtual weight of edge\n");
1814 graphviz_exit(EXIT_FAILURE);
1815 }
1816
1817 ED_weight(e) *= t;
1818}
1819
1820#ifdef DEBUG
1821void check_rs(graph_t * g, int null_ok)
1822{
1823 int i, r;
1824 node_t *v, *prev;
1825
1826 fprintf(stderr, "\n\n%s:\n", agnameof(g));
1827 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1828 fprintf(stderr, "%d: ", r);
1829 prev = NULL;
1830 for (i = 0; i < GD_rank(g)[r].n; i++) {
1831 v = GD_rank(g)[r].v[i];
1832 if (v == NULL) {
1833 fprintf(stderr, "NULL\t");
1834 if (!null_ok)
1835 abort();
1836 } else {
1837 fprintf(stderr, "%s(%f)\t", agnameof(v), ND_mval(v));
1838 assert(ND_rank(v) == r);
1839 assert(v != prev);
1840 prev = v;
1841 }
1842 }
1843 fprintf(stderr, "\n");
1844 }
1845}
1846
1847void check_order(void)
1848{
1849 int i, r;
1850 node_t *v;
1851 graph_t *g = Root;
1852
1853 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1854 assert(GD_rank(g)[r].v[GD_rank(g)[r].n] == NULL);
1855 for (i = 0; (v = GD_rank(g)[r].v[i]); i++) {
1856 assert(ND_rank(v) == r);
1857 assert(ND_order(v) == i);
1858 }
1859 }
1860}
1861#endif
1862
1863static void mincross_options(graph_t * g)
1864{
1865 char *p;
1866 double f;
1867
1868 /* set default values */
1869 MinQuit = 8;
1870 MaxIter = 24;
1871
1872 p = agget(g, "mclimit");
1873 if (p && (f = atof(p)) > 0.0) {
1874 MinQuit = MAX(1, scale_clamp(MinQuit, f));
1875 MaxIter = MAX(1, scale_clamp(MaxIter, f));
1876 }
1877}
1878
1879#ifdef DEBUG
1880void check_exchange(node_t * v, node_t * w)
1881{
1882 int i, r;
1883 node_t *u;
1884
1885 if (ND_clust(v) == NULL && ND_clust(w) == NULL)
1886 return;
1887 assert(ND_clust(v) == NULL || ND_clust(w) == NULL);
1888 assert(ND_rank(v) == ND_rank(w));
1889 assert(ND_order(v) < ND_order(w));
1890 r = ND_rank(v);
1891
1892 for (i = ND_order(v) + 1; i < ND_order(w); i++) {
1893 u = GD_rank(dot_root(v))[r].v[i];
1894 if (ND_clust(u))
1895 abort();
1896 }
1897}
1898
1899void check_vlists(graph_t * g)
1900{
1901 int c, i, j, r;
1902 node_t *u;
1903
1904 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1905 for (i = 0; i < GD_rank(g)[r].n; i++) {
1906 u = GD_rank(g)[r].v[i];
1907 j = ND_order(u);
1908 assert(GD_rank(Root)[r].v[j] == u);
1909 }
1910 if (GD_rankleader(g)) {
1911 u = GD_rankleader(g)[r];
1912 j = ND_order(u);
1913 assert(GD_rank(Root)[r].v[j] == u);
1914 }
1915 }
1916 for (c = 1; c <= GD_n_cluster(g); c++)
1917 check_vlists(GD_clust(g)[c]);
1918}
1919
1920void node_in_root_vlist(node_t * n)
1921{
1922 node_t **vptr;
1923
1924 for (vptr = GD_rank(Root)[ND_rank(n)].v; *vptr; vptr++)
1925 if (*vptr == n)
1926 break;
1927 if (*vptr == 0)
1928 abort();
1929}
1930#endif /* DEBUG code */