Graphviz 13.0.0~dev.20241220.2304
smart_ini_x.c
1/*************************************************************************
2 * Copyright (c) 2011 AT&T Intellectual Property
3 * All rights reserved. This program and the accompanying materials
4 * are made available under the terms of the Eclipse Public License v1.0
5 * which accompanies this distribution, and is available at
6 * https://www.eclipse.org/legal/epl-v10.html
7 *
8 * Contributors: Details at https://graphviz.org
9 *************************************************************************/
10
11#include <float.h>
12#include <math.h>
13#include <neatogen/digcola.h>
14#include <util/alloc.h>
15#ifdef DIGCOLA
16#include <neatogen/kkutils.h>
17#include <neatogen/matrix_ops.h>
18#include <neatogen/conjgrad.h>
19#include <stdbool.h>
20
21static void
22standardize(double* orthog, int nvtxs)
23{
24 double len, avg = 0;
25 int i;
26 for (i=0; i<nvtxs; i++)
27 avg+=orthog[i];
28 avg/=nvtxs;
29
30 /* centralize: */
31 for (i=0; i<nvtxs; i++)
32 orthog[i]-=avg;
33
34 /* normalize: */
35 len = norm(orthog, nvtxs-1);
36
37 // if we have a degenerate length, do not attempt to scale by it
38 if (fabs(len) < DBL_EPSILON) {
39 return;
40 }
41
42 vectors_scalar_mult(nvtxs, orthog, 1.0 / len, orthog);
43}
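standardize() above mean-centers the vector and then scales it to unit Euclidean length; note that norm() is called with nvtxs-1, the index of the last element rather than the element count, and that a near-zero length is left untouched to avoid dividing by zero. The following is a minimal standalone sketch of the same centering-and-normalizing step using only the C standard library (demo_standardize is our name, not a Graphviz function):

#include <float.h>
#include <math.h>
#include <stdio.h>

/* Illustrative only: center a vector to zero mean, then scale to unit length. */
static void demo_standardize(double *v, int n) {
    double avg = 0.0, len = 0.0;
    for (int i = 0; i < n; i++) avg += v[i];
    avg /= n;
    for (int i = 0; i < n; i++) v[i] -= avg;          /* centralize */
    for (int i = 0; i < n; i++) len += v[i] * v[i];
    len = sqrt(len);                                  /* Euclidean length */
    if (fabs(len) < DBL_EPSILON) return;              /* degenerate: leave as-is */
    for (int i = 0; i < n; i++) v[i] /= len;          /* normalize */
}

int main(void) {
    double v[] = {1.0, 2.0, 3.0, 4.0};
    demo_standardize(v, 4);
    for (int i = 0; i < 4; i++) printf("%g ", v[i]);  /* ~ -0.67 -0.22 0.22 0.67 */
    printf("\n");
    return 0;
}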
44
45static void
46mat_mult_vec_orthog(float** mat, int dim1, int dim2, double* vec,
47 double* result, double* orthog)
48{
 49 /* computes mat*vec, where mat is a dim1×dim2 matrix, then projects the result orthogonally to 'orthog' */
50 int i,j;
51 double sum;
52
53 for (i=0; i<dim1; i++) {
54 sum=0;
55 for (j=0; j<dim2; j++) {
56 sum += mat[i][j]*vec[j];
57 }
58 result[i]=sum;
59 }
60 assert(orthog != NULL);
61 double alpha = -vectors_inner_product(dim1, result, orthog);
62 scadd(result, dim1 - 1, alpha, orthog);
63}
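mat_mult_vec_orthog() computes result = mat·vec and then removes the component of the result along orthog, i.e. (I − q·qᵀ)·(A·v) when orthog is a unit vector q; note that scadd() is called with dim1-1, again the last index. A standalone sketch of that multiply-then-project step with plain arrays (demo_mult_project is our name, not a Graphviz function):

#include <stdio.h>

#define N 3

/* Illustrative only: y = A*v, then y <- y - (y.q) q, which equals
 * (I - q q^T) A v when q has unit length. */
static void demo_mult_project(const double A[N][N], const double v[N],
                              const double q[N], double y[N]) {
    for (int i = 0; i < N; i++) {
        double sum = 0.0;
        for (int j = 0; j < N; j++)
            sum += A[i][j] * v[j];
        y[i] = sum;
    }
    double dot = 0.0;
    for (int i = 0; i < N; i++)
        dot += y[i] * q[i];
    for (int i = 0; i < N; i++)
        y[i] -= dot * q[i];
}

int main(void) {
    const double A[N][N] = {{2, 1, 0}, {1, 2, 1}, {0, 1, 2}};
    const double v[N] = {1, 0, 1};
    const double q[N] = {1, 0, 0};           /* unit vector to project out */
    double y[N];
    demo_mult_project(A, v, q, y);
    printf("%g %g %g\n", y[0], y[1], y[2]);  /* prints 0 2 2 */
    return 0;
}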
64
65static void
66power_iteration_orthog(float** square_mat, int n, int neigs,
67 double** eigs, double* evals, double* orthog, double p_iteration_threshold)
68{
69 // Power-Iteration with
70 // (I - orthog × orthogᵀ) × square_mat × (I - orthog × orthogᵀ)
71
72 int i,j;
73 double *tmp_vec = gv_calloc(n, sizeof(double));
74 double *last_vec = gv_calloc(n, sizeof(double));
75 double *curr_vector;
76 double len;
77 double angle;
78 double alpha;
79 int largest_index;
80 double largest_eval;
81
 82 double tol = 1 - p_iteration_threshold;
 83
84 if (neigs>=n) {
85 neigs=n;
86 }
87
88 for (i=0; i<neigs; i++) {
89 curr_vector = eigs[i];
 90 /* guess the i-th eigenvector */
91choose:
92 for (j=0; j<n; j++) {
93 curr_vector[j] = rand()%100;
94 }
95
96 assert(orthog != NULL);
97 alpha = -vectors_inner_product(n, orthog, curr_vector);
98 scadd(curr_vector, n - 1, alpha, orthog);
99 // orthogonalize against higher eigenvectors
100 for (j=0; j<i; j++) {
101 alpha = -vectors_inner_product(n, eigs[j], curr_vector);
102 scadd(curr_vector, n-1, alpha, eigs[j]);
103 }
104 len = norm(curr_vector, n-1);
105 if (len<1e-10) {
 106 /* We have chosen a vector collinear with previous ones */
107 goto choose;
108 }
109 vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
110 do {
111 copy_vector(n, curr_vector, last_vec);
112
113 mat_mult_vec_orthog(square_mat,n,n,curr_vector,tmp_vec,orthog);
114 copy_vector(n, tmp_vec, curr_vector);
115
116 /* orthogonalize against higher eigenvectors */
117 for (j=0; j<i; j++) {
118 alpha = -vectors_inner_product(n, eigs[j], curr_vector);
119 scadd(curr_vector, n-1, alpha, eigs[j]);
120 }
121 len = norm(curr_vector, n-1);
122 if (len<1e-10) {
 123 /* We have reached the null space (an eigenvector
 124 * associated with eigenvalue 0)
 125 */
126 goto exit;
127 }
128
129 vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
130 angle = vectors_inner_product(n, curr_vector, last_vec);
131 } while (fabs(angle)<tol);
 132 /* the Rayleigh quotient (up to errors due to orthogonalization):
 133 * (u*(A*u)/||A*u||)*||A*u|| = u*(A*u), where u=last_vec and ||u||=1
 134 */
135 evals[i]=angle*len;
136 }
137exit:
138 for (; i<neigs; i++) {
 139 /* compute the remaining (smallest) eigenvectors, which are
 140 * probably associated with eigenvalue 0 and for
 141 * which power-iteration is dangerous
 142 */
143 curr_vector = eigs[i];
 144 /* guess the i-th eigenvector */
145 for (j=0; j<n; j++)
146 curr_vector[j] = rand()%100;
147 /* orthogonalize against higher eigenvectors */
148 for (j=0; j<i; j++) {
149 alpha = -vectors_inner_product(n, eigs[j], curr_vector);
150 scadd(curr_vector, n-1, alpha, eigs[j]);
151 }
152 len = norm(curr_vector, n-1);
153 vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
154 evals[i]=0;
155
156 }
157
 158 /* sort vectors by their eigenvalues, to compensate for possible mis-convergence: */
159 for (i=0; i<neigs-1; i++) {
160 largest_index=i;
161 largest_eval=evals[largest_index];
162 for (j=i+1; j<neigs; j++) {
163 if (largest_eval<evals[j]) {
164 largest_index=j;
165 largest_eval=evals[largest_index];
166 }
167 }
168 if (largest_index!=i) { // exchange eigenvectors:
169 copy_vector(n, eigs[i], tmp_vec);
170 copy_vector(n, eigs[largest_index], eigs[i]);
171 copy_vector(n, tmp_vec, eigs[largest_index]);
172
173 evals[largest_index]=evals[i];
174 evals[i]=largest_eval;
175 }
176 }
177
178 free (tmp_vec); free (last_vec);
179
180}
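power_iteration_orthog() finds the leading eigenpairs of square_mat restricted to the subspace orthogonal to orthog: each candidate vector is repeatedly multiplied by the matrix, re-orthogonalized against orthog and the eigenvectors already found, and renormalized, until the cosine between successive iterates approaches 1; the eigenvalue estimate is that cosine times the length of the unnormalized iterate. A self-contained sketch of the same idea for just the dominant eigenpair of a small symmetric matrix, leaving out the deflation and orthogonalization steps (demo_power_iteration is our name):

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define N 3

/* Illustrative only: plain power iteration for the dominant eigenpair of a
 * symmetric matrix.  Convergence is declared when the cosine of the angle
 * between successive iterates is close to 1; the eigenvalue estimate is
 * then cosine * ||A u||, i.e. the Rayleigh quotient of the unit iterate. */
static double demo_power_iteration(const double A[N][N], double u[N],
                                   double threshold) {
    double angle = 0.0, len = 0.0;
    for (int i = 0; i < N; i++)
        u[i] = rand() % 100 + 1;                /* nonzero random guess */
    for (int i = 0; i < N; i++) len += u[i] * u[i];
    len = sqrt(len);
    for (int i = 0; i < N; i++) u[i] /= len;
    do {
        double last[N], next[N];
        for (int i = 0; i < N; i++) last[i] = u[i];
        for (int i = 0; i < N; i++) {           /* next = A * last */
            next[i] = 0.0;
            for (int j = 0; j < N; j++)
                next[i] += A[i][j] * last[j];
        }
        len = 0.0;
        for (int i = 0; i < N; i++) len += next[i] * next[i];
        len = sqrt(len);
        for (int i = 0; i < N; i++) u[i] = next[i] / len;
        angle = 0.0;
        for (int i = 0; i < N; i++) angle += u[i] * last[i];
    } while (fabs(angle) < 1.0 - threshold);
    return angle * len;
}

int main(void) {
    const double A[N][N] = {{4, 1, 0}, {1, 3, 1}, {0, 1, 2}};
    double u[N];
    printf("dominant eigenvalue ~ %g\n", demo_power_iteration(A, u, 1e-9));
    return 0;
}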
181
182static float*
183compute_avgs(DistType** Dij, int n, float* all_avg)
184{
185 float* row_avg = gv_calloc(n, sizeof(float));
186 int i,j;
187 double sum=0, sum_row;
188
189 for (i=0; i<n; i++) {
190 sum_row=0;
191 for (j=0; j<n; j++) {
192 sum+=(double)Dij[i][j]*(double)Dij[i][j];
193 sum_row+=(double)Dij[i][j]*(double)Dij[i][j];
194 }
195 row_avg[i]=(float)sum_row/n;
196 }
197 *all_avg=(float)sum/(n*n);
198 return row_avg;
199}
200
201static float**
202compute_Bij(DistType** Dij, int n)
203{
204 int i,j;
205 float *storage = gv_calloc(n * n, sizeof(float));
206 float **Bij = gv_calloc(n, sizeof(float *));
207 float* row_avg;
208 float all_avg;
209
210 for (i=0; i<n; i++)
211 Bij[i] = storage+i*n;
212
213 row_avg = compute_avgs(Dij, n, &all_avg);
214 for (i=0; i<n; i++) {
215 for (j=0; j<=i; j++) {
216 Bij[i][j]=-(float)Dij[i][j]*Dij[i][j]+row_avg[i]+row_avg[j]-all_avg;
217 Bij[j][i]=Bij[i][j];
218 }
219 }
220 free (row_avg);
221 return Bij;
222}
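compute_Bij() double-centers the matrix of squared distances: Bij[i][j] = -Dij^2 + row_avg[i] + row_avg[j] - all_avg, where the averages are taken over squared distances. This is the double-centering of classical MDS, up to the conventional factor of 1/2, which is why the leading eigenvectors of B (extracted and scaled in CMDS_orthog below) recover coordinates. A small standalone sketch for three points on a line:

#include <stdio.h>

#define N 3

/* Illustrative only: the double-centering used by compute_Bij, written with
 * plain doubles.  B[i][j] = -d_ij^2 + rowmean_i + rowmean_j - grandmean. */
int main(void) {
    const double D[N][N] = {{0, 1, 2}, {1, 0, 1}, {2, 1, 0}};  /* points at 0, 1, 2 */
    double row[N] = {0}, all = 0.0, B[N][N];
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            row[i] += D[i][j] * D[i][j];
            all += D[i][j] * D[i][j];
        }
        row[i] /= N;
    }
    all /= (double)N * N;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            B[i][j] = -D[i][j] * D[i][j] + row[i] + row[j] - all;
    for (int i = 0; i < N; i++)
        printf("%6.2f %6.2f %6.2f\n", B[i][0], B[i][1], B[i][2]);
    return 0;
}

For this input the program prints B = {{2, 0, -2}, {0, 0, 0}, {-2, 0, 2}}, which is exactly twice the Gram matrix of the centered coordinates (-1, 0, 1); the missing factor of 1/2 only changes the overall scale of the recovered layout.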
223
224static void
225CMDS_orthog(int n, int dim, double** eigs, double tol,
226 double* orthog, DistType** Dij)
227{
228 int i,j;
229 float** Bij = compute_Bij(Dij, n);
230 double *evals = gv_calloc(dim, sizeof(double));
231
232 assert(orthog != NULL);
233 double *orthog_aux = gv_calloc(n, sizeof(double));
234 for (i=0; i<n; i++) {
235 orthog_aux[i]=orthog[i];
236 }
237 standardize(orthog_aux,n);
238 power_iteration_orthog(Bij, n, dim, eigs, evals, orthog_aux, tol);
239
240 for (i=0; i<dim; i++) {
241 for (j=0; j<n; j++) {
242 eigs[i][j]*=sqrt(fabs(evals[i]));
243 }
244 }
245 free (Bij[0]); free (Bij);
246 free (evals); free (orthog_aux);
247}
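CMDS_orthog() copies the fixed axis, standardizes it, runs the orthogonalized power iteration on B, and then turns each eigenvector into a coordinate axis by scaling it with sqrt(|eigenvalue|). Continuing the three-point example above (and ignoring the orthogonalization, which plays no role there): the dominant eigenpair of that B is lambda = 4 with u = (-1, 0, 1)/sqrt(2), and sqrt(lambda)*u = (-sqrt(2), 0, sqrt(2)) reproduces the original line up to translation and the uniform scale noted above. A short standalone check:

#include <math.h>
#include <stdio.h>

#define N 3

/* Illustrative only: verify the eigenpair and print the scaled coordinates
 * sqrt(lambda) * u, which is what CMDS_orthog stores back into eigs. */
int main(void) {
    const double B[N][N] = {{2, 0, -2}, {0, 0, 0}, {-2, 0, 2}};
    const double u[N] = {-1.0 / sqrt(2.0), 0.0, 1.0 / sqrt(2.0)};
    const double lambda = 4.0;
    for (int i = 0; i < N; i++) {
        double Bu_i = 0.0;
        for (int j = 0; j < N; j++)
            Bu_i += B[i][j] * u[j];
        printf("B*u = %8.4f   lambda*u = %8.4f   coordinate = %8.4f\n",
               Bu_i, lambda * u[i], sqrt(lambda) * u[i]);
    }
    return 0;
}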
248
249#define SCALE_FACTOR 256
250
251int IMDS_given_dim(vtx_data* graph, int n, double* given_coords,
252 double* new_coords, double conj_tol)
253{
254 int iterations2;
255 int i,j, rv = 0;
256 DistType** Dij;
257 double* x = given_coords;
258 double uniLength;
259 double* y = new_coords;
260 float **lap = gv_calloc(n, sizeof(float *));
261 float degree;
262 double pos_i;
263 double *balance = gv_calloc(n, sizeof(double));
264 double b;
265 bool converged;
266
267 Dij = compute_apsp(graph, n);
268
 269 /* scale up the distances so that the 'sqrt' operation later on
 270 * keeps precision (the distances are stored as integers)
 271 */
272 for (i=0; i<n; i++)
273 for (j=0; j<n; j++)
274 Dij[i][j]*=SCALE_FACTOR;
275
276 assert(x!=NULL);
277 {
278 double sum1, sum2;
279
280 for (sum1=sum2=0,i=1; i<n; i++) {
281 for (j=0; j<i; j++) {
282 sum1+=1.0/(Dij[i][j])*fabs(x[i]-x[j]);
283 sum2+=1.0/(Dij[i][j]*Dij[i][j])*fabs(x[i]-x[j])*fabs(x[i]-x[j]);
284 }
285 }
286 uniLength = isinf(sum2) ? 0 : sum1 / sum2;
287 for (i=0; i<n; i++)
288 x[i]*=uniLength;
289 }
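 /* Note: uniLength = sum1/sum2 is the uniform scale s that minimizes the
  * weighted stress  sum_{i<j} (Dij - s*|x[i]-x[j]|)^2 / Dij^2  over s, so the
  * given axis is made commensurable with the (scaled) graph distances before
  * the residual distances are computed below. */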
290
 291 /* smart initialization: */
292 CMDS_orthog(n, 1, &y, conj_tol, x, Dij);
293
294 /* Compute Laplacian: */
295 float *f_storage = gv_calloc(n * n, sizeof(float));
296
297 for (i=0; i<n; i++) {
298 lap[i]=f_storage+i*n;
299 degree=0;
300 for (j=0; j<n; j++) {
301 if (j==i)
302 continue;
303 degree-=lap[i][j]=-1.0f/((float)Dij[i][j]*(float)Dij[i][j]); // w_{ij}
304
305 }
306 lap[i][i]=degree;
307 }
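 /* lap is the weighted Laplacian over all vertex pairs with weights
  * w_ij = 1/Dij^2: the off-diagonal entries are -w_ij and each diagonal
  * entry is the sum of its row's weights, so every row sums to zero. */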
308
309
310 /* compute residual distances */
311 /* if (x!=NULL) */
312 {
313 double diff;
314 for (i=1; i<n; i++) {
315 pos_i=x[i];
316 for (j=0; j<i; j++) {
317 diff=(double)Dij[i][j]*(double)Dij[i][j]-(pos_i-x[j])*(pos_i-x[j]);
318 Dij[i][j]=Dij[j][i]=diff>0 ? (DistType)sqrt(diff) : 0;
319 }
320 }
321 }
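 /* Dij now holds the residual distances sqrt(Dij^2 - (x_i - x_j)^2), i.e.
  * what remains of each target distance once the separation already provided
  * by the fixed axis x is accounted for (clamped at zero when the fixed axis
  * alone already over-separates the pair). */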
322
323 /* Compute the balance vector: */
324 for (i=0; i<n; i++) {
325 pos_i=y[i];
326 balance[i]=0;
327 for (j=0; j<n; j++) {
328 if (j==i)
329 continue;
330 if (pos_i>=y[j]) {
331 balance[i]+=Dij[i][j]*(-lap[i][j]); // w_{ij}*delta_{ij}
332 }
333 else {
334 balance[i]-=Dij[i][j]*(-lap[i][j]); // w_{ij}*delta_{ij}
335 }
336 }
337 }
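 /* balance[i] = sum_j w_ij * Dij * s_ij, with s_ij = +1 if y_i >= y_j and -1
  * otherwise: the right-hand side of the linear system lap*y = balance solved
  * below.  The signs depend on the current ordering of y, so the system is
  * re-solved until the right-hand side stops changing. */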
338
339 for (converged=false,iterations2=0; iterations2<200 && !converged; iterations2++) {
340 if (conjugate_gradient_f(lap, y, balance, n, conj_tol, n, true) < 0) {
341 rv = 1;
342 goto cleanup;
343 }
344 converged = true;
345 for (i=0; i<n; i++) {
346 pos_i=y[i];
347 b=0;
348 for (j=0; j<n; j++) {
349 if (j==i)
350 continue;
351 if (pos_i>=y[j]) {
352 b+=Dij[i][j]*(-lap[i][j]);
353
354 }
355 else {
356 b-=Dij[i][j]*(-lap[i][j]);
357
358 }
359 }
360 if ((b != balance[i]) && (fabs(1-b/balance[i])>1e-5)) {
361 converged = false;
362 balance[i]=b;
363 }
364 }
365 }
366
367 for (i = 0; !(fabs(uniLength) < DBL_EPSILON) && i < n; i++) {
368 x[i] /= uniLength;
369 y[i] /= uniLength;
370 }
371
372cleanup:
373
374 free (Dij[0]); free (Dij);
375 free (lap[0]); free (lap);
376 free (balance);
377 return rv;
378}
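The loop above is the heart of IMDS_given_dim: solve lap*y = balance, recompute balance from the new ordering of y, and repeat until the right-hand side stabilizes (or 200 iterations pass). The following standalone sketch reproduces that loop for a toy 3-node path with pairwise distances 1, 1 and 2, pinning y[0] = 0 and using a tiny Gaussian elimination in place of Graphviz's conjugate_gradient_f; it ignores the SCALE_FACTOR and uniLength bookkeeping, and solve_pinned is our name:

#include <math.h>
#include <stdio.h>

#define N 3

/* Illustrative only: solve lap*y = b with y[0] pinned to 0, via Gaussian
 * elimination on the remaining (N-1)x(N-1) system.  Good enough for a toy
 * example; Graphviz solves the full singular system with conjugate_gradient_f. */
static void solve_pinned(const double lap[N][N], const double b[N], double y[N]) {
    double M[N - 1][N];                      /* augmented (N-1)x(N-1) system */
    for (int i = 1; i < N; i++) {
        for (int j = 1; j < N; j++)
            M[i - 1][j - 1] = lap[i][j];
        M[i - 1][N - 1] = b[i];
    }
    for (int k = 0; k < N - 1; k++)          /* forward elimination */
        for (int i = k + 1; i < N - 1; i++) {
            double f = M[i][k] / M[k][k];
            for (int j = k; j < N; j++)
                M[i][j] -= f * M[k][j];
        }
    y[0] = 0.0;                              /* back substitution */
    for (int i = N - 2; i >= 0; i--) {
        double s = M[i][N - 1];
        for (int j = i + 1; j < N - 1; j++)
            s -= M[i][j] * y[j + 1];
        y[i + 1] = s / M[i][i];
    }
}

int main(void) {
    const double d[N][N] = {{0, 1, 2}, {1, 0, 1}, {2, 1, 0}};
    double lap[N][N] = {{0}}, y[N] = {0.0, 0.1, 0.2}, b[N], prev_b[N] = {0};
    /* weighted Laplacian, w_ij = 1/d_ij^2 */
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            if (j != i) {
                lap[i][j] = -1.0 / (d[i][j] * d[i][j]);
                lap[i][i] -= lap[i][j];
            }
    for (int iter = 0; iter < 200; iter++) {
        /* b[i] = sum_j w_ij * d_ij * (+1 if y_i >= y_j, -1 otherwise) */
        double change = 0.0;
        for (int i = 0; i < N; i++) {
            b[i] = 0.0;
            for (int j = 0; j < N; j++)
                if (j != i)
                    b[i] += (y[i] >= y[j] ? 1.0 : -1.0) * d[i][j] * (-lap[i][j]);
            change += fabs(b[i] - prev_b[i]);
            prev_b[i] = b[i];
        }
        if (iter > 0 && change < 1e-12)
            break;                           /* right-hand side is stable */
        solve_pinned(lap, b, y);
    }
    printf("y = %g %g %g\n", y[0], y[1], y[2]);  /* prints y = 0 1 2 */
    return 0;
}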
379
380#endif /* DIGCOLA */
381