Graphviz 14.1.3~dev.20260207.0611
smart_ini_x.c
/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * https://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: Details at https://graphviz.org
 *************************************************************************/

#include "config.h"

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <neatogen/digcola.h>
#include <util/alloc.h>
#ifdef DIGCOLA
#include <neatogen/kkutils.h>
#include <neatogen/matrix_ops.h>
#include <neatogen/conjgrad.h>
#include <stdbool.h>

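/* Center the vector to zero mean and then, unless its length is numerically
 * zero, scale it to unit Euclidean norm.  The projections performed by the
 * routines below assume `orthog` has been through this standardization.
 */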
static void
standardize(double* orthog, int nvtxs)
{
    double len, avg = 0;
    int i;
    for (i=0; i<nvtxs; i++)
        avg+=orthog[i];
    avg/=nvtxs;

    /* centralize: */
    for (i=0; i<nvtxs; i++)
        orthog[i]-=avg;

    /* normalize: */
    len = norm(orthog, nvtxs-1);

    // if we have a degenerate length, do not attempt to scale by it
    if (fabs(len) < DBL_EPSILON) {
        return;
    }

    vectors_scalar_mult(nvtxs, orthog, 1.0 / len, orthog);
}

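/* Compute result = mat * vec for a dim1 x dim2 matrix `mat`, then remove from
 * the result its component along `orthog` (assumed to be a unit vector), so
 * the product stays in the subspace orthogonal to `orthog`.
 */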
static void
mat_mult_vec_orthog(float** mat, int dim1, int dim2, double* vec,
                    double* result, double* orthog)
{
    /* computes mat*vec, where mat is a dim1*dim2 matrix */
    int i,j;
    double sum;

    for (i=0; i<dim1; i++) {
        sum=0;
        for (j=0; j<dim2; j++) {
            sum += mat[i][j]*vec[j];
        }
        result[i]=sum;
    }
    assert(orthog != NULL);
    double alpha = -vectors_inner_product(dim1, result, orthog);
    scadd(result, dim1 - 1, alpha, orthog);
}

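/* Extract the `neigs` dominant eigenpairs of square_mat restricted to the
 * subspace orthogonal to `orthog`.  Each candidate vector starts from a
 * random guess, is deflated against `orthog` and the eigenvectors already
 * found, and is iterated until successive directions agree; vectors that
 * collapse into the null space get eigenvalue 0.  Finally the eigenpairs are
 * sorted by decreasing eigenvalue.
 */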
static void
power_iteration_orthog(float** square_mat, int n, int neigs,
    double** eigs, double* evals, double* orthog, double p_iteration_threshold)
{
    // Power-Iteration with
    // (I - orthog × orthogᵀ) × square_mat × (I - orthog × orthogᵀ)

    int i,j;
    double *tmp_vec = gv_calloc(n, sizeof(double));
    double *last_vec = gv_calloc(n, sizeof(double));
    double *curr_vector;
    double len;
    double angle;
    double alpha;
    int largest_index;
    double largest_eval;
    // convergence tolerance for the power-iteration loop below
    double tol = 1 - p_iteration_threshold;

    if (neigs>=n) {
        neigs=n;
    }

    for (i=0; i<neigs; i++) {
        curr_vector = eigs[i];
        /* guess the i-th eigenvector */
choose:
        for (j=0; j<n; j++) {
            curr_vector[j] = rand()%100;
        }

        assert(orthog != NULL);
        alpha = -vectors_inner_product(n, orthog, curr_vector);
        scadd(curr_vector, n - 1, alpha, orthog);
        // orthogonalize against higher eigenvectors
        for (j=0; j<i; j++) {
            alpha = -vectors_inner_product(n, eigs[j], curr_vector);
            scadd(curr_vector, n-1, alpha, eigs[j]);
        }
        len = norm(curr_vector, n-1);
        if (len<1e-10) {
            /* We have chosen a vector collinear with previous ones */
            goto choose;
        }
        vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
        do {
            copy_vector(n, curr_vector, last_vec);

            mat_mult_vec_orthog(square_mat,n,n,curr_vector,tmp_vec,orthog);
            copy_vector(n, tmp_vec, curr_vector);

            /* orthogonalize against higher eigenvectors */
            for (j=0; j<i; j++) {
                alpha = -vectors_inner_product(n, eigs[j], curr_vector);
                scadd(curr_vector, n-1, alpha, eigs[j]);
            }
            len = norm(curr_vector, n-1);
            if (len<1e-10) {
                /* We have reached the null space (eigenvectors associated
                 * with eigenvalue 0)
                 */
                goto exit;
            }

            vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
            angle = vectors_inner_product(n, curr_vector, last_vec);
        } while (fabs(angle)<tol);
        /* the Rayleigh quotient (up to errors due to orthogonalization):
         * u*(A*u)/||A*u|| * ||A*u||, where u=last_vec, and ||u||=1
         */
        evals[i]=angle*len;
    }
exit:
    for (; i<neigs; i++) {
        /* compute the remaining (smallest) eigenvectors, which are probably
         * associated with eigenvalue 0 and for which power-iteration is
         * dangerous
         */
        curr_vector = eigs[i];
        /* guess the i-th eigenvector */
        for (j=0; j<n; j++)
            curr_vector[j] = rand()%100;
        /* orthogonalize against higher eigenvectors */
        for (j=0; j<i; j++) {
            alpha = -vectors_inner_product(n, eigs[j], curr_vector);
            scadd(curr_vector, n-1, alpha, eigs[j]);
        }
        len = norm(curr_vector, n-1);
        vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
        evals[i]=0;
    }

    /* sort vectors by their evals, for overcoming possible mis-convergence: */
    for (i=0; i<neigs-1; i++) {
        largest_index=i;
        largest_eval=evals[largest_index];
        for (j=i+1; j<neigs; j++) {
            if (largest_eval<evals[j]) {
                largest_index=j;
                largest_eval=evals[largest_index];
            }
        }
        if (largest_index!=i) { // exchange eigenvectors:
            copy_vector(n, eigs[i], tmp_vec);
            copy_vector(n, eigs[largest_index], eigs[i]);
            copy_vector(n, tmp_vec, eigs[largest_index]);

            evals[largest_index]=evals[i];
            evals[i]=largest_eval;
        }
    }

    free (tmp_vec); free (last_vec);
}

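/* For every row of the distance matrix, compute the average of the squared
 * distances in that row; also store the average of all squared distances in
 * *all_avg.  These are the centering terms used by compute_Bij() below.
 */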
static float*
compute_avgs(DistType** Dij, int n, float* all_avg)
{
    float* row_avg = gv_calloc(n, sizeof(float));
    int i,j;
    double sum=0, sum_row;

    for (i=0; i<n; i++) {
        sum_row=0;
        for (j=0; j<n; j++) {
            sum+=(double)Dij[i][j]*(double)Dij[i][j];
            sum_row+=(double)Dij[i][j]*(double)Dij[i][j];
        }
        row_avg[i]=(float)sum_row/n;
    }
    *all_avg=(float)sum/(n*n);
    return row_avg;
}

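/* Build the double-centered matrix of classical MDS,
 *     Bij[i][j] = -Dij[i][j]^2 + row_avg[i] + row_avg[j] - all_avg,
 * stored symmetrically in one contiguous n*n block.  The conventional -1/2
 * factor is omitted; it only rescales the eigenvalues, and hence the layout,
 * uniformly.
 */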
static float**
compute_Bij(DistType** Dij, int n)
{
    int i,j;
    float *storage = gv_calloc(n * n, sizeof(float));
    float **Bij = gv_calloc(n, sizeof(float *));
    float* row_avg;
    float all_avg;

    for (i=0; i<n; i++)
        Bij[i] = storage+i*n;

    row_avg = compute_avgs(Dij, n, &all_avg);
    for (i=0; i<n; i++) {
        for (j=0; j<=i; j++) {
            Bij[i][j]=-(float)Dij[i][j]*Dij[i][j]+row_avg[i]+row_avg[j]-all_avg;
            Bij[j][i]=Bij[i][j];
        }
    }
    free (row_avg);
    return Bij;
}

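/* Classical MDS restricted to the directions orthogonal to the given
 * coordinates: double-center the squared distances, extract `dim` dominant
 * eigenvectors orthogonal to a standardized copy of `orthog`, and scale each
 * one by sqrt(|eigenvalue|) to obtain initial coordinates.
 */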
static void
CMDS_orthog(int n, int dim, double** eigs, double tol,
            double* orthog, DistType** Dij)
{
    int i,j;
    float** Bij = compute_Bij(Dij, n);
    double *evals = gv_calloc(dim, sizeof(double));

    assert(orthog != NULL);
    double *orthog_aux = gv_calloc(n, sizeof(double));
    for (i=0; i<n; i++) {
        orthog_aux[i]=orthog[i];
    }
    standardize(orthog_aux,n);
    power_iteration_orthog(Bij, n, dim, eigs, evals, orthog_aux, tol);

    for (i=0; i<dim; i++) {
        for (j=0; j<n; j++) {
            eigs[i][j]*=sqrt(fabs(evals[i]));
        }
    }
    free (Bij[0]); free (Bij);
    free (evals); free (orthog_aux);
}

#define SCALE_FACTOR 256

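/* Given one fixed axis of coordinates (given_coords, typically the hierarchy
 * axis computed elsewhere in DIGCOLA), compute a complementary axis in
 * new_coords: scale the given axis to best fit the graph distances, take a
 * classical-MDS guess orthogonal to it, and refine that guess by repeatedly
 * solving lap * y = balance with conjugate gradient until the balance vector
 * stabilizes.  Returns 0 on success, or 1 if the solver fails.
 */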
int IMDS_given_dim(vtx_data* graph, int n, double* given_coords,
                   double* new_coords, double conj_tol)
{
    int iterations2;
    int i,j, rv = 0;
    DistType** Dij;
    double* x = given_coords;
    double uniLength;
    double* y = new_coords;
    float **lap = gv_calloc(n, sizeof(float *));
    float degree;
    double pos_i;
    double *balance = gv_calloc(n, sizeof(double));
    double b;
    bool converged;

    Dij = compute_apsp(graph, n);

    /* scale up the distances to enable a 'sqrt' operation later
     * (in case distances are integers)
     */
    for (i=0; i<n; i++)
        for (j=0; j<n; j++)
            Dij[i][j]*=SCALE_FACTOR;

    assert(x!=NULL);
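    /* Scale the given axis by the factor s that minimizes the weighted stress
     *     sum_{i<j} (s*|x_i - x_j| - Dij)^2 / Dij^2,
     * namely s = sum1/sum2 with sum1 and sum2 as accumulated below.
     */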
    {
        double sum1, sum2;

        for (sum1=sum2=0,i=1; i<n; i++) {
            for (j=0; j<i; j++) {
                sum1+=1.0/(Dij[i][j])*fabs(x[i]-x[j]);
                sum2+=1.0/(Dij[i][j]*Dij[i][j])*fabs(x[i]-x[j])*fabs(x[i]-x[j]);
            }
        }
        uniLength = isinf(sum2) ? 0 : sum1 / sum2;
        for (i=0; i<n; i++)
            x[i]*=uniLength;
    }

    /* smart ini: */
    CMDS_orthog(n, 1, &y, conj_tol, x, Dij);

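    /* Build the weighted Laplacian of the stress function with weights
     * w_ij = 1/Dij^2: off-diagonal entries are -w_ij, and each diagonal entry
     * holds the sum of the weights in its row.
     */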
    /* Compute Laplacian: */
    float *f_storage = gv_calloc(n * n, sizeof(float));

    for (i=0; i<n; i++) {
        lap[i]=f_storage+i*n;
        degree=0;
        for (j=0; j<n; j++) {
            if (j==i)
                continue;
            degree-=lap[i][j]=-1.0f/((float)Dij[i][j]*(float)Dij[i][j]); // w_{ij}
        }
        lap[i][i]=degree;
    }

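    /* Replace each distance by the residual still to be realized on the new
     * axis: Dij' = sqrt(Dij^2 - (x_i - x_j)^2), clamped to 0 when the given
     * axis already accounts for the whole distance.
     */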
    /* compute residual distances */
    /* if (x!=NULL) */
    {
        double diff;
        for (i=1; i<n; i++) {
            pos_i=x[i];
            for (j=0; j<i; j++) {
                diff=(double)Dij[i][j]*(double)Dij[i][j]-(pos_i-x[j])*(pos_i-x[j]);
                Dij[i][j]=Dij[j][i]=diff>0 ? (DistType)sqrt(diff) : 0;
            }
        }
    }

    /* Compute the balance vector:
     *     balance[i] = sum_j w_ij * Dij[i][j] * sign(y_i - y_j)
     */
    for (i=0; i<n; i++) {
        pos_i=y[i];
        balance[i]=0;
        for (j=0; j<n; j++) {
            if (j==i)
                continue;
            if (pos_i>=y[j]) {
                balance[i]+=Dij[i][j]*(-lap[i][j]); // w_{ij}*delta_{ij}
            }
            else {
                balance[i]-=Dij[i][j]*(-lap[i][j]); // w_{ij}*delta_{ij}
            }
        }
    }

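    /* Majorization-style iteration: repeatedly solve lap * y = balance by
     * conjugate gradient and recompute the balance vector from the new
     * ordering of y; stop once no entry changes by more than a relative 1e-5,
     * or after 200 rounds.
     */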
    for (converged=false,iterations2=0; iterations2<200 && !converged; iterations2++) {
        if (conjugate_gradient_f(lap, y, balance, n, conj_tol, n, true) < 0) {
            rv = 1;
            goto cleanup;
        }
        converged = true;
        for (i=0; i<n; i++) {
            pos_i=y[i];
            b=0;
            for (j=0; j<n; j++) {
                if (j==i)
                    continue;
                if (pos_i>=y[j]) {
                    b+=Dij[i][j]*(-lap[i][j]);
                }
                else {
                    b-=Dij[i][j]*(-lap[i][j]);
                }
            }
            if ((b != balance[i]) && (fabs(1-b/balance[i])>1e-5)) {
                converged = false;
                balance[i]=b;
            }
        }
    }

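    /* Undo the uniform scaling applied to the given axis and bring the new
     * axis onto the same scale (skipped if uniLength degenerated to 0).
     */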
    for (i = 0; !(fabs(uniLength) < DBL_EPSILON) && i < n; i++) {
        x[i] /= uniLength;
        y[i] /= uniLength;
    }

cleanup:

    free (Dij[0]); free (Dij);
    free (lap[0]); free (lap);
    free (balance);
    return rv;
}

#endif /* DIGCOLA */