/* zvecop.c -- complex vector operations for the Meschach library */
  1. /**************************************************************************
  2. **
  3. ** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved.
  4. **
  5. ** Meschach Library
  6. **
  7. ** This Meschach Library is provided "as is" without any express
  8. ** or implied warranty of any kind with respect to this software.
  9. ** In particular the authors shall not be liable for any direct,
  10. ** indirect, special, incidental or consequential damages arising
  11. ** in any way from use of the software.
  12. **
  13. ** Everyone is granted permission to copy, modify and redistribute this
  14. ** Meschach Library, provided:
  15. ** 1. All copies contain this copyright notice.
  16. ** 2. All modified copies shall carry a notice stating who
  17. ** made the last modification and the date of such modification.
  18. ** 3. No charge is made for this software or works derived from it.
  19. ** This clause shall not be construed as constraining other software
  20. ** distributed on the same medium as this software, nor is a
  21. ** distribution fee considered a charge.
  22. **
  23. ***************************************************************************/
  24. #include <stdio.h>
  25. #include "matrix.h"
  26. #include "zmatrix.h"
  27. static char rcsid[] = "$Id: zvecop.c,v 1.3 1997/10/07 16:13:54 stewart Exp stewart $";
  28. /* _zin_prod -- inner product of two vectors from i0 downwards
  29. -- flag != 0 means compute sum_i a[i]*.b[i];
  30. -- flag == 0 means compute sum_i a[i].b[i] */
  31. #ifndef ANSI_C
  32. complex _zin_prod(a,b,i0,flag)
  33. ZVEC *a,*b;
  34. unsigned int i0, flag;
  35. #else
  36. complex _zin_prod(const ZVEC *a, const ZVEC *b,
  37. unsigned int i0, unsigned int flag)
  38. #endif
  39. {
  40. unsigned int limit;
  41. if ( a==ZVNULL || b==ZVNULL )
  42. error(E_NULL,"_zin_prod");
  43. limit = min(a->dim,b->dim);
  44. if ( i0 > limit )
  45. error(E_BOUNDS,"_zin_prod");
  46. return __zip__(&(a->ve[i0]),&(b->ve[i0]),(int)(limit-i0),flag);
  47. }
  48. /* zv_mlt -- scalar-vector multiply -- may be in-situ */
  49. #ifndef ANSI_C
  50. ZVEC *zv_mlt(scalar,vector,out)
  51. complex scalar;
  52. ZVEC *vector,*out;
  53. #else
  54. ZVEC *zv_mlt(complex scalar, const ZVEC *vector, ZVEC *out)
  55. #endif
  56. {
  57. /* unsigned int dim, i; */
  58. /* complex *out_ve, *vec_ve; */
  59. if ( vector==ZVNULL )
  60. error(E_NULL,"zv_mlt");
  61. if ( out==ZVNULL || out->dim != vector->dim )
  62. out = zv_resize(out,vector->dim);
  63. if ( scalar.re == 0.0 && scalar.im == 0.0 )
  64. return zv_zero(out);
  65. if ( scalar.re == 1.0 && scalar.im == 0.0 )
  66. return zv_copy(vector,out);
  67. __zmlt__(vector->ve,scalar,out->ve,(int)(vector->dim));
  68. return (out);
  69. }
  70. /* zv_add -- vector addition -- may be in-situ */
  71. #ifndef ANSI_C
  72. ZVEC *zv_add(vec1,vec2,out)
  73. ZVEC *vec1,*vec2,*out;
  74. #else
  75. ZVEC *zv_add(const ZVEC *vec1, const ZVEC *vec2, ZVEC *out)
  76. #endif
  77. {
  78. unsigned int dim;
  79. if ( vec1==ZVNULL || vec2==ZVNULL )
  80. error(E_NULL,"zv_add");
  81. if ( vec1->dim != vec2->dim )
  82. error(E_SIZES,"zv_add");
  83. if ( out==ZVNULL || out->dim != vec1->dim )
  84. out = zv_resize(out,vec1->dim);
  85. dim = vec1->dim;
  86. __zadd__(vec1->ve,vec2->ve,out->ve,(int)dim);
  87. return (out);
  88. }
  89. /* zv_mltadd -- scalar/vector multiplication and addition
  90. -- out = v1 + scale.v2 */
  91. #ifndef ANSI_C
  92. ZVEC *zv_mltadd(v1,v2,scale,out)
  93. ZVEC *v1,*v2,*out;
  94. complex scale;
  95. #else
  96. ZVEC *zv_mltadd(const ZVEC *v1, const ZVEC *v2, complex scale, ZVEC *out)
  97. #endif
  98. {
  99. /* register unsigned int dim, i; */
  100. /* complex *out_ve, *v1_ve, *v2_ve; */
  101. if ( v1==ZVNULL || v2==ZVNULL )
  102. error(E_NULL,"zv_mltadd");
  103. if ( v1->dim != v2->dim )
  104. error(E_SIZES,"zv_mltadd");
  105. if ( scale.re == 0.0 && scale.im == 0.0 )
  106. return zv_copy(v1,out);
  107. if ( scale.re == 1.0 && scale.im == 0.0 )
  108. return zv_add(v1,v2,out);
  109. if ( v2 != out )
  110. {
  111. tracecatch(out = zv_copy(v1,out),"zv_mltadd");
  112. /* dim = v1->dim; */
  113. __zmltadd__(out->ve,v2->ve,scale,(int)(v1->dim),0);
  114. }
  115. else
  116. {
  117. tracecatch(out = zv_mlt(scale,v2,out),"zv_mltadd");
  118. out = zv_add(v1,out,out);
  119. }
  120. return (out);
  121. }
  122. /* zv_sub -- vector subtraction -- may be in-situ */
  123. #ifndef ANSI_C
  124. ZVEC *zv_sub(vec1,vec2,out)
  125. ZVEC *vec1,*vec2,*out;
  126. #else
  127. ZVEC *zv_sub(const ZVEC *vec1, const ZVEC *vec2, ZVEC *out)
  128. #endif
  129. {
  130. /* unsigned int i, dim; */
  131. /* complex *out_ve, *vec1_ve, *vec2_ve; */
  132. if ( vec1==ZVNULL || vec2==ZVNULL )
  133. error(E_NULL,"zv_sub");
  134. if ( vec1->dim != vec2->dim )
  135. error(E_SIZES,"zv_sub");
  136. if ( out==ZVNULL || out->dim != vec1->dim )
  137. out = zv_resize(out,vec1->dim);
  138. __zsub__(vec1->ve,vec2->ve,out->ve,(int)(vec1->dim));
  139. return (out);
  140. }
  141. /* zv_map -- maps function f over components of x: out[i] = f(x[i])
  142. -- _zv_map sets out[i] = f(x[i],params) */
  143. #ifndef ANSI_C
  144. ZVEC *zv_map(f,x,out)
  145. #ifdef PROTOYPES_IN_STRUCT
  146. complex (*f)(complex);
  147. #else
  148. complex (*f)();
  149. #endif
  150. ZVEC *x, *out;
  151. #else
  152. ZVEC *zv_map(complex (*f)(complex), const ZVEC *x, ZVEC *out)
  153. #endif
  154. {
  155. complex *x_ve, *out_ve;
  156. int i, dim;
  157. if ( ! x || ! f )
  158. error(E_NULL,"zv_map");
  159. if ( ! out || out->dim != x->dim )
  160. out = zv_resize(out,x->dim);
  161. dim = x->dim; x_ve = x->ve; out_ve = out->ve;
  162. for ( i = 0; i < dim; i++ )
  163. out_ve[i] = (*f)(x_ve[i]);
  164. return out;
  165. }
  166. #ifndef ANSI_C
  167. ZVEC *_zv_map(f,params,x,out)
  168. #ifdef PROTOTYPES_IN_STRUCT
  169. complex (*f)(void *,complex);
  170. #else
  171. complex (*f)();
  172. #endif
  173. ZVEC *x, *out;
  174. void *params;
  175. #else
  176. ZVEC *_zv_map(complex (*f)(void *,complex), void *params,
  177. const ZVEC *x, ZVEC *out)
  178. #endif
  179. {
  180. complex *x_ve, *out_ve;
  181. int i, dim;
  182. if ( ! x || ! f )
  183. error(E_NULL,"_zv_map");
  184. if ( ! out || out->dim != x->dim )
  185. out = zv_resize(out,x->dim);
  186. dim = x->dim; x_ve = x->ve; out_ve = out->ve;
  187. for ( i = 0; i < dim; i++ )
  188. out_ve[i] = (*f)(params,x_ve[i]);
  189. return out;
  190. }
  191. /* zv_lincomb -- returns sum_i a[i].v[i], a[i] real, v[i] vectors */
  192. #ifndef ANSI_C
  193. ZVEC *zv_lincomb(n,v,a,out)
  194. int n; /* number of a's and v's */
  195. complex a[];
  196. ZVEC *v[], *out;
  197. #else
  198. ZVEC *zv_lincomb(int n, const ZVEC *v[], const complex a[], ZVEC *out)
  199. #endif
  200. {
  201. int i;
  202. if ( ! a || ! v )
  203. error(E_NULL,"zv_lincomb");
  204. if ( n <= 0 )
  205. return ZVNULL;
  206. for ( i = 1; i < n; i++ )
  207. if ( out == v[i] )
  208. error(E_INSITU,"zv_lincomb");
  209. out = zv_mlt(a[0],v[0],out);
  210. for ( i = 1; i < n; i++ )
  211. {
  212. if ( ! v[i] )
  213. error(E_NULL,"zv_lincomb");
  214. if ( v[i]->dim != out->dim )
  215. error(E_SIZES,"zv_lincomb");
  216. out = zv_mltadd(out,v[i],a[i],out);
  217. }
  218. return out;
  219. }
  220. #ifdef ANSI_C
  221. /* zv_linlist -- linear combinations taken from a list of arguments;
  222. calling:
  223. zv_linlist(out,v1,a1,v2,a2,...,vn,an,NULL);
  224. where vi are vectors (ZVEC *) and ai are numbers (complex)
  225. */
  226. ZVEC *zv_linlist(ZVEC *out,ZVEC *v1,complex a1,...)
  227. {
  228. va_list ap;
  229. ZVEC *par;
  230. complex a_par;
  231. if ( ! v1 )
  232. return ZVNULL;
  233. va_start(ap, a1);
  234. out = zv_mlt(a1,v1,out);
  235. while (par = va_arg(ap,ZVEC *)) { /* NULL ends the list*/
  236. a_par = va_arg(ap,complex);
  237. if (a_par.re == 0.0 && a_par.im == 0.0) continue;
  238. if ( out == par )
  239. error(E_INSITU,"zv_linlist");
  240. if ( out->dim != par->dim )
  241. error(E_SIZES,"zv_linlist");
  242. if (a_par.re == 1.0 && a_par.im == 0.0)
  243. out = zv_add(out,par,out);
  244. else if (a_par.re == -1.0 && a_par.im == 0.0)
  245. out = zv_sub(out,par,out);
  246. else
  247. out = zv_mltadd(out,par,a_par,out);
  248. }
  249. va_end(ap);
  250. return out;
  251. }
  252. #elif VARARGS
  253. /* zv_linlist -- linear combinations taken from a list of arguments;
  254. calling:
  255. zv_linlist(out,v1,a1,v2,a2,...,vn,an,NULL);
  256. where vi are vectors (ZVEC *) and ai are numbers (complex)
  257. */
  258. ZVEC *zv_linlist(va_alist) va_dcl
  259. {
  260. va_list ap;
  261. ZVEC *par, *out;
  262. complex a_par;
  263. va_start(ap);
  264. out = va_arg(ap,ZVEC *);
  265. par = va_arg(ap,ZVEC *);
  266. if ( ! par ) {
  267. va_end(ap);
  268. return ZVNULL;
  269. }
  270. a_par = va_arg(ap,complex);
  271. out = zv_mlt(a_par,par,out);
  272. while (par = va_arg(ap,ZVEC *)) { /* NULL ends the list*/
  273. a_par = va_arg(ap,complex);
  274. if (a_par.re == 0.0 && a_par.im == 0.0) continue;
  275. if ( out == par )
  276. error(E_INSITU,"zv_linlist");
  277. if ( out->dim != par->dim )
  278. error(E_SIZES,"zv_linlist");
  279. if (a_par.re == 1.0 && a_par.im == 0.0)
  280. out = zv_add(out,par,out);
  281. else if (a_par.re == -1.0 && a_par.im == 0.0)
  282. out = zv_sub(out,par,out);
  283. else
  284. out = zv_mltadd(out,par,a_par,out);
  285. }
  286. va_end(ap);
  287. return out;
  288. }
  289. #endif
  290. /* zv_star -- computes componentwise (Hadamard) product of x1 and x2
  291. -- result out is returned */
  292. #ifndef ANSI_C
  293. ZVEC *zv_star(x1, x2, out)
  294. ZVEC *x1, *x2, *out;
  295. #else
  296. ZVEC *zv_star(const ZVEC *x1, const ZVEC *x2, ZVEC *out)
  297. #endif
  298. {
  299. int i;
  300. Real t_re, t_im;
  301. if ( ! x1 || ! x2 )
  302. error(E_NULL,"zv_star");
  303. if ( x1->dim != x2->dim )
  304. error(E_SIZES,"zv_star");
  305. out = zv_resize(out,x1->dim);
  306. for ( i = 0; i < x1->dim; i++ )
  307. {
  308. /* out->ve[i] = x1->ve[i] * x2->ve[i]; */
  309. t_re = x1->ve[i].re*x2->ve[i].re - x1->ve[i].im*x2->ve[i].im;
  310. t_im = x1->ve[i].re*x2->ve[i].im + x1->ve[i].im*x2->ve[i].re;
  311. out->ve[i].re = t_re;
  312. out->ve[i].im = t_im;
  313. }
  314. return out;
  315. }
  316. /* zv_slash -- computes componentwise ratio of x2 and x1
  317. -- out[i] = x2[i] / x1[i]
  318. -- if x1[i] == 0 for some i, then raise E_SING error
  319. -- result out is returned */
  320. #ifndef ANSI_C
  321. ZVEC *zv_slash(x1, x2, out)
  322. ZVEC *x1, *x2, *out;
  323. #else
  324. ZVEC *zv_slash(const ZVEC *x1, const ZVEC *x2, ZVEC *out)
  325. #endif
  326. {
  327. int i;
  328. Real r2, t_re, t_im;
  329. complex tmp;
  330. if ( ! x1 || ! x2 )
  331. error(E_NULL,"zv_slash");
  332. if ( x1->dim != x2->dim )
  333. error(E_SIZES,"zv_slash");
  334. out = zv_resize(out,x1->dim);
  335. for ( i = 0; i < x1->dim; i++ )
  336. {
  337. r2 = x1->ve[i].re*x1->ve[i].re + x1->ve[i].im*x1->ve[i].im;
  338. if ( r2 == 0.0 )
  339. error(E_SING,"zv_slash");
  340. tmp.re = x1->ve[i].re / r2;
  341. tmp.im = - x1->ve[i].im / r2;
  342. t_re = tmp.re*x2->ve[i].re - tmp.im*x2->ve[i].im;
  343. t_im = tmp.re*x2->ve[i].im + tmp.im*x2->ve[i].re;
  344. out->ve[i].re = t_re;
  345. out->ve[i].im = t_im;
  346. }
  347. return out;
  348. }
  349. /* zv_sum -- returns sum of entries of a vector */
  350. #ifndef ANSI_C
  351. complex zv_sum(x)
  352. ZVEC *x;
  353. #else
  354. complex zv_sum(const ZVEC *x)
  355. #endif
  356. {
  357. int i;
  358. complex sum;
  359. if ( ! x )
  360. error(E_NULL,"zv_sum");
  361. sum.re = sum.im = 0.0;
  362. for ( i = 0; i < x->dim; i++ )
  363. {
  364. sum.re += x->ve[i].re;
  365. sum.im += x->ve[i].im;
  366. }
  367. return sum;
  368. }
  369. /* px_zvec -- permute vector */
  370. #ifndef ANSI_C
  371. ZVEC *px_zvec(px,vector,out)
  372. PERM *px;
  373. ZVEC *vector,*out;
  374. #else
  375. ZVEC *px_zvec(PERM *px, ZVEC *vector, ZVEC *out)
  376. #endif
  377. {
  378. unsigned int old_i, i, size, start;
  379. complex tmp;
  380. if ( px==PNULL || vector==ZVNULL )
  381. error(E_NULL,"px_zvec");
  382. if ( px->size > vector->dim )
  383. error(E_SIZES,"px_zvec");
  384. if ( out==ZVNULL || out->dim < vector->dim )
  385. out = zv_resize(out,vector->dim);
  386. size = px->size;
  387. if ( size == 0 )
  388. return zv_copy(vector,out);
  389. if ( out != vector )
  390. {
  391. for ( i=0; i<size; i++ )
  392. if ( px->pe[i] >= size )
  393. error(E_BOUNDS,"px_vec");
  394. else
  395. out->ve[i] = vector->ve[px->pe[i]];
  396. }
  397. else
  398. { /* in situ algorithm */
  399. start = 0;
  400. while ( start < size )
  401. {
  402. old_i = start;
  403. i = px->pe[old_i];
  404. if ( i >= size )
  405. {
  406. start++;
  407. continue;
  408. }
  409. tmp = vector->ve[start];
  410. while ( TRUE )
  411. {
  412. vector->ve[old_i] = vector->ve[i];
  413. px->pe[old_i] = i+size;
  414. old_i = i;
  415. i = px->pe[old_i];
  416. if ( i >= size )
  417. break;
  418. if ( i == start )
  419. {
  420. vector->ve[old_i] = tmp;
  421. px->pe[old_i] = i+size;
  422. break;
  423. }
  424. }
  425. start++;
  426. }
  427. for ( i = 0; i < size; i++ )
  428. if ( px->pe[i] < size )
  429. error(E_BOUNDS,"px_vec");
  430. else
  431. px->pe[i] = px->pe[i]-size;
  432. }
  433. return out;
  434. }
  435. /* pxinv_zvec -- apply the inverse of px to x, returning the result in out
  436. -- may NOT be in situ */
  437. #ifndef ANSI_C
  438. ZVEC *pxinv_zvec(px,x,out)
  439. PERM *px;
  440. ZVEC *x, *out;
  441. #else
  442. ZVEC *pxinv_zvec(PERM *px, ZVEC *x, ZVEC *out)
  443. #endif
  444. {
  445. unsigned int i, size;
  446. if ( ! px || ! x )
  447. error(E_NULL,"pxinv_zvec");
  448. if ( px->size > x->dim )
  449. error(E_SIZES,"pxinv_zvec");
  450. if ( ! out || out->dim < x->dim )
  451. out = zv_resize(out,x->dim);
  452. size = px->size;
  453. if ( size == 0 )
  454. return zv_copy(x,out);
  455. if ( out != x )
  456. {
  457. for ( i=0; i<size; i++ )
  458. if ( px->pe[i] >= size )
  459. error(E_BOUNDS,"pxinv_vec");
  460. else
  461. out->ve[px->pe[i]] = x->ve[i];
  462. }
  463. else
  464. { /* in situ algorithm --- cheat's way out */
  465. px_inv(px,px);
  466. px_zvec(px,x,out);
  467. px_inv(px,px);
  468. }
  469. return out;
  470. }
  471. /* zv_rand -- randomise a complex vector; uniform in [0,1)+[0,1)*i */
  472. #ifndef ANSI_C
  473. ZVEC *zv_rand(x)
  474. ZVEC *x;
  475. #else
  476. ZVEC *zv_rand(ZVEC *x)
  477. #endif
  478. {
  479. if ( ! x )
  480. error(E_NULL,"zv_rand");
  481. mrandlist((Real *)(x->ve),2*x->dim);
  482. return x;
  483. }