/**************************************************************************
**
** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved.
**
**                           Meschach Library
**
** This Meschach Library is provided "as is" without any express
** or implied warranty of any kind with respect to this software.
** In particular the authors shall not be liable for any direct,
** indirect, special, incidental or consequential damages arising
** in any way from use of the software.
**
** Everyone is granted permission to copy, modify and redistribute this
** Meschach Library, provided:
**  1.  All copies contain this copyright notice.
**  2.  All modified copies shall carry a notice stating who
**      made the last modification and the date of such modification.
**  3.  No charge is made for this software or works derived from it.
**      This clause shall not be construed as constraining other software
**      distributed on the same medium as this software, nor is a
**      distribution fee considered a charge.
**
***************************************************************************/
/* itersym.c  17/09/93 */

/*
  ITERATIVE METHODS - implementation of several iterative methods;
  see also iter0.c
*/

#include <stdio.h>
#include <math.h>
#include "matrix.h"
#include "matrix2.h"
#include "sparse.h"
#include "iter.h"

static char rcsid[] = "$Id: itersym.c,v 1.2 1995/01/30 14:55:54 des Exp $";

#ifdef ANSI_C
VEC    *spCHsolve(const SPMAT *, VEC *, VEC *);
VEC    *trieig(VEC *, VEC *, MAT *);
#else
VEC    *spCHsolve();
VEC    *trieig();
#endif
/* iter_spcg -- a simple interface to iter_cg() which uses sparse matrix
   data structures
   -- assumes that LLT contains the Cholesky factorisation of the
      actual preconditioner;
   -- always use it as follows:
        x = iter_spcg(A,LLT,b,eps,x,limit,steps);
      or
        x = iter_spcg(A,LLT,b,eps,VNULL,limit,steps);
      In the second case the solution vector x is created internally.
*/
#ifndef ANSI_C
VEC  *iter_spcg(A,LLT,b,eps,x,limit,steps)
SPMAT   *A, *LLT;
VEC     *b, *x;
double  eps;
int     *steps, limit;
#else
VEC  *iter_spcg(SPMAT *A, SPMAT *LLT, VEC *b, double eps, VEC *x,
                int limit, int *steps)
#endif
{
    ITER *ip;

    ip = iter_get(0,0);
    ip->Ax = (Fun_Ax) sp_mv_mlt;
    ip->A_par = (void *)A;
    ip->Bx = (Fun_Ax) spCHsolve;
    ip->B_par = (void *)LLT;
    ip->info = (Fun_info) NULL;
    ip->b = b;
    ip->eps = eps;
    ip->limit = limit;
    ip->x = x;
    iter_cg(ip);
    x = ip->x;
    if (steps) *steps = ip->steps;
    ip->shared_x = ip->shared_b = TRUE;
    iter_free(ip);   /* release only ITER structure */
    return x;
}
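
/* A minimal usage sketch (not part of the original routine), assuming that
   sp_copy() and spICHfactor() from the Meschach sparse factorisation
   routines are available to build an incomplete Cholesky preconditioner
   LLT for a symmetric positive definite SPMAT A:

        SPMAT   *A, *LLT;
        VEC     *b, *x;
        int     steps;

        LLT = sp_copy(A);               -- copy sparsity pattern and values
        spICHfactor(LLT);               -- LLT now holds the incomplete Cholesky factor
        x = iter_spcg(A,LLT,b,1e-7,VNULL,1000,&steps);
        printf("spcg: %d steps\n",steps);
*/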
/*
  Conjugate gradient method; preconditioned when ip->Bx is non-NULL
*/
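
/* A sketch of the recurrences implemented below, in standard preconditioned
   CG notation (z_k = B*r_k is the preconditioned residual, or z_k = r_k
   when no preconditioner ip->Bx is supplied):

        beta_k  = <r_k,z_k> / <r_{k-1},z_{k-1}>     (beta_0 = 0)
        p_k     = z_k + beta_k * p_{k-1}            (p_0 = z_0)
        alpha_k = <r_k,z_k> / <p_k,A*p_k>
        x_{k+1} = x_k + alpha_k * p_k
        r_{k+1} = r_k - alpha_k * A*p_k
*/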
#ifndef ANSI_C
VEC  *iter_cg(ip)
ITER *ip;
#else
VEC  *iter_cg(ITER *ip)
#endif
{
    STATIC VEC *r = VNULL, *p = VNULL, *q = VNULL, *z = VNULL;
    Real alpha, beta, inner, old_inner, nres;
    VEC *rr;   /* rr == r or rr == z */

    if (ip == INULL)
        error(E_NULL,"iter_cg");
    if (!ip->Ax || !ip->b)
        error(E_NULL,"iter_cg");
    if ( ip->x == ip->b )
        error(E_INSITU,"iter_cg");
    if (!ip->stop_crit)
        error(E_NULL,"iter_cg");

    if ( ip->eps <= 0.0 )
        ip->eps = MACHEPS;

    r = v_resize(r,ip->b->dim);
    p = v_resize(p,ip->b->dim);
    q = v_resize(q,ip->b->dim);

    MEM_STAT_REG(r,TYPE_VEC);
    MEM_STAT_REG(p,TYPE_VEC);
    MEM_STAT_REG(q,TYPE_VEC);

    if (ip->Bx != (Fun_Ax)NULL) {
        z = v_resize(z,ip->b->dim);
        MEM_STAT_REG(z,TYPE_VEC);
        rr = z;
    }
    else rr = r;

    if (ip->x != VNULL) {
        if (ip->x->dim != ip->b->dim)
            error(E_SIZES,"iter_cg");
        ip->Ax(ip->A_par,ip->x,p);      /* p = A*x */
        v_sub(ip->b,p,r);               /* r = b - A*x */
    }
    else {  /* ip->x == 0 */
        ip->x = v_get(ip->b->dim);
        ip->shared_x = FALSE;
        v_copy(ip->b,r);
    }

    old_inner = 0.0;
    for ( ip->steps = 0; ip->steps <= ip->limit; ip->steps++ )
    {
        if ( ip->Bx )
            (ip->Bx)(ip->B_par,r,rr);   /* rr = B*r */

        inner = in_prod(rr,r);
        nres = sqrt(fabs(inner));
        if (ip->info) ip->info(ip,nres,r,rr);
        if (ip->steps == 0) ip->init_res = nres;
        if ( ip->stop_crit(ip,nres,r,rr) ) break;

        if ( ip->steps )        /* if ( ip->steps > 0 ) ... */
        {
            beta = inner/old_inner;
            p = v_mltadd(rr,p,beta,p);
        }
        else                    /* if ( ip->steps == 0 ) ... */
        {
            beta = 0.0;
            p = v_copy(rr,p);
            old_inner = 0.0;
        }
        (ip->Ax)(ip->A_par,p,q);        /* q = A*p */
        alpha = in_prod(p,q);
        if (sqrt(fabs(alpha)) <= MACHEPS*ip->init_res)
            error(E_BREAKDOWN,"iter_cg");
        alpha = inner/alpha;
        v_mltadd(ip->x,p,alpha,ip->x);
        v_mltadd(r,q,-alpha,r);
        old_inner = inner;
    }

#ifdef THREADSAFE
    V_FREE(r);   V_FREE(p);   V_FREE(q);   V_FREE(z);
#endif

    return ip->x;
}
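
/* A usage sketch for calling iter_cg() directly with a dense symmetric
   positive definite MAT (follows the same pattern as iter_spcg() above;
   mv_mlt() is Meschach's dense matrix-vector product, and no preconditioner
   is set, so ip->Bx stays NULL):

        MAT     *A;
        VEC     *b, *x;
        ITER    *ip;

        ip = iter_get(0,0);
        ip->Ax = (Fun_Ax) mv_mlt;
        ip->A_par = (void *) A;
        ip->b = b;
        ip->eps = 1e-7;
        ip->limit = 1000;
        ip->x = VNULL;                  -- iter_cg() allocates the solution
        x = iter_cg(ip);
        ip->shared_x = ip->shared_b = TRUE;
        iter_free(ip);                  -- release only the ITER structure
*/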
/* iter_lanczos -- raw Lanczos algorithm -- no re-orthogonalisation
   -- creates the tridiagonal matrix T of size m == ip->k,
      but stops early (T no larger) if some beta_k == 0
   -- uses the passed routine ip->Ax to do the matrix-vector multiplies */
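
/* A sketch of the three-term recurrence computed below, in standard Lanczos
   notation (the Lanczos vectors q_j are stored as the rows of Q when
   Q != NULL; a->ve[j] holds alpha_j and b->ve[j] the corresponding
   off-diagonal beta):

        A*q_j   = beta_{j-1}*q_{j-1} + alpha_j*q_j + beta_j*q_{j+1}
        alpha_j = <q_j, A*q_j>
        beta_j  = || A*q_j - alpha_j*q_j - beta_{j-1}*q_{j-1} ||_2

   so that Q*A*Q^T is (approximately) the symmetric tridiagonal matrix with
   diagonal a and off-diagonal b.
*/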
#ifndef ANSI_C
void    iter_lanczos(ip,a,b,beta2,Q)
ITER    *ip;
VEC     *a, *b;
Real    *beta2;
MAT     *Q;
#else
void    iter_lanczos(ITER *ip, VEC *a, VEC *b, Real *beta2, MAT *Q)
#endif
{
    int j;
    STATIC VEC *v = VNULL, *w = VNULL, *tmp = VNULL;
    Real alpha, beta, c;

    if ( ! ip )
        error(E_NULL,"iter_lanczos");
    if ( ! ip->Ax || ! ip->x || ! a || ! b )
        error(E_NULL,"iter_lanczos");
    if ( ip->k <= 0 )
        error(E_BOUNDS,"iter_lanczos");
    if ( Q && ( Q->n < ip->x->dim || Q->m < ip->k ) )
        error(E_SIZES,"iter_lanczos");

    a = v_resize(a,(unsigned int)ip->k);
    b = v_resize(b,(unsigned int)(ip->k-1));
    v = v_resize(v,ip->x->dim);
    w = v_resize(w,ip->x->dim);
    tmp = v_resize(tmp,ip->x->dim);
    MEM_STAT_REG(v,TYPE_VEC);
    MEM_STAT_REG(w,TYPE_VEC);
    MEM_STAT_REG(tmp,TYPE_VEC);

    beta = 1.0;
    v_zero(a);
    v_zero(b);
    if (Q) m_zero(Q);

    /* normalise x as w */
    c = v_norm2(ip->x);
    if (c <= MACHEPS) {  /* ip->x == 0 */
        *beta2 = 0.0;
        return;
    }
    else
        sv_mlt(1.0/c,ip->x,w);

    (ip->Ax)(ip->A_par,w,v);

    for ( j = 0; j < ip->k; j++ )
    {
        /* store w in Q if Q not NULL */
        if ( Q ) set_row(Q,j,w);

        alpha = in_prod(w,v);
        a->ve[j] = alpha;
        v_mltadd(v,w,-alpha,v);
        beta = v_norm2(v);
        if ( beta == 0.0 )
        {
            *beta2 = 0.0;
            return;
        }

        if ( j < ip->k-1 )
            b->ve[j] = beta;
        v_copy(w,tmp);
        sv_mlt(1/beta,v,w);
        sv_mlt(-beta,tmp,v);
        (ip->Ax)(ip->A_par,w,tmp);
        v_add(v,tmp,v);
    }
    *beta2 = beta;

#ifdef THREADSAFE
    V_FREE(v);   V_FREE(w);   V_FREE(tmp);
#endif
}
/* iter_splanczos -- version that uses sparse matrix data structure */
#ifndef ANSI_C
void    iter_splanczos(A,m,x0,a,b,beta2,Q)
SPMAT   *A;
int     m;
VEC     *x0, *a, *b;
Real    *beta2;
MAT     *Q;
#else
void    iter_splanczos(SPMAT *A, int m, VEC *x0,
                       VEC *a, VEC *b, Real *beta2, MAT *Q)
#endif
{
    ITER *ip;

    ip = iter_get(0,0);
    ip->shared_x = ip->shared_b = TRUE;
    ip->Ax = (Fun_Ax) sp_mv_mlt;
    ip->A_par = (void *) A;
    ip->x = x0;
    ip->k = m;
    iter_lanczos(ip,a,b,beta2,Q);
    iter_free(ip);   /* release only ITER structure */
}
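
/* A usage sketch (assumes v_rand() from Meschach's random-number routines
   to build a non-zero starting vector; MNULL may be passed for Q when the
   Lanczos vectors themselves are not wanted):

        SPMAT   *A;                     -- n x n symmetric
        VEC     *x0, *a, *b;
        Real    beta2;
        int     m = 30;                 -- number of Lanczos steps

        x0 = v_rand(v_get(A->n));
        a = v_get(m);  b = v_get(m-1);
        iter_splanczos(A,m,x0,a,b,&beta2,MNULL);
        trieig(a,b,MNULL);              -- eigenvalues of T (Ritz values)
                                        -- approximate extreme eigenvalues of A
*/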
  258. #ifndef ANSI_C
  259. extern double frexp(), ldexp();
  260. #else
  261. extern double frexp(double num, int *exponent),
  262. ldexp(double num, int exponent);
  263. #endif
/* product -- returns the product of the entries of a, each shifted by
        -offset (i.e. prod_i (a->ve[i] - offset); the plain product of the
        entries when offset == 0.0)
   -- the result is split into a mantissa (the return value) and a binary
      exponent (*expt) so that long products cannot overflow or underflow */
#ifndef ANSI_C
static double product(a,offset,expt)
VEC     *a;
double  offset;
int     *expt;
#else
static double product(VEC *a, double offset, int *expt)
#endif
{
    Real mant, tmp_fctr;
    int  i, tmp_expt;

    if ( ! a )
        error(E_NULL,"product");

    mant = 1.0;
    *expt = 0;
    if ( offset == 0.0 )
        for ( i = 0; i < a->dim; i++ )
        {
            mant *= frexp(a->ve[i],&tmp_expt);
            *expt += tmp_expt;
            if ( ! (i % 10) )
            {
                mant = frexp(mant,&tmp_expt);
                *expt += tmp_expt;
            }
        }
    else
        for ( i = 0; i < a->dim; i++ )
        {
            tmp_fctr = a->ve[i] - offset;
            tmp_fctr += (tmp_fctr > 0.0 ) ? -MACHEPS*offset :
                                             MACHEPS*offset;
            mant *= frexp(tmp_fctr,&tmp_expt);
            *expt += tmp_expt;
            if ( ! (i % 10) )
            {
                mant = frexp(mant,&tmp_expt);
                *expt += tmp_expt;
            }
        }

    mant = frexp(mant,&tmp_expt);
    *expt += tmp_expt;

    return mant;
}
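
/* A brief sketch of the (mantissa, exponent) representation used by
   product() and product2(): frexp() splits a double into m * 2^e with
   0.5 <= |m| < 1, so multiplying the mantissas and summing the exponents
   keeps the running product representable even when the true product would
   overflow or underflow.  The value can be recovered (when representable)
   with ldexp():

        int    expt;
        double mant  = product(a,0.0,&expt);
        double value = ldexp(mant,expt);        -- == prod_i a->ve[i]
*/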
/* product2 -- as for product(), but computes prod_{i != k} (a->ve[i] - a->ve[k])
   -- the mantissa is returned and the binary exponent is stored in *expt */
#ifndef ANSI_C
static double product2(a,k,expt)
VEC     *a;
int     k;      /* entry of a to leave out */
int     *expt;
#else
static double product2(VEC *a, int k, int *expt)
#endif
{
    Real mant, mu, tmp_fctr;
    int  i, tmp_expt;

    if ( ! a )
        error(E_NULL,"product2");
    if ( k < 0 || k >= a->dim )
        error(E_BOUNDS,"product2");

    mant = 1.0;
    *expt = 0;
    mu = a->ve[k];
    for ( i = 0; i < a->dim; i++ )
    {
        if ( i == k )
            continue;
        tmp_fctr = a->ve[i] - mu;
        tmp_fctr += ( tmp_fctr > 0.0 ) ? -MACHEPS*mu : MACHEPS*mu;
        mant *= frexp(tmp_fctr,&tmp_expt);
        *expt += tmp_expt;
        if ( ! (i % 10) )
        {
            mant = frexp(mant,&tmp_expt);
            *expt += tmp_expt;
        }
    }
    mant = frexp(mant,&tmp_expt);
    *expt += tmp_expt;

    return mant;
}
/* dbl_cmp -- comparison function to pass to qsort() */
#ifndef ANSI_C
static int dbl_cmp(x,y)
Real *x, *y;
#else
static int dbl_cmp(Real *x, Real *y)
#endif
{
    Real tmp;

    tmp = *x - *y;
    return (tmp > 0 ? 1 : tmp < 0 ? -1 : 0);
}
/* iter_lanczos2 -- Lanczos method with an error estimate for every eigenvalue
   -- uses the Cullum & Willoughby approach, Sparse Matrix Proc. 1978
   -- may return multiple copies of an eigenvalue even where the
      corresponding eigenvalue of A is not multiple
   -- returns the vector of eigenvalue estimates (evals) */
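
/* A sketch of the estimate computed below: with T the k x k Lanczos
   tridiagonal matrix (diagonal a, off-diagonal b), T2 the submatrix
   obtained by deleting the first row and column of T, and beta the final
   Lanczos off-diagonal element, the error estimate for the Ritz value
   lambda_i is taken as

        err_i ~= | beta * prod_j b_j /
                   sqrt( |prod_{j != i} (lambda_j - lambda_i)| *
                         |det(T2 - lambda_i*I)| ) |

   All products are carried in (mantissa, exponent) form via product() and
   product2() and recombined with ldexp() to avoid overflow or underflow.
*/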
#ifndef ANSI_C
VEC  *iter_lanczos2(ip,evals,err_est)
ITER *ip;               /* ITER structure */
VEC  *evals;            /* eigenvalue vector */
VEC  *err_est;          /* error estimates of eigenvalues */
#else
VEC  *iter_lanczos2(ITER *ip, VEC *evals, VEC *err_est)
#endif
{
    VEC        *a;
    STATIC VEC *b=VNULL, *a2=VNULL, *b2=VNULL;
    Real beta, pb_mant, det_mant, det_mant1, det_mant2;
    int  i, pb_expt, det_expt, det_expt1, det_expt2;

    if ( ! ip )
        error(E_NULL,"iter_lanczos2");
    if ( ! ip->Ax || ! ip->x )
        error(E_NULL,"iter_lanczos2");
    if ( ip->k <= 0 )
        error(E_RANGE,"iter_lanczos2");

    a = evals;
    a = v_resize(a,(unsigned int)ip->k);
    b = v_resize(b,(unsigned int)(ip->k-1));
    MEM_STAT_REG(b,TYPE_VEC);

    iter_lanczos(ip,a,b,&beta,MNULL);

    /* printf("# beta =%g\n",beta); */
    pb_mant = 0.0;
    if ( err_est )
    {
        pb_mant = product(b,(double)0.0,&pb_expt);
        /* printf("# pb_mant = %g, pb_expt = %d\n",pb_mant, pb_expt); */
    }

    /* printf("# diags =\n");     v_output(a); */
    /* printf("# off diags =\n"); v_output(b); */
    a2 = v_resize(a2,a->dim - 1);
    b2 = v_resize(b2,b->dim - 1);
    MEM_STAT_REG(a2,TYPE_VEC);
    MEM_STAT_REG(b2,TYPE_VEC);
    for ( i = 0; i < a2->dim - 1; i++ )
    {
        a2->ve[i] = a->ve[i+1];
        b2->ve[i] = b->ve[i+1];
    }
    a2->ve[a2->dim-1] = a->ve[a2->dim];

    trieig(a,b,MNULL);
    /* sort evals as a courtesy */
    qsort((void *)(a->ve),(int)(a->dim),sizeof(Real),(int (*)())dbl_cmp);

    /* error estimates */
    if ( err_est )
    {
        err_est = v_resize(err_est,(unsigned int)ip->k);

        trieig(a2,b2,MNULL);
        /* printf("# a =\n");  v_output(a); */
        /* printf("# a2 =\n"); v_output(a2); */

        for ( i = 0; i < a->dim; i++ )
        {
            det_mant1 = product2(a,i,&det_expt1);
            det_mant2 = product(a2,(double)a->ve[i],&det_expt2);
            /* printf("# det_mant1=%g, det_expt1=%d\n",
                      det_mant1,det_expt1); */
            /* printf("# det_mant2=%g, det_expt2=%d\n",
                      det_mant2,det_expt2); */
            if ( det_mant1 == 0.0 )
            {   /* multiple e-val of T */
                err_est->ve[i] = 0.0;
                continue;
            }
            else if ( det_mant2 == 0.0 )
            {
                err_est->ve[i] = HUGE_VAL;
                continue;
            }
            if ( (det_expt1 + det_expt2) % 2 )
                /* if odd... */
                det_mant = sqrt(2.0*fabs(det_mant1*det_mant2));
            else /* if even... */
                det_mant = sqrt(fabs(det_mant1*det_mant2));
            det_expt = (det_expt1+det_expt2)/2;
            err_est->ve[i] = fabs(beta*
                                  ldexp(pb_mant/det_mant,pb_expt-det_expt));
        }
    }

#ifdef THREADSAFE
    V_FREE(b);   V_FREE(a2);   V_FREE(b2);
#endif

    return a;
}
/* iter_splanczos2 -- version of iter_lanczos2() that uses sparse matrix
        data structure */
#ifndef ANSI_C
VEC  *iter_splanczos2(A,m,x0,evals,err_est)
SPMAT   *A;
int     m;
VEC     *x0;            /* initial vector */
VEC     *evals;         /* eigenvalue vector */
VEC     *err_est;       /* error estimates of eigenvalues */
#else
VEC  *iter_splanczos2(SPMAT *A, int m, VEC *x0, VEC *evals, VEC *err_est)
#endif
{
    ITER *ip;
    VEC  *a;

    ip = iter_get(0,0);
    ip->Ax = (Fun_Ax) sp_mv_mlt;
    ip->A_par = (void *) A;
    ip->x = x0;
    ip->k = m;
    a = iter_lanczos2(ip,evals,err_est);
    ip->shared_x = ip->shared_b = TRUE;
    iter_free(ip);   /* release only ITER structure */
    return a;
}
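
/* A usage sketch for estimating eigenvalues of a sparse symmetric matrix
   together with the Cullum & Willoughby error estimates (assumes v_rand()
   for the starting vector; m is the number of Lanczos steps):

        SPMAT   *A;
        VEC     *x0, *evals, *err_est;
        int     i, m = 50;

        x0 = v_rand(v_get(A->n));
        evals = v_get(m);  err_est = v_get(m);
        iter_splanczos2(A,m,x0,evals,err_est);
        for ( i = 0; i < evals->dim; i++ )
            printf("lambda[%d] = %g (est. error %g)\n",
                   i, evals->ve[i], err_est->ve[i]);
*/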
/*
  Conjugate gradient method -- another variant, kept mainly for testing
*/
#ifndef ANSI_C
VEC  *iter_cg1(ip)
ITER *ip;
#else
VEC  *iter_cg1(ITER *ip)
#endif
{
    STATIC VEC *r = VNULL, *p = VNULL, *q = VNULL, *z = VNULL;
    Real   alpha;
    double inner, nres;
    VEC    *rr;   /* rr == r or rr == z */

    if (ip == INULL)
        error(E_NULL,"iter_cg1");
    if (!ip->Ax || !ip->b)
        error(E_NULL,"iter_cg1");
    if ( ip->x == ip->b )
        error(E_INSITU,"iter_cg1");
    if (!ip->stop_crit)
        error(E_NULL,"iter_cg1");

    if ( ip->eps <= 0.0 )
        ip->eps = MACHEPS;

    r = v_resize(r,ip->b->dim);
    p = v_resize(p,ip->b->dim);
    q = v_resize(q,ip->b->dim);

    MEM_STAT_REG(r,TYPE_VEC);
    MEM_STAT_REG(p,TYPE_VEC);
    MEM_STAT_REG(q,TYPE_VEC);

    if (ip->Bx != (Fun_Ax)NULL) {
        z = v_resize(z,ip->b->dim);
        MEM_STAT_REG(z,TYPE_VEC);
        rr = z;
    }
    else rr = r;

    if (ip->x != VNULL) {
        if (ip->x->dim != ip->b->dim)
            error(E_SIZES,"iter_cg1");
        ip->Ax(ip->A_par,ip->x,p);      /* p = A*x */
        v_sub(ip->b,p,r);               /* r = b - A*x */
    }
    else {  /* ip->x == 0 */
        ip->x = v_get(ip->b->dim);
        ip->shared_x = FALSE;
        v_copy(ip->b,r);
    }

    if (ip->Bx) (ip->Bx)(ip->B_par,r,p);
    else v_copy(r,p);

    inner = in_prod(p,r);
    nres = sqrt(fabs(inner));
    if (ip->info) ip->info(ip,nres,r,p);
    if ( nres == 0.0 ) return ip->x;

    for ( ip->steps = 0; ip->steps <= ip->limit; ip->steps++ )
    {
        ip->Ax(ip->A_par,p,q);
        inner = in_prod(q,p);
        if (sqrt(fabs(inner)) <= MACHEPS*ip->init_res)
            error(E_BREAKDOWN,"iter_cg1");
        alpha = in_prod(p,r)/inner;
        v_mltadd(ip->x,p,alpha,ip->x);
        v_mltadd(r,q,-alpha,r);

        rr = r;
        if (ip->Bx) {
            ip->Bx(ip->B_par,r,z);
            rr = z;
        }

        nres = in_prod(r,rr);
        if (nres < 0.0) {
            warning(WARN_RES_LESS_0,"iter_cg1");
            break;
        }
        nres = sqrt(fabs(nres));
        if (ip->info) ip->info(ip,nres,r,rr);
        if (ip->steps == 0) ip->init_res = nres;
        if ( ip->stop_crit(ip,nres,r,rr) ) break;

        alpha = -in_prod(rr,q)/inner;
        v_mltadd(rr,p,alpha,p);
    }

#ifdef THREADSAFE
    V_FREE(r);   V_FREE(p);   V_FREE(q);   V_FREE(z);
#endif

    return ip->x;
}