OpenRadioss 2025.1.11
OpenRadioss project
psgbtrs.f File Reference

Go to the source code of this file.

Functions/Subroutines

subroutine psgbtrs (trans, n, bwl, bwu, nrhs, a, ja, desca, ipiv, b, ib, descb, af, laf, work, lwork, info)

Function/Subroutine Documentation

◆ psgbtrs()

subroutine psgbtrs ( character trans,
integer n,
integer bwl,
integer bwu,
integer nrhs,
real, dimension( * ) a,
integer ja,
integer, dimension( * ) desca,
integer, dimension( * ) ipiv,
real, dimension( * ) b,
integer ib,
integer, dimension( * ) descb,
real, dimension( * ) af,
integer laf,
real, dimension( * ) work,
integer lwork,
integer info )

Definition at line 1 of file psgbtrs.f.
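PSGBTRS is meant to be called after PSGBTRF has factored the same distributed band matrix. The sketch below shows a plausible calling sequence. The 1-by-4 process grid, the problem sizes, the omitted fill-in of A and B, and the BLACS setup calls (BLACS_GET, BLACS_GRIDINIT, BLACS_EXIT) are illustrative assumptions, not part of this file; the PSGBTRF argument list follows its documented ScaLAPACK interface.

      PROGRAM BANDSOLVE
*     Sketch: factor a banded system with PSGBTRF, then solve with
*     PSGBTRS. Sizes and grid shape are illustrative assumptions.
      INTEGER            N, BWL, BWU, NRHS, NB
      PARAMETER          ( N = 500, BWL = 2, BWU = 3, NRHS = 1,
     $                   NB = 128 )
      INTEGER            ICTXT, NPROW, NPCOL, MYROW, MYCOL, INFO,
     $                   LAF, LWORK
      INTEGER            DESCA( 7 ), DESCB( 7 ), IPIV( NB )
      REAL               A( ( 2*BWL+2*BWU+1 )*NB ), B( NB )
      REAL               AF( ( NB+BWU )*( BWL+BWU )
     $                   + 6*( BWL+BWU )*( BWL+2*BWU ) )
      REAL               WORK( NRHS*( NB+2*BWL+4*BWU ) )
*
*     Build a 1-by-4 grid: P*NB = 512 >= N = 500, so no process
*     owns more than one block, as the Restrictions below require.
      CALL BLACS_GET( 0, 0, ICTXT )
      CALL BLACS_GRIDINIT( ICTXT, 'R', 1, 4 )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Type-501 (1-by-P) descriptor for the band matrix A.
      DESCA( 1 ) = 501
      DESCA( 2 ) = ICTXT
      DESCA( 3 ) = N
      DESCA( 4 ) = NB
      DESCA( 5 ) = 0
      DESCA( 6 ) = 2*BWL + 2*BWU + 1
      DESCA( 7 ) = 0
*
*     Type-502 (P-by-1) descriptor for the right-hand sides B.
      DESCB( 1 ) = 502
      DESCB( 2 ) = ICTXT
      DESCB( 3 ) = N
      DESCB( 4 ) = NB
      DESCB( 5 ) = 0
      DESCB( 6 ) = NB
      DESCB( 7 ) = 0
*
*     ... fill the local pieces of A (band storage) and B here ...
*
      LAF   = ( NB+BWU )*( BWL+BWU ) + 6*( BWL+BWU )*( BWL+2*BWU )
      LWORK = NRHS*( NB+2*BWL+4*BWU )
*
*     Factor once; IPIV and AF must not be altered before the solve.
      CALL PSGBTRF( N, BWL, BWU, A, 1, DESCA, IPIV, AF, LAF,
     $              WORK, LWORK, INFO )
      CALL PSGBTRS( 'N', N, BWL, BWU, NRHS, A, 1, DESCA, IPIV,
     $              B, 1, DESCB, AF, LAF, WORK, LWORK, INFO )
*
      CALL BLACS_GRIDEXIT( ICTXT )
      CALL BLACS_EXIT( 0 )
      END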

3*
4* -- ScaLAPACK routine (version 2.0.2) --
5* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver
6* May 1 2012
7*
8* .. Scalar Arguments ..
9 CHARACTER TRANS
10 INTEGER BWL, BWU, IB, INFO, JA, LAF, LWORK, N, NRHS
11* ..
12* .. Array Arguments ..
13 INTEGER DESCA( * ), DESCB( * ), IPIV( * )
14 REAL A( * ), AF( * ), B( * ), WORK( * )
15* ..
16*
17* Purpose
18* =======
19*
20* PSGBTRS solves a system of linear equations
21*
22* A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
23* or
24* A(1:N, JA:JA+N-1)' * X = B(IB:IB+N-1, 1:NRHS)
25*
26* where A(1:N, JA:JA+N-1) is the matrix used to produce the factors
27* stored in A(1:N,JA:JA+N-1) and AF by PSGBTRF.
28* A(1:N, JA:JA+N-1) is an N-by-N real
29* banded distributed matrix with BWL
30* subdiagonals and BWU superdiagonals.
31*
32* Routine PSGBTRF MUST be called first.
33*
34* =====================================================================
35*
36* Arguments
37* =========
38*
39*
40* TRANS (global input) CHARACTER
41* = 'N': Solve with A(1:N, JA:JA+N-1);
42* = 'T' or 'C': Solve with A(1:N, JA:JA+N-1)^T;
43*
44* N (global input) INTEGER
45* The number of rows and columns to be operated on, i.e. the
46* order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
47*
48* BWL (global input) INTEGER
49* Number of subdiagonals. 0 <= BWL <= N-1
50*
51* BWU (global input) INTEGER
52* Number of superdiagonals. 0 <= BWU <= N-1
53*
54* NRHS (global input) INTEGER
55* The number of right hand sides, i.e., the number of columns
56* of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
57* NRHS >= 0.
58*
59* A (local input/local output) REAL pointer into
60* local memory to an array with first dimension
61* LLD_A >= (2*bwl+2*bwu+1) (stored in DESCA).
62* On entry, this array contains the local pieces of the
63* factors of the N-by-N unsymmetric banded distributed
64* matrix A(1:N, JA:JA+N-1), as computed by PSGBTRF.
65* This local portion is stored in the packed banded format
66* used in LAPACK. Please see the Notes below and the
67* ScaLAPACK manual for more detail on the format of
68* distributed matrices.
69*
70* JA (global input) INTEGER
71* The index in the global array A that points to the start of
72* the matrix to be operated on (which may be either all of A
73* or a submatrix of A).
74*
75* DESCA (global and local input) INTEGER array of dimension DLEN.
76* if 1D type (DTYPE_A=501), DLEN >= 7;
77* if 2D type (DTYPE_A=1), DLEN >= 9 .
78* The array descriptor for the distributed matrix A.
79* Contains information of mapping of A to memory. Please
80* see NOTES below for full description and options.
81*
82* IPIV (local input) INTEGER array, dimension >= DESCA( NB ).
83* Pivot indices for local factorizations.
84* Users *should not* alter the contents between
85* factorization and solve.
86*
87* B (local input/local output) REAL pointer into
88* local memory to an array of local lead dimension lld_b>=NB.
89* On entry, this array contains the
90* the local pieces of the right hand sides
91* B(IB:IB+N-1, 1:NRHS).
92* On exit, this contains the local piece of the solutions
93* distributed matrix X.
94*
95* IB (global input) INTEGER
96* The row index in the global array B that points to the first
97* row of the matrix to be operated on (which may be either
98* all of B or a submatrix of B).
99*
100* DESCB (global and local input) INTEGER array of dimension DLEN.
101* if 1D type (DTYPE_B=502), DLEN >=7;
102* if 2D type (DTYPE_B=1), DLEN >= 9.
103* The array descriptor for the distributed matrix B.
104* Contains information of mapping of B to memory. Please
105* see NOTES below for full description and options.
106*
107* AF (local input) REAL array, dimension LAF.
108* Auxiliary Fillin Space.
109* Fillin is created during the factorization routine
110* PSGBTRF and this is stored in AF. If a linear system
111* is to be solved using PSGBTRS after the factorization
112* routine, AF *must not be altered* after the factorization.
113*
114* LAF (local input) INTEGER
115* Size of user-input Auxiliary Fillin space AF. Must be >=
116* (NB+bwu)*(bwl+bwu)+6*(bwl+bwu)*(bwl+2*bwu)
117* If LAF is not large enough, an error code will be returned
118* and the minimum acceptable size will be returned in AF( 1 )
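* For example, with NB = 64, BWL = 2 and BWU = 3, this requires
* LAF >= (64+3)*(2+3) + 6*(2+3)*(2+2*3) = 335 + 240 = 575.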
119*
120* WORK (local workspace/local output)
121* REAL temporary workspace. This space may
122* be overwritten in between calls to routines. WORK must be
123* the size given in LWORK.
124* On exit, WORK( 1 ) contains the minimal LWORK.
125*
126* LWORK (local input or global input) INTEGER
127* Size of user-input workspace WORK.
128* If LWORK is too small, the minimal acceptable size will be
129* returned in WORK(1) and an error code is returned. LWORK>=
130* NRHS*(NB+2*bwl+4*bwu)
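* For example, with NRHS = 1, NB = 64, BWL = 2 and BWU = 3,
* LWORK >= 1*( 64 + 2*2 + 4*3 ) = 80.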
131*
132* INFO (global output) INTEGER
133* = 0: successful exit
134* < 0: If the i-th argument is an array and the j-entry had
135* an illegal value, then INFO = -(i*100+j), if the i-th
136* argument is a scalar and had an illegal value, then
137* INFO = -i.
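* For example, DESCA is argument number 8, so an illegal value
* in DESCA( 4 ) gives INFO = -(8*100+4) = -804.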
138*
139* =====================================================================
140*
141* Restrictions
142* ============
143*
144* The following are restrictions on the input parameters. Some of these
145* are temporary and will be removed in future releases, while others
146* may reflect fundamental technical limitations.
147*
148* Non-cyclic restriction: VERY IMPORTANT!
149* P*NB>= mod(JA-1,NB)+N.
150* The mapping for matrices must be blocked, reflecting the nature
151* of the divide and conquer algorithm as a task-parallel algorithm.
152* This formula in words is: no processor may have more than one
153* chunk of the matrix.
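* For example, with JA = 1 and N = 500 on P = 4 processes,
* NB = 128 satisfies 4*128 >= 500, while NB = 64 does not.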
154*
155* Blocksize cannot be too small:
156* If the matrix spans more than one processor, the following
157* restriction on NB, the size of each block on each processor,
158* must hold:
159* NB >= (BWL+BWU)+1
160* The bulk of parallel computation is done on the matrix of size
161* O(NB) on each processor. If this is too small, divide and conquer
162* is a poor choice of algorithm.
163*
164* Submatrix reference:
165* JA = IB
166* Alignment restriction that prevents unnecessary communication.
167*
168* =====================================================================
169*
170* Notes
171* =====
172*
173* If the factorization routine and the solve routine are to be called
174* separately (to solve various sets of right-hand sides using the same
175* coefficient matrix), the auxiliary space AF *must not be altered*
176* between calls to the factorization routine and the solve routine.
177*
178* The best algorithm for solving banded and tridiagonal linear systems
179* depends on a variety of parameters, especially the bandwidth.
180* Currently, only algorithms designed for the case N/P >> bw are
181* implemented. These go by many names, including Divide and Conquer,
182* Partitioning, domain decomposition-type, etc.
183*
184* Algorithm description: Divide and Conquer
185*
186* The Divide and Conquer algorithm assumes the matrix is narrowly
187* banded compared with the number of equations. In this situation,
188* it is best to distribute the input matrix A one-dimensionally,
189* with columns atomic and rows divided amongst the processes.
190* The basic algorithm divides the banded matrix up into
191* P pieces with one stored on each processor,
192* and then proceeds in 2 phases for the factorization or 3 for the
193* solution of a linear system.
194* 1) Local Phase:
195* The individual pieces are factored independently and in
196* parallel. These factors are applied to the matrix creating
197* fillin, which is stored in a non-inspectable way in auxiliary
198* space AF. Mathematically, this is equivalent to reordering
199* the matrix A as P A P^T and then factoring the principal
200* leading submatrix of size equal to the sum of the sizes of
201* the matrices factored on each processor. The factors of
202* these submatrices overwrite the corresponding parts of A
203* in memory.
204* 2) Reduced System Phase:
205* A small (max(bwl,bwu)* (P-1)) system is formed representing
206* interaction of the larger blocks, and is stored (as are its
207* factors) in the space AF. A parallel Block Cyclic Reduction
208* algorithm is used. For a linear system, a parallel front solve
209* followed by an analogous backsolve, both using the structure
210* of the factored matrix, are performed.
211* 3) Backsubstitution Phase:
212* For a linear system, a local backsubstitution is performed on
213* each processor in parallel.
214*
215*
216* Descriptors
217* ===========
218*
219* Descriptors now have *types* and differ from ScaLAPACK 1.0.
220*
221* Note: banded codes can use either the old two dimensional
222* or new one-dimensional descriptors, though the processor grid in
223* both cases *must be one-dimensional*. We describe both types below.
224*
225* Each global data object is described by an associated description
226* vector. This vector stores the information required to establish
227* the mapping between an object element and its corresponding process
228* and memory location.
229*
230* Let A be a generic term for any 2D block cyclically distributed array.
231* Such a global array has an associated description vector DESCA.
232* In the following comments, the character _ should be read as
233* "of the global array".
234*
235* NOTATION STORED IN EXPLANATION
236* --------------- -------------- --------------------------------------
237* DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type. In this case,
238* DTYPE_A = 1.
239* CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
240* the BLACS process grid A is distribu-
241* ted over. The context itself is glo-
242* bal, but the handle (the integer
243* value) may vary.
244* M_A (global) DESCA( M_ ) The number of rows in the global
245* array A.
246* N_A (global) DESCA( N_ ) The number of columns in the global
247* array A.
248* MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
249* the rows of the array.
250* NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
251* the columns of the array.
252* RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
253* row of the array A is distributed.
254* CSRC_A (global) DESCA( CSRC_ ) The process column over which the
255* first column of the array A is
256* distributed.
257* LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
258* array. LLD_A >= MAX(1,LOCr(M_A)).
259*
260* Let K be the number of rows or columns of a distributed matrix,
261* and assume that its process grid has dimension p x q.
262* LOCr( K ) denotes the number of elements of K that a process
263* would receive if K were distributed over the p processes of its
264* process column.
265* Similarly, LOCc( K ) denotes the number of elements of K that a
266* process would receive if K were distributed over the q processes of
267* its process row.
268* The values of LOCr() and LOCc() may be determined via a call to the
269* ScaLAPACK tool function, NUMROC:
270* LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
271* LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
272* An upper bound for these quantities may be computed by:
273* LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
274* LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
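* For example, with M = 1000, MB_A = 64 and NPROW = 4,
* LOCr( 1000 ) <= ceil( ceil(1000/64)/4 )*64 = ceil( 16/4 )*64 = 256.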
275*
276*
277* One-dimensional descriptors:
278*
279* One-dimensional descriptors are a new addition to ScaLAPACK since
280* version 1.0. They simplify and shorten the descriptor for 1D
281* arrays.
282*
283* Since ScaLAPACK supports two-dimensional arrays as the fundamental
284* object, we allow 1D arrays to be distributed either over the
285* first dimension of the array (as if the grid were P-by-1) or the
286* 2nd dimension (as if the grid were 1-by-P). This choice is
287* indicated by the descriptor type (501 or 502)
288* as described below.
289*
290* IMPORTANT NOTE: the actual BLACS grid represented by the
291* CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
292* irrespective of which one-dimensional descriptor type
293* (501 or 502) is input.
294* This routine will interpret the grid properly either way.
295* ScaLAPACK routines *do not support intercontext operations* so that
296* the grid passed to a single ScaLAPACK routine *must be the same*
297* for all array descriptors passed to that routine.
298*
299* NOTE: In all cases where 1D descriptors are used, 2D descriptors
300* may also be used, since a one-dimensional array is a special case
301* of a two-dimensional array with one dimension of size unity.
302* The two-dimensional array used in this case *must* be of the
303* proper orientation:
304* If the appropriate one-dimensional descriptor is DTYPE_A=501
305* (1 by P type), then the two dimensional descriptor must
306* have a CTXT value that refers to a 1 by P BLACS grid;
307* If the appropriate one-dimensional descriptor is DTYPE_A=502
308* (P by 1 type), then the two dimensional descriptor must
309* have a CTXT value that refers to a P by 1 BLACS grid.
310*
311*
312* Summary of allowed descriptors, types, and BLACS grids:
313* DTYPE 501 502 1 1
314* BLACS grid 1xP or Px1 1xP or Px1 1xP Px1
315* -----------------------------------------------------
316* A OK NO OK NO
317* B NO OK NO OK
318*
319* Note that a consequence of this chart is that it is not possible
320* for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
321* to opposite requirements for the orientation of the BLACS grid,
322* and as noted before, the *same* BLACS context must be used in
323* all descriptors in a single ScaLAPACK subroutine call.
324*
325* Let A be a generic term for any 1D block cyclically distributed array.
326* Such a global array has an associated description vector DESCA.
327* In the following comments, the character _ should be read as
328* "of the global array".
329*
330* NOTATION STORED IN EXPLANATION
331* --------------- ---------- ------------------------------------------
332* DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
333* TYPE_A = 501: 1-by-P grid.
334* TYPE_A = 502: P-by-1 grid.
335* CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
336* the BLACS process grid A is distribu-
337* ted over. The context itself is glo-
338* bal, but the handle (the integer
339* value) may vary.
340* N_A (global) DESCA( 3 ) The size of the array dimension being
341* distributed.
342* NB_A (global) DESCA( 4 ) The blocking factor used to distribute
343* the distributed dimension of the array.
344* SRC_A (global) DESCA( 5 ) The process row or column over which the
345* first row or column of the array
346* is distributed.
347* LLD_A (local) DESCA( 6 ) The leading dimension of the local array
348* storing the local blocks of the distri-
349* buted array A. Minimum value of LLD_A
350* depends on TYPE_A.
351* TYPE_A = 501: LLD_A >=
352* max( 1, size of undistributed dimension ).
353* TYPE_A = 502: LLD_A >= max( 1, NB_A ).
354* Reserved DESCA( 7 ) Reserved for future use.
355*
356* =====================================================================
357*
358* Implemented for ScaLAPACK by:
359* Andrew J. Cleary, Livermore National Lab and University of Tenn.,
360* and Markus Hegland, Australian National University. Feb., 1997.
361* Based on code written by : Peter Arbenz, ETH Zurich, 1996.
362* Last modified by: Peter Arbenz, Institute of Scientific Computing,
363* ETH, Zurich.
364*
365* =====================================================================
366*
367* .. Parameters ..
368 REAL ONE
369 parameter( one = 1.0e+0 )
370 REAL ZERO
371 parameter( zero = 0.0e+0 )
372 INTEGER INT_ONE
373 parameter( int_one = 1 )
374 INTEGER DESCMULT, BIGNUM
375 parameter( descmult = 100, bignum = descmult*descmult )
376 INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
377 $ LLD_, MB_, M_, NB_, N_, RSRC_
378 parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
379 $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
380 $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
381* ..
382* .. Local Scalars ..
383 INTEGER APTR, BBPTR, BM, BMN, BN, BNN, BW, CSRC,
384 $ FIRST_PROC, ICTXT, ICTXT_NEW, ICTXT_SAVE,
385 $ IDUM2, IDUM3, J, JA_NEW, L, LBWL, LBWU, LDBB,
386 $ LDW, LLDA, LLDB, LM, LMJ, LN, LPTR, MYCOL,
387 $ MYROW, NB, NEICOL, NP, NPACT, NPCOL, NPROW,
388 $ NPSTR, NP_SAVE, ODD_SIZE, PART_OFFSET,
389 $ RECOVERY_VAL, RETURN_CODE, STORE_M_B,
390 $ STORE_N_A, WORK_SIZE_MIN, WPTR
391* ..
392* .. Local Arrays ..
393 INTEGER DESCA_1XP( 7 ), DESCB_PX1( 7 ),
394 $ PARAM_CHECK( 17, 3 )
395* ..
396* .. External Subroutines ..
397 EXTERNAL blacs_gridexit, blacs_gridinfo, scopy,
398 $ desc_convert, sgemm, sgemv, sger, sgerv2d,
399 $ sgesd2d, sgetrs, slamov, slaswp, sscal, sswap,
400 $ strsm, globchk, pxerbla, reshape
401* ..
402* .. External Functions ..
403 LOGICAL LSAME
404 INTEGER NUMROC
405 EXTERNAL lsame, numroc
406* ..
407* .. Intrinsic Functions ..
408 INTRINSIC ichar, max, min, mod
409* ..
410* .. Executable Statements ..
411*
412*
413* Test the input parameters
414*
415 info = 0
416*
417* Convert descriptor into standard form for easy access to
418* parameters, check that grid is of right shape.
419*
420 desca_1xp( 1 ) = 501
421 descb_px1( 1 ) = 502
422*
423 CALL desc_convert( desca, desca_1xp, return_code )
424*
425 IF( return_code.NE.0 ) THEN
426 info = -( 8*100+2 )
427 END IF
428*
429 CALL desc_convert( descb, descb_px1, return_code )
430*
431 IF( return_code.NE.0 ) THEN
432 info = -( 11*100+2 )
433 END IF
434*
435* Consistency checks for DESCA and DESCB.
436*
437* Context must be the same
438 IF( desca_1xp( 2 ).NE.descb_px1( 2 ) ) THEN
439 info = -( 11*100+2 )
440 END IF
441*
442* These are alignment restrictions that may or may not be removed
443* in future releases. -Andy Cleary, April 14, 1996.
444*
445* Block sizes must be the same
446 IF( desca_1xp( 4 ).NE.descb_px1( 4 ) ) THEN
447 info = -( 11*100+4 )
448 END IF
449*
450* Source processor must be the same
451*
452 IF( desca_1xp( 5 ).NE.descb_px1( 5 ) ) THEN
453 info = -( 11*100+5 )
454 END IF
455*
456* Get values out of descriptor for use in code.
457*
458 ictxt = desca_1xp( 2 )
459 csrc = desca_1xp( 5 )
460 nb = desca_1xp( 4 )
461 llda = desca_1xp( 6 )
462 store_n_a = desca_1xp( 3 )
463 lldb = descb_px1( 6 )
464 store_m_b = descb_px1( 3 )
465*
466* Get grid parameters
467*
468*
469 CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
470 np = nprow*npcol
471*
472*
473*
474 IF( lsame( trans, 'N' ) ) THEN
475 idum2 = ichar( 'N' )
476 ELSE IF( lsame( trans, 'T' ) ) THEN
477 idum2 = ichar( 'T' )
478 ELSE IF( lsame( trans, 'c' ) ) THEN
479 IDUM2 = ICHAR( 't' )
480 ELSE
481 INFO = -1
482 END IF
483*
484 IF( LWORK.LT.-1 ) THEN
485 INFO = -16
486 ELSE IF( LWORK.EQ.-1 ) THEN
487 IDUM3 = -1
488 ELSE
489 IDUM3 = 1
490 END IF
491*
492 IF( N.LT.0 ) THEN
493 INFO = -2
494 END IF
495*
496 IF( N+JA-1.GT.STORE_N_A ) THEN
497 INFO = -( 8*100+6 )
498 END IF
499*
500 IF( ( BWL.GT.N-1 ) .OR. ( BWL.LT.0 ) ) THEN
501 INFO = -3
502 END IF
503*
504 IF( ( BWU.GT.N-1 ) .OR. ( BWU.LT.0 ) ) THEN
505 INFO = -4
506 END IF
507*
508 IF( LLDA.LT.( 2*BWL+2*BWU+1 ) ) THEN
509 INFO = -( 8*100+6 )
510 END IF
511*
512 IF( NB.LE.0 ) THEN
513 INFO = -( 8*100+4 )
514 END IF
515*
516 BW = BWU + BWL
517*
518 IF( N+IB-1.GT.STORE_M_B ) THEN
519 INFO = -( 11*100+3 )
520 END IF
521*
522 IF( LLDB.LT.NB ) THEN
523 INFO = -( 11*100+6 )
524 END IF
525*
526 IF( NRHS.LT.0 ) THEN
527 INFO = -5
528 END IF
529*
530* Current alignment restriction
531*
532 IF( JA.NE.IB ) THEN
533 INFO = -7
534 END IF
535*
536* Argument checking that is specific to Divide & Conquer routine
537*
538 IF( NPROW.NE.1 ) THEN
539 INFO = -( 8*100+2 )
540 END IF
541*
542 IF( N.GT.NP*NB-MOD( JA-1, NB ) ) THEN
543 INFO = -( 2 )
544 CALL PXERBLA( ICTXT, 'psgbtrs, d&c alg.: only 1 block per proc'
545 $ , -INFO )
546 RETURN
547 END IF
548*
549 IF( ( JA+N-1.GT.NB ) .AND. ( NB.LT.( BWL+BWU+1 ) ) ) THEN
550 INFO = -( 8*100+4 )
551 CALL PXERBLA( ICTXT, 'psgbtrs, d&c alg.: nb too small', -INFO )
552 RETURN
553 END IF
554*
555*
556* Check worksize
557*
558 WORK_SIZE_MIN = NRHS*( NB+2*BWL+4*BWU )
559*
560 WORK( 1 ) = WORK_SIZE_MIN
561*
562 IF( LWORK.LT.WORK_SIZE_MIN ) THEN
563 IF( LWORK.NE.-1 ) THEN
564 INFO = -16
565 CALL PXERBLA( ICTXT, 'psgbtrs: worksize error ', -INFO )
566 END IF
567 RETURN
568 END IF
569*
570* Pack params and positions into arrays for global consistency check
571*
572 PARAM_CHECK( 17, 1 ) = DESCB( 5 )
573 PARAM_CHECK( 16, 1 ) = DESCB( 4 )
574 PARAM_CHECK( 15, 1 ) = DESCB( 3 )
575 PARAM_CHECK( 14, 1 ) = DESCB( 2 )
576 PARAM_CHECK( 13, 1 ) = DESCB( 1 )
577 PARAM_CHECK( 12, 1 ) = IB
578 PARAM_CHECK( 11, 1 ) = DESCA( 5 )
579 PARAM_CHECK( 10, 1 ) = DESCA( 4 )
580 PARAM_CHECK( 9, 1 ) = DESCA( 3 )
581 PARAM_CHECK( 8, 1 ) = DESCA( 1 )
582 PARAM_CHECK( 7, 1 ) = JA
583 PARAM_CHECK( 6, 1 ) = NRHS
584 PARAM_CHECK( 5, 1 ) = BWU
585 PARAM_CHECK( 4, 1 ) = BWL
586 PARAM_CHECK( 3, 1 ) = N
587 PARAM_CHECK( 2, 1 ) = IDUM3
588 PARAM_CHECK( 1, 1 ) = IDUM2
589*
590 PARAM_CHECK( 17, 2 ) = 1105
591 PARAM_CHECK( 16, 2 ) = 1104
592 PARAM_CHECK( 15, 2 ) = 1103
593 PARAM_CHECK( 14, 2 ) = 1102
594 PARAM_CHECK( 13, 2 ) = 1101
595 PARAM_CHECK( 12, 2 ) = 10
596 PARAM_CHECK( 11, 2 ) = 805
597 PARAM_CHECK( 10, 2 ) = 804
598 PARAM_CHECK( 9, 2 ) = 803
599 PARAM_CHECK( 8, 2 ) = 801
600 PARAM_CHECK( 7, 2 ) = 7
601 PARAM_CHECK( 6, 2 ) = 5
602 PARAM_CHECK( 5, 2 ) = 4
603 PARAM_CHECK( 4, 2 ) = 3
604 PARAM_CHECK( 3, 2 ) = 2
605 PARAM_CHECK( 2, 2 ) = 16
606 PARAM_CHECK( 1, 2 ) = 1
607*
608* Want to find errors with MIN( ), so if no error, set it to a big
609* number. If there already is an error, multiply by the
610* descriptor multiplier.
611*
612 IF( INFO.GE.0 ) THEN
613 INFO = BIGNUM
614 ELSE IF( INFO.LT.-DESCMULT ) THEN
615 INFO = -INFO
616 ELSE
617 INFO = -INFO*DESCMULT
618 END IF
619*
620* Check consistency across processors
621*
622 CALL GLOBCHK( ICTXT, 17, PARAM_CHECK, 17, PARAM_CHECK( 1, 3 ),
623 $ INFO )
624*
625* Prepare output: set info = 0 if no error, and divide by DESCMULT
626* if error is not in a descriptor entry.
627*
628 IF( INFO.EQ.BIGNUM ) THEN
629 INFO = 0
630 ELSE IF( MOD( INFO, DESCMULT ).EQ.0 ) THEN
631 INFO = -INFO / DESCMULT
632 ELSE
633 INFO = -INFO
634 END IF
635*
636 IF( INFO.LT.0 ) THEN
637 CALL PXERBLA( ICTXT, 'psgbtrs', -INFO )
638 RETURN
639 END IF
640*
641* Quick return if possible
642*
643 IF( N.EQ.0 )
644 $ RETURN
645*
646 IF( NRHS.EQ.0 )
647 $ RETURN
648*
649*
650* Adjust addressing into matrix space to properly get into
651* the beginning part of the relevant data
652*
653 PART_OFFSET = NB*( ( JA-1 ) / ( NPCOL*NB ) )
654*
655 IF( ( MYCOL-CSRC ).LT.( JA-PART_OFFSET-1 ) / NB ) THEN
656 PART_OFFSET = PART_OFFSET + NB
657 END IF
658*
659 IF( MYCOL.LT.CSRC ) THEN
660 PART_OFFSET = PART_OFFSET - NB
661 END IF
662*
663* Form a new BLACS grid (the "standard form" grid) with only procs
664* holding part of the matrix, of size 1xNP where NP is adjusted,
665* starting at csrc=0, with JA modified to reflect dropped procs.
666*
667* First processor to hold part of the matrix:
668*
669 FIRST_PROC = MOD( ( JA-1 ) / NB+CSRC, NPCOL )
670*
671* Calculate the new JA while dropping off unused processors.
672*
673 JA_NEW = MOD( JA-1, NB ) + 1
674*
675* Save and compute new value of NP
676*
677 NP_SAVE = NP
678 NP = ( JA_NEW+N-2 ) / NB + 1
679*
680* Call utility routine that forms "standard-form" grid
681*
682 CALL RESHAPE( ICTXT, INT_ONE, ICTXT_NEW, INT_ONE, FIRST_PROC,
683 $ INT_ONE, NP )
684*
685* Use new context from standard grid as context.
686*
687 ICTXT_SAVE = ICTXT
688 ICTXT = ICTXT_NEW
689 DESCA_1XP( 2 ) = ICTXT_NEW
690 DESCB_PX1( 2 ) = ICTXT_NEW
691*
692* Get information about new grid.
693*
694 CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
695*
696* Drop out processors that do not have part of the matrix.
697*
698 IF( MYROW.LT.0 ) THEN
699 GO TO 100
700 END IF
701*
702*
703*
704* Begin main code
705*
706* Move data into workspace - communicate/copy (overlap)
707*
708 IF( MYCOL.LT.NPCOL-1 ) THEN
709 CALL SGESD2D( ICTXT, BWU, NRHS, B( NB-BWU+1 ), LLDB, 0,
710 $ MYCOL+1 )
711 END IF
712*
713 IF( MYCOL.LT.NPCOL-1 ) THEN
714 LM = NB - BWU
715 ELSE
716 LM = NB
717 END IF
718*
719 IF( MYCOL.GT.0 ) THEN
720 WPTR = BWU + 1
721 ELSE
722 WPTR = 1
723 END IF
724*
725 LDW = NB + BWU + 2*BW + BWU
726*
727 CALL SLAMOV( 'g', LM, NRHS, B( 1 ), LLDB, WORK( WPTR ), LDW )
728*
729* Zero out rest of work
730*
731 DO 20 J = 1, NRHS
732 DO 10 L = WPTR + LM, LDW
733 WORK( ( J-1 )*LDW+L ) = ZERO
734 10 CONTINUE
735 20 CONTINUE
736*
737 IF( MYCOL.GT.0 ) THEN
738 CALL SGERV2D( ICTXT, BWU, NRHS, WORK( 1 ), LDW, 0, MYCOL-1 )
739 END IF
740*
741********************************************************************
742* PHASE 1: Local computation phase -- Solve L*X = B
743********************************************************************
744*
745* Size of main (or odd) partition in each processor
746*
747 ODD_SIZE = NUMROC( N, NB, MYCOL, 0, NPCOL )
748*
749 IF( MYCOL.NE.0 ) THEN
750 LBWL = BW
751 LBWU = 0
752 APTR = 1
753 ELSE
754 LBWL = BWL
755 LBWU = BWU
756 APTR = 1 + BWU
757 END IF
758*
759 IF( MYCOL.NE.NPCOL-1 ) THEN
760 LM = NB - LBWU
761 LN = NB - BW
762 ELSE IF( MYCOL.NE.0 ) THEN
763 LM = ODD_SIZE + BWU
764 LN = MAX( ODD_SIZE-BW, 0 )
765 ELSE
766 LM = N
767 LN = MAX( N-BW, 0 )
768 END IF
769*
770 DO 30 J = 1, LN
771*
772 LMJ = MIN( LBWL, LM-J )
773 L = IPIV( J )
774*
775 IF( L.NE.J ) THEN
776 CALL SSWAP( NRHS, WORK( L ), LDW, WORK( J ), LDW )
777 END IF
778*
779 LPTR = BW + 1 + ( J-1 )*LLDA + APTR
780*
781 CALL SGER( LMJ, NRHS, -ONE, A( LPTR ), 1, WORK( J ), LDW,
782 $ WORK( J+1 ), LDW )
783*
784 30 CONTINUE
785*
786********************************************************************
787* PHASE 2: Global computation phase -- Solve L*X = B
788********************************************************************
789*
790* Define the initial dimensions of the diagonal blocks
791* The offdiagonal blocks (for MYCOL > 0) are of size BM by BW
792*
793 IF( MYCOL.NE.NPCOL-1 ) THEN
794 BM = BW - LBWU
795 BN = BW
796 ELSE
797 BM = MIN( BW, ODD_SIZE ) + BWU
798 BN = MIN( BW, ODD_SIZE )
799 END IF
800*
801* Pointer to first element of block bidiagonal matrix in AF
802* Leading dimension of block bidiagonal system
803*
804 BBPTR = ( NB+BWU )*BW + 1
805 LDBB = 2*BW + BWU
806*
807 IF( NPCOL.EQ.1 ) THEN
808*
809* In this case the loop over the levels will not be
810* performed.
811 CALL SGETRS( 'n', N-LN, NRHS, AF( BBPTR+BW*LDBB ), LDBB,
812 $ IPIV( LN+1 ), WORK( LN+1 ), LDW, INFO )
813*
814 END IF
815*
816* Loop over levels ...
817*
818* The two integers NPACT (number of active processors) and NPSTR
819* (stride between active processors) are used to control the
820* loop.
821*
822 NPACT = NPCOL
823 NPSTR = 1
824*
825* Begin loop over levels
826 40 CONTINUE
827 IF( NPACT.LE.1 )
828 $ GO TO 50
829*
830* Test if processor is active
831 IF( MOD( MYCOL, NPSTR ).EQ.0 ) THEN
832*
833* Send/Receive blocks
834*
835 IF( MOD( MYCOL, 2*NPSTR ).EQ.0 ) THEN
836*
837 NEICOL = MYCOL + NPSTR
838*
839 IF( NEICOL / NPSTR.LE.NPACT-1 ) THEN
840*
841 IF( NEICOL / NPSTR.LT.NPACT-1 ) THEN
842 BMN = BW
843 ELSE
844 BMN = MIN( BW, NUMROC( N, NB, NEICOL, 0, NPCOL ) ) +
845 $ BWU
846 END IF
847*
848 CALL SGESD2D( ICTXT, BM, NRHS, WORK( LN+1 ), LDW, 0,
849 $ NEICOL )
850*
851 IF( NPACT.NE.2 ) THEN
852*
853* Receive answers back from partner processor
854*
855 CALL SGERV2D( ICTXT, BM+BMN-BW, NRHS, WORK( LN+1 ),
856 $ LDW, 0, NEICOL )
857*
858 BM = BM + BMN - BW
859*
860 END IF
861*
862 END IF
863*
864 ELSE
865*
866 NEICOL = MYCOL - NPSTR
867*
868 IF( NEICOL.EQ.0 ) THEN
869 BMN = BW - BWU
870 ELSE
871 BMN = BW
872 END IF
873*
874 CALL SLAMOV( 'g', BM, NRHS, WORK( LN+1 ), LDW,
875 $ WORK( NB+BWU+BMN+1 ), LDW )
876*
877 CALL SGERV2D( ICTXT, BMN, NRHS, WORK( NB+BWU+1 ), LDW, 0,
878 $ NEICOL )
879*
880* and do the permutations and eliminations
881*
882 IF( NPACT.NE.2 ) THEN
883*
884* Solve locally for BW variables
885*
886 CALL SLASWP( NRHS, WORK( NB+BWU+1 ), LDW, 1, BW,
887 $ IPIV( LN+1 ), 1 )
888*
889 CALL STRSM( 'l', 'l', 'n', 'u', BW, NRHS, ONE,
890 $ AF( BBPTR+BW*LDBB ), LDBB, WORK( NB+BWU+1 ),
891 $ LDW )
892*
893* Use soln just calculated to update RHS
894*
895 CALL SGEMM( 'n', 'n', BM+BMN-BW, NRHS, BW, -ONE,
896 $ AF( BBPTR+BW*LDBB+BW ), LDBB,
897 $ WORK( NB+BWU+1 ), LDW, ONE,
898 $ WORK( NB+BWU+1+BW ), LDW )
899*
900* Give answers back to partner processor
901*
902 CALL SGESD2D( ICTXT, BM+BMN-BW, NRHS,
903 $ WORK( NB+BWU+1+BW ), LDW, 0, NEICOL )
904*
905 ELSE
906*
907* Finish up calculations for final level
908*
909 CALL SLASWP( NRHS, WORK( NB+BWU+1 ), LDW, 1, BM+BMN,
910 $ IPIV( LN+1 ), 1 )
911*
912 CALL STRSM( 'l', 'l', 'n', 'u', BM+BMN, NRHS, ONE,
913 $ AF( BBPTR+BW*LDBB ), LDBB, WORK( NB+BWU+1 ),
914 $ LDW )
915 END IF
916*
917 END IF
918*
919 NPACT = ( NPACT+1 ) / 2
920 NPSTR = NPSTR*2
921 GO TO 40
922*
923 END IF
924*
925 50 CONTINUE
926*
927*
928**************************************
929* BACKSOLVE
930********************************************************************
931* PHASE 2: Global computation phase -- Solve U*Y = X
932********************************************************************
933*
934 IF( NPCOL.EQ.1 ) THEN
935*
936* In this case the loop over the levels will not be
937* performed.
938* In fact, the backsolve portion was done in the call to
939* SGETRS in the frontsolve.
940*
941 END IF
942*
943* Compute variable needed to reverse loop structure in
944* reduced system.
945*
946 RECOVERY_VAL = NPACT*NPSTR - NPCOL
947*
948* Loop over levels
949* Terminal values of NPACT and NPSTR from frontsolve are used
950*
951 60 CONTINUE
952 IF( NPACT.GE.NPCOL )
953 $ GO TO 80
954*
955 NPSTR = NPSTR / 2
956*
957 NPACT = NPACT*2
958*
959* Have to adjust npact for non-power-of-2
960*
961 NPACT = NPACT - MOD( ( RECOVERY_VAL / NPSTR ), 2 )
962*
963* Find size of submatrix in this proc at this level
964*
965 IF( MYCOL / NPSTR.LT.NPACT-1 ) THEN
966 BN = BW
967 ELSE
968 BN = MIN( BW, NUMROC( N, NB, NPCOL-1, 0, NPCOL ) )
969 END IF
970*
971* If this processor is even in this level...
972*
973 IF( MOD( MYCOL, 2*NPSTR ).EQ.0 ) THEN
974*
975 NEICOL = MYCOL + NPSTR
976*
977 IF( NEICOL / NPSTR.LE.NPACT-1 ) THEN
978*
979 IF( NEICOL / NPSTR.LT.NPACT-1 ) THEN
980 BMN = BW
981 BNN = BW
982 ELSE
983 BMN = MIN( BW, NUMROC( N, NB, NEICOL, 0, NPCOL ) ) + BWU
984 BNN = MIN( BW, NUMROC( N, NB, NEICOL, 0, NPCOL ) )
985 END IF
986*
987 IF( NPACT.GT.2 ) THEN
988*
989 CALL SGESD2D( ICTXT, 2*BW, NRHS, WORK( LN+1 ), LDW, 0,
990 $ NEICOL )
991*
992 CALL SGERV2D( ICTXT, BW, NRHS, WORK( LN+1 ), LDW, 0,
993 $ NEICOL )
994*
995 ELSE
996*
997 CALL SGERV2D( ICTXT, BW, NRHS, WORK( LN+1 ), LDW, 0,
998 $ NEICOL )
999*
1000 END IF
1001*
1002 END IF
1003*
1004 ELSE
1005* This processor is odd on this level
1006*
1007 NEICOL = MYCOL - NPSTR
1008*
1009 IF( NEICOL.EQ.0 ) THEN
1010 BMN = BW - BWU
1011 ELSE
1012 BMN = BW
1013 END IF
1014*
1015 IF( NEICOL.LT.NPCOL-1 ) THEN
1016 BNN = BW
1017 ELSE
1018 BNN = MIN( BW, NUMROC( N, NB, NEICOL, 0, NPCOL ) )
1019 END IF
1020*
1021 IF( NPACT.GT.2 ) THEN
1022*
1023* Move RHS to make room for received solutions
1024*
1025 CALL SLAMOV( 'g', BW, NRHS, WORK( NB+BWU+1 ), LDW,
1026 $ WORK( NB+BWU+BW+1 ), LDW )
1027*
1028 CALL SGERV2D( ICTXT, 2*BW, NRHS, WORK( LN+1 ), LDW, 0,
1029 $ NEICOL )
1030*
1031 CALL SGEMM( 'n', 'n', BW, NRHS, BN, -ONE, AF( BBPTR ), LDBB,
1032 $ WORK( LN+1 ), LDW, ONE, WORK( NB+BWU+BW+1 ),
1033 $ LDW )
1034*
1035*
1036 IF( MYCOL.GT.NPSTR ) THEN
1037*
1038 CALL SGEMM( 'n', 'n', BW, NRHS, BW, -ONE,
1039 $ AF( BBPTR+2*BW*LDBB ), LDBB, WORK( LN+BW+1 ),
1040 $ LDW, ONE, WORK( NB+BWU+BW+1 ), LDW )
1041*
1042 END IF
1043*
1044 CALL STRSM( 'l', 'u', 'n', 'n', bw, nrhs, one,
1045 $ af( bbptr+bw*ldbb ), ldbb, work( nb+bwu+bw+1 ),
1046 $ ldw )
1047*
1048* Send new solution to neighbor
1049*
1050 CALL sgesd2d( ictxt, bw, nrhs, work( nb+bwu+bw+1 ), ldw, 0,
1051 $ neicol )
1052*
1053* Copy new solution into expected place
1054*
1055 CALL slamov( 'G', bw, nrhs, work( nb+bwu+1+bw ), ldw,
1056 $ work( ln+bw+1 ), ldw )
1057*
1058 ELSE
1059*
1060* Solve with local diagonal block
1061*
1062 CALL strsm( 'L', 'U', 'N', 'N', bn+bnn, nrhs, one,
1063 $ af( bbptr+bw*ldbb ), ldbb, work( nb+bwu+1 ),
1064 $ ldw )
1065*
1066* Send new solution to neighbor
1067*
1068 CALL sgesd2d( ictxt, bw, nrhs, work( nb+bwu+1 ), ldw, 0,
1069 $ neicol )
1070*
1071* Shift solutions into expected positions
1072*
1073 CALL slamov( 'G', bnn+bn-bw, nrhs, work( nb+bwu+1+bw ), ldw,
1074 $ work( ln+1 ), ldw )
1075*
1076*
1077 IF( ( nb+bwu+1 ).NE.( ln+1+bw ) ) THEN
1078*
1079* Copy one row at a time since spaces may overlap
1080*
1081 DO 70 j = 1, bw
1082 CALL scopy( nrhs, work( nb+bwu+j ), ldw,
1083 $ work( ln+bw+j ), ldw )
1084 70 CONTINUE
1085*
1086 END IF
1087*
1088 END IF
1089*
1090 END IF
1091*
1092 GO TO 60
1093*
1094 80 CONTINUE
1095* End of loop over levels
1096*
1097********************************************************************
1098* PHASE 1: (Almost) Local computation phase -- Solve U*Y = X
1099********************************************************************
1100*
1101* Reset BM to value it had before reduced system frontsolve...
1102*
1103 IF( mycol.NE.npcol-1 ) THEN
1104 bm = bw - lbwu
1105 ELSE
1106 bm = min( bw, odd_size ) + bwu
1107 END IF
1108*
1109* First metastep is to account for the fillin blocks AF
1110*
1111 IF( mycol.LT.npcol-1 ) THEN
1112*
1113 CALL sgesd2d( ictxt, bw, nrhs, work( nb-bw+1 ), ldw, 0,
1114 $ mycol+1 )
1115*
1116 END IF
1117*
1118 IF( mycol.GT.0 ) THEN
1119*
1120 CALL sgerv2d( ictxt, bw, nrhs, work( nb+bwu+1 ), ldw, 0,
1121 $ mycol-1 )
1122*
1123* Modify local right hand sides with received rhs's
1124*
1125 CALL sgemm( 'T', 'N', lm-bm, nrhs, bw, -one, af( 1 ), bw,
1126 $ work( nb+bwu+1 ), ldw, one, work( 1 ), ldw )
1127*
1128 END IF
1129*
1130 DO 90 j = ln, 1, -1
1131*
1132 lmj = min( bw, odd_size-1 )
1133*
1134 lptr = bw - 1 + j*llda + aptr
1135*
1136* In the following, the TRANS=T option is used to reverse
1137* the order of multiplication, not as a true transpose
1138*
1139 CALL sgemv( 'T', lmj, nrhs, -one, work( j+1 ), ldw, a( lptr ),
1140 $ llda-1, one, work( j ), ldw )
1141*
1142* Divide by diagonal element
1143*
1144 CALL sscal( nrhs, one / a( lptr-llda+1 ), work( j ), ldw )
1145 90 CONTINUE
1146*
1147*
1148*
1149 CALL slamov( 'G', odd_size, nrhs, work( 1 ), ldw, b( 1 ), lldb )
1150*
1151* Free BLACS space used to hold standard-form grid.
1152*
1153 ictxt = ictxt_save
1154 IF( ictxt.NE.ictxt_new ) THEN
1155 CALL blacs_gridexit( ictxt_new )
1156 END IF
1157*
1158 100 CONTINUE
1159*
1160* Restore saved input parameters
1161*
1162 np = np_save
1163*
1164* Output worksize
1165*
1166 work( 1 ) = work_size_min
1167*
1168 RETURN
1169*
1170* End of PSGBTRS
1171*