SUBROUTINE telesc(N_JOINT,A,AR,V,VR,X,FS,MS,IN,ITASK)
! - inertia / acc / force computations for the secondary nodes
#include "implicit_f.inc"
INTEGER, INTENT(in) :: N_JOINT,ITASK
. a(3,*), ar(3,*), v(3,*), vr(3,*), x(3,*), fs(*), ms(*),
INTEGER NSN, NA, NB, N
INTEGER :: NUMBER_NODE,NUMBER_NODE_WEIGHT
REAL(kind=8), dimension(:), ALLOCATABLE :: buf_s,buf_r
number_node = cyl_join(n_joint)%NUMBER_NODE
number_node_weight = cyl_join(n_joint)%NUMBER_NODE_WEIGHT
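! norm of the joint axis vector (n1,n2,n3), presumably used to normalize the axis direction below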
s=sqrt(n1**2+n2**2+n3**2)
ALLOCATE( mass(number_node_weight) )
ALLOCATE( x_ms(number_node_weight),y_ms(number_node_weight),z_ms(number_node_weight) )
ALLOCATE( iner_vec(number_node_weight) )
ALLOCATE(ax_ms(number_node_weight),ay_ms(number_node_weight),az_ms(number_node_weight))
ALLOCATE(axx_vec(number_node_weight),ayy_vec(number_node_weight),azz_vec(number_node_weight))
ALLOCATE(vx_ms(number_node_weight),vy_ms(number_node_weight),vz_ms(number_node_weight))
ALLOCATE(vxx_vec(number_node_weight),vyy_vec(number_node_weight),vzz_vec(number_node_weight))
ALLOCATE( mass_6(6,nthread) )
ALLOCATE( iner_6(6,nthread) )
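! per-thread columns of the 6-part sums (presumably the 6-term decomposition used for reproducible parallel summation); each thread clears its own column (itask+1) below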
mass_6(1:6,itask+1) = zero
x_ms_6(1:6,itask+1) = zero
y_ms_6(1:6,itask+1) = zero
z_ms_6(1:6,itask+1) = zero
iner_6(1:6,itask+1) = zero
axx_6(1:6,itask+1) = zero
ayy_6(1:6,itask+1) = zero
azz_6(1:6,itask+1) = zero
vxx_6(1:6,itask+1) = zero
vyy_6(1:6,itask+1) = zero
vzz_6(1:6,itask+1) = zero
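! first pass over the weighted nodes, split statically between the nthread threads: nodal mass and mass-weighted coordinates feeding the joint centre of gravity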
IF(number_node_weight>0) THEN
first = 1 + itask * number_node_weight / nthread
last = (itask+1) * number_node_weight / nthread
n = cyl_join(n_joint)%NODE_WEIGHT(i)
x_ms(i) = x(1,n)*ms(n)
y_ms(i) = x(2,n)*ms(n)
z_ms(i) = x(3,n)*ms(n)
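! pack the mass / x_ms / y_ms / z_ms 6-part sums into an exchange buffer; the 13*6 size matches the larger second reduction below so the same buffers are reused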
ALLOCATE( buf_s(13*6) )
ALLOCATE( buf_r(13*6) )
buf_s(1:6) = mass_6(1:6,1)
buf_s(7:12) = x_ms_6(1:6,1)
buf_s(13:18) = y_ms_6(1:6,1)
buf_s(19:24) = z_ms_6(1:6,1)
buf_s(1:6) = buf_s(1:6) + mass_6(1:6,i)
buf_s(7:12) = buf_s(7:12) + x_ms_6(1:6,i)
buf_s(13:18) = buf_s(13:18) + y_ms_6(1:6,i)
buf_s(19:24) = buf_s(19:24) + z_ms_6(1:6,i)
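! sum the packed buffer over the SPMD domains attached to this joint; the plain copy below is presumably the single-domain case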
CALL spmd_allreduce(buf_s,buf_r,24,spmd_sum,cyl_join(n_joint)%COMM_MPI%COMM)
buf_r(1:24) = buf_s(1:24)
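! accumulate the 6 parts of each reduced quantity: global mass and mass-weighted centre-of-gravity coordinates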
masse_global = masse_global + buf_r(i)
xcdg_global = xcdg_global + buf_r(6+i)
ycdg_global = ycdg_global + buf_r(12+i)
zcdg_global = zcdg_global + buf_r(18+i)
! --------------------
! partial reduction of iner / ax / ay / az / axx / ayy / azz / vxx / vyy / vzz
IF(number_node_weight>0) THEN
first = 1 + itask * number_node_weight / nthread
last = (itask+1) * number_node_weight / nthread
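! second pass: per-node inertia about the joint (rr appears to be the node distance, combined with the nodal inertia in(n)), plus mass- and inertia-weighted acceleration and velocity terms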
n = cyl_join(n_joint)%NODE_WEIGHT(i)
iner_vec(i) = rr**2*ms(n)+in(n)
ax_ms(i) = a(1,n)*ms(n)
ay_ms(i) = a(2,n)*ms(n)
az_ms(i) = a(3,n)*ms(n)
axx_vec(i) = ar(1,n)*in(n)+yy*a(3,n)*ms(n)-zz*a(2,n)*ms(n)
ayy_vec(i) = ar(2,n)*in(n)+zz*a(1,n)*ms(n)-xx*a(3,n)*ms(n)
azz_vec(i) = ar(3,n)*in(n)+xx*a(2,n)*ms(n)-yy*a(1,n)*ms(n)
vx_ms(i) = v(1,n)*ms(n)
vy_ms(i) = v(2,n)*ms(n)
vz_ms(i) = v(3,n)*ms(n)
vxx_vec(i) = vr(1,n)*in(n)+yy*v(3,n)*ms(n)-zz*v(2,n)*ms(n)
vyy_vec(i) = vr(2,n)*in(n)+zz*v(1,n)*ms(n)-xx*v(3,n)*ms(n)
vzz_vec(i) = vr(3,n)*in(n)+xx*v(2,n)*ms(n)-yy*v(1,n)*ms(n)
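! pack the 13 six-part sums (iner, ax, ay, az, axx, ayy, azz, vx, vy, vz, vxx, vyy, vzz) for the second reduction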
buf_s(1:6) = iner_6(1:6,itask+1)
buf_s(7:12) = ax_ms_6(1:6,itask+1)
buf_s(13:18) = ay_ms_6(1:6,itask+1)
buf_s(19:24) = az_ms_6(1:6,itask+1)
buf_s(25:30) = axx_6(1:6,itask+1)
buf_s(31:36) = ayy_6(1:6,itask+1)
buf_s(37:42) = azz_6(1:6,itask+1)
buf_s(43:48) = vx_ms_6(1:6,itask+1)
buf_s(49:54) = vy_ms_6(1:6,itask+1)
buf_s(55:60) = vz_ms_6(1:6,itask+1)
buf_s(61:66) = vxx_6(1:6,itask+1)
buf_s(67:72) = vyy_6(1:6,itask+1)
buf_s(73:78) = vzz_6(1:6,itask+1)
buf_s(1:6) = buf_s(1:6) + iner_6(1:6,i)
buf_s(7:12) = buf_s(7:12) + ax_ms_6(1:6,i)
buf_s(19:24) = buf_s(19:24) + az_ms_6(1:6,i)
buf_s(25:30) = buf_s(25:30) + axx_6(1:6,i)
buf_s(31:36) = buf_s(31:36) + ayy_6(1:6,i)
buf_s(37:42) = buf_s(37:42) + azz_6(1:6,i)
buf_s(43:48) = buf_s(43:48) + vx_ms_6(1:6,i)
buf_s(49:54) = buf_s(49:54) + vy_ms_6(1:6,i)
buf_s(55:60) = buf_s(55:60) + vz_ms_6(1:6,i)
buf_s(61:66) = buf_s(61:66) + vxx_6(1:6,i)
buf_s(67:72) = buf_s(67:72) + vyy_6(1:6,i)
buf_s(73:78) = buf_s(73:78) + vzz_6(1:6,i)
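! same exchange pattern as for the mass reduction: sum over the joint's SPMD domains, or a local copy otherwise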
CALL spmd_allreduce(buf_s,buf_r,13*6,spmd_sum,cyl_join(n_joint)%COMM_MPI%COMM)
buf_r(1:78) = buf_s(1:78)
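! unpack: accumulate the 6 parts of each of the 13 reduced quantities into the corresponding *_global scalars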
iner_global = iner_global + buf_r(i)
ax_global = ax_global + buf_r(6+i)
ay_global = ay_global + buf_r(12+i)
az_global = az_global + buf_r(18+i)
axx_global = axx_global + buf_r(24+i)
ayy_global = ayy_global + buf_r(30+i)
azz_global = azz_global + buf_r(36+i)
vx_global = vx_global + buf_r(42+i)
vy_global = vy_global + buf_r(48+i)
vz_global = vz_global + buf_r(54+i)
vxx_global = vxx_global + buf_r(60+i)
vyy_global = vyy_global + buf_r(66+i)
vzz_global = vzz_global + buf_r(72+i)
! --------------------
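! axial components (dot product with the joint axis n1,n2,n3) of the joint angular acceleration, then of the angular velocity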
a0=n1*axx+n2*ayy+n3*azz
a0=n1*vxx+n2*vyy+n3*vzz
IF(cyl_join(n_joint)%PROC_MAIN==ispmd+1) THEN
#include "lockoff.inc"
first = 1 + itask * number_node / nthread
last = (itask+1) * number_node / nthread
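! apply the joint kinematics back to each node: the transverse part is replaced by the joint's rigid-body motion (translation plus rotation about its centre), while the component along the axis keeps the node's own projected value a0, which is presumably the free telescopic DOF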
a0=n1*a(1,n)+n2*a(2,n)+n3*a(3,n)
a(1,n)=ax-yy*azz+zz*ayy+n1*a0
a(2,n)=ay-zz*axx+xx*azz+n2*a0
a(3,n)=az-xx*ayy+yy*axx+n3*a0
a0=n1*ar(1,n)+n2*ar(2,n)+n3*ar(3,n)
a0=n1*v(1,n)+n2*v(2,n)+n3*v(3,n)
v(1,n)=vx-yy*vzz+zz*vyy+n1*a0
v(2,n)=vy-zz*vxx+xx*vzz+n2*a0
v(3,n)=vz-xx*vyy+yy*vxx+n3*a0
a0=n1*vr(1,n)+n2*vr(2,n)+n3*vr(3,n)
DEALLOCATE( x_ms,y_ms,z_ms )
DEALLOCATE( iner_vec )
DEALLOCATE(ax_ms,ay_ms,az_ms)
DEALLOCATE(axx_vec,ayy_vec,azz_vec)
DEALLOCATE(vx_ms,vy_ms,vz_ms)
DEALLOCATE(vxx_vec,vyy_vec,vzz_vec)
DEALLOCATE( buf_s,buf_r)