MPI-AMRVAC 2.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
convert.t
1 !=============================================================================
2 subroutine generate_plotfile
7 !-----------------------------------------------------------------------------
8 
9 if(mype==0.and.level_io>0) write(unitterm,*)'reset tree to fixed level=',level_io
10 if(level_io>0 .or. level_io_min.ne.1 .or. level_io_max.ne.nlevelshi) then
12 else if(.not. phys_req_diagonal) then
13  call getbc(global_time,0.d0,ps,0,nwflux+nwaux)
14 end if
15 
16 select case(convert_type)
17  case('tecplot','tecplotCC','tecline')
18  call tecplot(unitconvert)
19  case('tecplotmpi','tecplotCCmpi','teclinempi')
21  case('vtu','vtuCC')
23  case('vtumpi','vtuCCmpi')
25  case('vtuB','vtuBCC','vtuBmpi','vtuBCCmpi')
27  case('pvtumpi','pvtuCCmpi')
29  case('pvtuBmpi','pvtuBCCmpi')
31  case('vtimpi','vtiCCmpi')
33  case('onegrid','onegridmpi')
34  call onegrid(unitconvert)
35  case('oneblock','oneblockB')
36  call oneblock(unitconvert)
37  case('user','usermpi')
38  if (.not. associated(usr_special_convert)) then
39  call mpistop("usr_special_convert not defined")
40  else
42  end if
43  case default
44  call mpistop("Error in generate_plotfile: Unknown convert_type")
45 end select
46 
47 end subroutine generate_plotfile
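! The dispatch above is driven by convert_type, set in the &filelist namelist of
! the .par file. A minimal sketch of such a namelist follows; the parameter names
! (base_filename, convert_type, autoconvert, nwauxio, level_io) all occur in this
! file, but the concrete values are only an assumed example, not a prescription:
!
!   &filelist
!     base_filename = 'output/test'
!     convert_type  = 'vtuBCCmpi'   ! parallel binary VTU with cell-centered data
!     autoconvert   = .true.        ! convert during the run, using snapshotnext
!     nwauxio       = 0             ! number of extra user-computed output variables
!     level_io      = 2             ! optional: >0 resets the tree to this fixed level
!   /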
48 !=============================================================================
49 subroutine oneblock(qunit)
50 ! this is for turning an AMR run into a single block
51 ! all data will be on the selected level level_io
52 
53 ! this version should work for any dimension
54 ! only writes the w_write-selected variables among 1:nw, plus the nwauxio auxiliaries
55 ! may use saveprim to switch to primitive variables
56 ! this version cannot run on multiple CPUs
57 ! applies the length/w convert factors (normconv) to the output
58 
59 ! header info differs from onegrid below
60 
61 ! ASCII or binary output
62 
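! resulting .blk layout (both ASCII 'oneblock' and unformatted 'oneblockB'):
! one header record with the variable names, one record with ncells and the
! per-direction cell counts, one record with the output time, then one record
! per cell holding x(1:ndim) and the selected w values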
66 use mod_physics
68 integer, intent(in) :: qunit
69 
70 integer :: Morton_no,igrid,ix^D,ig^D,level
71 integer, pointer :: ig_to_igrid(:^d&,:)
72 logical :: fileopen,writeblk(max_blocks)
73 character(len=80) :: filename
74 integer :: filenr,ncells,ncells^D,ncellg,ncellx^D,jg^D,jig^D
75 
76 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
77 character(len=1024) :: outfilehead
78 
79 double precision :: wval1,xval1
80 double precision, dimension({^D&1:1},1:nw+nwauxio) :: wval
81 double precision, dimension({^D&1:1},1:ndim) :: xval
82 double precision:: normconv(0:nw+nwauxio)
83 
84 integer :: iw,iiw,writenw,iwrite(1:nw+nwauxio),iigrid,idim
85 logical :: patchw(ixg^t)
86 !-----------------------------------------------------------------------------
87 
88 if(level_io<1)then
89  call mpistop('please specify level_io>0 for usage with oneblock')
90 end if
91 
92 if(npe>1)then
93  if(mype==0) print *,'ONEBLOCK is not yet parallelized'
94  call mpistop('npe>1, oneblock')
95 end if
96 
97 ! only variables selected by w_write will be written out
98 normconv(0:nw+nwauxio)=one
99 normconv(0) = length_convert_factor
100 normconv(1:nw) = w_convert_factor
101 writenw=count(w_write(1:nw))+nwauxio
102 iiw=0
103 do iw =1,nw
104  if (.not.w_write(iw))cycle
105  iiw=iiw+1
106  iwrite(iiw)=iw
107 end do
108 if(nwauxio>0)then
109  do iw =nw+1,nw+nwauxio
110  iiw=iiw+1
111  iwrite(iiw)=iw
112  end do
113 endif
114 
115 allocate(ig_to_igrid(ng^d(level_io),0:npe-1))
116 ig_to_igrid=-1
117 writeblk=.false.
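! a block is written only when it lies entirely inside the clipped region:
! writespshift(idim,1:2) are the fractions of the domain extent cut away from
! the lower and upper boundary in direction idim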
118 do morton_no=morton_start(mype),morton_stop(mype)
119  igrid=sfc_to_igrid(morton_no)
120  level=node(plevel_,igrid)
121  ig^d=igrid_to_node(igrid,mype)%node%ig^d;
122  ig_to_igrid(ig^d,mype)=igrid
123  if(({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
124  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
125  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})) then
126  writeblk(igrid)=.true.
127  end if
128 end do
129 
130 call getheadernames(wnamei,xandwnamei,outfilehead)
131 ncells=0
132 ncells^d=0;
133 ncellg=(^d&(ixmhi^d-ixmlo^d+1)*)
134 ncellx^d=ixmhi^d-ixmlo^d+1\
135 {do ig^d=1,ng^d(level_io)\}
136  igrid=ig_to_igrid(ig^d,mype)
137  if(writeblk(igrid)) go to 20
138 {end do\}
139 20 continue
140 jg^d=ig^d;
141 {
142 jig^dd=jg^dd;
143 do ig^d=1,ng^d(level_io)
144  jig^d=ig^d
145  igrid=ig_to_igrid(jig^dd,mype)
146  if(writeblk(igrid)) ncells^d=ncells^d+ncellx^d
147 end do
148 \}
149 
150 do iigrid=1,igridstail; igrid=igrids(iigrid)
151  if(.not.writeblk(igrid)) cycle
152  ncells=ncells+ncellg
153  ps1(igrid)%w(ixg^t,1:nw)=ps(igrid)%w(ixg^t,1:nw)
154 
155  if (nwauxio > 0) then
156  if (.not. associated(usr_aux_output)) then
157  call mpistop("usr_aux_output not defined")
158  else
159  call usr_aux_output(ixg^ll,ixm^ll^ladd1, &
160  ps1(igrid)%w,ps(igrid)%x,normconv)
161  end if
162  end if
163 end do
164 
165 if (saveprim) then
166  do iigrid=1,igridstail; igrid=igrids(iigrid)
167  if (.not.writeblk(igrid)) cycle
168  call phys_to_primitive(ixg^ll,ixg^ll^lsub1,ps1(igrid)%w,ps(igrid)%x)
169  if (allocated(ps(igrid)%B0)) then
170  ! add background magnetic field B0 to B
171  ps1(igrid)%w(ixg^t,iw_mag(:))=ps1(igrid)%w(ixg^t,iw_mag(:))+ps(igrid)%B0(ixg^t,:,0)
172  end if
173  end do
174 else
175  do iigrid=1,igridstail; igrid=igrids(iigrid)
176  if (.not.writeblk(igrid)) cycle
177  if (allocated(ps(igrid)%B0)) then
178  ! add background magnetic field B0 to B
179  if(phys_energy) &
180  ps1(igrid)%w(ixg^t,iw_e)=ps1(igrid)%w(ixg^t,iw_e)+0.5d0*sum(ps(igrid)%B0(ixg^t,:,0)**2,dim=ndim+1) &
181  + sum(ps1(igrid)%w(ixg^t,iw_mag(:))*ps(igrid)%B0(ixg^t,:,0),dim=ndim+1)
182  ps1(igrid)%w(ixg^t,iw_mag(:))=ps1(igrid)%w(ixg^t,iw_mag(:))+ps(igrid)%B0(ixg^t,:,0)
183  end if
184  end do
185 end if
186 
187 master_cpu_open : if (mype == 0) then
188  inquire(qunit,opened=fileopen)
189  if (.not.fileopen) then
190  ! generate filename
191  filenr=snapshotini
192  if (autoconvert) filenr=snapshotnext
193  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".blk"
194  select case(convert_type)
195  case("oneblock")
196  open(qunit,file=filename,status='unknown')
197  write(qunit,*) trim(outfilehead)
198  write(qunit,*) ncells,ncells^d
199  write(qunit,*) global_time*time_convert_factor
200  case("oneblockB")
201  open(qunit,file=filename,form='unformatted',status='unknown')
202  write(qunit) outfilehead
203  write(qunit) ncells,ncells^d
204  write(qunit) global_time*time_convert_factor
205  end select
206  end if
207 end if master_cpu_open
208 
209 {^ifthreed
210 do ig3=1,ng3(level_io)
211  do ix3=ixmlo3,ixmhi3}
212 
213  {^nooned
214  do ig2=1,ng2(level_io)
215  do ix2=ixmlo2,ixmhi2}
216 
217  do ig1=1,ng1(level_io)
218  igrid=ig_to_igrid(ig^d,mype)
219  if(.not.writeblk(igrid)) cycle
220  do ix1=ixmlo1,ixmhi1
221  master_write : if(mype==0) then
222  select case(convert_type)
223  case("oneblock")
224  write(qunit,fmt="(100(e14.6))") &
225  ps(igrid)%x(ix^d,1:ndim)*normconv(0),&
226  (ps1(igrid)%w(ix^d,iwrite(iw))*normconv(iwrite(iw)),iw=1,writenw)
227  case("oneblockB")
228  write(qunit) real(ps(igrid)%x(ix^D,1:ndim)*normconv(0)),&
229  (real(ps1(igrid)%w(ix^D,iwrite(iw))*normconv(iwrite(iw))),iw=1,writenw)
230  end select
231  end if master_write
232  end do
233  end do
234  {^nooned
235  end do
236  end do}
237  {^ifthreed
238  end do
239 end do}
240 
241 close(qunit)
242 
243 end subroutine oneblock
244 !=============================================================================
245 subroutine onegrid(qunit)
247 ! this is for turning an AMR run into a single grid
248 ! this version should work for any dimension and can run in parallel
249 ! in 1D, should behave much like oneblock, except for header info
250 
251 ! writes all 1:nw variables; nwauxio is not supported
252 ! may use saveprim to switch to primitive variables
253 ! this version can work on multiple CPUs
254 ! does not renormalize variables
255 ! ASCII output
256 
259 use mod_physics
261 integer, intent(in) :: qunit
262 
263 integer :: Morton_no,igrid,ix^D,iw
264 logical :: fileopen
265 character(len=80) :: filename
266 integer :: filenr
267 
268 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
269 character(len=1024) :: outfilehead
270 
271 !.. MPI variables ..
272 integer :: igrid_recv,ipe
273 double precision :: w_recv(ixg^t,1:nw),x_recv(ixg^t,1:ndim)
274 integer, allocatable :: intstatus(:,:)
275 
276 !-----------------------------------------------------------------------------
277 
278 if(nwauxio>0)then
279  if(mype==0) print *,'ONEGRID must be used without nwauxio'
280  call mpistop('nwauxio>0, onegrid')
281 end if
282 
283 if(saveprim)then
284  if(mype==0.and.nwaux>0) print *,'warning: ONEGRID used with saveprim, check auxiliaries'
285 end if
286 
287 
288 
289 master_cpu_open : if (mype == 0) then
290  call getheadernames(wnamei,xandwnamei,outfilehead)
291  write(outfilehead,'(a)') "#"//" "//trim(outfilehead)
292  inquire(qunit,opened=fileopen)
293  if (.not.fileopen) then
294  ! generate filename
295  filenr=snapshotini
296  if (autoconvert) filenr=snapshotnext
297  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".blk"
298  open(qunit,file=filename,status='unknown')
299  end if
300  write(qunit,"(a)")outfilehead
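! total number of output points = cells per block times the global number of leaf blocks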
301  write(qunit,"(i7)") ( {^d&(ixmhi^d-ixmlo^d+1)*} )*(morton_stop(npe-1)-morton_start(0)+1)
302 end if master_cpu_open
303 
304 do morton_no=morton_start(mype),morton_stop(mype)
305  igrid=sfc_to_igrid(morton_no)
306  if(saveprim) call phys_to_primitive(ixg^ll,ixm^ll,ps(igrid)%w,ps(igrid)%x)
307  if (mype/=0)then
308  itag=morton_no
309  call mpi_send(igrid,1,mpi_integer, 0,itag,icomm,ierrmpi)
310  call mpi_send(ps(igrid)%x,1,type_block_xcc_io, 0,itag,icomm,ierrmpi)
311  itag=igrid
312  call mpi_send(ps(igrid)%w,1,type_block_io, 0,itag,icomm,ierrmpi)
313  else
314  {do ix^db=ixmlo^db,ixmhi^db\}
315  do iw=1,nw
316  if( dabs(ps(igrid)%w(ix^d,iw)) < 1.0d-32 ) ps(igrid)%w(ix^d,iw) = zero
317  enddo
318  write(qunit,fmt="(100(e14.6))") ps(igrid)%x(ix^d,1:ndim)&
319  ,ps(igrid)%w(ix^d,1:nw)
320  {end do\}
321  end if
322 end do
323 
324 if(mype==0.and.npe>1) allocate(intstatus(mpi_status_size,1))
325 
326 manycpu : if (npe>1) then
327  if (mype==0) then
328  loop_cpu : do ipe =1, npe-1
329  loop_morton : do morton_no=morton_start(ipe),morton_stop(ipe)
330  itag=morton_no
331  call mpi_recv(igrid_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
332  call mpi_recv(x_recv,1,type_block_xcc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
333  itag=igrid_recv
334  call mpi_recv(w_recv,1,type_block_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
335  {do ix^db=ixmlo^db,ixmhi^db\}
336  do iw=1,nw
337  if( dabs(w_recv(ix^d,iw)) < smalldouble ) w_recv(ix^d,iw) = zero
338  enddo
339  write(qunit,fmt="(100(e14.6))") x_recv(ix^d,1:ndim)&
340  ,w_recv(ix^d,1:nw)
341  {end do\}
342  end do loop_morton
343  end do loop_cpu
344  end if
345 end if manycpu
346 
347 if (npe>1) then
348  call mpi_barrier(icomm,ierrmpi)
349  if(mype==0)deallocate(intstatus)
350 endif
351 
352 if(mype==0) close(qunit)
353 end subroutine onegrid
354 !============================================================================
355 subroutine tecplot(qunit)
357 ! output for tecplot (ASCII format)
358 ! not parallel, uses calc_grid to compute nwauxio variables
359 ! allows renormalizing using convert factors
360 
363 
364 integer, intent(in) :: qunit
365 
366 integer:: igrid,iigrid,level,igonlevel,iw,idim,ix^D
367 integer:: NumGridsOnLevel(1:nlevelshi)
368 integer :: nx^D,nxC^D,nodesonlevel,elemsonlevel,ixC^L,ixCC^L
369 
370 integer :: nodes, elems
371 
372 
373 double precision :: x_TEC(ndim), w_TEC(nw+nwauxio)
374 
375 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP
376 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP
377 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
378 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
379 
380 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC_TMP
381 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP
382 double precision, dimension(0:nw+nwauxio) :: normconv
383 logical :: fileopen,first
384 character(len=80) :: filename
385 integer :: filenr
386 
387 !!! possible length conflict
388 character(len=1024) :: tecplothead
389 
390 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
391 character(len=1024) :: outfilehead
392 !-----------------------------------------------------------------------------
393 
394 if(npe>1)then
395  if(mype==0) print *,'tecplot not parallel, use tecplotmpi'
396  call mpistop('npe>1, tecplot')
397 end if
398 
399 if(nw/=count(w_write(1:nw)))then
400  if(mype==0) print *,'tecplot does not support w_write=F'
401  call mpistop('w_write, tecplot')
402 end if
403 
404 if(nocartesian)then
405  if(mype==0) print *,'tecplot with nocartesian and typeaxial=',typeaxial
406 endif
407 
408 inquire(qunit,opened=fileopen)
409 if (.not.fileopen) then
410  ! generate filename
411  filenr=snapshotini
412  if (autoconvert) filenr=snapshotnext
413  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".plt"
414  open(qunit,file=filename,status='unknown')
415 end if
416 
417 call getheadernames(wnamei,xandwnamei,outfilehead)
418 
419 write(tecplothead,'(a)') "VARIABLES = "//trim(outfilehead)
420 
421 write(qunit,'(a)') tecplothead(1:len_trim(tecplothead))
422 
423 numgridsonlevel(1:nlevelshi)=0
424 do level=levmin,levmax
425  numgridsonlevel(level)=0
426  do iigrid=1,igridstail; igrid=igrids(iigrid);
427  if (node(plevel_,igrid)/=level) cycle
428  numgridsonlevel(level)=numgridsonlevel(level)+1
429  end do
430 end do
431 
432 nx^d=ixmhi^d-ixmlo^d+1;
433 nxc^d=nx^d+1;
434 
435 {^ifoned
436 if(convert_type=='tecline') then
437  nodes=0
438  elems=0
439  do level=levmin,levmax
440  nodes=nodes + numgridsonlevel(level)*{nxc^d*}
441  elems=elems + numgridsonlevel(level)*{nx^d*}
442  enddo
443 
444  write(qunit,"(a,i7,a,1pe12.5,a)") &
445  'ZONE T="all levels", I=',elems, &
446  ', SOLUTIONTIME=',global_time*time_convert_factor,', F=POINT'
447 
448  igonlevel=0
449  do iigrid=1,igridstail; igrid=igrids(iigrid);
450  call calc_x(igrid,xc,xcc)
451  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,ixc^l,ixcc^l,.true.)
452  {do ix^db=ixccmin^db,ixccmax^db\}
453  x_tec(1:ndim)=xcc_tmp(ix^d,1:ndim)*normconv(0)
454  w_tec(1:nw+nwauxio)=wcc_tmp(ix^d,1:nw+nwauxio)*normconv(1:nw+nwauxio)
455  !write(qunit,fmt="(100(e14.6))") x_TEC, w_TEC
456  write(qunit,fmt="(100(e24.16))") x_tec, w_tec
457  {end do\}
458  enddo
459  close(qunit)
460 else
461 }
462 do level=levmin,levmax
463  nodesonlevel=numgridsonlevel(level)*{nxc^d*}
464  elemsonlevel=numgridsonlevel(level)*{nx^d*}
465  ! for all tecplot variants coded up here, the TECPLOT ZONES coincide
466  ! with the AMR grid LEVELs. Alternative options would be:
467  !  - let each grid define a zone: inefficient for TECPLOT's internal
468  !    workings, hence not implemented
469  !  - let the entire octree define one zone: no difference in interpolation
470  !    properties across TECPLOT zones has been detected so far, hence not done
471  select case(convert_type)
472  case('tecplot')
473  ! in this option, we store the corner coordinates, as well as the corner
474  ! values of all variables (obtained by averaging). This allows POINT packaging,
475  ! and thus we can save full grid info by using one call to calc_grid
476  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,a)") &
477  'ZONE T="',level,'"',', N=',nodesonlevel,', E=',elemsonlevel, &
478  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=POINT, ZONETYPE=', &
479  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
480  do iigrid=1,igridstail; igrid=igrids(iigrid);
481  if (node(plevel_,igrid)/=level) cycle
482  call calc_x(igrid,xc,xcc)
483  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
484  ixc^l,ixcc^l,.true.)
485  {do ix^db=ixcmin^db,ixcmax^db\}
486  x_tec(1:ndim)=xc_tmp(ix^d,1:ndim)*normconv(0)
487  w_tec(1:nw+nwauxio)=wc_tmp(ix^d,1:nw+nwauxio)*normconv(1:nw+nwauxio)
488  write(qunit,fmt="(100(e14.6))") x_tec, w_tec
489  {end do\}
490  enddo
491  case('tecplotCC')
492  ! in this option, we store the corner coordinates, and the cell center
493  ! values of all variables. Due to this mix of corner/cell center, we must
494  ! use BLOCK packaging, and thus we have enormous overhead by using
495  ! calc_grid repeatedly to merely fill values of cell corner coordinates
496  ! and cell center values per dimension, per variable
497  if(ndim+nw+nwauxio>99) call mpistop("adjust format specification in writeout")
498  if(nw+nwauxio==1)then
499  ! to make tecplot happy: avoid [ndim+1-ndim+1] in varlocation varset
500  ! and just set [ndim+1]
501  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,a)") &
502  'ZONE T="',level,'"',', N=',nodesonlevel,', E=',elemsonlevel, &
503  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
504  ndim+1,']=CELLCENTERED), ZONETYPE=', &
505  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
506  else
507  if(ndim+nw+nwauxio<10) then
508  ! difference only in length of integer format specification for ndim+nw+nwauxio
509  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,i1,a,a)") &
510  'ZONE T="',level,'"',', N=',nodesonlevel,', E=',elemsonlevel, &
511  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
512  ndim+1,'-',ndim+nw+nwauxio,']=CELLCENTERED), ZONETYPE=', &
513  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
514  else
515  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,i2,a,a)") &
516  'ZONE T="',level,'"',', N=',nodesonlevel,', E=',elemsonlevel, &
517  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
518  ndim+1,'-',ndim+nw+nwauxio,']=CELLCENTERED), ZONETYPE=', &
519  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
520  endif
521  endif
522  do idim=1,ndim
523  first=(idim==1)
524  do iigrid=1,igridstail; igrid=igrids(iigrid);
525  if (node(plevel_,igrid)/=level) cycle
526  call calc_x(igrid,xc,xcc)
527  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
528  ixc^l,ixcc^l,first)
529  write(qunit,fmt="(100(e14.6))") xc_tmp(ixc^s,idim)*normconv(0)
530  enddo
531  enddo
532  do iw=1,nw+nwauxio
533  do iigrid=1,igridstail; igrid=igrids(iigrid);
534  if (node(plevel_,igrid)/=level) cycle
535  call calc_x(igrid,xc,xcc)
536  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
537  ixc^l,ixcc^l,.true.)
538  write(qunit,fmt="(100(e14.6))") wcc_tmp(ixcc^s,iw)*normconv(iw)
539  enddo
540  enddo
541  case default
542  call mpistop('no such tecplot type')
543  end select
544  igonlevel=0
545  do iigrid=1,igridstail; igrid=igrids(iigrid);
546  if (node(plevel_,igrid)/=level) cycle
547  igonlevel=igonlevel+1
548  call save_conntec(qunit,igrid,igonlevel)
549  end do
550 end do
551 {^ifoned endif}
552 
553 close(qunit)
554 
555 end subroutine tecplot
556 !=============================================================================
557 subroutine save_conntec(qunit,igrid,igonlevel)
559 ! this saves the basic line, quad and brick connectivity,
560 ! as used by TECPLOT file outputs for unstructured grids
561 
563 
564 integer, intent(in) :: qunit, igrid, igonlevel
565 
566 integer :: nx^D, nxC^D, ix^D
567 {^ifoned integer, external:: nodenumbertec1D \}
568 {^iftwod integer, external:: nodenumbertec2D \}
569 {^ifthreed integer, external:: nodenumbertec3D \}
570 !-----------------------------------------------------------------------------
571 nx^d=ixmhi^d-ixmlo^d+1;
572 nxc^d=nx^d+1;
573 
574 ! connectivity list
575 {do ix^db=1,nx^db\}
576  {^ifthreed
577  ! basic brick connectivity
578  write(qunit,'(8(i7,1x))') &
579  nodenumbertec3d(ix1, ix2-1,ix3-1,nxc1,nxc2,nxc3,igonlevel,igrid),&
580  nodenumbertec3d(ix1+1,ix2-1,ix3-1,nxc1,nxc2,nxc3,igonlevel,igrid),&
581  nodenumbertec3d(ix1+1,ix2 ,ix3-1,nxc1,nxc2,nxc3,igonlevel,igrid),&
582  nodenumbertec3d(ix1 ,ix2 ,ix3-1,nxc1,nxc2,nxc3,igonlevel,igrid),&
583  nodenumbertec3d(ix1 ,ix2-1,ix3 ,nxc1,nxc2,nxc3,igonlevel,igrid),&
584  nodenumbertec3d(ix1+1,ix2-1,ix3 ,nxc1,nxc2,nxc3,igonlevel,igrid),&
585  nodenumbertec3d(ix1+1,ix2 ,ix3 ,nxc1,nxc2,nxc3,igonlevel,igrid),&
586  nodenumbertec3d(ix1 ,ix2 ,ix3 ,nxc1,nxc2,nxc3,igonlevel,igrid)
587  }
588  {^iftwod
589  ! basic quadrilateral connectivity
590  write(qunit,'(4(i7,1x))') &
591  nodenumbertec2d(ix1, ix2-1,nxc1,nxc2,igonlevel,igrid),&
592  nodenumbertec2d(ix1+1,ix2-1,nxc1,nxc2,igonlevel,igrid),&
593  nodenumbertec2d(ix1+1,ix2 ,nxc1,nxc2,igonlevel,igrid),&
594  nodenumbertec2d(ix1 ,ix2 ,nxc1,nxc2,igonlevel,igrid)
595  }
596  {^ifoned
597  ! basic line connectivity
598  write(qunit,'(2(i7,1x))') nodenumbertec1d(ix1,nxc1,igonlevel,igrid),&
599  nodenumbertec1d(ix1+1,nxc1,igonlevel,igrid)
600  }
601 {end do\}
602 
603 end subroutine save_conntec
604 !=============================================================================
605 integer function nodenumbertec1d(i1,nx1,ig,igrid)
607 integer, intent(in):: i1,nx1,ig,igrid
608 !-----------------------------------------------------------------------------
609 nodenumbertec1d=i1+(ig-1)*nx1
610 
611 if(nodenumbertec1d>9999999)call mpistop("too large nodenumber")
612 end function nodenumbertec1d
613 !====================================================================================
614 integer function nodenumbertec2d(i1,i2,nx1,nx2,ig,igrid)
616 integer, intent(in):: i1,i2,nx1,nx2,ig,igrid
617 !-----------------------------------------------------------------------------
618 nodenumbertec2d=i1+i2*nx1+(ig-1)*nx1*nx2
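! worked example: save_conntec passes the corner counts nxc1,nxc2 here, e.g. a
! 4x4-cell block has nxc1=nxc2=5 corners; corner (i1,i2)=(3,2) of the first block
! on a level (ig=1) then maps to 3 + 2*5 + 0 = 13, and block ig=2 occupies 26..50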
619 
620 if(nodenumbertec2d>9999999)call mpistop("too large nodenumber")
621 end function nodenumbertec2d
622 !====================================================================================
623 integer function nodenumbertec3d(i1,i2,i3,nx1,nx2,nx3,ig,igrid)
625 integer, intent(in):: i1,i2,i3,nx1,nx2,nx3,ig,igrid
626 !-----------------------------------------------------------------------------
627 nodenumbertec3d=i1+i2*nx1+i3*nx1*nx2+(ig-1)*nx1*nx2*nx3
628 
629 if(nodenumbertec3d>9999999)call mpistop("too large nodenumber")
630 end function nodenumbertec3d
631 !=============================================================================
632 subroutine unstructuredvtk(qunit)
634 ! output for vtu format to paraview
635 ! not parallel, uses calc_grid to compute nwauxio variables
636 ! allows renormalizing using convert factors
637 
640 
641 integer, intent(in) :: qunit
642 
643 double precision :: x_VTK(1:3)
644 
645 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP
646 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP
647 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
648 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
649 
650 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC_TMP
651 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP
652 double precision, dimension(0:nw+nwauxio) :: normconv
653 integer:: igrid,iigrid,level,igonlevel,icel,ixC^L,ixCC^L,iw
654 integer:: NumGridsOnLevel(1:nlevelshi)
655 integer :: nx^D,nxC^D,nodesonlevel,elemsonlevel,nc,np,VTK_type,ix^D
656 
657 character(len=80):: filename
658 integer :: filenr
659 
660 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
661 character(len=1024) :: outfilehead
662 
663 logical :: fileopen
664 !-----------------------------------------------------------------------------
665 
666 if(npe>1)then
667  if(mype==0) print *,'unstructuredvtk not parallel, use vtumpi'
668  call mpistop('npe>1, unstructuredvtk')
669 end if
670 
671 inquire(qunit,opened=fileopen)
672 if(.not.fileopen)then
673  ! generate filename
674  filenr=snapshotini
675  if (autoconvert) filenr=snapshotnext
676  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".vtu"
677  ! Open the file for the header part
678  open(qunit,file=filename,status='unknown')
679 endif
680 
681 call getheadernames(wnamei,xandwnamei,outfilehead)
682 
683 ! generate xml header
684 write(qunit,'(a)')'<?xml version="1.0"?>'
685 write(qunit,'(a)',advance='no') '<VTKFile type="UnstructuredGrid"'
686 write(qunit,'(a)')' version="0.1" byte_order="LittleEndian">'
687 write(qunit,'(a)')'<UnstructuredGrid>'
688 write(qunit,'(a)')'<FieldData>'
689 write(qunit,'(2a)')'<DataArray type="Float32" Name="TIME" ',&
690  'NumberOfTuples="1" format="ascii">'
691 write(qunit,*) dble(dble(global_time)*time_convert_factor)
692 write(qunit,'(a)')'</DataArray>'
693 write(qunit,'(a)')'</FieldData>'
694 
695 ! number of cells, number of corner points, per grid.
696 nx^d=ixmhi^d-ixmlo^d+1;
697 nxc^d=nx^d+1;
698 nc={nx^d*}
699 np={nxc^d*}
700 
701 ! Note: using the w_write, writelevel, writespshift
702 ! we can clip parts of the grid away, select variables, levels etc.
703 do level=levmin,levmax
704  if (writelevel(level)) then
705  do iigrid=1,igridstail; igrid=igrids(iigrid);
706  if (node(plevel_,igrid)/=level) cycle
707  ! only output a grid when fully within clipped region selected
708  ! by writespshift array
709  if (({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
710  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
711  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})) then
712  call calc_x(igrid,xc,xcc)
713  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
714  ixc^l,ixcc^l,.true.)
715  select case(convert_type)
716  case('vtu')
717  ! we write out every grid as one VTK PIECE
718  write(qunit,'(a,i7,a,i7,a)') &
719  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
720  write(qunit,'(a)')'<PointData>'
721  do iw=1,nw+nwauxio
722  if(iw<=nw) then
723  if(.not.w_write(iw)) cycle
724  endif
725 
726  write(qunit,'(a,a,a)')&
727  '<DataArray type="Float64" Name="',trim(wnamei(iw)),'" format="ascii">'
728  write(qunit,'(200(1pe14.6))') {(|}wc_tmp(ix^d,iw)*normconv(iw),{ix^d=ixcmin^d,ixcmax^d)}
729  write(qunit,'(a)')'</DataArray>'
730  enddo
731  write(qunit,'(a)')'</PointData>'
732 
733  write(qunit,'(a)')'<Points>'
734  write(qunit,'(a)')'<DataArray type="Float32" NumberOfComponents="3" format="ascii">'
735  ! write cell corner coordinates in a backward dimensional loop, always 3D output
736  {do ix^db=ixcmin^db,ixcmax^db \}
737  x_vtk(1:3)=zero;
738  x_vtk(1:ndim)=xc_tmp(ix^d,1:ndim)*normconv(0);
739  write(qunit,'(3(1pe14.6))') x_vtk
740  {end do \}
741  write(qunit,'(a)')'</DataArray>'
742  write(qunit,'(a)')'</Points>'
743  case('vtuCC')
744  ! we write out every grid as one VTK PIECE
745  write(qunit,'(a,i7,a,i7,a)') &
746  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
747  write(qunit,'(a)')'<CellData>'
748  do iw=1,nw+nwauxio
749  if(iw<=nw) then
750  if(.not.w_write(iw)) cycle
751  endif
752 
753  write(qunit,'(a,a,a)')&
754  '<DataArray type="Float64" Name="',trim(wnamei(iw)),'" format="ascii">'
755  write(qunit,'(200(1pe14.6))') {(|}wcc_tmp(ix^d,iw)*normconv(iw),{ix^d=ixccmin^d,ixccmax^d)}
756  write(qunit,'(a)')'</DataArray>'
757  enddo
758  write(qunit,'(a)')'</CellData>'
759 
760  write(qunit,'(a)')'<Points>'
761  write(qunit,'(a)')'<DataArray type="Float32" NumberOfComponents="3" format="ascii">'
762  ! write cell corner coordinates in a backward dimensional loop, always 3D output
763  {do ix^db=ixcmin^db,ixcmax^db \}
764  x_vtk(1:3)=zero;
765  x_vtk(1:ndim)=xc_tmp(ix^d,1:ndim)*normconv(0);
766  write(qunit,'(3(1pe14.6))') x_vtk
767  {end do \}
768  write(qunit,'(a)')'</DataArray>'
769  write(qunit,'(a)')'</Points>'
770  end select
771 
772 
773  write(qunit,'(a)')'<Cells>'
774 
775  ! connectivity part
776  write(qunit,'(a)')'<DataArray type="Int32" Name="connectivity" format="ascii">'
777  call save_connvtk(qunit,igrid)
778  write(qunit,'(a)')'</DataArray>'
779 
780  ! offsets data array
781  write(qunit,'(a)')'<DataArray type="Int32" Name="offsets" format="ascii">'
782  do icel=1,nc
783  write(qunit,'(i7)') icel*(2**^nd)
784  end do
785  write(qunit,'(a)')'</DataArray>'
786 
787  ! VTK cell type data array
788  write(qunit,'(a)')'<DataArray type="Int32" Name="types" format="ascii">'
789  ! VTK_LINE=3; VTK_PIXEL=8; VTK_VOXEL=11 -> vtk-syntax
790  {^ifoned vtk_type=3 \}
791  {^iftwod vtk_type=8 \}
792  {^ifthreed vtk_type=11 \}
793  do icel=1,nc
794  write(qunit,'(i2)') vtk_type
795  enddo
796  write(qunit,'(a)')'</DataArray>'
797 
798  write(qunit,'(a)')'</Cells>'
799 
800  write(qunit,'(a)')'</Piece>'
801  endif
802  enddo
803  endif
804 enddo
805 
806 write(qunit,'(a)')'</UnstructuredGrid>'
807 write(qunit,'(a)')'</VTKFile>'
808 close(qunit)
809 
810 end subroutine unstructuredvtk
811 !====================================================================================
812 subroutine unstructuredvtkb(qunit)
814 ! output for vtu format to paraview, binary version with raw appended data
815 ! not parallel, uses calc_grid to compute nwauxio variables
816 ! allows renormalizing using convert factors
820 
821 integer, intent(in) :: qunit
822 
823 double precision :: x_VTK(1:3)
824 
825 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP
826 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP
827 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
828 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
829 
830 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio):: wC_TMP
831 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP
832 double precision :: normconv(0:nw+nwauxio)
833 
834 integer, allocatable :: intstatus(:,:)
835 integer :: ipe,igrid,level,icel,ixC^L,ixCC^L,Morton_no,Morton_length
836 integer :: nx^D,nxC^D,nc,np,VTK_type,ix^D,filenr
837 integer*8 :: offset
838 
839 integer:: k,iw
840 integer:: length,lengthcc,length_coords,length_conn,length_offsets
841 character:: buf
842 character(len=80):: filename
843 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
844 character(len=1024) :: outfilehead
845 
846 logical :: fileopen,cell_corner=.true.
847 logical, allocatable :: Morton_aim(:),Morton_aim_p(:)
848 !-----------------------------------------------------------------------------
849 
850 normconv=one
851 morton_length=morton_stop(npe-1)-morton_start(0)+1
852 allocate(morton_aim(morton_start(0):morton_stop(npe-1)))
853 allocate(morton_aim_p(morton_start(0):morton_stop(npe-1)))
854 morton_aim=.false.
855 morton_aim_p=.false.
856 do morton_no=morton_start(mype),morton_stop(mype)
857  igrid=sfc_to_igrid(morton_no)
858  level=node(plevel_,igrid)
859  ! we can clip parts of the grid away, select variables, levels etc.
860  if(writelevel(level)) then
861  ! only output a grid when fully within clipped region selected
862  ! by writespshift array
863  if(({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
864  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
865  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})) then
866  morton_aim_p(morton_no)=.true.
867  end if
868  end if
869 end do
870 call mpi_allreduce(morton_aim_p,morton_aim,morton_length,mpi_logical,mpi_lor,&
871  icomm,ierrmpi)
872 select case(convert_type)
873  case('vtuB','vtuBmpi')
874  cell_corner=.true.
875  case('vtuBCC','vtuBCCmpi')
876  cell_corner=.false.
877 end select
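! cell_corner=.true.  : corner-interpolated data written as PointData
! cell_corner=.false. : cell-centered data written as CellData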
878 if (mype /= 0) then
879  do morton_no=morton_start(mype),morton_stop(mype)
880  if(.not. morton_aim(morton_no)) cycle
881  igrid=sfc_to_igrid(morton_no)
882  call calc_x(igrid,xc,xcc)
883  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
884  ixc^l,ixcc^l,.true.)
885  itag=morton_no
886  call mpi_send(xc_tmp,1,type_block_xc_io, 0,itag,icomm,ierrmpi)
887  if(cell_corner) then
888  call mpi_send(wc_tmp,1,type_block_wc_io, 0,itag,icomm,ierrmpi)
889  else
890  call mpi_send(wcc_tmp,1,type_block_wcc_io, 0,itag,icomm,ierrmpi)
891  endif
892  end do
893 
894 else
895  ! mype==0
896  offset=0
897 
898  inquire(qunit,opened=fileopen)
899  if(.not.fileopen)then
900  ! generate filename
901  filenr=snapshotini
902  if (autoconvert) filenr=snapshotnext
903  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".vtu"
904  ! Open the file for the header part
905  open(qunit,file=filename,status='replace')
906  endif
907 
908  call getheadernames(wnamei,xandwnamei,outfilehead)
909 
910  ! generate xml header
911  write(qunit,'(a)')'<?xml version="1.0"?>'
912  write(qunit,'(a)',advance='no') '<VTKFile type="UnstructuredGrid"'
913  write(qunit,'(a)')' version="0.1" byte_order="LittleEndian">'
914  write(qunit,'(a)')'<UnstructuredGrid>'
915  write(qunit,'(a)')'<FieldData>'
916  write(qunit,'(2a)')'<DataArray type="Float32" Name="TIME" ',&
917  'NumberOfTuples="1" format="ascii">'
918  write(qunit,*) real(global_time*time_convert_factor)
919  write(qunit,'(a)')'</DataArray>'
920  write(qunit,'(a)')'</FieldData>'
921 
922  ! number of cells, number of corner points, per grid.
923  nx^d=ixmhi^d-ixmlo^d+1;
924  nxc^d=nx^d+1;
925  nc={nx^d*}
926  np={nxc^d*}
927 
928  length=np*size_real
929  lengthcc=nc*size_real
930 
931  length_coords=3*length
932  length_conn=2**^nd*size_int*nc
933  length_offsets=nc*size_int
934 
935  ! Note: using the w_write, writelevel, writespshift arrays we can clip parts of the grid away, select variables, levels etc.
936  do morton_no=morton_start(0),morton_stop(0)
937  if(.not. morton_aim(morton_no)) cycle
938  if(cell_corner) then
939  ! we write out every grid as one VTK PIECE
940  write(qunit,'(a,i7,a,i7,a)') &
941  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
942  write(qunit,'(a)')'<PointData>'
943  do iw=1,nw+nwauxio
944  if(iw<=nw) then
945  if(.not.w_write(iw)) cycle
946  endif
947 
948  write(qunit,'(a,a,a,i16,a)')&
949  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
950  '" format="appended" offset="',offset,'">'
951  write(qunit,'(a)')'</DataArray>'
952  offset=offset+length+size_int
953  enddo
954  write(qunit,'(a)')'</PointData>'
955 
956  write(qunit,'(a)')'<Points>'
957  write(qunit,'(a,i16,a)') &
958  '<DataArray type="Float32" NumberOfComponents="3" format="appended" offset="',offset,'"/>'
959  ! write cell corner coordinates in a backward dimensional loop, always 3D output
960  offset=offset+length_coords+size_int
961  write(qunit,'(a)')'</Points>'
962  else
963  ! we write out every grid as one VTK PIECE
964  write(qunit,'(a,i7,a,i7,a)') &
965  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
966  write(qunit,'(a)')'<CellData>'
967  do iw=1,nw+nwauxio
968  if(iw<=nw) then
969  if(.not.w_write(iw)) cycle
970  endif
971 
972  write(qunit,'(a,a,a,i16,a)')&
973  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
974  '" format="appended" offset="',offset,'">'
975  write(qunit,'(a)')'</DataArray>'
976  offset=offset+lengthcc+size_int
977  enddo
978  write(qunit,'(a)')'</CellData>'
979 
980  write(qunit,'(a)')'<Points>'
981  write(qunit,'(a,i16,a)') &
982  '<DataArray type="Float32" NumberOfComponents="3" format="appended" offset="',offset,'"/>'
983  ! write cell corner coordinates in a backward dimensional loop, always 3D output
984  offset=offset+length_coords+size_int
985  write(qunit,'(a)')'</Points>'
986  end if
987 
988  write(qunit,'(a)')'<Cells>'
989 
990  ! connectivity part
991  write(qunit,'(a,i16,a)')&
992  '<DataArray type="Int32" Name="connectivity" format="appended" offset="',offset,'"/>'
993  offset=offset+length_conn+size_int
994 
995  ! offsets data array
996  write(qunit,'(a,i16,a)') &
997  '<DataArray type="Int32" Name="offsets" format="appended" offset="',offset,'"/>'
998  offset=offset+length_offsets+size_int
999 
1000  ! VTK cell type data array
1001  write(qunit,'(a,i16,a)') &
1002  '<DataArray type="Int32" Name="types" format="appended" offset="',offset,'"/>'
1003  offset=offset+size_int+nc*size_int
1004 
1005  write(qunit,'(a)')'</Cells>'
1006 
1007  write(qunit,'(a)')'</Piece>'
1008  end do
1009  ! write metadata communicated from other processors
1010  if(npe>1)then
1011  do ipe=1, npe-1
1012  do morton_no=morton_start(ipe),morton_stop(ipe)
1013  if(.not. morton_aim(morton_no)) cycle
1014  if(cell_corner) then
1015  ! we write out every grid as one VTK PIECE
1016  write(qunit,'(a,i7,a,i7,a)') &
1017  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
1018  write(qunit,'(a)')'<PointData>'
1019  do iw=1,nw+nwauxio
1020  if(iw<=nw) then
1021  if(.not.w_write(iw)) cycle
1022  endif
1023 
1024  write(qunit,'(a,a,a,i16,a)')&
1025  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
1026  '" format="appended" offset="',offset,'">'
1027  write(qunit,'(a)')'</DataArray>'
1028  offset=offset+length+size_int
1029  enddo
1030  write(qunit,'(a)')'</PointData>'
1031 
1032  write(qunit,'(a)')'<Points>'
1033  write(qunit,'(a,i16,a)') &
1034  '<DataArray type="Float32" NumberOfComponents="3" format="appended" offset="',offset,'"/>'
1035  ! write cell corner coordinates in a backward dimensional loop, always 3D output
1036  offset=offset+length_coords+size_int
1037  write(qunit,'(a)')'</Points>'
1038  else
1039  ! we write out every grid as one VTK PIECE
1040  write(qunit,'(a,i7,a,i7,a)') &
1041  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
1042  write(qunit,'(a)')'<CellData>'
1043  do iw=1,nw+nwauxio
1044  if(iw<=nw) then
1045  if(.not.w_write(iw)) cycle
1046  endif
1047 
1048  write(qunit,'(a,a,a,i16,a)')&
1049  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
1050  '" format="appended" offset="',offset,'">'
1051  write(qunit,'(a)')'</DataArray>'
1052  offset=offset+lengthcc+size_int
1053  enddo
1054  write(qunit,'(a)')'</CellData>'
1055 
1056  write(qunit,'(a)')'<Points>'
1057  write(qunit,'(a,i16,a)') &
1058  '<DataArray type="Float32" NumberOfComponents="3" format="appended" offset="',offset,'"/>'
1059  ! write cell corner coordinates in a backward dimensional loop, always 3D output
1060  offset=offset+length_coords+size_int
1061  write(qunit,'(a)')'</Points>'
1062  end if
1063 
1064  write(qunit,'(a)')'<Cells>'
1065 
1066  ! connectivity part
1067  write(qunit,'(a,i16,a)')&
1068  '<DataArray type="Int32" Name="connectivity" format="appended" offset="',offset,'"/>'
1069  offset=offset+length_conn+size_int
1070 
1071  ! offsets data array
1072  write(qunit,'(a,i16,a)') &
1073  '<DataArray type="Int32" Name="offsets" format="appended" offset="',offset,'"/>'
1074  offset=offset+length_offsets+size_int
1075 
1076  ! VTK cell type data array
1077  write(qunit,'(a,i16,a)') &
1078  '<DataArray type="Int32" Name="types" format="appended" offset="',offset,'"/>'
1079  offset=offset+size_int+nc*size_int
1080 
1081  write(qunit,'(a)')'</Cells>'
1082 
1083  write(qunit,'(a)')'</Piece>'
1084  end do
1085  end do
1086  end if
1087 
1088  write(qunit,'(a)')'</UnstructuredGrid>'
1089  write(qunit,'(a)')'<AppendedData encoding="raw">'
1090  close(qunit)
1091  open(qunit,file=filename,access='stream',form='unformatted',position='append')
1092  buf='_'
1093  write(qunit) trim(buf)
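! raw appended-data layout: after the leading '_' marker each DataArray is one
! integer byte count followed by the raw values; the running 'offset' written in
! the XML header above points at the byte count of the corresponding array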
1094 
1095  do morton_no=morton_start(0),morton_stop(0)
1096  if(.not. morton_aim(morton_no)) cycle
1097  igrid=sfc_to_igrid(morton_no)
1098  call calc_x(igrid,xc,xcc)
1099  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
1100  ixc^l,ixcc^l,.true.)
1101  do iw=1,nw+nwauxio
1102  if(iw<=nw) then
1103  if(.not.w_write(iw)) cycle
1104  endif
1105  if(cell_corner) then
1106  write(qunit) length
1107  write(qunit) {(|}real(wC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixcmin^d,ixcmax^d)}
1108  else
1109  write(qunit) lengthcc
1110  write(qunit) {(|}real(wCC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixccmin^d,ixccmax^d)}
1111  end if
1112  enddo
1113 
1114  write(qunit) length_coords
1115  {do ix^db=ixcmin^db,ixcmax^db \}
1116  x_vtk(1:3)=zero;
1117  x_vtk(1:ndim)=xc_tmp(ix^d,1:ndim)*normconv(0);
1118  do k=1,3
1119  write(qunit) real(x_vtk(k))
1120  end do
1121  {end do \}
1122 
1123  write(qunit) length_conn
1124  {do ix^db=1,nx^db\}
1125  {^ifoned write(qunit)ix1-1,ix1 \}
1126  {^iftwod
1127  write(qunit)(ix2-1)*nxc1+ix1-1, &
1128  (ix2-1)*nxc1+ix1,ix2*nxc1+ix1-1,ix2*nxc1+ix1
1129  \}
1130  {^ifthreed
1131  write(qunit)&
1132  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1-1, &
1133  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
1134  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1-1,&
1135  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1,&
1136  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1-1,&
1137  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
1138  ix3*nxc2*nxc1+ ix2*nxc1+ix1-1,&
1139  ix3*nxc2*nxc1+ ix2*nxc1+ix1
1140  \}
1141  {end do\}
1142 
1143  write(qunit) length_offsets
1144  do icel=1,nc
1145  write(qunit) icel*(2**^nd)
1146  end do
1147 
1148 
1149  {^ifoned vtk_type=3 \}
1150  {^iftwod vtk_type=8 \}
1151  {^ifthreed vtk_type=11 \}
1152  write(qunit) size_int*nc
1153  do icel=1,nc
1154  write(qunit) vtk_type
1155  end do
1156  end do
1157  allocate(intstatus(mpi_status_size,1))
1158  if(npe>1)then
1159  ixccmin^d=ixmlo^d; ixccmax^d=ixmhi^d;
1160  ixcmin^d=ixmlo^d-1; ixcmax^d=ixmhi^d;
1161  do ipe=1, npe-1
1162  do morton_no=morton_start(ipe),morton_stop(ipe)
1163  if(.not. morton_aim(morton_no)) cycle
1164  itag=morton_no
1165  call mpi_recv(xc_tmp,1,type_block_xc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1166  if(cell_corner) then
1167  call mpi_recv(wc_tmp,1,type_block_wc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1168  else
1169  call mpi_recv(wcc_tmp,1,type_block_wcc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1170  end if
1171  do iw=1,nw+nwauxio
1172  if(iw<=nw) then
1173  if(.not.w_write(iw)) cycle
1174  endif
1175  if(cell_corner) then
1176  write(qunit) length
1177  write(qunit) {(|}real(wC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixcmin^d,ixcmax^d)}
1178  else
1179  write(qunit) lengthcc
1180  write(qunit) {(|}real(wCC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixccmin^d,ixccmax^d)}
1181  end if
1182  enddo
1183 
1184  write(qunit) length_coords
1185  {do ix^db=ixcmin^db,ixcmax^db \}
1186  x_vtk(1:3)=zero;
1187  x_vtk(1:ndim)=xc_tmp(ix^d,1:ndim)*normconv(0);
1188  do k=1,3
1189  write(qunit) real(x_vtk(k))
1190  end do
1191  {end do \}
1192 
1193  write(qunit) length_conn
1194  {do ix^db=1,nx^db\}
1195  {^ifoned write(qunit)ix1-1,ix1 \}
1196  {^iftwod
1197  write(qunit)(ix2-1)*nxc1+ix1-1, &
1198  (ix2-1)*nxc1+ix1,ix2*nxc1+ix1-1,ix2*nxc1+ix1
1199  \}
1200  {^ifthreed
1201  write(qunit)&
1202  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1-1, &
1203  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
1204  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1-1,&
1205  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1,&
1206  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1-1,&
1207  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
1208  ix3*nxc2*nxc1+ ix2*nxc1+ix1-1,&
1209  ix3*nxc2*nxc1+ ix2*nxc1+ix1
1210  \}
1211  {end do\}
1212 
1213  write(qunit) length_offsets
1214  do icel=1,nc
1215  write(qunit) icel*(2**^nd)
1216  end do
1217  {^ifoned vtk_type=3 \}
1218  {^iftwod vtk_type=8 \}
1219  {^ifthreed vtk_type=11 \}
1220  write(qunit) size_int*nc
1221  do icel=1,nc
1222  write(qunit) vtk_type
1223  end do
1224  end do
1225  end do
1226  end if
1227  close(qunit)
1228  open(qunit,file=filename,status='unknown',form='formatted',position='append')
1229  write(qunit,'(a)')'</AppendedData>'
1230  write(qunit,'(a)')'</VTKFile>'
1231  close(qunit)
1232  deallocate(intstatus)
1233 end if
1234 
1235 deallocate(morton_aim,morton_aim_p)
1236 if (npe>1) then
1237  call mpi_barrier(icomm,ierrmpi)
1238 endif
1239 
1240 end subroutine unstructuredvtkb
1241 !====================================================================================
1242 subroutine save_connvtk(qunit,igrid)
1244 ! this saves the basic line, pixel and voxel connectivity,
1245 ! as used by VTK file outputs for unstructured grids
1246 
1248 
1249 integer, intent(in) :: qunit, igrid
1250 
1251 integer :: nx^D, nxC^D, ix^D
1252 !-----------------------------------------------------------------------------
1253 nx^d=ixmhi^d-ixmlo^d+1;
1254 nxc^d=nx^d+1;
1255 
1256 {do ix^db=1,nx^db\}
1257  {^ifoned write(qunit,'(2(i7,1x))')ix1-1,ix1 \}
1258  {^iftwod
1259  write(qunit,'(4(i7,1x))')(ix2-1)*nxc1+ix1-1, &
1260  (ix2-1)*nxc1+ix1,ix2*nxc1+ix1-1,ix2*nxc1+ix1
1261  \}
1262  {^ifthreed
1263  write(qunit,'(8(i7,1x))')&
1264  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1-1, &
1265  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
1266  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1-1,&
1267  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1,&
1268  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1-1,&
1269  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
1270  ix3*nxc2*nxc1+ ix2*nxc1+ix1-1,&
1271  ix3*nxc2*nxc1+ ix2*nxc1+ix1
1272  \}
1273 {end do\}
1274 
1275 end subroutine save_connvtk
1276 !=============================================================================
1277 subroutine imagedatavtk_mpi(qunit)
1279 ! output for vti format to paraview, non-binary (ASCII) version
1280 ! parallel, uses calc_grid to compute nwauxio variables
1281 ! allows renormalizing using convert factors
1282 ! allows skipping variables deselected via w_write
1283 
1284 ! implementation such that length of ASCII output is identical when
1285 ! run on 1 versus multiple CPUs (however, the order of the output pieces can differ)
1286 
1289 use mod_calculate_xw
1290 
1291 integer, intent(in) :: qunit
1292 
1293 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP,xC_TMP_recv
1294 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP,xCC_TMP_recv
1295 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
1296 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
1297 
1298 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC_TMP,wC_TMP_recv
1299 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP,wCC_TMP_recv
1300 double precision, dimension(0:nw+nwauxio) :: normconv
1301 integer:: igrid,iigrid,level,ixC^L,ixCC^L
1302 integer:: NumGridsOnLevel(1:nlevelshi)
1303 integer :: nx^D
1304 
1305 character(len=80):: filename
1306 integer :: filenr
1307 
1308 integer, allocatable :: intstatus(:,:)
1309 logical, allocatable :: Morton_aim(:),Morton_aim_p(:)
1310 
1311 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
1312 character(len=1024) :: outfilehead
1313 
1314 logical :: fileopen
1315 integer :: ipe,Morton_no,Morton_length
1316 integer :: ixrvC^L, ixrvCC^L, siz_ind, ind_send(5*^nd), ind_recv(5*^nd)
1317 double precision :: origin(1:3), spacing(1:3)
1318 integer :: wholeExtent(1:6), ig^D
1319 type(tree_node_ptr) :: tree
1320 !-----------------------------------------------------------------------------
1321 if(levmin/=levmax) call mpistop('ImageData can only be used when levmin=levmax')
1322 
1323 normconv(0) = length_convert_factor
1324 normconv(1:nw) = w_convert_factor
1325 siz_ind=5*^nd
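! each non-root rank sends 5*ndim integers per selected block: the corner index
! limits ixC^L (2*ndim), the cell-center index limits ixCC^L (2*ndim) and the
! block coordinates ig^D (ndim), matching ind_send below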
1326 morton_length=morton_stop(npe-1)-morton_start(0)+1
1327 allocate(morton_aim(morton_start(0):morton_stop(npe-1)))
1328 allocate(morton_aim_p(morton_start(0):morton_stop(npe-1)))
1329 morton_aim=.false.
1330 morton_aim_p=.false.
1331 do morton_no=morton_start(mype),morton_stop(mype)
1332  igrid=sfc_to_igrid(morton_no)
1333  level=node(plevel_,igrid)
1334  ! we can clip parts of the grid away, select variables, levels etc.
1335  if(writelevel(level)) then
1336  ! only output a grid when fully within clipped region selected
1337  ! by writespshift array
1338  if(({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
1339  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
1340  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})) then
1341  morton_aim_p(morton_no)=.true.
1342  end if
1343  end if
1344 end do
1345 call mpi_allreduce(morton_aim_p,morton_aim,morton_length,mpi_logical,mpi_lor,&
1346  icomm,ierrmpi)
1347 
1348 
1349 if (mype /= 0) then
1350  do morton_no=morton_start(mype),morton_stop(mype)
1351  if(.not. morton_aim(morton_no)) cycle
1352  igrid=sfc_to_igrid(morton_no)
1353  call calc_x(igrid,xc,xcc)
1354  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
1355  ixc^l,ixcc^l,.true.)
1356  tree%node => igrid_to_node(igrid, mype)%node
1357  {^d& ig^d = tree%node%ig^d; }
1358  itag=morton_no
1359  ind_send=(/ ixc^l,ixcc^l, ig^d /)
1360  call mpi_send(ind_send,siz_ind,mpi_integer, 0,itag,icomm,ierrmpi)
1361  call mpi_send(wc_tmp,1,type_block_wc_io, 0,itag,icomm,ierrmpi)
1362  call mpi_send(wcc_tmp,1,type_block_wcc_io, 0,itag,icomm,ierrmpi)
1363  end do
1364 
1365 else
1366 
1367  inquire(qunit,opened=fileopen)
1368  if(.not.fileopen)then
1369  ! generate filename
1370  filenr=snapshotini
1371  if (autoconvert) filenr=snapshotnext
1372  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".vti"
1373  ! Open the file for the header part
1374  open(qunit,file=filename,status='unknown',form='formatted')
1375  endif
1376 
1377 call getheadernames(wnamei,xandwnamei,outfilehead)
1378 
1379 ! number of cells per grid.
1380 nx^d=ixmhi^d-ixmlo^d+1;
1381 
1382 origin = 0
1383 {^d& origin(^d) = xprobmin^d*normconv(0); }
1384 spacing = zero
1385 {^d&spacing(^d) = dxlevel(^d)*normconv(0); }
1386 
1387 wholeextent = 0
1388 ! if we use writespshift, the whole extent has to be calculated:
1389 {^d&wholeextent(^d*2-1) = nx^d * ceiling(((xprobmax^d-xprobmin^d)*writespshift(^d,1)) &
1390  /(nx^d*dxlevel(^d))) \}
1391 {^d&wholeextent(^d*2) = nx^d * floor(((xprobmax^d-xprobmin^d)*(1.0d0-writespshift(^d,2))) &
1392  /(nx^d*dxlevel(^d))) \}
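! WholeExtent covers the clipped region in global cell indices on the single
! output level (levmin=levmax), rounded to whole blocks of nx^D cells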
1393 
1394 ! generate xml header
1395 write(qunit,'(a)')'<?xml version="1.0"?>'
1396 write(qunit,'(a)',advance='no') '<VTKFile type="ImageData"'
1397 write(qunit,'(a)')' version="0.1" byte_order="LittleEndian">'
1398 write(qunit,'(a,3(1pe14.6),a,6(i10),a,3(1pe14.6),a)')' <ImageData Origin="',&
1399  origin,'" WholeExtent="',wholeextent,'" Spacing="',spacing,'">'
1400  write(qunit,'(a)')'<FieldData>'
1401  write(qunit,'(2a)')'<DataArray type="Float32" Name="TIME" ',&
1402  'NumberOfTuples="1" format="ascii">'
1403  write(qunit,*) real(global_time*time_convert_factor)
1404  write(qunit,'(a)')'</DataArray>'
1405  write(qunit,'(a)')'</FieldData>'
1406 
1407 ! write the data from proc 0
1408 do morton_no=morton_start(0),morton_stop(0)
1409  if(.not. morton_aim(morton_no)) cycle
1410  igrid=sfc_to_igrid(morton_no)
1411  tree%node => igrid_to_node(igrid, 0)%node
1412  {^d& ig^d = tree%node%ig^d; }
1413  call calc_x(igrid,xc,xcc)
1414  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
1415  ixc^l,ixcc^l,.true.)
1416  call write_vti(qunit,ixg^ll,ixc^l,ixcc^l,ig^d,&
1417  nx^d,normconv,wnamei,wc_tmp,wcc_tmp)
1418 end do
1419 
1420 if(npe>1)then
1421  allocate(intstatus(mpi_status_size,1))
1422  do ipe=1, npe-1
1423  do morton_no=morton_start(ipe),morton_stop(ipe)
1424  if(.not. morton_aim(morton_no)) cycle
1425  itag=morton_no
1426  call mpi_recv(ind_recv,siz_ind, mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1427  ixrvcmin^d=ind_recv(^d);ixrvcmax^d=ind_recv(^nd+^d);
1428  ixrvccmin^d=ind_recv(2*^nd+^d);ixrvccmax^d=ind_recv(3*^nd+^d);
1429  ig^d=ind_recv(4*^nd+^d);
1430  call mpi_recv(wc_tmp,1,type_block_wc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1431  call mpi_recv(wcc_tmp,1,type_block_wcc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1432  call write_vti(qunit,ixg^ll,ixrvc^l,ixrvcc^l,ig^d,&
1433  nx^d,normconv,wnamei,wc_tmp,wcc_tmp)
1434  end do
1435  end do
1436 end if
1437 
1438 write(qunit,'(a)')'</ImageData>'
1439 write(qunit,'(a)')'</VTKFile>'
1440 close(qunit)
1441 if(npe>1) deallocate(intstatus)
1442 endif
1443 
1444 deallocate(morton_aim,morton_aim_p)
1445 if (npe>1) then
1446  call mpi_barrier(icomm,ierrmpi)
1447 endif
1448 
1449 end subroutine imagedatavtk_mpi
1450 !============================================================================
1451 subroutine punstructuredvtk_mpi(qunit)
1453 ! Write one pvtu file plus one vtu file per processor
1454 ! Otherwise like unstructuredvtk_mpi
1455 
1458 use mod_calculate_xw
1459 
1460 integer, intent(in) :: qunit
1461 !
1462 double precision, dimension(0:nw+nwauxio) :: normconv
1463 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP
1464 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP
1465 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
1466 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
1467 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC_TMP
1468 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP
1469 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
1470 character(len=1024) :: outfilehead
1471 integer :: nx^D,nxC^D,nc,np, igrid,ixC^L,ixCC^L,level,Morton_no
1472 character(len=80) :: pfilename
1473 integer :: filenr
1474 logical :: fileopen,conv_grid
1475 !----------------------------------------------------------------------------
1476 
1477 ! Write pvtu-file:
1478 if (mype==0) then
1479  call write_pvtu(qunit)
1480 endif
1481 ! Now write the Source files:
1482 
1483 inquire(qunit,opened=fileopen)
1484 if(.not.fileopen)then
1485  ! generate filename
1486  filenr=snapshotini
1487  if (autoconvert) filenr=snapshotnext
1488  ! Open the file for the header part
1489  write(pfilename,'(a,i4.4,a,i4.4,a)') trim(base_filename),filenr,"p",mype,".vtu"
1490  open(qunit,file=pfilename,status='unknown',form='formatted')
1491 endif
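! each rank thus writes its own piece <base_filename>NNNNpRRRR.vtu (NNNN the
! snapshot number, RRRR the rank); the .pvtu written by rank 0 in write_pvtu
! is expected to reference these piece files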
1492 ! generate xml header
1493 write(qunit,'(a)')'<?xml version="1.0"?>'
1494 write(qunit,'(a)',advance='no') '<VTKFile type="UnstructuredGrid"'
1495 write(qunit,'(a)')' version="0.1" byte_order="LittleEndian">'
1496 write(qunit,'(a)')' <UnstructuredGrid>'
1497 write(qunit,'(a)')'<FieldData>'
1498 write(qunit,'(2a)')'<DataArray type="Float32" Name="TIME" ',&
1499  'NumberOfTuples="1" format="ascii">'
1500 write(qunit,*) real(global_time*time_convert_factor)
1501 write(qunit,'(a)')'</DataArray>'
1502 write(qunit,'(a)')'</FieldData>'
1503 
1504 call getheadernames(wnamei,xandwnamei,outfilehead)
1505 
1506 ! number of cells, number of corner points, per grid.
1507 nx^d=ixmhi^d-ixmlo^d+1;
1508 nxc^d=nx^d+1;
1509 nc={nx^d*}
1510 np={nxc^d*}
1511 
1512 ! Note: using the w_write, writelevel, writespshift
1513 ! we can clip parts of the grid away, select variables, levels etc.
1514 do level=levmin,levmax
1515  if (.not.writelevel(level)) cycle
1516  do morton_no=morton_start(mype),morton_stop(mype)
1517  igrid=sfc_to_igrid(morton_no)
1518  if (node(plevel_,igrid)/=level) cycle
1519 
1520  ! only output a grid when fully within clipped region selected
1521  ! by writespshift array
1522  conv_grid=({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
1523  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
1524  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})
1525  if (.not.conv_grid) cycle
1526 
1527  call calc_x(igrid,xc,xcc)
1528  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
1529  ixc^l,ixcc^l,.true.)
1530 
1531  call write_vtk(qunit,ixg^ll,ixc^l,ixcc^l,igrid,nc,np,nx^d,nxc^d,&
1532  normconv,wnamei,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp)
1533  end do ! Morton_no loop
1534 end do ! level loop
1535 
1536  write(qunit,'(a)')' </UnstructuredGrid>'
1537  write(qunit,'(a)')'</VTKFile>'
1538  close(qunit)
1539 
1540 if (npe>1) then
1541  call mpi_barrier(icomm,ierrmpi)
1542 endif
1543 end subroutine punstructuredvtk_mpi
1544 !============================================================================
1545 subroutine unstructuredvtk_mpi(qunit)
1547 ! output for vtu format to paraview, non-binary (ASCII) version
1548 ! parallel, uses calc_grid to compute nwauxio variables
1549 ! allows renormalizing using convert factors
1550 ! allows skipping variables deselected via w_write
1551 
1552 ! implementation such that length of ASCII output is identical when
1553 ! run on 1 versus multiple CPUs (however, the order of the vtu pieces can differ)
1554 
1557 use mod_calculate_xw
1558 
1559 integer, intent(in) :: qunit
1560 
1561 double precision :: x_VTK(1:3)
1562 
1563 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP,xC_TMP_recv
1564 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP,xCC_TMP_recv
1565 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
1566 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
1567 
1568 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC_TMP,wC_TMP_recv
1569 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP,wCC_TMP_recv
1570 double precision, dimension(0:nw+nwauxio) :: normconv
1571 integer:: igrid,iigrid,level,ixC^L,ixCC^L
1572 integer:: NumGridsOnLevel(1:nlevelshi)
1573 integer :: nx^D,nxC^D,nodesonlevel,elemsonlevel,nc,np,ix^D
1574 
1575 character(len=80):: filename
1576 integer :: filenr
1577 
1578 integer, allocatable :: intstatus(:,:)
1579 
1580 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
1581 character(len=1024) :: outfilehead
1582 
1583 logical :: fileopen,conv_grid,cond_grid_recv
1584 integer :: ipe,Morton_no,siz_ind
1585 integer :: ind_send(4*^nd),ind_recv(4*^nd)
1586 integer :: levmin_recv,levmax_recv,level_recv,igrid_recv,ixrvC^L,ixrvCC^L
1587 !-----------------------------------------------------------------------------
1588 if (mype==0) then
1589  inquire(qunit,opened=fileopen)
1590  if(.not.fileopen)then
1591  ! generate filename
1592  filenr=snapshotini
1593  if (autoconvert) filenr=snapshotnext
1594  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".vtu"
1595  ! Open the file for the header part
1596  open(qunit,file=filename,status='unknown',form='formatted')
1597  endif
1598  ! generate xml header
1599  write(qunit,'(a)')'<?xml version="1.0"?>'
1600  write(qunit,'(a)',advance='no') '<VTKFile type="UnstructuredGrid"'
1601  write(qunit,'(a)')' version="0.1" byte_order="LittleEndian">'
1602  write(qunit,'(a)')'<UnstructuredGrid>'
1603  write(qunit,'(a)')'<FieldData>'
1604  write(qunit,'(2a)')'<DataArray type="Float32" Name="TIME" ',&
1605  'NumberOfTuples="1" format="ascii">'
1606  write(qunit,*) real(global_time*time_convert_factor)
1607  write(qunit,'(a)')'</DataArray>'
1608  write(qunit,'(a)')'</FieldData>'
1609 end if
1610 
1611 call getheadernames(wnamei,xandwnamei,outfilehead)
1612 ! number of cells, number of corner points, per grid.
1613 nx^d=ixmhi^d-ixmlo^d+1;
1614 nxc^d=nx^d+1;
1615 nc={nx^d*}
1616 np={nxc^d*}
1617 
1618 ! all slave processors send their minimal/maximal levels
1619 if (mype/=0) then
1620  if (morton_stop(mype)==0) call mpistop("null tag")
1621  itag=1000*morton_stop(mype)
1622  !print *,'mype,itag for levmin=',mype,itag,levmin
1623  call mpi_send(levmin,1,mpi_integer, 0,itag,icomm,ierrmpi)
1624  itag=2000*morton_stop(mype)
1625  !print *,'mype,itag for levmax=',mype,itag,levmax
1626  call mpi_send(levmax,1,mpi_integer, 0,itag,icomm,ierrmpi)
1627 end if
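! Tag convention sketch (values assumed): a non-master rank with
! Morton_stop(mype)=40 sends its levmin with itag=1000*40=40000 and its levmax
! with itag=2000*40=80000; the per-grid messages that follow reuse
! itag=Morton_no or itag=igrid so the master can match each receive below.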
1628 
1629 
1630 ! Note: using the w_write, writelevel, writespshift
1631 ! we can clip parts of the grid away, select variables, levels etc.
1632 do level=levmin,levmax
1633  if (.not.writelevel(level)) cycle
1634  do morton_no=morton_start(mype),morton_stop(mype)
1635  igrid=sfc_to_igrid(morton_no)
1636  if (mype/=0)then
1637  itag=morton_no
1638  call mpi_send(igrid,1,mpi_integer, 0,itag,icomm,ierrmpi)
1639  itag=igrid
1640  call mpi_send(node(plevel_,igrid),1,mpi_integer, 0,itag,icomm,ierrmpi)
1641  end if
1642  if (node(plevel_,igrid)/=level) cycle
1643 
1644  ! only output a grid when fully within clipped region selected
1645  ! by writespshift array
1646  conv_grid=({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
1647  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
1648  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})
1649  if (mype/=0)then
1650  call mpi_send(conv_grid,1,mpi_logical,0,itag,icomm,ierrmpi)
1651  end if
1652  if (.not.conv_grid) cycle
1653 
1654  call calc_x(igrid,xc,xcc)
1655  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
1656  ixc^l,ixcc^l,.true.)
1657 
1658  if (mype/=0) then
1659  itag=morton_no
1660  ind_send=(/ ixc^l,ixcc^l /)
1661  siz_ind=4*^nd
1662  call mpi_send(ind_send,siz_ind,mpi_integer, 0,itag,icomm,ierrmpi)
1663  call mpi_send(normconv,nw+nwauxio+1,mpi_double_precision, 0,itag,icomm,ierrmpi)
1664 
1665  call mpi_send(wc_tmp,1,type_block_wc_io, 0,itag,icomm,ierrmpi)
1666  call mpi_send(xc_tmp,1,type_block_xc_io, 0,itag,icomm,ierrmpi)
1667  itag=igrid
1668  call mpi_send(wcc_tmp,1,type_block_wcc_io, 0,itag,icomm,ierrmpi)
1669  call mpi_send(xcc_tmp,1,type_block_xcc_io, 0,itag,icomm,ierrmpi)
1670  else
1671  call write_vtk(qunit,ixg^ll,ixc^l,ixcc^l,igrid,nc,np,nx^d,nxc^d,&
1672  normconv,wnamei,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp)
1673  end if
1674  end do ! Morton_no loop
1675 end do ! level loop
1676 
1677 
1678 if (mype==0) then
1679  allocate(intstatus(mpi_status_size,1))
1680  if(npe>1)then
1681  do ipe=1,npe-1
1682  itag=1000*morton_stop(ipe)
1683  call mpi_recv(levmin_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1684  !!print *,'mype RECEIVES,itag for levmin=',mype,itag,levmin_recv
1685  itag=2000*morton_stop(ipe)
1686  call mpi_recv(levmax_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1687  !!print *,'mype RECEIVES itag for levmax=',mype,itag,levmax_recv
1688  do level=levmin_recv,levmax_recv
1689  if (.not.writelevel(level)) cycle
1690  do morton_no=morton_start(ipe),morton_stop(ipe)
1691  itag=morton_no
1692  call mpi_recv(igrid_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1693  itag=igrid_recv
1694  call mpi_recv(level_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1695  if (level_recv/=level) cycle
1696 
1697  call mpi_recv(cond_grid_recv,1,mpi_logical, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1698  if(.not.cond_grid_recv)cycle
1699 
1700  itag=morton_no
1701  siz_ind=4*^nd
1702  call mpi_recv(ind_recv,siz_ind, mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1703  ixrvcmin^d=ind_recv(^d);ixrvcmax^d=ind_recv(^nd+^d);
1704  ixrvccmin^d=ind_recv(2*^nd+^d);ixrvccmax^d=ind_recv(3*^nd+^d);
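  ! Layout of ind_recv, mirroring ind_send=(/ ixC^L,ixCC^L /) (sketch for an
  ! assumed 2D case, ^ND=2): entries 1:2 hold ixCmin1,ixCmin2, entries 3:4 hold
  ! ixCmax1,ixCmax2, entries 5:6 hold ixCCmin1,ixCCmin2 and entries 7:8 hold
  ! ixCCmax1,ixCCmax2.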
1705  call mpi_recv(normconv,nw+nwauxio+1, mpi_double_precision,ipe,itag&
1706  ,icomm,intstatus(:,1),ierrmpi)
1707 
1708  call mpi_recv(wc_tmp_recv,1,type_block_wc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1709  call mpi_recv(xc_tmp_recv,1,type_block_xc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1710 
1711  itag=igrid_recv
1712  call mpi_recv(wcc_tmp_recv,1,type_block_wcc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1713  call mpi_recv(xcc_tmp_recv,1,type_block_xcc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
1714  call write_vtk(qunit,ixg^ll,ixrvc^l,ixrvcc^l,igrid_recv,&
1715  nc,np,nx^d,nxc^d,normconv,wnamei,&
1716  xc_tmp_recv,xcc_tmp_recv,wc_tmp_recv,wcc_tmp_recv)
1717  enddo ! Morton_no loop
1718  enddo ! level loop
1719  enddo ! processor loop
1720  endif ! multiple processors
1721  write(qunit,'(a)')'</UnstructuredGrid>'
1722  write(qunit,'(a)')'</VTKFile>'
1723  close(qunit)
1724 endif
1725 
1726 if (npe>1) then
1727  call mpi_barrier(icomm,ierrmpi)
1728  if(mype==0)deallocate(intstatus)
1729 endif
1730 
1731 end subroutine unstructuredvtk_mpi
1732 !============================================================================
1733 subroutine write_vtk(qunit,ixI^L,ixC^L,ixCC^L,igrid,nc,np,nx^D,nxC^D,&
1734  normconv,wnamei,xC,xCC,wC,wCC)
1737 
1738 integer, intent(in) :: qunit
1739 integer, intent(in) :: ixI^L,ixC^L,ixCC^L
1740 integer, intent(in) :: igrid,nc,np,nx^D,nxC^D
1741 double precision, intent(in) :: normconv(0:nw+nwauxio)
1742 character(len=name_len), intent(in):: wnamei(1:nw+nwauxio)
1743 
1744 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
1745 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
1746 
1747 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC
1748 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC
1749 
1750 double precision :: x_VTK(1:3)
1751 integer :: iw,ix^D,icel,VTK_type
1752 !----------------------------------------------------------------------------
1753 
1754 select case(convert_type)
1755  case('vtumpi','pvtumpi')
1756  ! we write out every grid as one VTK PIECE
1757  write(qunit,'(a,i7,a,i7,a)') &
1758  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
1759  write(qunit,'(a)')'<PointData>'
1760  do iw=1,nw+nwauxio
1761  if(iw<=nw) then
1762  if(.not.w_write(iw)) cycle
1763  endif
1764 
1765  write(qunit,'(a,a,a)')&
1766  '<DataArray type="Float64" Name="',trim(wnamei(iw)),'" format="ascii">'
1767  write(qunit,'(200(1pe14.6))') {(|}wc(ix^d,iw)*normconv(iw),{ix^d=ixcmin^d,ixcmax^d)}
1768  write(qunit,'(a)')'</DataArray>'
1769  enddo
1770  write(qunit,'(a)')'</PointData>'
1771 
1772  write(qunit,'(a)')'<Points>'
1773  write(qunit,'(a)')'<DataArray type="Float32" NumberOfComponents="3" format="ascii">'
1774  ! write cell corner coordinates in a backward dimensional loop, always 3D output
1775  {do ix^db=ixcmin^db,ixcmax^db \}
1776  x_vtk(1:3)=zero;
1777  x_vtk(1:ndim)=xc(ix^d,1:ndim)*normconv(0);
1778  write(qunit,'(3(1pe14.6))') x_vtk
1779  {end do \}
1780  write(qunit,'(a)')'</DataArray>'
1781  write(qunit,'(a)')'</Points>'
1782 
1783  case('vtuCCmpi','pvtuCCmpi')
1784  ! we write out every grid as one VTK PIECE
1785  write(qunit,'(a,i7,a,i7,a)') &
1786  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
1787  write(qunit,'(a)')'<CellData>'
1788  do iw=1,nw+nwauxio
1789  if(iw<=nw) then
1790  if(.not.w_write(iw)) cycle
1791  endif
1792  write(qunit,'(a,a,a)')&
1793  '<DataArray type="Float64" Name="',trim(wnamei(iw)),'" format="ascii">'
1794  write(qunit,'(200(1pe14.6))') {(|}wcc(ix^d,iw)*normconv(iw),{ix^d=ixccmin^d,ixccmax^d)}
1795  write(qunit,'(a)')'</DataArray>'
1796  enddo
1797  write(qunit,'(a)')'</CellData>'
1798 
1799  write(qunit,'(a)')'<Points>'
1800  write(qunit,'(a)')'<DataArray type="Float32" NumberOfComponents="3" format="ascii">'
1801  ! write cell corner coordinates in a backward dimensional loop, always 3D output
1802  {do ix^db=ixcmin^db,ixcmax^db \}
1803  x_vtk(1:3)=zero;
1804  x_vtk(1:ndim)=xc(ix^d,1:ndim)*normconv(0);
1805  write(qunit,'(3(1pe14.6))') x_vtk
1806  {end do \}
1807  write(qunit,'(a)')'</DataArray>'
1808  write(qunit,'(a)')'</Points>'
1809 end select
1810 
1811 write(qunit,'(a)')'<Cells>'
1812 
1813 ! connectivity part
1814 write(qunit,'(a)')'<DataArray type="Int32" Name="connectivity" format="ascii">'
1815 call save_connvtk(qunit,igrid)
1816 write(qunit,'(a)')'</DataArray>'
1817 
1818 ! offsets data array
1819 write(qunit,'(a)')'<DataArray type="Int32" Name="offsets" format="ascii">'
1820 do icel=1,nc
1821  write(qunit,'(i7)') icel*(2**^nd)
1822 end do
1823 write(qunit,'(a)')'</DataArray>'
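! Offsets illustration (assuming a 3D run, 8 corners per VTK_VOXEL cell): the
! entries written above are 8, 16, 24, ..., 8*nc, i.e. the cumulative number of
! connectivity indices after each cell.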
1824 
1825 ! VTK cell type data array
1826 write(qunit,'(a)')'<DataArray type="Int32" Name="types" format="ascii">'
1827 ! VTK_LINE=3; VTK_PIXEL=8; VTK_VOXEL=11 -> vtk-syntax
1828 {^ifoned vtk_type=3 \}
1829 {^iftwod vtk_type=8 \}
1830 {^ifthreed vtk_type=11 \}
1831 do icel=1,nc
1832  write(qunit,'(i2)') vtk_type
1833 enddo
1834 write(qunit,'(a)')'</DataArray>'
1835 
1836 write(qunit,'(a)')'</Cells>'
1837 
1838 write(qunit,'(a)')'</Piece>'
1839 
1840 end subroutine write_vtk
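! Sketch of the XML one call to write_vtk emits for a single grid (abridged,
! corner/cell counts assumed for a 2D 16x16 block):
!   <Piece NumberOfPoints="289" NumberOfCells="256">
!     <PointData> (or <CellData> for the CC variants) one Float64 DataArray
!       per selected w/nwauxio variable </PointData>
!     <Points> one Float32 array, 3 components per corner, zero-padded beyond ndim </Points>
!     <Cells> Int32 connectivity, offsets and types arrays </Cells>
!   </Piece>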
1841 !============================================================================
1842 subroutine write_vti(qunit,ixI^L,ixC^L,ixCC^L,ig^D,nx^D,&
1843  normconv,wnamei,wC,wCC)
1845 
1846 integer, intent(in) :: qunit
1847 integer, intent(in) :: ixI^L,ixC^L,ixCC^L
1848 integer, intent(in) :: ig^D,nx^D
1849 double precision, intent(in) :: normconv(0:nw+nwauxio)
1850 character(len=name_len), intent(in):: wnamei(1:nw+nwauxio)
1851 
1852 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC
1853 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC
1854 
1855 integer :: iw,ix^D
1856 integer :: extent(1:6)
1857 !----------------------------------------------------------------------------
1858 
1859 extent = 0
1860 {^d& extent(^d*2-1) = (ig^d-1) * nx^d; }
1861 {^d& extent(^d*2) = (ig^d) * nx^d; }
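! Extent illustration (assumed block indices ig1=2, ig2=3 with nx1=nx2=16 in
! 2D): extent=(/16,32, 32,48, 0,0/), i.e. the global cell-index range covered
! by this block per direction, with unused directions left at 0.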
1862 
1863 
1864 select case(convert_type)
1865  case('vtimpi','pvtimpi')
1866  ! we write out every grid as one VTK PIECE
1867  write(qunit,'(a,6(i10),a)') &
1868  '<Piece Extent="',extent,'">'
1869  write(qunit,'(a)')'<PointData>'
1870  do iw=1,nw+nwauxio
1871  if(iw<=nw) then
1872  if(.not.w_write(iw)) cycle
1873  endif
1874 
1875  write(qunit,'(a,a,a)')&
1876  '<DataArray type="Float64" Name="',trim(wnamei(iw)),'" format="ascii">'
1877  write(qunit,'(200(1pe20.12))') {(|}wc(ix^d,iw)*normconv(iw),{ix^d=ixcmin^d,ixcmax^d)}
1878  write(qunit,'(a)')'</DataArray>'
1879  enddo
1880  write(qunit,'(a)')'</PointData>'
1881 
1882  case('vtiCCmpi','pvtiCCmpi')
1883  ! we write out every grid as one VTK PIECE
1884  write(qunit,'(a,6(i10),a)') &
1885  '<Piece Extent="',extent,'">'
1886  write(qunit,'(a)')'<CellData>'
1887  do iw=1,nw+nwauxio
1888  if(iw<=nw) then
1889  if(.not.w_write(iw)) cycle
1890  endif
1891  write(qunit,'(a,a,a)')&
1892  '<DataArray type="Float64" Name="',trim(wnamei(iw)),'" format="ascii">'
1893  write(qunit,'(200(1pe20.12))') {(|}wcc(ix^d,iw)*normconv(iw),{ix^d=ixccmin^d,ixccmax^d)}
1894  write(qunit,'(a)')'</DataArray>'
1895  enddo
1896  write(qunit,'(a)')'</CellData>'
1897 end select
1898 
1899 write(qunit,'(a)')'</Piece>'
1900 
1901 end subroutine write_vti
1902 !=============================================================================
1903 subroutine write_pvtu(qunit)
1906 use mod_calculate_xw
1907 
1908 integer, intent(in) :: qunit
1909 
1910 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio),outtype
1911 character(len=1024) :: outfilehead
1912 character(len=80) :: filename,pfilename
1913 integer :: filenr,iw,ipe,iscalars
1914 logical :: fileopen
1915 
1916 select case(convert_type)
1917 case('pvtumpi','pvtuBmpi')
1918  outtype="PPointData"
1919 case('pvtuCCmpi','pvtuBCCmpi')
1920  outtype="PCellData"
1921 end select
1922 inquire(qunit,opened=fileopen)
1923 if(.not.fileopen)then
1924  ! generate filename
1925  filenr=snapshotini
1926  if (autoconvert) filenr=snapshotnext
1927  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".pvtu"
1928  ! Open the file
1929  open(qunit,file=filename,status='unknown',form='formatted')
1930 endif
1931 
1932 call getheadernames(wnamei,xandwnamei,outfilehead)
1933 
1934 ! Get the default selection:
1935 iscalars=1
1936 do iw=nw,1, -1
1937  if (w_write(iw)) iscalars=iw
1938 end do
1939 
1940 
1941 ! generate xml header
1942 write(qunit,'(a)')'<?xml version="1.0"?>'
1943 write(qunit,'(a)',advance='no') '<VTKFile type="PUnstructuredGrid"'
1944 write(qunit,'(a)')' version="0.1" byte_order="LittleEndian">'
1945 write(qunit,'(a)')' <PUnstructuredGrid GhostLevel="0">'
1946 ! Either celldata or pointdata:
1947 write(qunit,'(a,a,a,a,a)')&
1948  ' <',trim(outtype),' Scalars="',trim(wnamei(iscalars))//'">'
1949 do iw=1,nw
1950  if(.not.w_write(iw))cycle
1951  write(qunit,'(a,a,a)')&
1952  ' <PDataArray type="Float32" Name="',trim(wnamei(iw)),'"/>'
1953 end do
1954 do iw=nw+1,nw+nwauxio
1955  write(qunit,'(a,a,a)')&
1956  ' <PDataArray type="Float32" Name="',trim(wnamei(iw)),'"/>'
1957 end do
1958 write(qunit,'(a,a,a)')' </',trim(outtype),'>'
1959 
1960 write(qunit,'(a)')' <PPoints>'
1961 write(qunit,'(a)')' <PDataArray type="Float32" NumberOfComponents="3"/>'
1962 write(qunit,'(a)')' </PPoints>'
1963 
1964 do ipe=0,npe-1
1965  write(pfilename,'(a,i4.4,a,i4.4,a)') trim(base_filename(&
1966  index(base_filename, '/', back = .true.)+1:&
1967  len(base_filename))),filenr,"p",&
1968  ipe,".vtu"
1969  write(qunit,'(a,a,a)')' <Piece Source="',trim(pfilename),'"/>'
1970 end do
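! Filename sketch (values assumed): for base_filename='output/kh', filenr=12
! and npe=4, the pieces referenced above are kh0012p0000.vtu up to
! kh0012p0003.vtu; the directory part is stripped, so the .pvtu points to
! pieces located next to it.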
1971 write(qunit,'(a)')' </PUnstructuredGrid>'
1972 write(qunit,'(a)')'</VTKFile>'
1973 
1974 close(qunit)
1975 
1976 end subroutine write_pvtu
1977 !=============================================================================
1978 subroutine tecplot_mpi(qunit)
1980 ! output for tecplot (ASCII format)
1981 ! parallel, uses calc_grid to compute nwauxio variables
1982 ! allows renormalizing using convert factors
1983 
1984 ! the current implementation is such that tecplotmpi and tecplotCCmpi will
1985 ! create ASCII output files of different length when run on 1 versus multiple CPUs:
1986 ! on 1 CPU there are as many zones as there are levels, while
1987 ! on multiple CPUs the number of zones can grow up to the number of
1988 ! levels times the number of CPUs (fewer when a level is not present on a CPU)
1989 
1992 use mod_calculate_xw
1993 
1994 integer, intent(in) :: qunit
1995 
1996 integer:: igrid,iigrid,level,igonlevel,iw,idim,ix^D
1997 integer:: NumGridsOnLevel(1:nlevelshi)
1998 integer :: nx^D,nxC^D,nodesonlevel,elemsonlevel,ixC^L,ixCC^L
1999 integer :: nodesonlevelmype,elemsonlevelmype
2000 
2001 integer :: nodes, elems
2002 
2003 integer, allocatable :: intstatus(:,:)
2004 
2005 double precision :: x_TEC(ndim), w_TEC(nw+nwauxio)
2006 
2007 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP,xC_TMP_recv
2008 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP,xCC_TMP_recv
2009 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
2010 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
2011 
2012 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC_TMP,wC_TMP_recv
2013 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP,wCC_TMP_recv
2014 double precision, dimension(0:nw+nwauxio) :: normconv
2015 logical :: fileopen,first
2016 integer :: Morton_no,ipe,levmin_recv,levmax_recv,igrid_recv,level_recv
2017 integer :: ixrvC^L,ixrvCC^L
2018 integer :: ind_send(2*^nd),ind_recv(2*^nd),siz_ind,igonlevel_recv
2019 integer :: NumGridsOnLevel_mype(1:nlevelshi,0:npe-1)
2020 character(len=80) :: filename
2021 integer :: filenr
2022 character(len=1024) :: tecplothead
2023 
2024 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
2025 character(len=1024) :: outfilehead
2026 !-----------------------------------------------------------------------------
2027 if(nw/=count(w_write(1:nw)))then
2028  if(mype==0) print *,'tecplot_mpi does not use w_write=F'
2029  call mpistop('w_write, tecplot')
2030 end if
2031 
2032 if(nocartesian)then
2033  if(mype==0) print *,'tecplot_mpi with nocartesian and typeaxial=',typeaxial
2034 endif
2035 
2036 master_cpu_open : if (mype == 0) then
2037  inquire(qunit,opened=fileopen)
2038  if (.not.fileopen) then
2039  ! generate filename
2040  filenr=snapshotini
2041  if (autoconvert) filenr=snapshotnext
2042  write(filename,'(a,i4.4,a)') trim(base_filename),filenr,".plt"
2043  open(qunit,file=filename,status='unknown')
2044  end if
2045 
2046  call getheadernames(wnamei,xandwnamei,outfilehead)
2047 
2048  write(tecplothead,'(a)') "VARIABLES = "//trim(outfilehead)
2049  write(qunit,'(a)') tecplothead(1:len_trim(tecplothead))
2050 end if master_cpu_open
2051 
2052 
2053 ! determine overall number of grids per level, and the same info per CPU
2054 numgridsonlevel(1:nlevelshi)=0
2055 do level=levmin,levmax
2056  numgridsonlevel(level)=0
2057  do morton_no=morton_start(mype),morton_stop(mype)
2058  igrid = sfc_to_igrid(morton_no)
2059  if (node(plevel_,igrid)/=level) cycle
2060  numgridsonlevel(level)=numgridsonlevel(level)+1
2061  end do
2062  numgridsonlevel_mype(level,0:npe-1)=0
2063  numgridsonlevel_mype(level,mype) = numgridsonlevel(level)
2064  call mpi_allreduce(mpi_in_place,numgridsonlevel_mype(level,0:npe-1),npe,mpi_integer,&
2065  mpi_max,icomm,ierrmpi)
2066  call mpi_allreduce(mpi_in_place,numgridsonlevel(level),1,mpi_integer,mpi_sum, &
2067  icomm,ierrmpi)
2068 end do
2069 
2070 
2071 !!do level=levmin,levmax
2072 !! print *,'mype, level and NumGridsOnLevel_mype(level,0:npe-1)=', &
2073 !! mype,level,NumGridsOnLevel_mype(level,0:npe-1)
2074 !! print *,'mype, level and NumGridsOnLevel(level)=', &
2075 !! mype,level,NumGridsOnLevel(level)
2076 !!enddo
2077 
2078 
2079 nx^d=ixmhi^d-ixmlo^d+1;
2080 nxc^d=nx^d+1;
2081 
2082 if(mype==0.and.npe>1) allocate(intstatus(mpi_status_size,1))
2083 
2084 {^ifoned
2085 if(convert_type=='teclinempi') then
2086  nodes=0
2087  elems=0
2088  do level=levmin,levmax
2089  nodes=nodes + numgridsonlevel(level)*{nxc^d*}
2090  elems=elems + numgridsonlevel(level)*{nx^d*}
2091  enddo
2092 
2093  if (mype==0) write(qunit,"(a,i7,a,1pe12.5,a)") &
2094  'ZONE T="all levels", I=',elems, &
2095  ', SOLUTIONTIME=',global_time*time_convert_factor,', F=POINT'
2096 
2097  igonlevel=0
2098  do morton_no=morton_start(mype),morton_stop(mype)
2099  igrid = sfc_to_igrid(morton_no)
2100  call calc_x(igrid,xc,xcc)
2101  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,ixc^l,ixcc^l,.true.)
2102  if (mype==0) then
2103  {do ix^db=ixccmin^db,ixccmax^db\}
2104  x_tec(1:ndim)=xcc_tmp(ix^d,1:ndim)*normconv(0)
2105  w_tec(1:nw+nwauxio)=wcc_tmp(ix^d,1:nw+nwauxio)*normconv(1:nw+nwauxio)
2106  write(qunit,fmt="(100(e14.6))") x_tec, w_tec
2107  {end do\}
2108  else if (mype/=0) then
2109  itag=morton_no
2110  call mpi_send(igrid,1,mpi_integer, 0,itag,icomm,ierrmpi)
2111  call mpi_send(normconv,nw+nwauxio+1,mpi_double_precision,0,itag,icomm,ierrmpi)
2112  call mpi_send(wcc_tmp,1,type_block_wcc_io, 0,itag,icomm,ierrmpi)
2113  call mpi_send(xcc_tmp,1,type_block_xcc_io, 0,itag,icomm,ierrmpi)
2114  end if
2115  enddo
2116  if (mype==0) then
2117  do ipe=1,npe-1
2118  do morton_no=morton_start(ipe),morton_stop(ipe)
2119  itag=morton_no
2120  call mpi_recv(igrid_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2121  call mpi_recv(normconv,nw+nwauxio+1, mpi_double_precision,ipe,&
2122  itag,icomm,intstatus(:,1),ierrmpi)
2123  call mpi_recv(wcc_tmp_recv,1,type_block_wcc_io, ipe,itag,&
2124  icomm,intstatus(:,1),ierrmpi)
2125  call mpi_recv(xcc_tmp_recv,1,type_block_xcc_io, ipe,itag,&
2126  icomm,intstatus(:,1),ierrmpi)
2127  {do ix^db=ixccmin^db,ixccmax^db\}
2128  x_tec(1:ndim)=xcc_tmp_recv(ix^d,1:ndim)*normconv(0)
2129  w_tec(1:nw+nwauxio)=wcc_tmp_recv(ix^d,1:nw+nwauxio)*normconv(1:nw+nwauxio)
2130  write(qunit,fmt="(100(e14.6))") x_tec, w_tec
2131  {end do\}
2132  end do
2133  end do
2134  close(qunit)
2135  end if
2136 else
2137 }
2138 
2139 if (mype/=0) then
2140  itag=1000*morton_stop(mype)
2141  call mpi_send(levmin,1,mpi_integer, 0,itag,icomm,ierrmpi)
2142  itag=2000*morton_stop(mype)
2143  call mpi_send(levmax,1,mpi_integer, 0,itag,icomm,ierrmpi)
2144 end if
2145 
2146 do level=levmin,levmax
2147  nodesonlevelmype=numgridsonlevel_mype(level,mype)*{nxc^d*}
2148  elemsonlevelmype=numgridsonlevel_mype(level,mype)*{nx^d*}
2149  nodesonlevel=numgridsonlevel(level)*{nxc^d*}
2150  elemsonlevel=numgridsonlevel(level)*{nx^d*}
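 ! Count illustration (assuming 2D blocks of nx1=nx2=16 cells): every grid on
 ! this level contributes nxC1*nxC2=289 nodes and nx1*nx2=256 elements, so a
 ! level holding 10 local grids gives nodesonlevelmype=2890 and
 ! elemsonlevelmype=2560.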
2151  ! for all tecplot variants coded up here, we let the TECPLOT ZONES coincide
2152  ! with the AMR grid LEVEL. Other options would be:
2153  !  - let each grid define a zone: inefficient for TECPLOT internal workings,
2154  !    hence not implemented
2155  !  - let the entire octree define 1 zone: no difference in interpolation
2156  !    properties across TECPLOT zones detected as yet, hence not done
2157  select case(convert_type)
2158  case('tecplotmpi')
2159  ! in this option, we store the corner coordinates, as well as the corner
2160  ! values of all variables (obtained by averaging). This allows POINT packaging,
2161  ! and thus we can save full grid info by using one call to calc_grid
2162  if (mype==0.and.(nodesonlevelmype>0.and.elemsonlevelmype>0))&
2163  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,a)") &
2164  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2165  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=POINT, ZONETYPE=', &
2166  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2167  do morton_no=morton_start(mype),morton_stop(mype)
2168  igrid = sfc_to_igrid(morton_no)
2169  if (mype/=0)then
2170  itag=morton_no
2171  call mpi_send(igrid,1,mpi_integer, 0,itag,icomm,ierrmpi)
2172  itag=igrid
2173  call mpi_send(node(plevel_,igrid),1,mpi_integer, 0,itag,icomm,ierrmpi)
2174  end if
2175  if (node(plevel_,igrid)/=level) cycle
2176  call calc_x(igrid,xc,xcc)
2177  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
2178  ixc^l,ixcc^l,.true.)
2179  if (mype/=0) then
2180  itag=morton_no
2181  ind_send=(/ ixc^l /)
2182  siz_ind=2*^nd
2183  call mpi_send(ind_send,siz_ind, mpi_integer, 0,itag,icomm,ierrmpi)
2184  call mpi_send(normconv,nw+nwauxio+1,mpi_double_precision, 0,itag,icomm,ierrmpi)
2185 
2186  call mpi_send(wc_tmp,1,type_block_wc_io, 0,itag,icomm,ierrmpi)
2187  call mpi_send(xc_tmp,1,type_block_xc_io, 0,itag,icomm,ierrmpi)
2188  else
2189  {do ix^db=ixcmin^db,ixcmax^db\}
2190  x_tec(1:ndim)=xc_tmp(ix^d,1:ndim)*normconv(0)
2191  w_tec(1:nw+nwauxio)=wc_tmp(ix^d,1:nw+nwauxio)*normconv(1:nw+nwauxio)
2192  write(qunit,fmt="(100(e14.6))") x_tec, w_tec
2193  {end do\}
2194  end if
2195  enddo
2196 
2197  case('tecplotCCmpi')
2198  ! in this option, we store the corner coordinates, and the cell center
2199  ! values of all variables. Due to this mix of corner/cell center, we must
2200  ! use BLOCK packaging, and thus we have enormous overhead by using
2201  ! calc_grid repeatedly to merely fill values of cell corner coordinates
2202  ! and cell center values per dimension, per variable
2203  if(ndim+nw+nwauxio>99) call mpistop("adjust format specification in writeout")
2204  if(nw+nwauxio==1)then
2205  ! to make tecplot happy: avoid [ndim+1-ndim+1] in varlocation varset
2206  ! and just set [ndim+1]
2207  if (mype==0.and.(nodesonlevelmype>0.and.elemsonlevelmype>0))&
2208  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,a)") &
2209  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2210  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
2211  ndim+1,']=CELLCENTERED), ZONETYPE=', &
2212  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2213  else
2214  if(ndim+nw+nwauxio<10) then
2215  ! difference only in length of integer format specification for ndim+nw+nwauxio
2216  if (mype==0.and.(nodesonlevelmype>0.and.elemsonlevelmype>0))&
2217  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,i1,a,a)") &
2218  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2219  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
2220  ndim+1,'-',ndim+nw+nwauxio,']=CELLCENTERED), ZONETYPE=', &
2221  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2222  else
2223  if (mype==0.and.(nodesonlevelmype>0.and.elemsonlevelmype>0))&
2224  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,i2,a,a)") &
2225  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2226  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
2227  ndim+1,'-',ndim+nw+nwauxio,']=CELLCENTERED), ZONETYPE=', &
2228  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2229  endif
2230  endif
2231 
2232  do idim=1,ndim
2233  first=(idim==1)
2234  do morton_no=morton_start(mype),morton_stop(mype)
2235  igrid = sfc_to_igrid(morton_no)
2236  if (mype/=0)then
2237  itag=morton_no*idim
2238  call mpi_send(igrid,1,mpi_integer, 0,itag,icomm,ierrmpi)
2239  itag=igrid*idim
2240  call mpi_send(node(plevel_,igrid),1,mpi_integer, 0,itag,icomm,ierrmpi)
2241  end if
2242  if (node(plevel_,igrid)/=level) cycle
2243 
2244  call calc_x(igrid,xc,xcc)
2245  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
2246  ixc^l,ixcc^l,first)
2247  if (mype/=0)then
2248  ind_send=(/ ixc^l /)
2249  siz_ind=2*^nd
2250  itag=igrid*idim
2251  call mpi_send(ind_send,siz_ind, mpi_integer, 0,itag,icomm,ierrmpi)
2252  call mpi_send(normconv,nw+nwauxio+1,mpi_double_precision, 0,itag,icomm,ierrmpi)
2253  call mpi_send(xc_tmp,1,type_block_xc_io, 0,itag,icomm,ierrmpi)
2254  else
2255  write(qunit,fmt="(100(e14.6))") xc_tmp(ixc^s,idim)*normconv(0)
2256  end if
2257  enddo
2258  enddo
2259 
2260  do iw=1,nw+nwauxio
2261  do morton_no=morton_start(mype),morton_stop(mype)
2262  igrid = sfc_to_igrid(morton_no)
2263  if (mype/=0)then
2264  itag=morton_no*(ndim+iw)
2265  call mpi_send(igrid,1,mpi_integer, 0,itag,icomm,ierrmpi)
2266  itag=igrid*(ndim+iw)
2267  call mpi_send(node(plevel_,igrid),1,mpi_integer, 0,itag,icomm,ierrmpi)
2268  end if
2269  if (node(plevel_,igrid)/=level) cycle
2270 
2271  call calc_x(igrid,xc,xcc)
2272  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
2273  ixc^l,ixcc^l,.true.)
2274 
2275  if (mype/=0)then
2276  ind_send=(/ ixcc^l /)
2277  siz_ind=2*^nd
2278  itag=igrid*(ndim+iw)
2279  call mpi_send(ind_send,siz_ind, mpi_integer, 0,itag,icomm,ierrmpi)
2280  call mpi_send(normconv,nw+nwauxio+1,mpi_double_precision, 0,itag,icomm,ierrmpi)
2281  call mpi_send(wcc_tmp,1,type_block_wcc_io, 0,itag,icomm,ierrmpi)
2282  else
2283  write(qunit,fmt="(100(e14.6))") wcc_tmp(ixcc^s,iw)*normconv(iw)
2284  endif
2285  enddo
2286  enddo
2287  case default
2288  call mpistop('no such tecplot type')
2289  end select
2290 
2291 
2292  igonlevel=0
2293  do morton_no=morton_start(mype),morton_stop(mype)
2294  igrid = sfc_to_igrid(morton_no)
2295  if (mype/=0)then
2296  itag=morton_no
2297  call mpi_send(igrid,1,mpi_integer, 0,itag,icomm,ierrmpi)
2298  itag=igrid
2299  call mpi_send(node(plevel_,igrid),1,mpi_integer, 0,itag,icomm,ierrmpi)
2300  end if
2301  if (node(plevel_,igrid)/=level) cycle
2302 
2303  igonlevel=igonlevel+1
2304  if (mype/=0)then
2305  itag=igrid
2306  call mpi_send(igonlevel,1,mpi_integer, 0,itag,icomm,ierrmpi)
2307  end if
2308  if(mype==0)then
2309  call save_conntec(qunit,igrid,igonlevel)
2310  endif
2311  end do
2312 end do
2313 
2314 if (mype==0) then
2315  if (npe>1) then
2316  do ipe=1,npe-1
2317  itag=1000*morton_stop(ipe)
2318  call mpi_recv(levmin_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2319  itag=2000*morton_stop(ipe)
2320  call mpi_recv(levmax_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2321  do level=levmin_recv,levmax_recv
2322  nodesonlevelmype=numgridsonlevel_mype(level,ipe)*{nxc^d*}
2323  elemsonlevelmype=numgridsonlevel_mype(level,ipe)*{nx^d*}
2324  nodesonlevel=numgridsonlevel(level)*{nxc^d*}
2325  elemsonlevel=numgridsonlevel(level)*{nx^d*}
2326  select case(convert_type)
2327  case('tecplotmpi')
2328  ! in this option, we store the corner coordinates, as well as the corner
2329  ! values of all variables (obtained by averaging). This allows POINT packaging,
2330  ! and thus we can save full grid info by using one call to calc_grid
2331  if(nodesonlevelmype>0.and.elemsonlevelmype>0) &
2332  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,a)") &
2333  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2334  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=POINT, ZONETYPE=', &
2335  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2336  do morton_no=morton_start(ipe),morton_stop(ipe)
2337  itag=morton_no
2338  call mpi_recv(igrid_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2339  itag=igrid_recv
2340  call mpi_recv(level_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2341  if (level_recv/=level) cycle
2342 
2343  itag=morton_no
2344  siz_ind=2*^nd
2345  call mpi_recv(ind_recv,siz_ind, mpi_integer, ipe,itag,&
2346  icomm,intstatus(:,1),ierrmpi)
2347  ixrvcmin^d=ind_recv(^d);ixrvcmax^d=ind_recv(^nd+^d);
2348  call mpi_recv(normconv,nw+nwauxio+1, mpi_double_precision,ipe,itag&
2349  ,icomm,intstatus(:,1),ierrmpi)
2350 
2351  call mpi_recv(wc_tmp_recv,1,type_block_wc_io, ipe,itag,&
2352  icomm,intstatus(:,1),ierrmpi)
2353  call mpi_recv(xc_tmp_recv,1,type_block_xc_io, ipe,itag,&
2354  icomm,intstatus(:,1),ierrmpi)
2355 
2356  {do ix^db=ixrvcmin^db,ixrvcmax^db\}
2357  x_tec(1:ndim)=xc_tmp_recv(ix^d,1:ndim)*normconv(0)
2358  w_tec(1:nw+nwauxio)=wc_tmp_recv(ix^d,1:nw+nwauxio)*normconv(1:nw+nwauxio)
2359  write(qunit,fmt="(100(e14.6))") x_tec, w_tec
2360  {end do\}
2361  end do
2362  case('tecplotCCmpi')
2363  ! in this option, we store the corner coordinates, and the cell center
2364  ! values of all variables. Due to this mix of corner/cell center, we must
2365  ! use BLOCK packaging, and thus we have enormous overhead by using
2366  ! calc_grid repeatedly to merely fill values of cell corner coordinates
2367  ! and cell center values per dimension, per variable
2368  if(ndim+nw+nwauxio>99) call mpistop("adjust format specification in writeout")
2369  if(nw+nwauxio==1)then
2370  ! to make tecplot happy: avoid [ndim+1-ndim+1] in varlocation varset
2371  ! and just set [ndim+1]
2372  if(nodesonlevelmype>0.and.elemsonlevelmype>0) &
2373  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,a)") &
2374  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2375  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
2376  ndim+1,']=CELLCENTERED), ZONETYPE=', &
2377  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2378  else
2379  if(ndim+nw+nwauxio<10) then
2380  ! difference only in length of integer format specification for ndim+nw+nwauxio
2381  if(nodesonlevelmype>0.and.elemsonlevelmype>0) &
2382  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,i1,a,a)") &
2383  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2384  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
2385  ndim+1,'-',ndim+nw+nwauxio,']=CELLCENTERED), ZONETYPE=', &
2386  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2387  else
2388  if(nodesonlevelmype>0.and.elemsonlevelmype>0) &
2389  write(qunit,"(a,i7,a,a,i7,a,i7,a,f25.16,a,i1,a,i2,a,a)") &
2390  'ZONE T="',level,'"',', N=',nodesonlevelmype,', E=',elemsonlevelmype, &
2391  ', SOLUTIONTIME=',global_time*time_convert_factor,', DATAPACKING=BLOCK, VARLOCATION=([', &
2392  ndim+1,'-',ndim+nw+nwauxio,']=CELLCENTERED), ZONETYPE=', &
2393  {^ifoned 'FELINESEG'}{^iftwod 'FEQUADRILATERAL'}{^ifthreed 'FEBRICK'}
2394  endif
2395  endif
2396 
2397  do idim=1,ndim
2398  do morton_no=morton_start(ipe),morton_stop(ipe)
2399  itag=morton_no*idim
2400  call mpi_recv(igrid_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2401  itag=igrid_recv*idim
2402  call mpi_recv(level_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2403  if (level_recv/=level) cycle
2404 
2405  siz_ind=2*^nd
2406  itag=igrid_recv*idim
2407  call mpi_recv(ind_recv,siz_ind, mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2408  ixrvcmin^d=ind_recv(^d);ixrvcmax^d=ind_recv(^nd+^d);
2409  call mpi_recv(normconv,nw+nwauxio+1, mpi_double_precision,ipe,itag&
2410  ,icomm,intstatus(:,1),ierrmpi)
2411  call mpi_recv(xc_tmp_recv,1,type_block_xc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2412  write(qunit,fmt="(100(e14.6))") xc_tmp_recv(ixrvc^s,idim)*normconv(0)
2413  end do
2414  end do
2415 
2416  do iw=1,nw+nwauxio
2417  do morton_no=morton_start(ipe),morton_stop(ipe)
2418  itag=morton_no*(ndim+iw)
2419  call mpi_recv(igrid_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2420  itag=igrid_recv*(ndim+iw)
2421  call mpi_recv(level_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2422  if (level_recv/=level) cycle
2423 
2424  siz_ind=2*^nd
2425  itag=igrid_recv*(ndim+iw)
2426  call mpi_recv(ind_recv,siz_ind, mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2427  ixrvccmin^d=ind_recv(^d);ixrvccmax^d=ind_recv(^nd+^d);
2428  call mpi_recv(normconv,nw+nwauxio+1, mpi_double_precision,ipe,itag&
2429  ,icomm,intstatus(:,1),ierrmpi)
2430  call mpi_recv(wcc_tmp_recv,1,type_block_wcc_io, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2431  write(qunit,fmt="(100(e14.6))") wcc_tmp_recv(ixrvcc^s,iw)*normconv(iw)
2432  enddo
2433  enddo
2434  case default
2435  call mpistop('no such tecplot type')
2436  end select
2437 
2438  do morton_no=morton_start(ipe),morton_stop(ipe)
2439  itag=morton_no
2440  call mpi_recv(igrid_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2441  itag=igrid_recv
2442  call mpi_recv(level_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2443  if (level_recv/=level) cycle
2444 
2445  itag=igrid_recv
2446  call mpi_recv(igonlevel_recv,1,mpi_integer, ipe,itag,icomm,intstatus(:,1),ierrmpi)
2447  call save_conntec(qunit,igrid_recv,igonlevel_recv)
2448  end do ! morton loop
2449  end do ! level loop
2450  end do ! ipe loop
2451  end if ! npe>1 if
2452 end if ! mype=0 if
2453 {^ifoned endif}
2454 
2455 if (npe>1) then
2456  call mpi_barrier(icomm,ierrmpi)
2457  if(mype==0)deallocate(intstatus)
2458 endif
2459 
2460 end subroutine tecplot_mpi
2461 !=============================================================================
2462 subroutine punstructuredvtkb_mpi(qunit)
2464 ! Write one pvtu file plus one vtu file per processor
2465 ! Otherwise like unstructuredvtk_mpi
2466 ! output in vtu format for paraview, binary version
2467 ! uses calc_grid to compute nwauxio variables
2468 ! allows renormalizing using convert factors
2469 
2472 use mod_calculate_xw
2473 
2474 integer, intent(in) :: qunit
2475 
2476 double precision :: x_VTK(1:3)
2477 
2478 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC_TMP
2479 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC_TMP
2480 double precision, dimension(ixMlo^D-1:ixMhi^D,ndim) :: xC
2481 double precision, dimension(ixMlo^D:ixMhi^D,ndim) :: xCC
2482 
2483 double precision, dimension(ixMlo^D-1:ixMhi^D,nw+nwauxio) :: wC_TMP
2484 double precision, dimension(ixMlo^D:ixMhi^D,nw+nwauxio) :: wCC_TMP
2485 
2486 integer :: igrid,iigrid,level,igonlevel,icel,ixC^L,ixCC^L,Morton_no
2487 integer :: NumGridsOnLevel(1:nlevelshi)
2488 integer :: nx^D,nxC^D,nodesonlevel,elemsonlevel,nc,np,VTK_type,ix^D
2489 double precision :: normconv(0:nw+nwauxio)
2490 character(len=80) :: pfilename
2491 character(len=name_len) :: wnamei(1:nw+nwauxio),xandwnamei(1:ndim+nw+nwauxio)
2492 character(len=1024) :: outfilehead
2493 
2494 integer*8 :: offset
2495 integer:: recsep,k,iw,filenr
2496 integer:: length,lengthcc,offset_points,offset_cells, &
2497  length_coords,length_conn,length_offsets
2498 character:: buf
2499 character(len=6):: bufform
2500 
2501 logical :: fileopen
2502 !-----------------------------------------------------------------------------
2503 
2504 ! Write pvtu-file:
2505 if (mype==0) then
2506  call write_pvtu(qunit)
2507 endif
2508 ! Now write the Source files:
2509 inquire(qunit,opened=fileopen)
2510 if(.not.fileopen)then
2511  ! generate filename
2512  filenr=snapshotnext-1
2513  if (autoconvert) filenr=snapshotnext
2514  ! Open the file for the header part
2515  write(pfilename,'(a,i4.4,a,i4.4,a)') trim(base_filename),filenr,"p",mype,".vtu"
2516  open(qunit,file=pfilename,status='unknown',form='formatted')
2517 endif
2518 ! generate xml header
2519 write(qunit,'(a)')'<?xml version="1.0"?>'
2520 write(qunit,'(a)',advance='no') '<VTKFile type="UnstructuredGrid"'
2521 write(qunit,'(a)')' version="0.1" byte_order="LittleEndian">'
2522 write(qunit,'(a)')' <UnstructuredGrid>'
2523 write(qunit,'(a)')'<FieldData>'
2524 write(qunit,'(2a)')'<DataArray type="Float32" Name="TIME" ',&
2525  'NumberOfTuples="1" format="ascii">'
2526 write(qunit,*) real(global_time*time_convert_factor)
2527 write(qunit,'(a)')'</DataArray>'
2528 write(qunit,'(a)')'</FieldData>'
2529 
2530 
2531 offset=0
2532 recsep=4
2533 
2534 call getheadernames(wnamei,xandwnamei,outfilehead)
2535 
2536 ! number of cells, number of corner points, per grid.
2537 nx^d=ixmhi^d-ixmlo^d+1;
2538 nxc^d=nx^d+1;
2539 nc={nx^d*}
2540 np={nxc^d*}
2541 
2542 length=np*size_real
2543 lengthcc=nc*size_real
2544 
2545 length_coords=3*length
2546 length_conn=2**^nd*size_int*nc
2547 length_offsets=nc*size_int
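! Byte-count sketch (assuming 3D blocks of 16^3 cells and 4-byte reals and
! integers): np=17**3=4913 and nc=16**3=4096, so length=np*size_real=19652
! bytes per nodal variable, lengthcc=nc*size_real=16384 per cell-centered
! variable, length_coords=3*length, length_conn=8*size_int*nc and
! length_offsets=nc*size_int; each appended record is preceded by a size_int
! byte count, hence the repeated offset=offset+...+size_int updates below.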
2548 
2549 ! Note: using the w_write, writelevel, writespshift
2550 ! we can clip parts of the grid away, select variables, levels etc.
2551 do level=levmin,levmax
2552  if (writelevel(level)) then
2553  do morton_no=morton_start(mype),morton_stop(mype)
2554  igrid=sfc_to_igrid(morton_no)
2555  if (node(plevel_,igrid)/=level) cycle
2556  ! only output a grid when fully within clipped region selected
2557  ! by writespshift array
2558  if (({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
2559  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
2560  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})) then
2561  select case(convert_type)
2562  case('pvtuBmpi')
2563  ! we write out every grid as one VTK PIECE
2564  write(qunit,'(a,i7,a,i7,a)') &
2565  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
2566  write(qunit,'(a)')'<PointData>'
2567  do iw=1,nw
2568  if(.not.w_write(iw))cycle
2569 
2570  write(qunit,'(a,a,a,i16,a)')&
2571  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
2572  '" format="appended" offset="',offset,'">'
2573  write(qunit,'(a)')'</DataArray>'
2574  offset=offset+length+size_int
2575  enddo
2576  do iw=nw+1,nw+nwauxio
2577 
2578  write(qunit,'(a,a,a,i16,a)')&
2579  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
2580  '" format="appended" offset="',offset,'">'
2581  write(qunit,'(a)')'</DataArray>'
2582  offset=offset+length+size_int
2583  enddo
2584  write(qunit,'(a)')'</PointData>'
2585 
2586  write(qunit,'(a)')'<Points>'
2587  write(qunit,'(a,i16,a)') &
2588  '<DataArray type="Float32" NumberOfComponents="3" format="appended" offset="',offset,'"/>'
2589  ! write cell corner coordinates in a backward dimensional loop, always 3D output
2590  offset=offset+length_coords+size_int
2591  write(qunit,'(a)')'</Points>'
2592  case('pvtuBCCmpi')
2593  ! we write out every grid as one VTK PIECE
2594  write(qunit,'(a,i7,a,i7,a)') &
2595  '<Piece NumberOfPoints="',np,'" NumberOfCells="',nc,'">'
2596  write(qunit,'(a)')'<CellData>'
2597  do iw=1,nw
2598  if(.not.w_write(iw))cycle
2599 
2600  write(qunit,'(a,a,a,i16,a)')&
2601  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
2602  '" format="appended" offset="',offset,'">'
2603  write(qunit,'(a)')'</DataArray>'
2604  offset=offset+lengthcc+size_int
2605  enddo
2606  do iw=nw+1,nw+nwauxio
2607 
2608  write(qunit,'(a,a,a,i16,a)')&
2609  '<DataArray type="Float32" Name="',trim(wnamei(iw)), &
2610  '" format="appended" offset="',offset,'">'
2611  write(qunit,'(a)')'</DataArray>'
2612  offset=offset+lengthcc+size_int
2613  enddo
2614  write(qunit,'(a)')'</CellData>'
2615 
2616  write(qunit,'(a)')'<Points>'
2617  write(qunit,'(a,i16,a)') &
2618  '<DataArray type="Float32" NumberOfComponents="3" format="appended" offset="',offset,'"/>'
2619  ! write cell corner coordinates in a backward dimensional loop, always 3D output
2620  offset=offset+length_coords+size_int
2621  write(qunit,'(a)')'</Points>'
2622  end select
2623 
2624 
2625  write(qunit,'(a)')'<Cells>'
2626 
2627  ! connectivity part
2628  write(qunit,'(a,i16,a)')&
2629  '<DataArray type="Int32" Name="connectivity" format="appended" offset="',offset,'"/>'
2630  offset=offset+length_conn+size_int
2631 
2632  ! offsets data array
2633  write(qunit,'(a,i16,a)') &
2634  '<DataArray type="Int32" Name="offsets" format="appended" offset="',offset,'"/>'
2635  offset=offset+length_offsets+size_int
2636 
2637  ! VTK cell type data array
2638  write(qunit,'(a,i16,a)') &
2639  '<DataArray type="Int32" Name="types" format="appended" offset="',offset,'"/>'
2640  offset=offset+size_int+nc*size_int
2641 
2642  write(qunit,'(a)')'</Cells>'
2643 
2644  write(qunit,'(a)')'</Piece>'
2645  endif
2646  enddo
2647  endif
2648 enddo
2649 
2650 write(qunit,'(a)')'</UnstructuredGrid>'
2651 write(qunit,'(a)')'<AppendedData encoding="raw">'
2652 
2653 close(qunit)
2654 ! next, keep the gfortran compiler happy: it does not know
2655 ! form='binary' and produces an error on compilation
2656 !bufform='binary'
2657 !open(qunit,file=pfilename,form=bufform,position='append')
2658 !The stream-access open below should in principle also work for gfortran (tested with gfortran 4.6.0 and Intel 11.1):
2659 open(qunit,file=pfilename,access='stream',form='unformatted',position='append')
2660 buf='_'
2661 write(qunit) trim(buf)
2662 
2663 do level=levmin,levmax
2664  if (writelevel(level)) then
2665  do morton_no=morton_start(mype),morton_stop(mype)
2666  igrid=sfc_to_igrid(morton_no)
2667  if (node(plevel_,igrid)/=level) cycle
2668  ! only output a grid when fully within clipped region selected
2669  ! by writespshift array
2670  if (({rnode(rpxmin^d_,igrid)>=xprobmin^d+(xprobmax^d-xprobmin^d)&
2671  *writespshift(^d,1)|.and.}).and.({rnode(rpxmax^d_,igrid)&
2672  <=xprobmax^d-(xprobmax^d-xprobmin^d)*writespshift(^d,2)|.and.})) then
2673  call calc_x(igrid,xc,xcc)
2674  call calc_grid(qunit,igrid,xc,xcc,xc_tmp,xcc_tmp,wc_tmp,wcc_tmp,normconv,&
2675  ixc^l,ixcc^l,.true.)
2676  do iw=1,nw
2677  if(.not.w_write(iw))cycle
2678  select case(convert_type)
2679  case('pvtuBmpi')
2680  write(qunit) length
2681  write(qunit) {(|}real(wC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixcmin^d,ixcmax^d)}
2682  case('pvtuBCCmpi')
2683  write(qunit) lengthcc
2684  write(qunit) {(|}real(wCC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixccmin^d,ixccmax^d)}
2685  end select
2686  enddo
2687  do iw=nw+1,nw+nwauxio
2688  select case(convert_type)
2689  case('pvtuBmpi')
2690  write(qunit) length
2691  write(qunit) {(|}real(wC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixcmin^d,ixcmax^d)}
2692  case('pvtuBCCmpi')
2693  write(qunit) lengthcc
2694  write(qunit) {(|}real(wCC_TMP(ix^D,iw)*normconv(iw)),{ix^D=ixccmin^d,ixccmax^d)}
2695  end select
2696  enddo
2697 
2698  write(qunit) length_coords
2699  {do ix^db=ixcmin^db,ixcmax^db \}
2700  x_vtk(1:3)=zero;
2701  x_vtk(1:ndim)=xc_tmp(ix^d,1:ndim)*normconv(0);
2702  do k=1,3
2703  write(qunit) real(x_vtk(k))
2704  end do
2705  {end do \}
2706 
2707  write(qunit) length_conn
2708  {do ix^db=1,nx^db\}
2709  {^ifoned write(qunit)ix1-1,ix1 \}
2710  {^iftwod
2711  write(qunit)(ix2-1)*nxc1+ix1-1, &
2712  (ix2-1)*nxc1+ix1,ix2*nxc1+ix1-1,ix2*nxc1+ix1
2713  \}
2714  {^ifthreed
2715  write(qunit)&
2716  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1-1, &
2717  (ix3-1)*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
2718  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1-1,&
2719  (ix3-1)*nxc2*nxc1+ ix2*nxc1+ix1,&
2720  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1-1,&
2721  ix3*nxc2*nxc1+(ix2-1)*nxc1+ix1,&
2722  ix3*nxc2*nxc1+ ix2*nxc1+ix1-1,&
2723  ix3*nxc2*nxc1+ ix2*nxc1+ix1
2724  \}
2725  {end do\}
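  ! Connectivity illustration (assumed 2D block with nx1=nx2=2, hence nxC1=3):
  ! cell (ix1,ix2)=(1,1) writes the zero-based corner indices 0 1 3 4 and cell
  ! (2,2) writes 4 5 7 8, i.e. the VTK_PIXEL ordering lower-left, lower-right,
  ! upper-left, upper-right on the corner lattice.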
2726 
2727  write(qunit) length_offsets
2728  do icel=1,nc
2729  write(qunit) icel*(2**^nd)
2730  end do
2731 
2732 
2733  {^ifoned vtk_type=3 \}
2734  {^iftwod vtk_type=8 \}
2735  {^ifthreed vtk_type=11 \}
2736  write(qunit) size_int*nc
2737  do icel=1,nc
2738  write(qunit) vtk_type
2739  enddo
2740  endif
2741  end do
2742  endif
2743 end do
2744 
2745 close(qunit)
2746 open(qunit,file=pfilename,status='unknown',form='formatted',position='append')
2747 
2748 write(qunit,'(a)')'</AppendedData>'
2749 write(qunit,'(a)')'</VTKFile>'
2750 close(qunit)
2751 
2752 end subroutine punstructuredvtkb_mpi
2753 !=============================================================================