Multiple changes, mostly focusing on active particle search

* Added optional active particle search via GRAPite.

* Fixed ETICS_DTSCF in the Makefile.

* Disabled ETICS_CEP by default.

* Renamed and improved the initialization Python script.
Yohai Meiron 2020-02-26 18:54:40 -05:00
parent 8ae1f0991d
commit 953a8286eb
4 changed files with 136 additions and 78 deletions


@@ -134,8 +134,10 @@
#ifdef ETICS
#include "grapite.h"
// Why do we need CEP as a compilation flag? Just have it always on when ETICS is on. If there is no CEP, there should be a graceful skipping of those operations.
#define ETICS_CEP
#define ETICS_DTSCF 0.125
//#define ETICS_CEP
#ifndef ETICS_DTSCF
#error "ETICS_DTSCF must be defined"
#endif
#endif
#define TIMING
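For context, ETICS_DTSCF is now expected to come from the build system rather than being hard-coded in the source (presumably something like -DETICS_DTSCF=0.125 in the Makefile, per the commit message; the exact flag value here is an assumption). A minimal sketch of the guard pattern in isolation:

    /* Fail fast at compile time if the build system forgot the flag.
       Assumed build line:  mpicc -DETICS_DTSCF=0.125 ... */
    #ifndef ETICS_DTSCF
    #error "ETICS_DTSCF must be defined"
    #endif
    static const double dt_scf = ETICS_DTSCF;  /* presumably the SCF update interval */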
@@ -163,6 +165,10 @@
// #define ACT_DEF_LL
#if defined(ACT_DEF_LL) && defined(ACT_DEF_GRAPITE)
#error "Contradicting preprocessor flags!"
#endif
/****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
@@ -5630,6 +5636,9 @@ for(i=0; i<N; i++)
// j_loc_beg = myRank*n_loc;
// j_loc_end = (myRank+1)*n_loc;
#ifdef ACT_DEF_GRAPITE
min_t_loc = grapite_get_minimum_time();
#else
min_t_loc = t[0]+dt[0];
for(j=0; j<n_loc; j++)
@@ -5638,6 +5647,7 @@ for(i=0; i<N; i++)
tmp = t[jjj] + dt[jjj];
if( tmp < min_t_loc ) min_t_loc = tmp;
}
#endif
/* Wait for all processors to finish their work... */
MPI_Barrier(MPI_COMM_WORLD);
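Note that min_t_loc computed above is only the per-rank minimum; the global min_t used in the next hunk is presumably obtained by a reduction outside the quoted context. A hedged sketch of that step (variable type assumed):

    /* Sketch only: combine per-rank minima into the global minimum time
       (assumed; the actual reduction is not shown in this diff). */
    MPI_Allreduce(&min_t_loc, &min_t, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);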
@@ -5659,6 +5669,22 @@ for(i=0; i<N; i++)
get_CPU_time(&CPU_tmp_real0, &CPU_tmp_user0, &CPU_tmp_syst0);
#endif
#ifdef ACT_DEF_GRAPITE
int ind_act_loc[N_MAX], n_act_loc;
grapite_active_search(min_t, ind_act_loc, &n_act_loc);
if (myRank > 0)
for(int i=0; i<n_act_loc; i++)
ind_act_loc[i] += myRank*n_loc;
int n_act_arr[256], displs[256]; // Assuming a maximum of 256 processes... seems safe.
MPI_Allgather(&n_act_loc, 1, MPI_INT, n_act_arr, 1, MPI_INT, MPI_COMM_WORLD);
n_act = n_act_arr[0];
for (int i=1; i<n_proc; i++)
n_act += n_act_arr[i];
displs[0] = 0;
for (int i=1; i<n_proc; i++)
displs[i]=displs[i-1]+n_act_arr[i-1];
MPI_Allgatherv(ind_act_loc, n_act_loc, MPI_INT, ind_act, n_act_arr, displs, MPI_INT, MPI_COMM_WORLD);
#else
n_act = 0;
//#pragma omp parallel for
@@ -5673,6 +5699,7 @@ for(i=0; i<N; i++)
}
// }
} /* i */
#endif // ACT_DEF_GRAPITE
#ifdef TIMING
get_CPU_time(&CPU_tmp_real, &CPU_tmp_user, &CPU_tmp_syst);
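To make the gather pattern in the ACT_DEF_GRAPITE branch easier to follow, here is a self-contained sketch of the same Allgather + Allgatherv sequence: each rank globalizes its local active indices, the counts are exchanged, turned into receive displacements by a prefix sum, and the index lists are then merged on every rank. All names and sizes are illustrative, not the code's own; compile with mpicc and run with mpirun.

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank, nproc;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &nproc);

        /* Pretend each rank found (rank+1) active particles. */
        int n_act_loc = rank + 1;
        int ind_act_loc[64];                    /* assumes nproc <= 64 */
        for (int i = 0; i < n_act_loc; i++)
            ind_act_loc[i] = rank * 1000 + i;   /* globalized index */

        /* Step 1: every rank learns every other rank's count. */
        int counts[64], displs[64];
        MPI_Allgather(&n_act_loc, 1, MPI_INT, counts, 1, MPI_INT, MPI_COMM_WORLD);

        /* Step 2: prefix-sum the counts into receive displacements. */
        int n_act = counts[0];
        displs[0] = 0;
        for (int i = 1; i < nproc; i++) {
            displs[i] = displs[i-1] + counts[i-1];
            n_act += counts[i];
        }

        /* Step 3: variable-length gather of the index lists themselves. */
        int ind_act[64 * 64];
        MPI_Allgatherv(ind_act_loc, n_act_loc, MPI_INT,
                       ind_act, counts, displs, MPI_INT, MPI_COMM_WORLD);

        if (rank == 0)
            printf("gathered %d active indices in total\n", n_act);

        MPI_Finalize();
        return 0;
    }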