/* mpi.h — MPIUNI uniprocessor MPI bindings (PETSc 3.7.6, 2017-04-24) */
  1: /*
  2:    This is a special set of bindings for uni-processor use of MPI by the PETSc library.

  4:    NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.

  6:    For example,
  7:    * Does not implement send to self.
  8:    * Does not implement attributes correctly.
  9: */

 11: /*
 12:   The following info is a response to one of the petsc-maint questions
 13:   regarding MPIUNI.

 15:   MPIUNI was developed with the aim of getting PETSc compiled, and
 16:   usable in the absence of a full MPI implementation. With this, we
 17:   were able to provide PETSc on Windows, Windows64 even before any MPI
 18:   implementation was available on these platforms. [Or with certain
 19:   compilers - like borland, that do not have a usable MPI
 20:   implementation]

 22:   However - providing a sequential, standards compliant MPI
 23:   implementation is *not* the goal of MPIUNI. The development strategy
 24:   was - to make enough changes to it so that PETSc sources, examples
 25:   compile without errors, and runs in the uni-processor mode. This is
 26:   the reason each function is not documented.

 28:   PETSc usage of MPIUNI is primarily from C. However a minimal fortran
 29:   interface is also provided - to get PETSc fortran examples with a
 30:   few MPI calls working.

 32:   One of the optimizations with MPIUNI is to avoid the function call
 33:   overhead, when possible. Hence most of the C functions are
 34:   implemented as macros. However the function calls cannot be avoided
 35:   with fortran usage.

 37:   Most PETSc objects have both sequential and parallel
 38:   implementations, which are separate. For eg: We have two types of
 39:   sparse matrix storage formats - SeqAIJ, and MPIAIJ. Some MPI
 40:   routines are used in the Seq part, but most of them are used in the
 41:   MPI part. The send/receive calls can be found mostly in the MPI
 42:   part.

 44:   When MPIUNI is used, only the Seq version of the PETSc objects are
 45:   used, even though the MPI variant of the objects are compiled. Since
 46:   there are no send/receive calls in the Seq variant, PETSc works fine
 47:   with MPIUNI in seq mode.

 49:   The reason some send/receive functions are defined to abort(), is to
 50:   detect sections of code that use send/receive functions, and gets
 51:   executed in the sequential mode. (which shouldn't happen in case of
 52:   PETSc).

 54:   Proper implementation of send/receive would involve writing a
 55:   function for each of them. Inside each of these functions, we have
 56:   to check if the send is to self or receive is from self, and then
 57:   doing the buffering accordingly (until the receive is called) - or
 58:   what if a nonblocking receive is called, do a copy etc.. Handling
 59:   the buffering aspects might be complicated enough, that in this
 60:   case, a proper implementation of MPI might as well be used. This is
 61:   the reason the send to self is not implemented in MPIUNI, and never
 62:   will be.

 64:   Proper implementations of MPI [for eg: MPICH & OpenMPI] are
 65:   available for most machines. When these packages are available, Its
 66:   generally preferable to use one of them instead of MPIUNI - even if
 67:   the user is using PETSc sequentially.

 69:     - MPIUNI does not support all MPI functions [or functionality].
 70:     Hence it might not work with external packages or user code that
 71:     might have MPI calls in it.

 73:     - MPIUNI is not a standards compliant implementation for np=1.
 74:     For eg: if the user code has send/recv to self, then it will
 75:     abort. [Similar issues with a number of other MPI functionality]
 76:     However MPICH & OpenMPI are the correct implementations of MPI
 77:     standard for np=1.

 79:     - When user code uses multiple MPI based packages that have their
 80:     own *internal* stubs equivalent to MPIUNI - in sequential mode,
 81:     invariably these multiple implementations of MPI for np=1 conflict
 82:     with each other. The correct thing to do is: make all such
 83:     packages use the *same* MPI implementation for np=1. MPICH/OpenMPI
 84:     satisfy this requirement correctly [and hence the correct choice].

 86:     - Using MPICH/OpenMPI sequentially should have minimal
 87:     disadvantages. [for eg: these binaries can be run without
 88:     mpirun/mpiexec as ./executable, without requiring any extra
 89:     configurations for ssh/rsh/daemons etc..]. This should not be a
 90:     reason to avoid these packages for sequential use.

 92:     Instructions for building standalone MPIUNI [for eg: linux/gcc+gfortran]:
 93:     - extract include/mpiuni/mpi.h,mpif.f, src/sys/mpiuni/mpi.c from PETSc
 94:     - remove reference to petscconf.h from mpi.h
 95:     - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
 96:     - ar cr libmpiuni.a mpi.o

 98: */


103: /* Requred by abort() in mpi.c & for win64 */
104: #include <petscconf.h>

106: /*  This is reproduced from petscsys.h so that mpi.h can be used standalone without first including petscsys.h */
107: #if defined(_WIN32) && defined(PETSC_USE_SHARED_LIBRARIES)
108: #  define MPIUni_ __declspec(dllexport)
109: #  define MPIUni_PETSC_DLLIMPORT __declspec(dllimport)
110: #elif defined(PETSC_USE_VISIBILITY_CXX) && defined(__cplusplus)
111: #  define MPIUni_ __attribute__((visibility ("default")))
112: #  define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
113: #elif defined(PETSC_USE_VISIBILITY_C) && !defined(__cplusplus)
114: #  define MPIUni_ __attribute__((visibility ("default")))
115: #  define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
116: #else
117: #  define MPIUni_
118: #  define MPIUni_PETSC_DLLIMPORT
119: #endif

121: #if defined(petsc_EXPORTS)
122: #  define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_
123: #else  /* Win32 users need this to import symbols from petsc.dll */
124: #  define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_PETSC_DLLIMPORT
125: #endif

127: #if defined(__cplusplus)
128: #define MPIUni_PETSC_EXTERN extern "C" MPIUni_PETSC_VISIBILITY_PUBLIC
129: #else
130: #define MPIUni_PETSC_EXTERN extern MPIUni_PETSC_VISIBILITY_PUBLIC
131: #endif

133: #if defined(__cplusplus)
134: extern "C" {
135: #endif

137: /* require an int variable large enough to hold a pointer */
138: #if (PETSC_SIZEOF_LONG == PETSC_SIZEOF_VOID_P)
139: typedef long MPIUNI_INTPTR;
140: #elif (PETSC_SIZEOF_SIZE_T == PETSC_SIZEOF_VOID_P)
141: typedef size_t MPIUNI_INTPTR;
142: #else
143: typedef unknownuniptr MPIUNI_INTPTR;
144: #endif

146: /* old 32bit MS compiler does not support long long */
147: #if defined(PETSC_SIZEOF_LONG_LONG)
148: typedef long long MPIUNI_INT64;
149: typedef unsigned long long MPIUNI_UINT64;
150: #elif defined(PETSC_HAVE___INT64)
151: typedef _int64 MPIUNI_INT64;
152: typedef unsigned _int64 MPIUNI_UINT64;
153: #else
154: #error "cannot determine MPIUNI_INT64, MPIUNI_UINT64 types"
155: #endif

157: /*

159:     MPIUNI_TMP is used in the macros below only to stop various C/C++ compilers
160: from generating warning messages about unused variables while compiling PETSc.
161: */
162: MPIUni_PETSC_EXTERN void *MPIUNI_TMP;

/* Communicator handles and assorted constants.  Values are MPIUNI-specific
   and need not match any real MPI implementation. */
#define MPI_COMM_SELF        1
#define MPI_COMM_WORLD       2
#define MPI_COMM_NULL        0
#define MPI_SUCCESS          0
#define MPI_IDENT            0
#define MPI_CONGRUENT        1
#define MPI_SIMILAR          2
#define MPI_UNEQUAL          3
#define MPI_ANY_SOURCE     (-2)
#define MPI_KEYVAL_INVALID   0
#define MPI_ERR_UNKNOWN     18
#define MPI_ERR_INTERN      21
#define MPI_ERR_OTHER        1
#define MPI_TAG_UB           0
#define MPI_ERRORS_RETURN    0
#define MPI_UNDEFINED      (-32766)
#define MPI_ERRORS_ARE_FATAL (-32765)
#define MPI_MAXLOC           5
#define MPI_MINLOC           6


/* External types.  Communicators and most handles are plain ints in MPIUNI;
   MPI_Status carries the standard three public fields. */
typedef int    MPI_Comm;
typedef void   *MPI_Request;
typedef void   *MPI_Group;
typedef struct {int MPI_TAG,MPI_SOURCE,MPI_ERROR;} MPI_Status;
typedef char   *MPI_Errhandler;
typedef int    MPI_Fint;
typedef int    MPI_File;
typedef int    MPI_Info;
typedef int    MPI_Offset;

/* In order to handle datatypes, we make them into "sizeof(raw-type)";
    this allows us to do the MPIUNI_Memcpy's easily.
    Encoding: (family-id << 16) | byte-size, so MPI_sizeof() is just "& 0xff". */
#define MPI_Datatype         int
#define MPI_FLOAT            (1 << 16 | sizeof(float))
#define MPI_DOUBLE           (1 << 16 | sizeof(double))
#define MPI_LONG_DOUBLE      (1 << 16 | sizeof(long double))

#define MPI_COMPLEX          (2 << 16 | 2*sizeof(float))
#define MPI_C_COMPLEX        (2 << 16 | 2*sizeof(float))
#define MPI_C_DOUBLE_COMPLEX (2 << 16 | 2*sizeof(double))

#define MPI_CHAR             (3 << 16 | sizeof(char))
#define MPI_BYTE             (3 << 16 | sizeof(char))
#define MPI_UNSIGNED_CHAR    (3 << 16 | sizeof(unsigned char))

#define MPI_INT              (4 << 16 | sizeof(int))
#define MPI_LONG             (4 << 16 | sizeof(long))
#define MPI_LONG_LONG_INT    (4 << 16 | sizeof(MPIUNI_INT64))
#define MPI_SHORT            (4 << 16 | sizeof(short))

#define MPI_UNSIGNED_SHORT   (5 << 16 | sizeof(unsigned short))
#define MPI_UNSIGNED         (5 << 16 | sizeof(unsigned))
#define MPI_UNSIGNED_LONG    (5 << 16 | sizeof(unsigned long))
#define MPI_UNSIGNED_LONG_LONG (5 << 16 | sizeof(MPIUNI_UINT64))

/* Pair types store the combined size of both members */
#define MPI_FLOAT_INT        (10 << 16 | (sizeof(float) + sizeof(int)))
#define MPI_DOUBLE_INT       (11 << 16 | (sizeof(double) + sizeof(int)))
#define MPI_LONG_INT         (12 << 16 | (sizeof(long) + sizeof(int)))
#define MPI_SHORT_INT        (13 << 16 | (sizeof(short) + sizeof(int)))
#define MPI_2INT             (14 << 16 | (2* sizeof(int)))

#if defined(PETSC_USE_REAL___FLOAT128)
/* __float128 cannot be encoded via sizeof at preprocessing time; special-cased */
extern MPI_Datatype MPIU___FLOAT128;
#define MPI_sizeof(datatype) ((datatype == MPIU___FLOAT128) ? 2*sizeof(double) : (datatype) & 0xff)
#else
#define MPI_sizeof(datatype) ((datatype) & 0xff)
#endif
233: MPIUni_PETSC_EXTERN int MPIUNI_Memcpy(void*,const void*,int);

#define MPI_MAX_PROCESSOR_NAME 1024

/* Null handles */
#define MPI_REQUEST_NULL     ((MPI_Request)0)
#define MPI_GROUP_NULL       ((MPI_Group)0)
#define MPI_INFO_NULL        ((MPI_Info)0)
#define MPI_BOTTOM           (void *)0
typedef int MPI_Op;

/* File-mode flags are all no-ops in MPIUNI */
#define MPI_MODE_RDONLY   0
#define MPI_MODE_WRONLY   0
#define MPI_MODE_CREATE   0

/* Reduction operations (values are MPIUNI-specific) */
#define MPI_SUM           1
#define MPI_MAX           2
#define MPI_MIN           3
#define MPI_REPLACE       4
#define MPI_PROD          5
#define MPI_LAND          6
#define MPI_BAND          7
#define MPI_LOR           8
#define MPI_BOR           9
#define MPI_LXOR          10
#define MPI_BXOR          11
#define MPI_ANY_TAG     (-1)
#define MPI_DATATYPE_NULL 0
#define MPI_PACKED        0
#define MPI_MAX_ERROR_STRING 2056
#define MPI_STATUS_IGNORE (MPI_Status *)1
#define MPI_STATUSES_IGNORE (MPI_Status *)1
#define MPI_ORDER_FORTRAN        57
#define MPI_IN_PLACE      (void *) -1

267: /*
268:   Prototypes of some functions which are implemented in mpi.c
269: */
270: typedef int   (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
271: typedef int   (MPI_Delete_function)(MPI_Comm,int,void *,void *);
272: typedef void  (MPI_User_function)(void*, void *, int *, MPI_Datatype *);

/*
  To enable linking PETSc+MPIUNI with any other package that might have its
  own MPIUNI (equivalent implementation) we need to avoid using 'MPI'
  namespace for MPIUNI functions that go into the petsc library.

  For C functions below (that get compiled into petsc library) - we map
  the 'MPI' functions to use 'Petsc_MPI' namespace.

  However we cannot do such mapping for fortran MPIUNI functions. One
  can use the configure option --with-mpiuni-fortran-binding=0 to
  prevent compiling MPIUNI fortran interface.
*/
#define MPI_Abort         Petsc_MPI_Abort
#define MPI_Attr_get      Petsc_MPI_Attr_get
#define MPI_Keyval_free   Petsc_MPI_Keyval_free
#define MPI_Attr_put      Petsc_MPI_Attr_put
#define MPI_Attr_delete   Petsc_MPI_Attr_delete
#define MPI_Keyval_create Petsc_MPI_Keyval_create
#define MPI_Comm_free     Petsc_MPI_Comm_free
#define MPI_Comm_dup      Petsc_MPI_Comm_dup
#define MPI_Comm_create   Petsc_MPI_Comm_create
#define MPI_Init          Petsc_MPI_Init
#define MPI_Finalize      Petsc_MPI_Finalize
#define MPI_Initialized   Petsc_MPI_Initialized
#define MPI_Finalized     Petsc_MPI_Finalized
#define MPI_Comm_size     Petsc_MPI_Comm_size
#define MPI_Comm_rank     Petsc_MPI_Comm_rank
#define MPI_Wtime         Petsc_MPI_Wtime

/* identical C bindings: MPI-2 names that share the MPI-1 implementations */
#define MPI_Comm_create_keyval Petsc_MPI_Keyval_create
#define MPI_Comm_free_keyval   Petsc_MPI_Keyval_free
#define MPI_Comm_get_attr      Petsc_MPI_Attr_get
#define MPI_Comm_set_attr      Petsc_MPI_Attr_put

309: MPIUni_PETSC_EXTERN int    MPI_Abort(MPI_Comm,int);
310: MPIUni_PETSC_EXTERN int    MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
311: MPIUni_PETSC_EXTERN int    MPI_Keyval_free(int*);
312: MPIUni_PETSC_EXTERN int    MPI_Attr_put(MPI_Comm,int,void *);
313: MPIUni_PETSC_EXTERN int    MPI_Attr_delete(MPI_Comm,int);
314: MPIUni_PETSC_EXTERN int    MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
315: MPIUni_PETSC_EXTERN int    MPI_Comm_free(MPI_Comm*);
316: MPIUni_PETSC_EXTERN int    MPI_Comm_dup(MPI_Comm,MPI_Comm *);
317: MPIUni_PETSC_EXTERN int    MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
318: MPIUni_PETSC_EXTERN int    MPI_Init(int *, char ***);
319: MPIUni_PETSC_EXTERN int    MPI_Finalize(void);
320: MPIUni_PETSC_EXTERN int    MPI_Initialized(int*);
321: MPIUni_PETSC_EXTERN int    MPI_Finalized(int*);
322: MPIUni_PETSC_EXTERN int    MPI_Comm_size(MPI_Comm,int*);
323: MPIUni_PETSC_EXTERN int    MPI_Comm_rank(MPI_Comm,int*);
324: MPIUni_PETSC_EXTERN double MPI_Wtime(void);

#define MPI_Aint MPIUNI_INTPTR
/*
    Routines we have replaced with macros that do nothing.
    Some return error codes, others return success.
    Fortran<->C handle conversions are plain casts since handles are ints.
*/

#define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
#define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
#define MPI_Type_f2c(type) (MPI_Datatype)(type)
#define MPI_Type_c2f(type) (MPI_Fint)(type)
#define MPI_Op_c2f(op) (MPI_Fint)(op)

/*
   Point-to-point stubs.  Each argument is funneled through MPIUNI_TMP only to
   silence unused-variable warnings; the comma expression then calls
   MPI_Abort — send/recv (including to self) is deliberately unsupported in
   MPIUNI (see the header comment at the top of this file).
*/
#define MPI_Send(buf,count,datatype,dest,tag,comm)  \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Get_count(status, datatype,count) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Bsend(buf,count,datatype,dest,tag,comm)  \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Ssend(buf,count, datatype,dest,tag,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Rsend(buf,count, datatype,dest,tag,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
/* Buffer management is a harmless no-op with one process */
#define MPI_Buffer_attach(buffer,size) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
      MPI_SUCCESS)
#define MPI_Buffer_detach(buffer,size)\
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
      MPI_SUCCESS)
/* Nonblocking variants also abort: MPIUNI cannot buffer a self-message */
#define MPI_Ibsend(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Issend(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Irsend(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Irecv(buf,count, datatype,source,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Isend(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
/*
   Completion/probe stubs.  With no outstanding communication these succeed
   immediately; flags are set to 0 where the original did so.
*/
#define MPI_Wait(request,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPI_SUCCESS)
#define MPI_Test(request,flag,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      *(flag) = 0, \
      MPI_SUCCESS)
#define MPI_Request_free(request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_SUCCESS)
/* c is the output index; parenthesized (was `*c`) for macro-argument safety */
#define MPI_Waitany(a,b,c,d) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),(*(c) = 0), \
      MPI_SUCCESS)
/* NOTE(review): does not set the completion flag *d — matches historical
   MPIUNI behavior; confirm before relying on it */
#define MPI_Testany(a,b,c,d,e) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (e),\
      MPI_SUCCESS)
#define MPI_Waitall(count,array_of_requests,array_of_statuses) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (flag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Waitsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (incount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (outcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_indices),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Comm_group(comm,group) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
      MPI_SUCCESS)
#define MPI_Group_incl(group,n,ranks,newgroup) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (n),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ranks),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newgroup),\
      MPI_SUCCESS)
#define MPI_Testsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) MPI_SUCCESS
/* Nothing can ever be pending with one process: probes report "no message" */
#define MPI_Iprobe(source,tag,comm,flag,status) (*(flag)=0, MPI_SUCCESS)
#define MPI_Probe(source,tag,comm,status) MPI_SUCCESS
#define MPI_Cancel(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Test_cancelled(status,flag) (*(flag)=0,MPI_SUCCESS)
/*
   Persistent-request creation succeeds (the abort happens only if the
   request is actually started/communicated elsewhere).
*/
#define MPI_Send_init(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
     MPI_SUCCESS)
#define MPI_Bsend_init(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
     MPI_SUCCESS)
#define MPI_Ssend_init(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
     MPI_SUCCESS)
/* A second, token-identical definition of MPI_Bsend_init appeared here.
   Identical macro redefinition is legal C (C11 6.10.3) but redundant, so the
   duplicate has been removed; MPI_Bsend_init is defined once above. */
#define MPI_Rsend_init(buf,count, datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
     MPI_SUCCESS)
#define MPI_Recv_init(buf,count, datatype,source,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
     MPI_SUCCESS)
/* Starting a persistent request is a no-op in MPIUNI */
#define MPI_Start(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Startall(count,array_of_requests) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
     MPI_SUCCESS)
/* User-defined reduction ops are never applied with one process */
#define MPI_Op_create(function,commute,op) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (function),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (commute),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
     MPI_SUCCESS)
#define MPI_Op_free(op) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
     MPI_SUCCESS)
564:      /* Need to determine sizeof "sendtype" */
565: #define MPI_Sendrecv(sendbuf,sendcount, sendtype,\
566:      dest,sendtag,recvbuf,recvcount,\
567:      recvtype,source,recvtag,\
568:      comm,status) \
569:   MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount) * MPI_sizeof(sendtype))
570: #define MPI_Sendrecv_replace(buf,count, datatype,dest,sendtag,\
571:      source,recvtag,comm,status) MPI_SUCCESS
572: #define MPI_Type_contiguous(count, oldtype,newtype) \
573:      (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
574: #define MPI_Type_vector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
575: #define MPI_Type_hvector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
576: #define MPI_Type_indexed(count,array_of_blocklengths,\
577:      array_of_displacements, oldtype,\
578:      newtype) MPI_SUCCESS
579: #define MPI_Type_hindexed(count,array_of_blocklengths,\
580:      array_of_displacements, oldtype,\
581:      newtype) MPI_SUCCESS
582: #define MPI_Type_struct(count,array_of_blocklengths,\
583:      array_of_displacements,\
584:      array_of_types, newtype) \
585:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
586:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_blocklengths),\
587:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_displacements),\
588:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_types),\
589:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype),\
590:       MPI_SUCCESS)
591: #define MPI_Address(location,address) \
592:      (*(address) = (MPIUNI_INTPTR)(char *)(location),MPI_SUCCESS)
/* Extent of an MPIUNI datatype is its encoded value (size in the low bits).
   Fixed: wrapped in a comma expression yielding MPI_SUCCESS, with the
   argument parenthesized, so the macro is safe in expression context and
   consistent with the other MPI_Type_* macros (the old form
   `*(extent) = datatype` evaluated to the assigned value instead). */
#define MPI_Type_extent(datatype,extent) (*(extent) = (datatype),MPI_SUCCESS)
/* Size is the low byte of the encoded datatype value */
#define MPI_Type_size(datatype,size) (*(size) = (datatype) & 0xff, MPI_SUCCESS)
/* Unimplemented datatype queries and pack/unpack abort if ever reached */
#define MPI_Type_lb(datatype,displacement) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_ub(datatype,displacement) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_commit(datatype) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
     MPI_SUCCESS)
#define MPI_Type_free(datatype) MPI_SUCCESS
#define MPI_Get_elements(status, datatype,count) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack(inbuf,incount, datatype,outbuf,\
     outsize,position, comm) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Unpack(inbuf,insize,position,outbuf,\
     outcount, datatype,comm) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack_size(incount, datatype,comm,size) \
     MPI_Abort(MPI_COMM_WORLD,0)
/* Collectives on one process: barrier is a no-op, bcast already "arrived" */
#define MPI_Barrier(comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPI_SUCCESS)
#define MPI_Bcast(buffer,count,datatype,root,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPI_SUCCESS)
622: #define MPI_Gather(sendbuf,sendcount, sendtype,\
623:      recvbuf,recvcount, recvtype,\
624:      root,comm) \
625:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
626:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
627:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
628:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
629:      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
630:      MPI_SUCCESS)
631: #define MPI_Gatherv(sendbuf,sendcount, sendtype,\
632:      recvbuf,recvcounts,displs,\
633:      recvtype,root,comm) \
634:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
635:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
636:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
637:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
638:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
639:      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
640:      MPI_SUCCESS)
641: #define MPI_Scatter(sendbuf,sendcount, sendtype,\
642:      recvbuf,recvcount, recvtype,\
643:      root,comm) \
644:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendbuf),\
645:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcount),\
646:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
647:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvbuf),\
648:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
649:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
650:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
651:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_Abort(MPI_COMM_WORLD,0))
652: #define MPI_Scatterv(sendbuf,sendcounts,displs,\
653:      sendtype, recvbuf,recvcount,\
654:      recvtype,root,comm) \
655:      (MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)),\
656:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
657:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
658:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcounts),\
659:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
660:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
661:      MPI_SUCCESS)
/* Uniprocessor MPI_Allgather: with one rank this degenerates to copying
   sendcount elements from sendbuf to recvbuf.  Unused arguments are consumed
   via MPIUNI_TMP; the macro's value is MPI_SUCCESS. */
#define MPI_Allgather(sendbuf,sendcount, sendtype,\
     recvbuf,recvcount, recvtype,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
     MPI_SUCCESS)
/* Uniprocessor MPI_Allgatherv: same as MPI_Allgather above — a straight copy
   of sendcount elements; recvcounts/displs do not matter for a single rank
   and are only consumed to avoid warnings.  Value is MPI_SUCCESS. */
#define MPI_Allgatherv(sendbuf,sendcount, sendtype,\
     recvbuf,recvcounts,displs,recvtype,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_Memcpy((recvbuf),(sendbuf),(sendcount)*MPI_sizeof(sendtype)), \
     MPI_SUCCESS)
/* Uniprocessor MPI_Alltoall: an all-to-all among one rank is a copy of
   sendcount elements to recvbuf.  Unused arguments consumed via MPIUNI_TMP;
   value is MPI_SUCCESS. */
#define MPI_Alltoall(sendbuf,sendcount, sendtype,\
     recvbuf,recvcount, recvtype,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
      MPI_SUCCESS)
/* MPI_Alltoallv / MPI_Alltoallw are not implemented in MPIUNI: any call
   aborts.  (PETSc does not exercise these in uniprocessor mode.) */
#define MPI_Alltoallv(sendbuf,sendcounts,sdispls,\
     sendtype, recvbuf,recvcounts,\
     rdispls, recvtype,comm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Alltoallw(sendbuf,sendcounts,sdispls,\
     sendtypes, recvbuf,recvcounts,\
     rdispls, recvtypes,comm) MPI_Abort(MPI_COMM_WORLD,0)
/* Reductions on a single rank: the reduction of one contribution is the
   contribution itself, so MPI_Reduce, MPI_Allreduce and MPI_Scan all reduce
   to a copy of count elements from sendbuf to recvbuf (the op argument is
   never applied).  Each macro's value is MPI_SUCCESS. */
#define MPI_Reduce(sendbuf, recvbuf,count,\
     datatype,op,root,comm) \
     (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
    (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)), \
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
     (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
/* MPI_Exscan deliberately copies nothing: the MPI standard leaves recvbuf
   undefined on rank 0, and rank 0 is the only rank here. */
#define MPI_Exscan(sendbuf, recvbuf,count,datatype,op,comm) MPI_SUCCESS
/* MPI_Reduce_scatter is not implemented: aborts. */
#define MPI_Reduce_scatter(sendbuf, recvbuf,recvcounts,\
     datatype,op,comm) \
     MPI_Abort(MPI_COMM_WORLD,0)
/* Group operations: every group in MPIUNI has exactly one process, rank 0.
   Set-algebra and subsetting calls (union/intersection/difference/excl/
   range_incl/range_excl) succeed without producing a usable newgroup — the
   disclaimer at the top of this file warns that not all calls are
   implemented correctly. */
#define MPI_Group_size(group,size) (*(size)=1,MPI_SUCCESS)
#define MPI_Group_rank(group,rank) (*(rank)=0,MPI_SUCCESS)
/* Translating ranks between two one-process groups is the identity map:
   copy the n input ranks straight across. */
#define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group1),                 \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group2),                 \
   MPIUNI_Memcpy((ranks2),(ranks1),(n) * sizeof(int)),           \
   MPI_SUCCESS)
/* NOTE(review): *result is set to the literal 1 here, while MPI_Comm_compare
   below uses the MPI_IDENT constant — presumably these agree in this header,
   but confirm against the MPI_IDENT definition earlier in the file. */
#define MPI_Group_compare(group1,group2,result) \
     (*(result)=1,MPI_SUCCESS)
#define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
#define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
/* Freeing a group is a no-op; the argument is only consumed. */
#define MPI_Group_free(group) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
     MPI_SUCCESS)
/* All MPIUNI communicators are considered identical. */
#define MPI_Comm_compare(comm1,comm2,result) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm1),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm2),\
     *(result)=MPI_IDENT,\
     MPI_SUCCESS)
/* Splitting a one-process communicator always yields a duplicate of the
   original; color and key cannot matter and are only consumed.  The macro's
   value is whatever MPI_Comm_dup returns. */
#define MPI_Comm_split(comm,color,key,newcomm) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (color),\
  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (key),\
   MPI_Comm_dup(comm,newcomm))
/* NOTE(review): *flag = 1 claims every communicator is an INTER-communicator;
   MPIUNI only has intracommunicators, so 0 would seem correct — presumably
   PETSc never calls this; confirm before relying on it. */
#define MPI_Comm_test_inter(comm,flag) (*(flag)=1,MPI_SUCCESS)
#define MPI_Comm_remote_size(comm,size) (*(size)=1,MPI_SUCCESS)
/* Stub: succeeds without setting *group. */
#define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
/* Intercommunicator creation/merging: no-op stubs that do not produce a
   usable new communicator. */
#define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
     remote_leader,tag,newintercomm) MPI_SUCCESS
#define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS

/* Virtual-topology routines.  MPIUNI stubs the creation calls as successful
   no-ops and aborts on the query/mapping calls (PETSc does not exercise
   those in uniprocessor mode). */
#define MPI_Topo_test(comm,status) MPI_SUCCESS
#define MPI_Cart_create(comm_old,ndims,dims,periods,\
     reorder,comm_cart) MPI_SUCCESS
#define MPI_Dims_create(nnodes,ndims,dims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_create(comm,a,b,c,d,e) MPI_SUCCESS
/* Historical misspelling (capital 'G' in "Get") retained for backward
   compatibility with any code that used it ... */
#define MPI_Graphdims_Get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
/* ... and the correctly spelled standard name, which the misspelled macro
   never intercepted: calls to MPI_Graphdims_get previously fell through to
   an undefined symbol instead of this stub. */
#define MPI_Graphdims_get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_get(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cartdim_get(comm,ndims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_rank(comm,coords,rank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_coords(comm,rank,maxdims,coords) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_sub(comm,remain_dims,newcomm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
/* Uniprocessor MPI_Get_processor_name: reports the fixed name "localhost".
   Copies all 10 bytes of "localhost" (9 characters plus the NUL terminator)
   so the string is always properly terminated, sets *result_len to 9 — the
   length EXCLUDING the terminator, as the MPI standard specifies — and yields
   MPI_SUCCESS as the expression value.  (The previous version copied only 9
   bytes, zeroed name[10] leaving name[9] indeterminate, reported length 10,
   and evaluated to 10 rather than MPI_SUCCESS, breaking callers that check
   the return code.)  The MPI standard requires `name` to hold at least
   MPI_MAX_PROCESSOR_NAME characters, so the 10-byte store is in bounds. */
#define MPI_Get_processor_name(name,result_len) \
     (MPIUNI_Memcpy(name,"localhost",10*sizeof(char)),*(result_len) = 9,MPI_SUCCESS)
/* Error-handler and miscellaneous stubs.  Handlers are represented by a null
   MPI_Errhandler; set/free only consume their arguments. */
#define MPI_Errhandler_create(function,errhandler) (*(errhandler) = (MPI_Errhandler) 0, MPI_SUCCESS)
#define MPI_Errhandler_set(comm,errhandler) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
     MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
     MPI_SUCCESS)
/* NOTE(review): succeeds WITHOUT writing *errhandler — callers that read the
   output would see garbage; presumably never queried by PETSc. */
#define MPI_Errhandler_get(comm,errhandler) MPI_SUCCESS
#define MPI_Errhandler_free(errhandler) MPI_SUCCESS
/* NOTE(review): these succeed without filling in string/result_len or
   errorclass — output arguments are left untouched. */
#define MPI_Error_string(errorcode,string,result_len) MPI_SUCCESS
#define MPI_Error_class(errorcode,errorclass) MPI_SUCCESS
/* Clock resolution stub: a fixed 1.0 (seconds); MPI_Wtime is presumably
   defined elsewhere in this header. */
#define MPI_Wtick() 1.0
#define MPI_Pcontrol(level) MPI_SUCCESS

/* Null attribute copy/delete callbacks, represented as null pointers. */
#define MPI_NULL_COPY_FN   0
#define MPI_NULL_DELETE_FN 0

777:   /* MPI-IO additions */

/* MPI-IO is not supported by MPIUNI: MPI_File_open and MPI_File_close
   consume their arguments (to avoid unused-argument warnings) and abort. */
#define MPI_File_open(comm,filename,amode,info,mpi_fh) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),  \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filename), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (amode), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
   MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_close(mpi_fh) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),  \
   MPI_Abort(MPI_COMM_WORLD,0))

/* MPI_File_set_view: unsupported, aborts after consuming its arguments. */
#define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),  \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (disp), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (etype), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filetype), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datarep), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
   MPI_Abort(MPI_COMM_WORLD,0))

/* MPI_Type_get_extent: lb is always 0.  The extent is the datatype handle
   itself — MPIUNI apparently encodes a datatype's byte size in its handle
   (cf. the MPI_sizeof(...) uses above); confirm against the datatype
   definitions earlier in this header.  The trailing 0 is the expression
   value, playing the role of MPI_SUCCESS. */
#define MPI_Type_get_extent(datatype,lb,extent) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),      \
   *(lb) = 0, *(extent) = datatype,0)

/* Collective MPI-IO reads/writes are unsupported: both macros consume their
   arguments and abort. */
#define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),             \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
   MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),            \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
   MPI_Abort(MPI_COMM_WORLD,0))

820:   /* called from PetscInitialize() - so return success */
/* MPI_Register_datarep must succeed (it is reached from PetscInitialize(),
   per the comment above), so unlike the other MPI-IO stubs it consumes its
   arguments and returns MPI_SUCCESS instead of aborting. */
#define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (name),                          \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (read_conv_fn), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (write_conv_fn), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent_fn), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (state), \
   MPI_SUCCESS)

/* Derived-datatype constructors used only by MPI-IO paths: unsupported in
   MPIUNI, so both consume their arguments and abort. */
#define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ndims),                         \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_sizes), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_subsizes), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_starts), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (order), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (oldtype), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype), \
   MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_Type_create_resized(oldtype,lb,extent,newtype) \
  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (oldtype),   \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (lb),   \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent), \
   MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype), \
   MPI_Abort(MPI_COMM_WORLD,0))

846: #if defined(__cplusplus)
847: }
848: #endif
849: #endif