Hello World MPI

Compiling and running MPI programs with MPC is straightforward: in most cases, you simply compile your code with one of the MPC compiler wrappers (mpc_cc, mpc_cxx, mpc_f77, etc.) and launch the result with mpcrun.
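
For example, each wrapper is invoked like an ordinary compiler driver (the file names below are placeholders):

mpc_cc  hello.c   -o hello      # C
mpc_cxx hello.cpp -o hello      # C++
mpc_f77 hello.f   -o hello      # Fortran 77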

We use the sample code listed at the bottom of this page.

Compile the Code

Use the MPC C compiler command, mpc_cc, to compile the MPI code:

mpc_cc mpi_demo.c -o mpi_demo

Run the Code

Use the mpcrun command to launch the executable:

# Run four MPI tasks (ranks) within a single MPC process
mpcrun -n=4 ./mpi_demo
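
The -n= option sets the number of MPI tasks (ranks); MPC maps these tasks onto MPC processes, which is why the run above reports "4 tasks 1 processes". If your installation provides a -p= option to set the MPC process count (an assumption here; check mpcrun --help for the exact flag set), the same tasks can be spread over several processes:

# Four MPI tasks spread over two MPC processes (assumes -p= is supported)
mpcrun -n=4 -p=2 ./mpi_demo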

You should see output similar to the following:

Running MPC with HYDRA job manager
MPC version 3.3.0 C/C++ (4 tasks 1 processes 4 cpus (2.39GHz) ethread_mxn) MPC allocator  none
Initialization time: 0.1s - Memory used: 21MB
MPC OpenMP version 3.1
        Tasking on
        OMP_SCHEDULE 1
        Default #threads (OMP_NUM_THREADS)
        OMP_DYNAMIC 1
        OMP_NESTED 0
        Default #microVPs (OMP_MICROVP_NUMBER)
        OMP_TREE default
        NUMA allocation for tree nodes
#[rank] [runtime] [sort_time] [host] [buffvalue] [nums_sorted]
0 57.5457 25.3636 paratools07.rrp.net 0.099487 50000000
1 60.1933 25.2468 paratools07.rrp.net 0.099487 50000000
3 60.5007 25.3489 paratools07.rrp.net 0.099487 50000000
2 60.537 25.3605 paratools07.rrp.net 0.099487 50000000

Sample Code

/*
 * mpi_demo.c
 *
 * Tyler Simon
 * John C. Linford
 * 26 June 2014
 *
 * Do some CPU intensive work with quicksort
 * Print out usage per MPI rank
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <mpi.h>


/* qsort comparators. The ints are compared directly rather than
 * subtracted: a - b can overflow for the large values rand() returns. */
int ascending(const void * a, const void * b)
{
  int x = *(const int*)a;
  int y = *(const int*)b;
  return (x > y) - (x < y);
}

int descending(const void * a, const void * b)
{
  int x = *(const int*)a;
  int y = *(const int*)b;
  return (x < y) - (x > y);
}


/* Fill a buffer with `size` random ints and time two qsort passes
 * (ascending, then descending). Returns the sort time in seconds. */
double cpu_test(size_t size, int rank)
{
  size_t i;
  double start, elapsed;
  int * nums;

  /* Per-rank seed so every rank sorts a different sequence */
  srand(time(NULL) + rank);

  nums = (int*)malloc(sizeof(int) * size);
  if (!nums) {
    fprintf(stderr, "ERROR: failed to allocate %zu ints\n", size);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  for (i = 0; i < size; i++) {
    nums[i] = rand();
  }

  start = MPI_Wtime();
  qsort(nums, size, sizeof(int), ascending);
  qsort(nums, size, sizeof(int), descending);
  elapsed = MPI_Wtime() - start;

  free(nums);
  return elapsed;
}


int main(int argc, char **argv)
{
  size_t const N = 200000000;  /* total ints to sort, divided among the ranks */

  int rank, size;
  size_t sortnum;
  float sendbuf;
  char hostname[255];
  double * elapsed;
  double * sort_time;

  srand48(time(NULL));  /* seed drand48(), used for the broadcast value below */

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  /* Per-rank timing slots; each rank writes only its own entry */
  elapsed = (double*)malloc(size*sizeof(double));
  sort_time = (double*)malloc(size*sizeof(double));

  sortnum = N/size;                   /* each rank sorts its share of N ints */
  gethostname(hostname, sizeof(hostname));
  sendbuf = drand48();                /* rank 0's value is broadcast to all ranks */

  if(rank == 0) {
    printf("#[rank] [runtime] [sort_time] [host] [buffvalue] [nums_sorted]\n");
  }

  elapsed[rank] = MPI_Wtime();
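  /* Every rank receives rank 0's random value before sorting */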
  MPI_Bcast(&sendbuf, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);
  sort_time[rank] = cpu_test(sortnum, rank);
  elapsed[rank] = MPI_Wtime() - elapsed[rank];
  printf("%d %g %g %s %f %ld\n", rank, elapsed[rank], sort_time[rank], hostname, sendbuf, sortnum);

  free(elapsed);
  free(sort_time);
  MPI_Finalize();
  return 0;
}