[mvapich-discuss] truly one-sided communication

Thiago Ize thiago at sci.utah.edu
Wed Jan 19 02:41:16 EST 2011


#include <mpi.h>
#include <iostream>
#include <stdio.h>
#include <unistd.h> // for sleep()
#include <cmath>
using namespace std;

int main(int argc, char* argv[]) {

  int provided = -1;
  int requested = MPI_THREAD_SINGLE; // or MPI_THREAD_MULTIPLE
  MPI_Init_thread(&argc, &argv, requested, &provided);

  const int rank = MPI::COMM_WORLD.Get_rank();

  const unsigned long NUM_GETS = 10000;      // something big
  const unsigned long BUFFER_SIZE = 2048*32; // 64 KiB per get
  char* sendBuffer = new char[BUFFER_SIZE];

  // Expose sendBuffer for one-sided access; every rank will MPI_Get
  // from rank 0's copy of this window below.
  MPI::Win win = MPI::Win::Create(sendBuffer, BUFFER_SIZE, 1, MPI_INFO_NULL, MPI_COMM_WORLD);


  char* receiveBuffer = new char[BUFFER_SIZE];

  for (int j = 0; j < 5; ++j) {
    const int repeat = (int)pow(10.0, j);
    if (rank == 0)
      printf("performing %d iterations of non-MPI work on rank 0\n", repeat);
    // Restart the timer each round so the bandwidth below covers only
    // this round's gets, not earlier rounds or the sleep between them.
    const double startTime = MPI::Wtime();
    for (unsigned long i = 0; i < NUM_GETS; ++i) {

      if (rank == 0) {
        // Busy-work that keeps rank 0 out of the MPI library; the
        // impossible sqrt comparison stops the compiler from
        // optimizing the loop away.
        for (int k = 0; k < repeat; ++k)
          if (sqrt(float(k)) == -3)
            printf("did some computation that will never be used.\n");
      }

      // Passive-target epoch: lock rank 0's window, read the whole
      // buffer, unlock.  The question is whether these gets make
      // progress while rank 0 is busy in the non-MPI loop above.
      const int owner = 0;
      int err = MPI_Win_lock(MPI_LOCK_SHARED, owner, 0, win);
      if (err != MPI_SUCCESS) cout << "ERROR lock: " << err << endl;

      err = MPI_Get(receiveBuffer, BUFFER_SIZE, MPI_CHAR,
                    owner, 0, BUFFER_SIZE, MPI_CHAR, win);
      if (err != MPI_SUCCESS) cout << "ERROR get: " << err << endl;

      err = MPI_Win_unlock(owner, win);
      if (err != MPI_SUCCESS) cout << "ERROR unlock: " << err << endl;
    }

    double endTime = MPI::Wtime();
    double sec = endTime - startTime;
    // 8 bits/byte * 64 KiB * 10000 gets = ~5.24e9 bits per round.
    unsigned long bitsGot = 8UL * BUFFER_SIZE * NUM_GETS;
    double GbitsGot = bitsGot / 1073741824.0; // 2^30; avoid integer division
    printf("rank %d did %.3f Gb/s in %.3f seconds\n", rank, GbitsGot / sec, sec);

    MPI_Barrier(MPI_COMM_WORLD);
    sleep(1); // so that the output is in sync and looks pretty
  }
  win.Free(); // C++ binding; the C call would be MPI_Win_free(&win)

  delete[] receiveBuffer;
  delete[] sendBuffer;

  MPI::Finalize();
  return 0;
}
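
A test like this can be built with the MVAPICH compiler wrapper and run with the usual launcher, e.g. (the source file name here is just a placeholder):

  mpicxx -O2 onesided_get.cpp -o onesided_get
  mpirun -np 2 ./onesided_get

With two ranks, rank 1's reported bandwidth should stay roughly flat across the five rounds if the gets are truly one-sided; if it falls off as rank 0's non-MPI work grows, the gets are only making progress when the target happens to enter the MPI library.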

