MPI Dijkstra program

  • Question


  • Can anybody please explain to me how this program works?


    At each iteration, the algorithm finds the closest vertex J to 0 among all those not yet processed, and then updates the list of minimum distances to each vertex from 0 by considering paths that go through J. Two obvious candidate parts of the algorithm for parallelization are the "find J" and "for K" lines.
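
    For reference, a serial version of the loop being described might look like the sketch below (the function and variable names here are illustrative, not taken from the program that follows); the two marked loops are the "find J" and "for K" steps that the MPI code splits across nodes:

    // hypothetical serial sketch: fills dist[] with shortest distances from
    // vertex 0, using the same flattened ohd[i*nv+j] weight matrix as the
    // MPI program below
    void serial_dijkstra(int nv, unsigned *ohd, unsigned *dist, int *done)
    {
       int step, i, j, k;
       unsigned largeint = ((unsigned) -1) >> 1;
       for (i = 0; i < nv; i++) { done[i] = 0; dist[i] = largeint; }
       dist[0] = 0;
       for (step = 0; step < nv; step++) {
          // "find J": closest not-yet-processed vertex to vertex 0
          unsigned md = largeint; j = 0;
          for (i = 0; i < nv; i++)
             if (!done[i] && dist[i] < md) { md = dist[i]; j = i; }
          done[j] = 1;
          // "for K": try to shorten each remaining distance via J
          for (k = 0; k < nv; k++)
             if (!done[k] && md + ohd[j*nv+k] < dist[k])
                dist[k] = md + ohd[j*nv+k];
       }
    }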

    #include <stdio.h>
    #include <stdlib.h>
    #include <mpi.h>

    // global variables (but of course not shared across nodes)

    int nv,          // number of vertices
        *notdone,    // vertices not checked yet
        nnodes,      // number of MPI nodes in the computation
        chunk,       // number of vertices handled by each node
        startv,endv, // start, end vertices for this node
        me,          // my node number
        dbg;
    unsigned largeint,    // max possible unsigned int
        mymin[2],         // mymin[0] is min for my chunk,
                          // mymin[1] is vertex which achieves that min
        overallmin[2],    // overallmin[0] is current min over all nodes,
                          // overallmin[1] is vertex which achieves that min
        *ohd,             // 1-hop distances between vertices; "ohd[i][j]" is
                          // ohd[i*nv+j]
        *mind;            // min distances found so far

    double T1,T2; // start and finish times

    void init(int ac, char **av)
    {  int i,j; unsigned u;
       nv = atoi(av[1]);
       dbg = atoi(av[3]);
       MPI_Init(&ac,&av);
       MPI_Comm_size(MPI_COMM_WORLD,&nnodes);
       MPI_Comm_rank(MPI_COMM_WORLD,&me);
       chunk = nv/nnodes;
       startv = me * chunk;
       endv = startv + chunk - 1;
       u = -1;
       largeint = u >> 1;
       ohd = malloc(nv*nv*sizeof(int));
       mind = malloc(nv*sizeof(int));
       notdone = malloc(nv*sizeof(int));
       // random graph
       // note that this will be generated at all nodes; could generate just
       // at node 0 and then send to others, but faster this way
       for (i = 0; i < nv; i++)
          for (j = i; j < nv; j++) {
             if (j == i) ohd[i*nv+i] = 0;
             else {
                ohd[nv*i+j] = rand() % 20;
                ohd[nv*j+i] = ohd[nv*i+j];
             }
          }
       for (i = 0; i < nv; i++) {
          notdone[i] = 1;
          mind[i] = largeint;
       }
       mind[0] = 0;
       while (dbg) ; // stalling so can attach debugger
    }

    // finds closest to 0 among notdone, among startv through endv
    void findmymin()
    {  int i;
       mymin[0] = largeint;
       for (i = startv; i <= endv; i++)
          if (notdone[i] && mind[i] < mymin[0]) {
             mymin[0] = mind[i];
             mymin[1] = i;
          }
    }

    void updatemymind() // update my mind segment
    {  // for each i in [startv,endv], ask whether a shorter path to i
       // exists, through mv
       int i, mv = overallmin[1];
       unsigned md = overallmin[0];
       for (i = startv; i <= endv; i++)
          if (md + ohd[mv*nv+i] < mind[i])
             mind[i] = md + ohd[mv*nv+i];
    }

    void printmind() // partly for debugging (call from GDB)
    {  int i;
       printf("minimum distances:\n");
       for (i = 1; i < nv; i++)
          printf("%u\n",mind[i]);
    }

    void dowork()
    {  int step; // index for loop of nv steps
       if (me == 0) T1 = MPI_Wtime();
       for (step = 0; step < nv; step++) {
          findmymin();
          MPI_Reduce(mymin,overallmin,1,MPI_2INT,MPI_MINLOC,0,MPI_COMM_WORLD);
          MPI_Bcast(overallmin,1,MPI_2INT,0,MPI_COMM_WORLD);
          // mark new vertex as done
          notdone[overallmin[1]] = 0;
          updatemymind();
       }
       // now need to collect all the mind values from other nodes to node 0
       MPI_Gather(mind+startv,chunk,MPI_INT,mind,chunk,MPI_INT,0,MPI_COMM_WORLD);
       T2 = MPI_Wtime();
    }

    int main(int ac, char **av)
    {  int i,j,print;
       init(ac,av);
       dowork();
       print = atoi(av[2]);
       if (print && me == 0) {
          printf("graph weights:\n");
          for (i = 0; i < nv; i++) {
             for (j = 0; j < nv; j++)
                printf("%u ",ohd[nv*i+j]);
             printf("\n");
          }
          printmind();
       }
       if (me == 0) printf("time at node 0: %f\n",(float)(T2-T1));
       MPI_Finalize();
       return 0;
    }
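
    As far as I can tell, the program expects three command-line arguments: the number of vertices (av[1]), a flag saying whether to print the graph and distances at the end (av[2]), and a debug flag that makes each process spin so a debugger can be attached (av[3]). The vertex count should be divisible by the number of MPI processes, since each rank handles chunk = nv/nnodes vertices. Assuming an MPI installation that provides mpicc and mpiexec (e.g. Open MPI or MPICH), and that the source is saved as dijkstra.c, a typical build and run might look like:

    mpicc -o dijkstra dijkstra.c
    mpiexec -np 4 ./dijkstra 12 1 0    # 12 vertices, print results, no debug stall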

    Saturday, May 8, 2010 10:56 PM

Answers

All replies