mpi_vector

  • Question

  • hi,

    I am trying to write a program using MPI, and I just want to send a part of one array from one processor to another. I wrote this simple code, but I am not sure if it is right. Can somebody help me, please?

    if (rank == 0) {

        /* MPI_Type_vector (lowercase "vector") takes a pointer to the new
           datatype, and the type must be committed before it is used;
           note the stride is counted in elements of the old type, so for
           one column of an M x N row-major array it would be N, not N/2 */
        MPI_Type_vector(M/2, 1, N/2, MPI_DOUBLE, &new1);
        MPI_Type_commit(&new1);
        MPI_Ssend(&new[0][N/2], 1, new1, 1, ...);

        MPI_Recv(...);

    }

    if (rank == 1) {
        /* the datatype must be created and committed on this rank too */
        MPI_Recv(&array[0][N/2], 1, new1, 0, ...);

        .......

        /* careful: column index N is one past the end of a row of N
           elements (valid indices are 0 .. N-1) */
        MPI_Ssend(&array[N/2][N], 1, new1, 0, ...);

    }
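
    For reference, here is a minimal self-contained sketch of the same idea (an editor's addition, not the poster's code; M, N, and the names grid and half_rows are made up for illustration). It sends the right half of the top M/2 rows of an M x N array from rank 0 to rank 1; for a row-major array the stride between the starts of consecutive row-pieces is the full row length N. Run it with at least two processes.

    #include "mpi.h"
    #include <stdio.h>

    #define M 8
    #define N 8

    int main(int argc, char *argv[])
    {
            static double grid[M][N];   /* zero-initialized on every rank */
            int rank, i, j;
            MPI_Datatype half_rows;
            MPI_Status status;

            MPI_Init(&argc, &argv);
            MPI_Comm_rank(MPI_COMM_WORLD, &rank);

            /* M/2 blocks of N/2 contiguous doubles, one block per row,
               with a stride of N elements between the block starts */
            MPI_Type_vector(M/2, N/2, N, MPI_DOUBLE, &half_rows);
            MPI_Type_commit(&half_rows);

            if (rank == 0) {
                    for (i = 0; i < M; i++)
                            for (j = 0; j < N; j++)
                                    grid[i][j] = i*N + j;
                    MPI_Ssend(&grid[0][N/2], 1, half_rows, 1, 0, MPI_COMM_WORLD);
            } else if (rank == 1) {
                    /* receiving with the same datatype places the data in
                       the same sub-block of rank 1's array */
                    MPI_Recv(&grid[0][N/2], 1, half_rows, 0, 0, MPI_COMM_WORLD, &status);
                    printf("rank 1: grid[0][%d] = %f\n", N/2, grid[0][N/2]);
            }

            MPI_Type_free(&half_rows);
            MPI_Finalize();
            return 0;
    }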

    Tuesday, April 24, 2012 3:05 PM

All replies

  • Hello.

    No time to dig through your code, sorry, but I have pasted below the code of my test program (it takes two vectors and multiplies them as a dot product); if you have any questions, please ask:

    #include "mpi.h"
    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>
    #include <signal.h>

    #define MYTAG 1

    int myid, j;
    char    processor_name[MPI_MAX_PROCESSOR_NAME];
    double startwtime = 0.0, endwtime;

    void
    quit_sig(int sig)
    {
            printf("Process %d on node %d received signal %d at %f sec\n",
                    myid, processor_name, sig, MPI_Wtime()-startwtime);
    }

    int main(int argc,char *argv[])
    {
            int    total, n, numprocs, i, dest;
            double *a, *b, sum, result;
            int    namelen;
            MPI_Status status;

            signal(SIGTERM, quit_sig);

            MPI_Init(&argc,&argv);
            MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
            MPI_Comm_rank(MPI_COMM_WORLD,&myid);
            MPI_Get_processor_name(processor_name,&namelen);

            if (myid == 0) {
                    if (argc != 2) {
                            printf("Usage: %s <length of vector>\n", argv[0]);
                            exit(1);
                    }
                    total = atoi(argv[1]);
            }

            printf("Process %d of %d is on %s\n",
                   myid, numprocs, processor_name);

            /* every rank records its own start time; it is used in the
               per-rank progress messages and in quit_sig() */
            startwtime = MPI_Wtime();

            /* total is only read on rank 0, so compute the chunk size
               there and broadcast it; e.g. total=10, numprocs=3 gives
               n=4 per worker, and rank 0 keeps 10 - 4*2 = 2 below */
            if (myid == 0)
                    n = total / numprocs + 1;
            MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

            a = malloc(n*sizeof(double));
            b = malloc(n*sizeof(double));

            if ((a == NULL) || (b == NULL)) {
                    fprintf(stderr,"Error allocating vectors (not enough memory?)\n"                                                                                        );
                    exit(1);
            }

            if (myid == 0) {
                    for (dest=1; dest < numprocs; dest++) {
                            for (i=0; i < n; i++) {
                                    a[i] = rand();
                                    b[i] = rand();
                            }
                            MPI_Send(a, n, MPI_DOUBLE, dest, MYTAG, MPI_COMM_WORLD);
                            MPI_Send(b, n, MPI_DOUBLE, dest, MYTAG, MPI_COMM_WORLD);
                    }
                    n = total - n*(numprocs-1);
                    for (i=0; i < n; i++) {
                            a[i] = rand();
                            b[i] = rand();
                    }
            } else {
                    MPI_Recv(a, n, MPI_DOUBLE, 0, MYTAG, MPI_COMM_WORLD, &status);
                    MPI_Recv(b, n, MPI_DOUBLE, 0, MYTAG, MPI_COMM_WORLD, &status);
            }

            printf("Process %d on node %s starting calc at %f sec\n",
                            myid, processor_name, MPI_Wtime()-startwtime);
    for (j=0; j<30; j++) {
            sum = 0.0;
            for (i=0; i<n; i++)
                    sum += a[i]*b[i];
    }
            printf("Process %d on node %s ending calc at %f sec\n",
                            myid, processor_name, MPI_Wtime()-startwtime);
            MPI_Reduce(&sum, &result, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

            if (myid == 0) {
                    endwtime = MPI_Wtime();
                    printf("Answer is %.16f\n", result);
                    printf("wall clock time = %f\n", endwtime-startwtime);
                    fflush(stdout);
            }

            MPI_Finalize();
            return 0;
    }
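
    To try it with a typical MPI installation (an editor's note; the file name vector.c is just an example), compile with the MPI wrapper and launch a few processes, passing the vector length as the argument:

    mpicc vector.c -o vector
    mpirun -np 4 ./vector 1000000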

    Wednesday, April 25, 2012 1:54 AM
  • OK, this is a very nice example! Thank you! But in that way you send the whole array to the other processors, right?
    Wednesday, April 25, 2012 4:28 PM