From af3556ddbeebbf80e799c61335ac5576934a8380 Mon Sep 17 00:00:00 2001
From: test2
Date: Wed, 17 Jun 2020 20:00:53 +0300
Subject: [PATCH] examples

---
 examples/Makefile   |  23 ++++--
 examples/bcast.c    |  51 ++++++++++++
 examples/cart.c     |  56 +++++++++++++
 examples/cpi.c      |  69 ++++++++++++++++
 examples/gather.c   |  59 ++++++++++++++
 examples/gatherv.c  | 121 ++++++++++++++++++++++++++++
 examples/group.c    |  45 +++++++++++
 examples/hello.c    |  24 ++++++
 examples/max.c      |  53 +++++++++++++
 examples/mm.c       | 124 +++++++++++++++++++++++++++++
 examples/mv.c       |  58 ++++++++++++++
 examples/nblock.c   |  33 ++++++++
 examples/nsquare.c  |  55 +++++++++++++
 examples/pack.c     |  66 ++++++++++++++++
 examples/pingpong.c |  50 ++++++++++++
 examples/run        |  21 -----
 examples/runafter   |   4 -
 examples/runbefore  |   4 -
 examples/scatter.c  |  63 +++++++++++++++
 examples/scatterv.c | 112 ++++++++++++++++++++++++++
 examples/search.c   |  61 +++++++++++++++
 examples/squares.c  |  60 ++++++++++++++
 examples/srtest.c   |  53 +++++++++++++
 examples/struct.c   |  66 ++++++++++++++++
 examples/systest.c  | 186 ++++++++++++++++++++++++++++++++++++++++++++
 25 files changed, 1482 insertions(+), 35 deletions(-)
 create mode 100644 examples/bcast.c
 create mode 100644 examples/cart.c
 create mode 100644 examples/cpi.c
 create mode 100644 examples/gather.c
 create mode 100644 examples/gatherv.c
 create mode 100644 examples/group.c
 create mode 100644 examples/hello.c
 create mode 100644 examples/max.c
 create mode 100644 examples/mm.c
 create mode 100644 examples/mv.c
 create mode 100644 examples/nblock.c
 create mode 100644 examples/nsquare.c
 create mode 100644 examples/pack.c
 create mode 100644 examples/pingpong.c
 delete mode 100644 examples/run
 delete mode 100644 examples/runafter
 delete mode 100644 examples/runbefore
 create mode 100644 examples/scatter.c
 create mode 100644 examples/scatterv.c
 create mode 100644 examples/search.c
 create mode 100644 examples/squares.c
 create mode 100644 examples/srtest.c
 create mode 100644 examples/struct.c
 create mode 100644 examples/systest.c

diff --git a/examples/Makefile b/examples/Makefile
index b2678cc..8940ce4 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -1,13 +1,22 @@
-all: config install log
-
-config:
-	/bin/bash ./runbefore
-
-install:
-	/bin/bash ./run
-
-log:
-	/bin/bash ./runafter
-
-
-
+all: helloworld cpi
+
+DIRhosts=/home/ubuntu/hosts
+DIR=/home/ubuntu
+
+helloworld:
+	@swarmlab-getworkers > $(DIRhosts) ; \
+	mpicc -o $(DIR)/mpi_hello_world ./mpi_hello_world.c ; \
+	cat $(DIRhosts) | while read -r line ; \
+	do \
+		scp $(DIR)/mpi_hello_world ubuntu@$$line:$(DIR)/mpi_hello_world ; \
+	done ; \
+	mpirun -n 10 -f $(DIRhosts) $(DIR)/mpi_hello_world 2>&1 | tee /tmp/output.log
+
+cpi:
+	@swarmlab-getworkers > $(DIRhosts) ; \
+	mpicc -o $(DIR)/cpi ./cpi.c ; \
+	cat $(DIRhosts) | while read -r line ; \
+	do \
+		scp $(DIR)/cpi ubuntu@$$line:$(DIR)/cpi ; \
+	done ; \
+	mpirun -n 10 -f $(DIRhosts) $(DIR)/cpi 2>&1 | tee /tmp/output.log
diff --git a/examples/bcast.c b/examples/bcast.c
new file mode 100644
index 0000000..68813d3
--- /dev/null
+++ b/examples/bcast.c
@@ -0,0 +1,51 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+int my_rank;
+int p, k;
+int root;
+int a1_local;
+int a2_local;
+int local_num;
+int endnum;
+int local_res;
+int final_res;
+int namelen;
+char proc_name[MPI_MAX_PROCESSOR_NAME];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &p);
+MPI_Get_processor_name(proc_name, &namelen);
+
+if (my_rank == 0)
+  {
+  printf("Enter the count of numbers:\n");
+  scanf("%d", &endnum);
+  }
+
+root = 0;
+MPI_Bcast(&endnum, 1, MPI_INT, root, MPI_COMM_WORLD);
+
+local_res = 0;
+local_num = endnum/p;
+a1_local = (my_rank * local_num) + 1;
+a2_local = a1_local + local_num - 1;
+for (k=a1_local; k<=a2_local; k++)
+  local_res = local_res + (k*k);
+printf("\n Process %d on %s : local result = %d \n", my_rank, proc_name, local_res);
+
+root = 0;
+MPI_Reduce(&local_res, &final_res, 1, MPI_INT, MPI_SUM, root, MPI_COMM_WORLD);
+
+if (my_rank == 0)
+  {
+  printf("\n Total result for N = %d is equal to : %d \n", endnum, final_res);
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/cart.c b/examples/cart.c
new file mode 100644
index 0000000..c4270c2
--- /dev/null
+++ b/examples/cart.c
@@ -0,0 +1,56 @@
+#include "mpi.h"
+#include <stdio.h>
+#define SIZE 16
+#define UP 0
+#define DOWN 1
+#define LEFT 2
+#define RIGHT 3
+
+int main(int argc, char *argv[])
+{
+int numtasks, rank, source, dest, outbuf, i, tag=1,
+    inbuf[4]={MPI_PROC_NULL,MPI_PROC_NULL,MPI_PROC_NULL,MPI_PROC_NULL,},
+    nbrs[4], dims[2]={4,4},
+    periods[2]={0,0}, reorder=0, coords[2];
+
+MPI_Request reqs[8];
+MPI_Status stats[8];
+MPI_Comm cartcomm;
+
+MPI_Init(&argc,&argv);
+MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
+
+if (numtasks == SIZE) {
+  MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, reorder, &cartcomm);
+  MPI_Comm_rank(cartcomm, &rank);
+  MPI_Cart_coords(cartcomm, rank, 2, coords);
+  MPI_Cart_shift(cartcomm, 0, 1, &nbrs[UP], &nbrs[DOWN]);
+  MPI_Cart_shift(cartcomm, 1, 1, &nbrs[LEFT], &nbrs[RIGHT]);
+
+  outbuf = rank;
+
+  for (i=0; i<4; i++) {
+    dest = nbrs[i];
+    source = nbrs[i];
+    MPI_Isend(&outbuf, 1, MPI_INT, dest, tag,
+              MPI_COMM_WORLD, &reqs[i]);
+    MPI_Irecv(&inbuf[i], 1, MPI_INT, source, tag,
+              MPI_COMM_WORLD, &reqs[i+4]);
+  }
+
+  MPI_Waitall(8, reqs, stats);
+
+  printf("rank= %d coords= %d %d neighbors(u,d,l,r)= %d %d %d %d\n",
+         rank,coords[0],coords[1],nbrs[UP],nbrs[DOWN],nbrs[LEFT],
+         nbrs[RIGHT]);
+  printf("rank= %d inbuf(u,d,l,r)= %d %d %d %d\n",
+         rank,inbuf[UP],inbuf[DOWN],inbuf[LEFT],inbuf[RIGHT]);
+  }
+else
+  printf("Must specify %d processors. Terminating.\n",SIZE);
+
+MPI_Finalize();
+}
diff --git a/examples/cpi.c b/examples/cpi.c
new file mode 100644
index 0000000..e175fff
--- /dev/null
+++ b/examples/cpi.c
@@ -0,0 +1,69 @@
+#include "mpi.h"
+#include <stdio.h>
+#include <math.h>
+
+double f( double );
+
+double f( double a)
+{
+    return (4.0 / (1.0 + a*a));
+}
+
+int main( int argc, char *argv[] )
+{
+    int done = 0, n, myid, numprocs, i;
+    double PI25DT = 3.141592653589793238462643;
+    double mypi, pi, h, sum, x;
+    double startwtime=0.0, endwtime;
+    int namelen;
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+
+    MPI_Init(&argc,&argv);
+    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
+    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
+    MPI_Get_processor_name(processor_name,&namelen);
+
+    fprintf(stderr,"Process %d on %s\n",
+            myid, processor_name);
+
+    while (!done)
+    {
+        if (myid == 0)
+        {
+            printf("Enter the number of intervals (0 quits):\n");
+            scanf("%d",&n);
+
+            startwtime = MPI_Wtime();
+        }
+        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
+        if (n == 0)
+            done = 1;
+        else
+        {
+            h = 1.0 / (double) n;
+            sum = 0.0;
+            for (i = myid + 1; i <= n; i += numprocs)
+            {
+                x = h * ((double)i - 0.5);
+                sum += f(x);
+            }
+            mypi = h * sum;
+
+            MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+
+            if (myid == 0)
+            {
+                printf("pi is approximately %.16f, Error is %.16f\n",
+                       pi, fabs(pi - PI25DT));
+                endwtime = MPI_Wtime();
+                printf("wall clock time = %f\n",
+                       endwtime-startwtime);
+            }
+        }
+    }
+    MPI_Finalize();
+    return 0;
+}
diff --git a/examples/gather.c b/examples/gather.c
new file mode 100644
index 0000000..b69799d
--- /dev/null
+++ b/examples/gather.c
@@ -0,0 +1,59 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+int my_rank;
+int p, k, count;
+int root;
+int a1_local;
+int a2_local;
+int local_num;
+int endnum;
+int local_res[50];
+int final_res[50];
+int namelen;
+char proc_name[MPI_MAX_PROCESSOR_NAME];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &p);
+MPI_Get_processor_name(proc_name, &namelen);
+
+if (my_rank == 0)
+  {
+  printf("Enter the count of numbers:\n");
+  scanf("%d", &endnum);
+  }
+
+root = 0;
+MPI_Bcast(&endnum, 1, MPI_INT, root, MPI_COMM_WORLD);
+
+local_num = endnum/p;
+a1_local = (my_rank * local_num) + 1;
+a2_local = a1_local + local_num - 1;
+count=0;
+for (k=a1_local; k<=a2_local; k++)
+  {
+  local_res[count] = (k*k);
+  count++;
+  }
+printf("\n Process %d on %s : local squares are : ", my_rank, proc_name);
+for (k=0; k<count; k++)
+  printf("%d ", local_res[k]);
+printf("\n");
+
+root = 0;
+MPI_Gather(local_res, local_num, MPI_INT, final_res, local_num, MPI_INT, root, MPI_COMM_WORLD);
+
+if (my_rank == 0)
+  {
+  printf("\n The squares of all the numbers are : ");
+  for (k=0; k<endnum; k++)
+    printf("%d ", final_res[k]);
+  printf("\n");
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/gatherv.c b/examples/gatherv.c
new file mode 100644
--- /dev/null
+++ b/examples/gatherv.c
@@ -0,0 +1,121 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpi.h"
+#define MAXPROC 8    /* Max number of processes */
+#define LENGTH 8     /* Size of matrix is LENGTH * LENGTH */
+
+int main(int argc, char* argv[]) {
+  int i, j, np, me;
+  const int root = 0;  /* Root process in scatter */
+
+  int x[LENGTH][LENGTH];    /* Send buffer */
+  int y[LENGTH];            /* Receive buffer */
+  int res[LENGTH][LENGTH];  /* Final receive buffer */
+
+  int *sendcount, *recvcount;  /* Arrays for sendcounts and recvcounts */
+  int *displs1, *displs2;      /* Arrays for displacements */
+
+  MPI_Init(&argc, &argv);              /* Initialize MPI */
+  MPI_Comm_size(MPI_COMM_WORLD, &np);  /* Get nr of processes */
+  MPI_Comm_rank(MPI_COMM_WORLD, &me);  /* Get own identifier */
+
+  /* Check that we have one process for each row in the matrix */
+  if (np != LENGTH) {
+    if (me == 0) {
+      printf("You have to use %d processes\n", LENGTH);
+    }
+    MPI_Finalize();
+    exit(0);
+  }
+
+  /* Allocate memory for the sendcount and displacements arrays */
+  sendcount = (int *)malloc(LENGTH*sizeof(int));
+  displs1 = (int *)malloc(LENGTH*sizeof(int));
+
+  /* Initialize sendcount and displacements arrays */
+  for (i=0; i<LENGTH; i++) {
+    sendcount[i] = LENGTH;
+    displs1[i] = i*LENGTH;
+  }
+
+  /* Allocate memory for the recvcount and displacements arrays */
+  recvcount = (int *)malloc(LENGTH*sizeof(int));
+  displs2 = (int *)malloc(LENGTH*sizeof(int));
+
+  /* Initialize recvcount and displacements arrays */
+  for (i=0; i<LENGTH; i++) {
+    recvcount[i] = LENGTH;
+    displs2[i] = i*LENGTH;
+  }
+
+  if (me == root) {
+    /* Initialize the matrix x with values 0 .. LENGTH*LENGTH-1 */
+    for (i=0; i<LENGTH; i++) {
+      for (j=0; j<LENGTH; j++) {
+        x[i][j] = i*LENGTH+j;
+      }
+    }
+  }
+
+  /* Scatter one row of the matrix x to each process */
+  MPI_Scatterv(x, sendcount, displs1, MPI_INT, y, LENGTH, MPI_INT, root,
+               MPI_COMM_WORLD);
+
+  /* Each process squares the values in the row it received */
+  for (i=0; i<LENGTH; i++) {
+    y[i] = y[i]*y[i];
+  }
+
+  /* Gather the rows back into the matrix res on the root process */
+  MPI_Gatherv(y, LENGTH, MPI_INT, res, recvcount, displs2, MPI_INT, root,
+              MPI_COMM_WORLD);
+
+  if (me == root) {
+    /* Print the result matrix */
+    for (i=0; i<LENGTH; i++) {
+      for (j=0; j<LENGTH; j++) {
+        printf("%4d ", res[i][j]);
+      }
+      printf("\n");
+    }
+  }
+
+  MPI_Finalize();
+  exit(0);
+}
diff --git a/examples/group.c b/examples/group.c
new file mode 100644
--- /dev/null
+++ b/examples/group.c
@@ -0,0 +1,45 @@
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#define NPROCS 8
+
+int main(int argc, char *argv[])
+{
+int rank, new_rank, sendbuf, recvbuf, numtasks,
+    ranks1[4]={0,1,2,3}, ranks2[4]={4,5,6,7};
+MPI_Group orig_group, new_group;
+MPI_Comm new_comm;
+
+MPI_Init(&argc,&argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
+
+if (numtasks != NPROCS) {
+  printf("Must specify MP_PROCS= %d. Terminating.\n",NPROCS);
+  MPI_Finalize();
+  exit(0);
+  }
+
+sendbuf = rank;
+
+/* Extract the original group handle */
+MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
+
+/* Divide tasks into two distinct groups based upon rank */
+if (rank < NPROCS/2) {
+  MPI_Group_incl(orig_group, NPROCS/2, ranks1, &new_group);
+  }
+else {
+  MPI_Group_incl(orig_group, NPROCS/2, ranks2, &new_group);
+  }
+
+/* Create new communicator and then perform collective communications */
+MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);
+MPI_Allreduce(&sendbuf, &recvbuf, 1, MPI_INT, MPI_SUM, new_comm);
+
+MPI_Group_rank (new_group, &new_rank);
+printf("rank= %d newrank= %d recvbuf= %d\n",rank,new_rank,recvbuf);
+
+MPI_Finalize();
+}
diff --git a/examples/hello.c b/examples/hello.c
new file mode 100644
index 0000000..0361994
--- /dev/null
+++ b/examples/hello.c
@@ -0,0 +1,24 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+
+int my_rank;
+int size;
+int namelen;
+char proc_name[MPI_MAX_PROCESSOR_NAME];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &size);
+MPI_Get_processor_name(proc_name, &namelen);
+
+if (my_rank == 2)
+printf("Hello - I am process 2\n");
+else
+printf("Hello from process %d of %d on %s\n", my_rank, size, proc_name);
+
+MPI_Finalize();
+}
diff --git a/examples/max.c b/examples/max.c
new file mode 100644
index 0000000..53a53f3
--- /dev/null
+++ b/examples/max.c
@@ -0,0 +1,53 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+int my_rank;
+int k, p;
+int threshold;
+int root;
+int local_max;
+int total_max;
+int data[10];
+int namelen;
+char proc_name[MPI_MAX_PROCESSOR_NAME];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &p);
+MPI_Get_processor_name(proc_name, &namelen);
+
+for (k=0; k<10; k++)
+data[k]=my_rank+k;
+
+if (my_rank == 0)
+  {
+  printf("Enter a search threshold:\n");
+  scanf("%d", &threshold);
+  }
+
+root = 0;
+MPI_Bcast(&threshold, 1, MPI_INT, root, MPI_COMM_WORLD);
+
+local_max = 0;
+for (k=0; k<10; k++)
+  {
+  if ((data[k] > threshold) && (data[k] > local_max)) local_max=data[k];
+  }
+printf("\n Process %d on %s : local maximum = %d \n", my_rank, proc_name, local_max);
+
+root = 0;
+MPI_Reduce(&local_max, &total_max, 1, MPI_INT, MPI_MAX, root, MPI_COMM_WORLD);
+
+if (my_rank == 0)
+  {
+  if (total_max > threshold)
+    printf("\n Maximum number found for threshold %d is equal to : %d \n", threshold, total_max);
+  else
+    printf("\n No number found greater than threshold %d \n", threshold);
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/mm.c b/examples/mm.c
new file mode 100644
index 0000000..f952aa4
--- /dev/null
+++ b/examples/mm.c
@@ -0,0 +1,124 @@
+/******************************************************************************
+ * MPI Matrix Multiply - C Version
+ * FILE: mpi_mm.c
+ * OTHER FILES: make.mpi_mm.c
+ * DESCRIPTION:
+ *   In this code, the master task distributes a matrix multiply
+ *   operation to numtasks-1 worker tasks.
+ *   NOTE1: C and Fortran versions of this code differ because of the way
+ *   arrays are stored/passed. C arrays are row-major order but Fortran
+ *   arrays are column-major order.
+ * AUTHOR: Ros Leibensperger / Blaise Barney
+ * CONVERTED TO MPI: George L. Gusciora (1/25/95)
+ ******************************************************************************/
+#include "mpi.h"
+#include <stdio.h>
+#define NRA 62        /* number of rows in matrix A */
+#define NCA 15        /* number of columns in matrix A */
+#define NCB 7         /* number of columns in matrix B */
+#define MASTER 0      /* taskid of first task */
+#define FROM_MASTER 1 /* setting a message type */
+#define FROM_WORKER 2 /* setting a message type */
+
+int main(int argc, char *argv[])
+{
+int numtasks,   /* number of tasks in partition */
+    taskid,     /* a task identifier */
+    numworkers, /* number of worker tasks */
+    source,     /* task id of message source */
+    dest,       /* task id of message destination */
+    nbytes,     /* number of bytes in message */
+    mtype,      /* message type */
+    rows,       /* rows of matrix A sent to each worker */
+    averow, extra, offset,
+                /* used to determine rows sent to each worker */
+    i, j, k, rc; /* misc */
+double a[NRA][NCA],  /* matrix A to be multiplied */
+       b[NCA][NCB],  /* matrix B to be multiplied */
+       c[NRA][NCB];  /* result matrix C */
+MPI_Status status;
+
+   rc = MPI_Init(&argc,&argv);
+   rc|= MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
+   rc|= MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
+   if (rc != 0)
+      printf ("error initializing MPI and obtaining task ID information\n");
+   else
+      printf ("mpi_pi_mm MPI task ID = %d\n", taskid);
+   numworkers = numtasks-1;
+
+/**************************** master task ************************************/
+   if (taskid == MASTER)
+   {
+      printf("Number of worker tasks = %d\n",numworkers);
+      for (i=0; i<NRA; i++)
+         for (j=0; j<NCA; j++)
+            a[i][j]= i+j;
+      for (i=0; i<NCA; i++)
+         for (j=0; j<NCB; j++)
+            b[i][j]= i*j;
+
+      /* send matrix data to the worker tasks */
+      averow = NRA/numworkers;
+      extra = NRA%numworkers;
+      offset = 0;
+      mtype = FROM_MASTER;
+      for (dest=1; dest<=numworkers; dest++)
+      {
+         rows = (dest <= extra) ? averow+1 : averow;
+         printf("   sending %d rows to task %d\n",rows,dest);
+         MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
+         MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
+         MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype,
+                  MPI_COMM_WORLD);
+         MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
+         offset = offset + rows;
+      }
+
+      /* wait for results from all worker tasks */
+      mtype = FROM_WORKER;
+      for (i=1; i<=numworkers; i++)
+      {
+         source = i;
+         MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
+         MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
+         MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, mtype,
+                  MPI_COMM_WORLD, &status);
+      }
+
+      /* print results */
+      printf("Here is the result matrix:\n");
+      for (i=0; i<NRA; i++)
+      {
+         for (j=0; j<NCB; j++)
+            printf("%6.2f ", c[i][j]);
+         printf("\n");
+      }
+   }
+
+/**************************** worker task ************************************/
+   if (taskid > MASTER)
+   {
+      mtype = FROM_MASTER;
+      MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
+      MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
+      MPI_Recv(&a, rows*NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
+      MPI_Recv(&b, NCA*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
+
+      for (k=0; k<NCB; k++)
+         for (i=0; i<rows; i++)
+         {
+            c[i][k] = 0.0;
+            for (j=0; j<NCA; j++)
+               c[i][k] = c[i][k] + a[i][j] * b[j][k];
+         }
+      mtype = FROM_WORKER;
+      MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
+      MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
+      MPI_Send(&c, rows*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
+   }
+   MPI_Finalize();
+}
diff --git a/examples/mv.c b/examples/mv.c
new file mode 100644
--- /dev/null
+++ b/examples/mv.c
@@ -0,0 +1,58 @@
+#include <stdio.h>
+#include <mpi.h>
+#define N 10
+
+int main(int argc, char **argv) {
+  int j,k,A_loc;
+  int rank,size,root;
+  int A[N];
+  int B_loc[N];
+  int C[N];
+  int B[N][N];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+root = 0;
+
+/* Initialize the matrix B and the vector C */
+/* This example assumes it is run with exactly N processes */
+
+if (rank == root)
+  {
+
+  for (k=0; k<N; k++)
+    {
+    C[k] = 1;
+    for (j=0; j<N; j++)
+      B[k][j] = k + j;
+    }
+  }
+
+/* Broadcast the vector C and scatter the rows of matrix B */
+MPI_Bcast(C, N, MPI_INT, root, MPI_COMM_WORLD);
+MPI_Scatter(B, N, MPI_INT, B_loc, N, MPI_INT, root, MPI_COMM_WORLD);
+
+/* Each process computes one element of the product A = B * C */
+A_loc = 0;
+for (j=0; j<N; j++)
+  A_loc = A_loc + (B_loc[j] * C[j]);
+
+/* Gather the local results into the vector A on the root process */
+MPI_Gather(&A_loc, 1, MPI_INT, A, 1, MPI_INT, root, MPI_COMM_WORLD);
+
+if (rank == root)
+  {
+  printf("The result vector A = B * C is :\n");
+  for (k=0; k<N; k++)
+    printf("%d ", A[k]);
+  printf("\n");
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/nblock.c b/examples/nblock.c
new file mode 100644
--- /dev/null
+++ b/examples/nblock.c
@@ -0,0 +1,33 @@
+#include "mpi.h"
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+int numtasks, rank, next, prev, buf[2], tag1=1, tag2=2;
+MPI_Request reqs[4];
+MPI_Status stats[4];
+
+MPI_Init(&argc,&argv);
+MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
+MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+prev = rank-1;
+next = rank+1;
+if (rank == 0) prev = numtasks - 1;
+if (rank == (numtasks - 1)) next = 0;
+
+MPI_Irecv(&buf[0], 1, MPI_INT, prev, tag1, MPI_COMM_WORLD, &reqs[0]);
+MPI_Irecv(&buf[1], 1, MPI_INT, next, tag2, MPI_COMM_WORLD, &reqs[1]);
+
+MPI_Isend(&rank, 1, MPI_INT, prev, tag2, MPI_COMM_WORLD, &reqs[2]);
+MPI_Isend(&rank, 1, MPI_INT, next, tag1, MPI_COMM_WORLD, &reqs[3]);
+
+MPI_Waitall(4, reqs, stats);
+
+printf("Process %d: Received the elements %d and %d\n", rank, buf[0], buf[1]);
+
+MPI_Finalize();
+}
diff --git a/examples/nsquare.c b/examples/nsquare.c
new file mode 100644
index 0000000..0e7e3ee
--- /dev/null
+++ b/examples/nsquare.c
@@ -0,0 +1,55 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv) {
+int my_rank;
+int p,k,res,finres,a1,b1,num;
+int source;
+int target;
+int tag1 = 50;
+int tag2 = 60;
+int endnum;
+MPI_Status status;
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &p);
+
+if (my_rank == 0) {
+  printf("Enter the count of numbers:\n");
+  scanf("%d", &endnum);
+  for (target = 1; target < p; target++)
+    MPI_Send(&endnum, 1, MPI_INT, target, tag1, MPI_COMM_WORLD);
+  }
+else
+  MPI_Recv(&endnum, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
+
+res = 0;
+num = endnum/p;
+a1 = (my_rank * num) + 1;
+b1 = a1 + num - 1;
+for (k=a1; k<=b1; k++)
+res = res + (k*k);
+
+if (my_rank != 0) {
+  MPI_Send(&res, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD);
+  }
+else {
+  finres = res;
+  printf("\n Result of process %d: %d\n", my_rank, res);
+  for (source = 1; source < p; source++) {
+    MPI_Recv(&res, 1, MPI_INT, source, tag2, MPI_COMM_WORLD, &status);
+    finres = finres + res;
+    printf("\n Result of process %d: %d\n", source, res);
+  }
+  printf("\n\n\n Final result: %d\n", finres);
+}
+
+MPI_Finalize();
+}
diff --git a/examples/pack.c b/examples/pack.c
new file mode 100644
index 0000000..bbcdb47
--- /dev/null
+++ b/examples/pack.c
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+int my_rank, p, k, size;
+float b;
+int root, position;
+char buffer[50];
+int matrixA[100];
+int loc_num;
+int loc_matrix[100];
+float loc_res[100];
+float final_res[100];
+int namelen;
+char proc_name[MPI_MAX_PROCESSOR_NAME];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &p);
+MPI_Get_processor_name(proc_name, &namelen);
+
+if (my_rank == 0)
+  {
+  printf("COMPUTATION OF THE EXPRESSION b * A \n\n");
+  printf("ENTER THE VALUE OF b:\n");
+  scanf("%f", &b);
+  printf("ENTER THE LENGTH OF ARRAY A:\n");
+  scanf("%d", &size);
+  printf("ENTER THE %d ELEMENTS OF ARRAY A:\n", size);
+  for (k=0; k<size; k++)
+    scanf("%d", &matrixA[k]);
+
+  /* Pack b and size into the buffer, so one broadcast sends both */
+  position = 0;
+  MPI_Pack(&b, 1, MPI_FLOAT, buffer, 50, &position, MPI_COMM_WORLD);
+  MPI_Pack(&size, 1, MPI_INT, buffer, 50, &position, MPI_COMM_WORLD);
+  }
+
+root = 0;
+MPI_Bcast(buffer, 50, MPI_PACKED, root, MPI_COMM_WORLD);
+
+if (my_rank != 0)
+  {
+  /* Unpack b and size in the same order they were packed */
+  position = 0;
+  MPI_Unpack(buffer, 50, &position, &b, 1, MPI_FLOAT, MPI_COMM_WORLD);
+  MPI_Unpack(buffer, 50, &position, &size, 1, MPI_INT, MPI_COMM_WORLD);
+  }
+
+loc_num = size/p;
+MPI_Scatter(matrixA, loc_num, MPI_INT, loc_matrix, loc_num, MPI_INT, root, MPI_COMM_WORLD);
+
+for (k=0; k<loc_num; k++)
+  loc_res[k] = b * loc_matrix[k];
+
+printf("\n Process %d on %s : local results are : ", my_rank, proc_name);
+for (k=0; k<loc_num; k++)
+  printf("%3.2f ", loc_res[k]);
+printf("\n");
+
+MPI_Gather(loc_res, loc_num, MPI_FLOAT, final_res, loc_num, MPI_FLOAT, root, MPI_COMM_WORLD);
+
+if (my_rank == 0)
+  {
+  printf("\n THE FINAL RESULT b * A IS : ");
+  for (k=0; k<size; k++)
+    printf("%3.2f ", final_res[k]);
+  printf("\n");
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/pingpong.c b/examples/pingpong.c
new file mode 100644
--- /dev/null
+++ b/examples/pingpong.c
@@ -0,0 +1,50 @@
+#include <stdio.h>
+#include <unistd.h>
+#include "mpi.h"
+
+int main(int argc, char *argv[])
+{
+int numtasks, rank, dest, source, tag = 1;
+char msg1[15], msg2[15];
+MPI_Status stat;
+
+MPI_Init(&argc,&argv);
+MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
+MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+sprintf(msg1, "Sample message");
+
+if (rank == 0) {
+  dest = 1;
+  source = 1;
+
+  while (1) {
+    MPI_Send(msg1, 15, MPI_CHAR, dest, tag,
+             MPI_COMM_WORLD);
+    msg1[0] = '\0';
+    MPI_Recv(msg1, 15, MPI_CHAR, source, tag,
+             MPI_COMM_WORLD, &stat);
+    printf("Process %d Message = %s \n", rank, msg1);
+    sleep(2);
+  }
+  }
+
+else if (rank == 1) {
+  dest = 0;
+  source = 0;
+
+  while (1) {
+    msg2[0] = '\0';
+    MPI_Recv(msg2, 15, MPI_CHAR, source, tag,
+             MPI_COMM_WORLD, &stat);
+    printf("Process %d Message = %s \n", rank, msg2);
+    sleep(2);
+    MPI_Send(msg2, 15, MPI_CHAR, dest, tag,
+             MPI_COMM_WORLD);
+  }
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/run b/examples/run
deleted file mode 100644
index c81fa2d..0000000
--- a/examples/run
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-
-# compile
-DIRhosts=/home/ubuntu/mpitmp/hosts
-DIR=/home/ubuntu/mpitmp
-
-mkdir -p $DIR
-mpicc -o $DIR/mpi_hello_world ./mpi_hello_world.c
-
-# scp hello wrold to all clients. sudo -u mpi for auto scp connect. see /home/mpi/.ssh
-swarmlab-getworkers > $DIRhosts
-while read -r line;
-  do
-  scp $DIR/mpi_hello_world ubuntu@$line:$DIR/mpi_hello_world
-done < $DIRhosts
-
-
-# run it!
- mpirun -n 10 -f $DIRhosts $DIR/mpi_hello_world 2>&1 | tee /tmp/output.log
-
diff --git a/examples/runafter b/examples/runafter
deleted file mode 100644
index f6af0fe..0000000
--- a/examples/runafter
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-echo "do more jobs"
-#cat /tmp/output.log
diff --git a/examples/runbefore b/examples/runbefore
deleted file mode 100644
index 946d5dd..0000000
--- a/examples/runbefore
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-#swarmlab-getworkers > /home/ubuntu/hosts
-
diff --git a/examples/scatter.c b/examples/scatter.c
new file mode 100644
index 0000000..14382db
--- /dev/null
+++ b/examples/scatter.c
@@ -0,0 +1,63 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+int my_rank;
+int p, k;
+int b, size;
+int root;
+int matrixA[100];
+int loc_num;
+int loc_matrix[100];
+int loc_res[100];
+int final_res[100];
+int namelen;
+char proc_name[MPI_MAX_PROCESSOR_NAME];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &p);
+MPI_Get_processor_name(proc_name, &namelen);
+
+if (my_rank == 0)
+  {
+  printf("COMPUTATION OF THE EXPRESSION b * A \n\n");
+  printf("ENTER THE VALUE OF b:\n");
+  scanf("%d", &b);
+  printf("ENTER THE LENGTH OF ARRAY A:\n");
+  scanf("%d", &size);
+  printf("ENTER THE %d ELEMENTS OF ARRAY A:\n", size);
+  for (k=0; k<size; k++)
+    scanf("%d", &matrixA[k]);
+  }
+
+root = 0;
+MPI_Bcast(&b, 1, MPI_INT, root, MPI_COMM_WORLD);
+MPI_Bcast(&size, 1, MPI_INT, root, MPI_COMM_WORLD);
+
+/* Scatter equal pieces of the array to all processes */
+loc_num = size/p;
+MPI_Scatter(matrixA, loc_num, MPI_INT, loc_matrix, loc_num, MPI_INT, root, MPI_COMM_WORLD);
+
+for (k=0; k<loc_num; k++)
+  loc_res[k] = b * loc_matrix[k];
+
+printf("\n Process %d on %s : local results are : ", my_rank, proc_name);
+for (k=0; k<loc_num; k++)
+  printf("%d ", loc_res[k]);
+printf("\n");
+
+/* Gather the partial results back to the root process */
+MPI_Gather(loc_res, loc_num, MPI_INT, final_res, loc_num, MPI_INT, root, MPI_COMM_WORLD);
+
+if (my_rank == 0)
+  {
+  printf("\n THE FINAL RESULT b * A IS : ");
+  for (k=0; k<size; k++)
+    printf("%d ", final_res[k]);
+  printf("\n");
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/scatterv.c b/examples/scatterv.c
new file mode 100644
--- /dev/null
+++ b/examples/scatterv.c
@@ -0,0 +1,112 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "mpi.h"
+#define MAXPROC 8    /* Max number of processes */
+#define NAMELEN 80   /* Max length of machine name */
+#define LENGTH 8     /* Size of matrix is LENGTH * LENGTH */
+
+int main(int argc, char* argv[]) {
+  int i, j, np, me, count;
+  const int nametag = 42;  /* Tag value for sending name */
+  const int datatag = 43;  /* Tag value for sending data */
+  const int root = 0;      /* Root process in scatter */
+  MPI_Status status;       /* Status object for receive */
+
+  char myname[NAMELEN];             /* Local host name string */
+  char hostname[MAXPROC][NAMELEN];  /* Received host names */
+
+  int x[LENGTH][LENGTH];  /* Send buffer */
+  int y[LENGTH];          /* Receive buffer */
+
+  int *sendcount, *displs;  /* Arrays for sendcounts and displacements */
+
+  MPI_Init(&argc, &argv);              /* Initialize MPI */
+  MPI_Comm_size(MPI_COMM_WORLD, &np);  /* Get nr of processes */
+  MPI_Comm_rank(MPI_COMM_WORLD, &me);  /* Get own identifier */
+
+  gethostname(myname, NAMELEN);  /* Get host name */
+
+  /* Check that we have one process for each row in the matrix */
+  if (np != LENGTH) {
+    if (me == 0) {
+      printf("You have to use %d processes\n", LENGTH);
+    }
+    MPI_Finalize();
+    exit(0);
+  }
+
+  /* Allocate memory for the sendcount and displacements arrays */
+  sendcount = (int *)malloc(LENGTH*sizeof(int));
+  displs = (int *)malloc(LENGTH*sizeof(int));
+
+  /* Initialize sendcount and displacements arrays */
+  for (i=0; i<LENGTH; i++) {
+    sendcount[i] = LENGTH;
+    displs[i] = i*LENGTH;
+  }
+
+  if (me == root) {
+    /* Initialize the matrix x with values 0 .. LENGTH*LENGTH-1 */
+    for (i=0; i<LENGTH; i++) {
+      for (j=0; j<LENGTH; j++) {
+        x[i][j] = i*LENGTH+j;
+      }
+    }
+  }
+
+  /* Scatter one row of the matrix to each process */
+  MPI_Scatterv(x, sendcount, displs, MPI_INT, y, LENGTH, MPI_INT, root,
+               MPI_COMM_WORLD);
+
+  if (me != root) {
+    /* Send own host name and the received row to the root process */
+    MPI_Send(myname, NAMELEN, MPI_CHAR, root, nametag, MPI_COMM_WORLD);
+    MPI_Send(y, LENGTH, MPI_INT, root, datatag, MPI_COMM_WORLD);
+  } else {
+    /* Print own row, then receive and print the row of each process */
+    printf("Process %d on %s received : ", me, myname);
+    for (j=0; j<LENGTH; j++) {
+      printf("%4d ", y[j]);
+    }
+    printf("\n");
+    for (i=1; i<np; i++) {
+      MPI_Recv(hostname[i], NAMELEN, MPI_CHAR, i, nametag, MPI_COMM_WORLD,
+               &status);
+      MPI_Recv(y, LENGTH, MPI_INT, i, datatag, MPI_COMM_WORLD, &status);
+      MPI_Get_count(&status, MPI_INT, &count);
+      printf("Process %d on %s received : ", i, hostname[i]);
+      for (j=0; j<count; j++) {
+        printf("%4d ", y[j]);
+      }
+      printf("\n");
+    }
+  }
+
+  MPI_Finalize();
+  exit(0);
+}
diff --git a/examples/search.c b/examples/search.c
new file mode 100644
--- /dev/null
+++ b/examples/search.c
@@ -0,0 +1,61 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+int my_rank, size;
+int source, dest;
+int tag1= 50;
+int tag2 = 60;
+int tag3 =70;
+int found = 0;
+int other_found;
+int k, code;
+int data[10];
+int namelen;
+MPI_Status status;
+char proc_name[MPI_MAX_PROCESSOR_NAME];
+char other_proc_name[MPI_MAX_PROCESSOR_NAME];
+
+MPI_Init(&argc, &argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+MPI_Comm_size(MPI_COMM_WORLD, &size);
+MPI_Get_processor_name(proc_name, &namelen);
+
+for (k=0; k<10; k++)
+data[k]=my_rank+k;
+
+if (my_rank == 0)
+  {
+  printf("Enter a search code:\n");
+  scanf("%d", &code);
+  for (dest = 1; dest < size; dest++)
+    MPI_Send(&code, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
+  }
+else
+  MPI_Recv(&code, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
+
+/* Each process searches its local data for the code */
+found = 0;
+for (k=0; k<10; k++)
+  if (data[k] == code) found = 1;
+
+if (my_rank != 0)
+  {
+  MPI_Send(&found, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD);
+  MPI_Send(proc_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, tag3, MPI_COMM_WORLD);
+  }
+else
+  {
+  if (found)
+    printf("Process %d on %s : found the code %d\n", my_rank, proc_name, code);
+  for (source = 1; source < size; source++)
+    {
+    MPI_Recv(&other_found, 1, MPI_INT, source, tag2, MPI_COMM_WORLD, &status);
+    MPI_Recv(other_proc_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, source, tag3, MPI_COMM_WORLD, &status);
+    if (other_found)
+      printf("Process %d on %s : found the code %d\n", source, other_proc_name, code);
+    }
+  }
+
+MPI_Finalize();
+}
diff --git a/examples/squares.c b/examples/squares.c
new file mode 100644
--- /dev/null
+++ b/examples/squares.c
@@ -0,0 +1,60 @@
+#include <stdio.h>
+#include "mpi.h"
+
+int main(int argc, char** argv)
+{
+  int my_rank;
+  int p,k,res,finres,num;
+  int source,target;
+  int tag1=50, tag2=60, tag3=70;
+  int plithos;
+  int data[100];
+  int data_loc[100];
+  MPI_Status status;
+
+  MPI_Init(&argc, &argv);
+  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+  MPI_Comm_size(MPI_COMM_WORLD, &p);
+
+  if (my_rank == 0) {
+    printf("Enter the count of numbers:\n");
+    scanf("%d", &plithos);
+    printf("Enter the %d numbers:\n", plithos);
+    for (k=0; k<plithos; k++)
+      scanf("%d", &data[k]);
+
+    /* Send an equal share of the numbers to every other process */
+    num = plithos/p;
+    for (target = 1; target < p; target++) {
+      MPI_Send(&num, 1, MPI_INT, target, tag1, MPI_COMM_WORLD);
+      MPI_Send(&data[target*num], num, MPI_INT, target, tag2, MPI_COMM_WORLD);
+    }
+    for (k=0; k<num; k++)
+      data_loc[k] = data[k];
+  }
+  else {
+    MPI_Recv(&num, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
+    MPI_Recv(data_loc, num, MPI_INT, 0, tag2, MPI_COMM_WORLD, &status);
+  }
+
+  /* Each process sums the squares of its local numbers */
+  res = 0;
+  for (k=0; k<num; k++)
+    res = res + (data_loc[k] * data_loc[k]);
+
+  if (my_rank != 0) {
+    MPI_Send(&res, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD);
+  }
+  else {
+    finres = res;
+    for (source = 1; source < p; source++) {
+      MPI_Recv(&res, 1, MPI_INT, source, tag3, MPI_COMM_WORLD, &status);
+      finres = finres + res;
+    }
+    printf("\n The total sum of the squares is : %d\n", finres);
+  }
+
+  MPI_Finalize();
+}
diff --git a/examples/srtest.c b/examples/srtest.c
new file mode 100644
--- /dev/null
+++ b/examples/srtest.c
@@ -0,0 +1,53 @@
+#include <stdio.h>
+#include <mpi.h>
+#include <string.h>
+
+#define BUFLEN 512
+
+int main(int argc, char *argv[])
+{
+    int myid, numprocs, next, namelen;
+    char buffer[BUFLEN], processor_name[MPI_MAX_PROCESSOR_NAME];
+    MPI_Status status;
+
+    MPI_Init(&argc,&argv);
+    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
+    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
+    MPI_Get_processor_name(processor_name,&namelen);
+
+    fprintf(stderr,"Process %d on %s\n", myid, processor_name);
+    strcpy(buffer,"hello there");
+    if (myid == numprocs-1)
+        next = 0;
+    else
+        next = myid+1;
+
+    if (myid == 0)
+    {
+        printf("%d sending '%s' \n",myid,buffer);
+        MPI_Send(buffer, strlen(buffer)+1, MPI_CHAR, next, 99, MPI_COMM_WORLD);
+        MPI_Send(buffer, strlen(buffer)+1, MPI_CHAR, MPI_PROC_NULL, 299, MPI_COMM_WORLD);
+        printf("%d receiving \n",myid);
+        MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99, MPI_COMM_WORLD,
+                 &status);
+        printf("%d received '%s' \n",myid,buffer);
+        /* mpdprintf(001,"%d receiving \n",myid); */
+    }
+    else
+    {
+        printf("%d receiving \n",myid);
+        MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99, MPI_COMM_WORLD,
+                 &status);
+        MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_PROC_NULL, 299, MPI_COMM_WORLD,
+                 &status);
+        printf("%d received '%s' \n",myid,buffer);
+        /* mpdprintf(001,"%d receiving \n",myid); */
+        MPI_Send(buffer, strlen(buffer)+1, MPI_CHAR, next, 99, MPI_COMM_WORLD);
+        printf("%d sent '%s' \n",myid,buffer);
+    }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Finalize();
+    return (0);
+}
diff --git a/examples/struct.c b/examples/struct.c
new file mode 100644
index 0000000..bc3f8f0
--- /dev/null
+++ b/examples/struct.c
@@ -0,0 +1,66 @@
+#include "mpi.h"
+#include <stdio.h>
+#define NELEM 25
+
+int main(int argc, char *argv[])
+{
+int numtasks, rank, source=0, dest, tag=1, i;
+typedef struct {
+  float x, y, z;
+  float velocity;
+  int n, type;
+  } Particle;
+Particle p[NELEM], particles[NELEM];
+MPI_Datatype particletype, oldtypes[2];
+int blockcounts[2];
+
+/* MPI_Aint type used to be consistent with syntax of */
+/* MPI_Type_extent routine */
+MPI_Aint offsets[2], extent;
+
+MPI_Status stat;
+
+MPI_Init(&argc,&argv);
+MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
+
+/* Setup description of the 4 MPI_FLOAT fields x, y, z, velocity */
+offsets[0] = 0;
+oldtypes[0] = MPI_FLOAT;
+blockcounts[0] = 4;
+
+/* Setup description of the 2 MPI_INT fields n, type */
+/* Need to first figure offset by getting size of MPI_FLOAT */
+MPI_Type_extent(MPI_FLOAT, &extent);
+offsets[1] = 4 * extent;
+oldtypes[1] = MPI_INT;
+blockcounts[1] = 2;
+
+/* Now define structured type and commit it */
+MPI_Type_struct(2, blockcounts, offsets, oldtypes, &particletype);
+MPI_Type_commit(&particletype);
+
+/* Initialize the particle array and then send it to each task */
+if (rank == 0) {
+  for (i=0; i<NELEM; i++) {
+    particles[i].x = i * 1.0;
+    particles[i].y = i * -1.0;
+    particles[i].z = i * 1.0;
+    particles[i].velocity = 0.25;
+    particles[i].n = i;
+    particles[i].type = i % 2;
+  }
+  for (i=0; i<numtasks; i++)
+    MPI_Send(particles, NELEM, particletype, i, tag, MPI_COMM_WORLD);
+}
+
+MPI_Recv(p, NELEM, particletype, source, tag, MPI_COMM_WORLD, &stat);
+
+/* Print a sample of what was received */
+printf("rank= %d   %3.2f %3.2f %3.2f %3.2f %d %d\n", rank, p[3].x,
+       p[3].y, p[3].z, p[3].velocity, p[3].n, p[3].type);
+
+MPI_Type_free(&particletype);
+MPI_Finalize();
+}
diff --git a/examples/systest.c b/examples/systest.c
new file mode 100644
--- /dev/null
+++ b/examples/systest.c
@@ -0,0 +1,186 @@
+#include "mpi.h"
+#include <stdio.h>
+/* stdlib is needed for declaring malloc */
+#include <stdlib.h>
+
+#define MAX2(a,b) (((a)>(b)) ? (a) : (b))
+
+int GlobalReadInteger( void );
+void Hello( void );
+void Ring( void );
+/*
+ * void Stress( void );
+ * void Globals( void );
+ */
+
+int main(int argc, char *argv[])
+{
+    int me, option, namelen, size;
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD,&me);
+    MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+    if (size < 2) {
+        fprintf(stderr, "systest requires at least 2 processes\n" );
+        MPI_Abort(MPI_COMM_WORLD,1);
+    }
+
+    MPI_Get_processor_name(processor_name,&namelen);
+
+    fprintf(stderr,"Process %d is alive on %s\n",
+            me, processor_name);
+
+    while (1) {
+
+        MPI_Barrier(MPI_COMM_WORLD);
+
+      again:
+        if (me == 0) {
+            /* Read user input for action */
+            /* (void) printf("\nOptions: 0=quit, 1=Hello, 2=Ring, 3=Stress, ");
+             * (void) printf("4=Globals : "); */
+            (void) printf("\nOptions: 0=quit, 1=Hello, 2=Ring : ");
+            (void) fflush(stdout);
+        }
+        option = GlobalReadInteger();
+        if ( (option < 0) || (option > 4) )
+            goto again;
+
+        switch (option) {
+        case 0:
+            MPI_Finalize();
+            return(0);
+        case 1:
+            Hello(); break;
+        case 2:
+            Ring(); break;
+/*
+ *      case 3:
+ *          Stress(); break;
+ *      case 4:
+ *          Globals(); break;
+ */
+        default:
+            fprintf(stderr,"systest: invalid option %d\n", option); break;
+        }
+    }
+}
+
+int GlobalReadInteger( void )
+/*
+ * Process zero reads an integer from stdin and broadcasts
+ * it to everyone else
+ */
+{
+    int me, value;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+    if (me == 0) {
+        if (scanf("%d", &value) != 1)
+            fprintf(stderr,"failed reading integer value from stdin\n");
+    }
+    MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    return value;
+}
+
+void Hello( void )
+/*
+ * Everyone exchanges a hello message with everyone else.
+ * The hello message just comprises the sending and target nodes.
+ */
+{
+    int nproc, me;
+    int type = 1;
+    int buffer[2], node;
+    MPI_Status status;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
+
+    if (me == 0) {
+        printf("\nHello test ... show network integrity\n----------\n\n");
+        fflush(stdout);
+    }
+
+    for (node = 0; node<nproc; node++) {
+        if (node != me) {
+            buffer[0] = me;
+            buffer[1] = node;
+            MPI_Sendrecv_replace(buffer, 2, MPI_INT, node, type,
+                                 node, type, MPI_COMM_WORLD, &status);
+
+            if ( (buffer[0] != node) || (buffer[1] != me) ) {
+                fprintf(stderr, "Hello: %d!=%d or %d!=%d\n",
+                        buffer[0], node, buffer[1], me);
+                printf("Mismatch on hello process ids; node = %d\n", node);
+            }
+
+            printf("Hello from %d to %d\n", me, node);
+            fflush(stdout);
+        }
+    }
+}
+
+void Ring( void ) /* Time passing a message round a ring */
+{
+    int nproc, me;
+    MPI_Status status;
+    int type = 4;
+    int left, right;
+    char *buffer;
+    int lenbuf, max_len;
+    double start_ustime, used_ustime;
+    double us_rate;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
+
+    left = (me + nproc - 1) % nproc;
+    right = (me + 1) % nproc;
+
+    /* Find out how large a message to use */
+    if (me == 0) {
+        (void) printf("\nRing test ... time network performance\n---------\n\n");
+        (void) printf("Input maximum message size (bytes): ");
+        (void) fflush(stdout);
+    }
+    max_len = GlobalReadInteger();
+    if ( (max_len <= 0) || (max_len >= 4*1024*1024) )
+        max_len = 512*1024;
+    if ( (buffer = (char *) malloc((unsigned) max_len)) == (char *) NULL) {
+        printf("process %d could not allocate buffer of size %d\n",me,max_len);
+        MPI_Abort(MPI_COMM_WORLD,7777);
+    }
+
+    lenbuf = 1;
+    while (lenbuf <= max_len) {
+        start_ustime = MPI_Wtime();
+        if (me == 0) {
+            MPI_Send(buffer,lenbuf,MPI_CHAR,left, type,MPI_COMM_WORLD);
+            MPI_Recv(buffer,lenbuf,MPI_CHAR,right,type,MPI_COMM_WORLD,&status);
+        }
+        else {
+            MPI_Recv(buffer,lenbuf,MPI_CHAR,right,type,MPI_COMM_WORLD,&status);
+            MPI_Send(buffer,lenbuf,MPI_CHAR,left, type,MPI_COMM_WORLD);
+        }
+        used_ustime = MPI_Wtime() - start_ustime;
+
+        if (used_ustime > 0) /* rate is megabytes per second */
+            us_rate = (double)(nproc*lenbuf / (used_ustime*(double)1000000));
+        else
+            us_rate = 0.0;
+        if (me == 0) {
+            printf("len=%d bytes, used= %f sec., rate=%f Mbytes/sec\n",
+                   lenbuf, used_ustime, us_rate);
+        }
+        lenbuf *= 2;
+    }
+    if (me == 0)
+        printf("clock resolution in seconds: %10.8f\n", MPI_Wtick());
+    free(buffer);
+}