
examples

master
zeus 4 years ago
parent commit 692a64cb98
  1. README.md (103)
  2. images/swarmlab-network.png (BIN)
  3. install/usr/share/swarmlab.io/sec/project/examples/Makefile (37)
  4. install/usr/share/swarmlab.io/sec/project/examples/bcast.c (51)
  5. install/usr/share/swarmlab.io/sec/project/examples/cart.c (56)
  6. install/usr/share/swarmlab.io/sec/project/examples/cpi.c (69)
  7. install/usr/share/swarmlab.io/sec/project/examples/gather.c (59)
  8. install/usr/share/swarmlab.io/sec/project/examples/gatherv.c (121)
  9. install/usr/share/swarmlab.io/sec/project/examples/group.c (45)
  10. install/usr/share/swarmlab.io/sec/project/examples/hello.c (24)
  11. install/usr/share/swarmlab.io/sec/project/examples/max.c (53)
  12. install/usr/share/swarmlab.io/sec/project/examples/mm.c (124)
  13. install/usr/share/swarmlab.io/sec/project/examples/mpi_hello_world.c (30)
  14. install/usr/share/swarmlab.io/sec/project/examples/mv.c (58)
  15. install/usr/share/swarmlab.io/sec/project/examples/nblock.c (33)
  16. install/usr/share/swarmlab.io/sec/project/examples/nsquare.c (55)
  17. install/usr/share/swarmlab.io/sec/project/examples/pack.c (66)
  18. install/usr/share/swarmlab.io/sec/project/examples/pingpong.c (50)
  19. install/usr/share/swarmlab.io/sec/project/examples/scatter.c (63)
  20. install/usr/share/swarmlab.io/sec/project/examples/scatterv.c (112)
  21. install/usr/share/swarmlab.io/sec/project/examples/search.c (61)
  22. install/usr/share/swarmlab.io/sec/project/examples/squares.c (60)
  23. install/usr/share/swarmlab.io/sec/project/examples/srtest.c (53)
  24. install/usr/share/swarmlab.io/sec/project/examples/struct.c (66)
  25. install/usr/share/swarmlab.io/sec/project/examples/systest.c (186)
  26. install/usr/share/swarmlab.io/sec/swarmlab-sec (1)

103
README.md

@ -1,2 +1,103 @@
# hybrid-mpi
Instances of Ubuntu Linux with [MPICH](https://www.mpich.org), a portable implementation of the Message Passing Interface (MPI) standard. Designed for MPI program development and deployment.
## LabInstance mpi2
![LabInstance mpi2 network](images/swarmlab-network.png "")
## Quickstart
This is a quickstart guide on how to use this *LabInstance* to deploy MPI programs.
### How to use it
### Default Configuration
- Working Directory
> /project
- Default user
> docker
- Default password
> docker
- Default root password
> pass
#### Find lab workers
> ifconfig
>
> nmap -sP 172.130.0.0/24
>
#### Connect to workers
```bash
ssh docker@[IP]
```
#### Compile
> mpicc -o /home/ubuntu/mpi_hello_world examples/mpi_hello_world.c
#### Run the MPI hello world program
> mpirun -n 10 -f /home/ubuntu/hosts /home/ubuntu/mpi_hello_world
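The `/home/ubuntu/hosts` file passed to `mpirun` can be generated with `swarmlab-getworkers`, the same way the examples Makefile in this commit does. A minimal sketch, assuming the tool is available inside the lab instance:
```bash
# collect the addresses of the current lab workers into the hosts file
swarmlab-getworkers > /home/ubuntu/hosts
cat /home/ubuntu/hosts
```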
---
**NOTE:** copy the hello world binary to all clients before running `mpirun`
```bash
# scp hello world to all clients; use sudo -u mpi for automatic scp authentication (see /home/mpi/.ssh)
while read -r line;
do
scp /home/ubuntu/mpi_hello_world ubuntu@$line:/home/ubuntu/mpi_hello_world
done < /home/ubuntu/hosts
```
---
#### Publish Your Work
```bash
mkdir /home/ubuntu/directory
# put your files and other directories into /home/ubuntu/directory
swarmlab-publish /home/ubuntu/directory
# open the printed URL in a browser
```
---
**MORE INFO**
See the `examples` directory:
```bash
cd examples
make mpi_hello_world
make example ex=squares
...
```
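The examples Makefile in this commit also exposes the `number` (process count) and `options` variables, which can be overridden on the make command line. A small sketch, assuming the Makefile defaults shown later in this commit:
```bash
cd examples
# run the hello example on 8 processes; number=8 overrides the Makefile default of 10
make example ex=hello number=8
```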
---

BIN
images/swarmlab-network.png

Binary file not shown.

After  |  Size: 80 KiB

37
install/usr/share/swarmlab.io/sec/project/examples/Makefile

@ -0,0 +1,37 @@
DIRhosts=/home/ubuntu/hosts
DIR=/home/ubuntu
options=-env I_MPI_DEBUG=3
number=10
ex=
all: mpi_hello_world example
.PHONY: all
mpi_hello_world:
	@swarmlab-getworkers > $(DIRhosts) ;\
	mpicc ./mpi_hello_world.c -o $(DIR)/mpi_hello_world ;\
	while read line; do\
		echo "scp $(DIR)/mpi_hello_world ubuntu@$$line:$(DIR)/mpi_hello_world " ;\
		scp $(DIR)/mpi_hello_world ubuntu@$$line:$(DIR)/mpi_hello_world ;\
		echo "$$line" ;\
	done < $(DIRhosts);\
	echo "mpirun $(options) -wdir=$(DIR) -n $(number) -f $(DIRhosts) $(DIR)/mpi_hello_world" ; \
	mpirun $(options) -wdir=$(DIR) -n $(number) -f $(DIRhosts) $(DIR)/mpi_hello_world

example:
	@if [ ! -z $(ex) ]; then\
		swarmlab-getworkers > $(DIRhosts) ;\
		mpicc ./$(ex).c -o $(DIR)/$(ex) ;\
		while read line; do\
			echo "scp $(DIR)/$(ex) ubuntu@$$line:$(DIR)/$(ex) " ;\
			scp $(DIR)/$(ex) ubuntu@$$line:$(DIR)/$(ex) ;\
			echo "$$line" ;\
		done < $(DIRhosts);\
		echo "mpirun $(options) -wdir=$(DIR) -n $(number) -f $(DIRhosts) $(DIR)/$(ex)" ; \
		mpirun $(options) -wdir=$(DIR) -n $(number) -f $(DIRhosts) $(DIR)/$(ex) ; \
	else\
		echo "usage: make example ex=source file" ; \
		echo "e.g.: make example ex=mv" ; \
	fi

51
install/usr/share/swarmlab.io/sec/project/examples/bcast.c

@ -0,0 +1,51 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank;
int p, k;
int root;
int a1_local;
int a2_local;
int local_num;
int endnum;
int local_res;
int final_res;
int namelen;
char proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Get_processor_name(proc_name, &namelen);
if (my_rank == 0)
{
printf("Dose plithos arithmvn:\n");
scanf("%d", &endnum);
}
root = 0;
MPI_Bcast(&endnum, 1, MPI_INT, root, MPI_COMM_WORLD);
local_res = 0;
local_num = endnum/p;
a1_local = (my_rank * local_num) + 1;
a2_local = a1_local + local_num - 1;
for (k=a1_local; k<=a2_local; k++)
local_res = local_res + (k*k);
printf("\n Process %d on %s : local result = %d \n", my_rank, proc_name, local_res);
root = 0;
MPI_Reduce(&local_res, &final_res, 1, MPI_INT, MPI_SUM, root, MPI_COMM_WORLD);
if (my_rank == 0)
{
printf("\n Total result for N = %d is equal to : %d \n", endnum, final_res);
}
MPI_Finalize();
}

56
install/usr/share/swarmlab.io/sec/project/examples/cart.c

@ -0,0 +1,56 @@
#include "mpi.h"
#include <stdio.h>
#define SIZE 16
#define UP 0
#define DOWN 1
#define LEFT 2
#define RIGHT 3
int main(argc,argv)
int argc;
char *argv[]; {
int numtasks, rank, source, dest, outbuf, i, tag=1,
inbuf[4]={MPI_PROC_NULL,MPI_PROC_NULL,MPI_PROC_NULL,MPI_PROC_NULL,},
nbrs[4], dims[2]={4,4},
periods[2]={0,0}, reorder=0, coords[2];
MPI_Request reqs[8];
MPI_Status stats[8];
MPI_Comm cartcomm;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
if (numtasks == SIZE) {
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, reorder, &cartcomm);
MPI_Comm_rank(cartcomm, &rank);
MPI_Cart_coords(cartcomm, rank, 2, coords);
MPI_Cart_shift(cartcomm, 0, 1, &nbrs[UP], &nbrs[DOWN]);
MPI_Cart_shift(cartcomm, 1, 1, &nbrs[LEFT], &nbrs[RIGHT]);
outbuf = rank;
for (i=0; i<4; i++) {
dest = nbrs[i];
source = nbrs[i];
MPI_Isend(&outbuf, 1, MPI_INT, dest, tag,
MPI_COMM_WORLD, &reqs[i]);
MPI_Irecv(&inbuf[i], 1, MPI_INT, source, tag,
MPI_COMM_WORLD, &reqs[i+4]);
}
MPI_Waitall(8, reqs, stats);
printf("rank= %d coords= %d %d neighbors(u,d,l,r)= %d %d %d %d\n",
rank,coords[0],coords[1],nbrs[UP],nbrs[DOWN],nbrs[LEFT],
nbrs[RIGHT]);
printf("rank= %d inbuf(u,d,l,r)= %d %d %d %d\n",
rank,inbuf[UP],inbuf[DOWN],inbuf[LEFT],inbuf[RIGHT]);
}
else
printf("Must specify %d processors. Terminating.\n",SIZE);
MPI_Finalize();
}

69
install/usr/share/swarmlab.io/sec/project/examples/cpi.c

@ -0,0 +1,69 @@
#include "mpi.h"
#include <stdio.h>
#include <math.h>
double f( double );
double f( double a)
{
return (4.0 / (1.0 + a*a));
}
int main( int argc, char *argv[] )
{
int done = 0, n, myid, numprocs, i;
double PI25DT = 3.141592653589793238462643;
double mypi, pi, h, sum, x;
double startwtime=0.0, endwtime;
int namelen;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Get_processor_name(processor_name,&namelen);
fprintf(stderr,"Process %d on %s\n",
myid, processor_name);
while (!done)
{
if (myid == 0)
{
printf("Enter the number of intervals (0 quits):\n");
scanf("%d",&n);
startwtime = MPI_Wtime();
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (n == 0)
done = 1;
else
{
h = 1.0 / (double) n;
sum = 0.0;
for (i = myid + 1; i <= n; i += numprocs)
{
x = h * ((double)i - 0.5);
sum += f(x);
}
mypi = h * sum;
MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (myid == 0)
{
printf("pi is approximately %.16f, Error is %.16f\n",
pi, fabs(pi - PI25DT));
endwtime = MPI_Wtime();
printf("wall clock time = %f\n",
endwtime-startwtime);
}
}
}
MPI_Finalize();
}

59
install/usr/share/swarmlab.io/sec/project/examples/gather.c

@ -0,0 +1,59 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank;
int p, k, count;
int root;
int a1_local;
int a2_local;
int local_num;
int endnum;
int local_res[50];
int final_res[50];
int namelen;
char proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Get_processor_name(proc_name, &namelen);
if (my_rank == 0)
{
printf("Dose plithos arithmvn:\n");
scanf("%d", &endnum);
}
root = 0;
MPI_Bcast(&endnum, 1, MPI_INT, root, MPI_COMM_WORLD);
local_num = endnum/p;
a1_local = (my_rank * local_num) + 1;
a2_local = a1_local + local_num - 1;
count=0;
for (k=a1_local; k<=a2_local; k++)
{
local_res[count] = (k*k);
count++;
}
printf("\n Process %d on %s : local squares are : ", my_rank, proc_name);
for (k=0; k<local_num; k++) printf("%d ", local_res[k]);
printf("\n\n");
root = 0;
MPI_Gather(local_res, local_num, MPI_INT, final_res, local_num, MPI_INT, root, MPI_COMM_WORLD);
if (my_rank == 0)
{
printf("\n The %d squares are the following: ", endnum);
for (k=0; k<endnum; k++) printf("%d ",final_res[k]);
printf("\n\n");
}
MPI_Finalize();
}

121
install/usr/share/swarmlab.io/sec/project/examples/gatherv.c

@ -0,0 +1,121 @@
/*******************************************************************
*
* A similar example that uses MPI_Scatterv and MPI_Gatherv
*
* The program should be run with exactly 8 processes.
* Process 0 initializes a matrix (x) of 8 by 8 integers and distributes
* the lower triangular part of the matrix to the processes using
* MPI_Scatterv. The processes receive different numbers of elements:
* process 0 gets one element, process 1 gets 2 elements and process i
* gets i+1 elements. The elements of all processes are then gathered
* (using MPI_Gatherv) again in another 8 by 8 matrix (res) of process 0
* (which is initialized with zeros) in triangular shape.
*
* ********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define MAXPROC 8 /* Max number of processes */
#define LENGTH 8 /* Size of matrix is LENGTH * LENGTH */
main(int argc, char* argv[]) {
int i, j, np, me;
const int root = 0; /* Root process in scatter */
int x[LENGTH][LENGTH]; /* Send buffer */
int y[LENGTH]; /* Receive buffer */
int res[LENGTH][LENGTH]; /* Final receive buffer */
int *sendcount, *recvcount; /* Arrays for sendcounts and recvcounts */
int *displs1, *displs2; /* Arrays for displacements */
MPI_Init(&argc, &argv); /* Initialize MPI */
MPI_Comm_size(MPI_COMM_WORLD, &np); /* Get nr of processes */
MPI_Comm_rank(MPI_COMM_WORLD, &me); /* Get own identifier */
/* Check that we have one process for each row in the matrix */
if (np != LENGTH) {
if (me == 0) {
printf("You have to use %d processes\n", LENGTH);
}
MPI_Finalize();
exit(0);
}
/* Allocate memory for the sendcount and displacements arrays */
sendcount = (int *)malloc(LENGTH*sizeof(int));
displs1 = (int *)malloc(LENGTH*sizeof(int));
/* Initialize sendcount and displacements arrays */
for (i=0; i<LENGTH; i++) {
sendcount[i] = i+1;
displs1[i] = i*LENGTH;
}
if (me == 0) { /* Process 0 does this */
/* Initialize the matrix x with values 0 .. LENGTH*LENGTH-1 */
for (i=0; i<LENGTH; i++) {
for (j=0; j<LENGTH; j++) {
x[i][j] = i*LENGTH+j;
}
}
/* Print out the matrix before it is distributed */
printf("The initial matrix is\n");
for (i=0; i<LENGTH; i++) {
for (j=0; j<LENGTH; j++) {
printf("%4d ", x[i][j]);
}
printf("\n");
}
}
/* Scatter the lower triangular part of array x to all proceses, place it in y */
MPI_Scatterv(&x, sendcount, displs1, MPI_INT, &y, sendcount[me], MPI_INT, root, MPI_COMM_WORLD);
if (me==0) {
/* Initialize the result matrix res with values 0 */
for (i=0; i<LENGTH; i++) {
for (j=0; j<LENGTH; j++) {
res[i][j] = 0;
}
}
/* Print out the result matrix res before gathering */
printf("The result matrix before gathering is\n");
for (i=0; i<LENGTH; i++) {
for (j=0; j<LENGTH; j++) {
printf("%4d ", res[i][j]);
}
printf("\n");
}
}
/* Allocate memory for the recvcount and displacements arrays */
recvcount = (int *)malloc(LENGTH*sizeof(int));
displs2 = (int *)malloc(LENGTH*sizeof(int));
for (i=0; i<LENGTH; i++) {
recvcount[i] = i+1;
displs2[i] = i*LENGTH;
}
/* Gather the local elements of each process to form a triangular matrix in the root */
MPI_Gatherv(&y, recvcount[me], MPI_INT, &res, recvcount, displs2, MPI_INT, root, MPI_COMM_WORLD);
if (me == 0) { /* Process 0 does this */
/* Print out the result matrix after gathering */
printf("The result matrix after gathering is\n");
for (i=0; i<LENGTH; i++) {
for (j=0; j<LENGTH; j++) {
printf("%4d ", res[i][j]);
}
printf("\n");
}
}
MPI_Finalize();
}

45
install/usr/share/swarmlab.io/sec/project/examples/group.c

@ -0,0 +1,45 @@
#include "mpi.h"
#include <stdio.h>
#define NPROCS 8
int main(argc,argv)
int argc;
char *argv[]; {
int rank, new_rank, sendbuf, recvbuf, numtasks,
ranks1[4]={0,1,2,3}, ranks2[4]={4,5,6,7};
MPI_Group orig_group, new_group;
MPI_Comm new_comm;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
if (numtasks != NPROCS) {
printf("Must specify MP_PROCS= %d. Terminating.\n",NPROCS);
MPI_Finalize();
return 1; /* do not continue: no MPI calls are allowed after MPI_Finalize */
}
sendbuf = rank;
/* Extract the original group handle */
MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
/* Divide tasks into two distinct groups based upon rank */
if (rank < NPROCS/2) {
MPI_Group_incl(orig_group, NPROCS/2, ranks1, &new_group);
}
else {
MPI_Group_incl(orig_group, NPROCS/2, ranks2, &new_group);
}
/* Create new new communicator and then perform collective communications */
MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);
MPI_Allreduce(&sendbuf, &recvbuf, 1, MPI_INT, MPI_SUM, new_comm);
MPI_Group_rank (new_group, &new_rank);
printf("rank= %d newrank= %d recvbuf= %d\n",rank,new_rank,recvbuf);
MPI_Finalize();
}

24
install/usr/share/swarmlab.io/sec/project/examples/hello.c

@ -0,0 +1,24 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank;
int size;
int namelen;
char proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(proc_name, &namelen);
if (my_rank == 2)
printf("Hello – I am process 2\n");
else
printf("Hello from process %d of %d on %s\n", my_rank, size, proc_name);
MPI_Finalize();
}

53
install/usr/share/swarmlab.io/sec/project/examples/max.c

@ -0,0 +1,53 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank;
int k, p;
int threshold;
int root;
int local_max;
int total_max;
int data[10];
int namelen;
char proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Get_processor_name(proc_name, &namelen);
for (k=0; k<10; k++)
data[k]=my_rank+k;
if (my_rank == 0)
{
printf("Dose ena orio anazitisis:\n");
scanf("%d", &threshold);
}
root = 0;
MPI_Bcast(&threshold, 1, MPI_INT, root, MPI_COMM_WORLD);
local_max = 0;
for (k=0; k<10; k++)
{
if ((data[k] > threshold) && (data[k] > local_max)) local_max=data[k];
}
printf("\n Process %d on %s : local maximum = %d \n", my_rank, proc_name, local_max);
root = 0;
MPI_Reduce(&local_max, &total_max, 1, MPI_INT, MPI_MAX, root, MPI_COMM_WORLD);
if (my_rank == 0)
{
if (total_max > threshold)
printf("\n Maximum number found for threshold %d is equal to : %d \n", threshold, total_max);
else
printf("\n No number found greater than threshold %d \n", threshold);
}
MPI_Finalize();
}

124
install/usr/share/swarmlab.io/sec/project/examples/mm.c

@ -0,0 +1,124 @@
/******************************************************************************
* MPI Matrix Multiply - C Version
* FILE: mpi_mm.c
* OTHER FILES: make.mpi_mm.c
* DESCRIPTION:
*   In this code, the master task distributes a matrix multiply
*   operation to numtasks-1 worker tasks.
*   NOTE1: C and Fortran versions of this code differ because of the way
*   arrays are stored/passed. C arrays are row-major order but Fortran
*   arrays are column-major order.
* AUTHOR: Ros Leibensperger / Blaise Barney
* CONVERTED TO MPI: George L. Gusciora (1/25/95)
******************************************************************************/
#include "mpi.h"
#include <stdio.h>
#define NRA 62 /* number of rows in matrix A */
#define NCA 15 /* number of columns in matrix A */
#define NCB 7 /* number of columns in matrix B */
#define MASTER 0 /* taskid of first task */
#define FROM_MASTER 1 /* setting a message type */
#define FROM_WORKER 2 /* setting a message type */
int main(argc,argv)
int argc;
char *argv[];
{
int numtasks, /* number of tasks in partition */
taskid, /* a task identifier */
numworkers, /* number of worker tasks */
source, /* task id of message source */
dest, /* task id of message destination */
nbytes, /* number of bytes in message */
mtype, /* message type */
rows, /* rows of matrix A sent to each worker */
averow, extra, offset,
/* used to determine rows sent to each worker */
i, j, k, rc; /* misc */
double a[NRA][NCA], /* matrix A to be multiplied */
b[NCA][NCB], /* matrix B to be multiplied */
c[NRA][NCB]; /* result matrix C */
MPI_Status status;
rc = MPI_Init(&argc,&argv);
rc|= MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
rc|= MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
if (rc != 0)
printf ("error initializing MPI and obtaining task ID information\n");
else
printf ("mpi_pi_mm MPI task ID = %d\n", taskid);
numworkers = numtasks-1;
/**************************** master task ************************************/
if (taskid == MASTER)
{
printf("Number of worker tasks = %d\n",numworkers);
for (i=0; i<NRA; i++)
for (j=0; j<NCA; j++)
a[i][j]= i+j;
for (i=0; i<NCA; i++)
for (j=0; j<NCB; j++)
b[i][j]= i*j;
/* send matrix data to the worker tasks */
averow = NRA/numworkers;
extra = NRA%numworkers;
offset = 0;
mtype = FROM_MASTER;
for (dest=1; dest<=numworkers; dest++)
{
rows = (dest <= extra) ? averow+1 : averow;
printf(" sending %d rows to task %d\n",rows,dest);
MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype,
MPI_COMM_WORLD);
MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
offset = offset + rows;
}
/* wait for results from all worker tasks */
mtype = FROM_WORKER;
for (i=1; i<=numworkers; i++)
{
source = i;
MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status);
}
/* print results */
printf("Here is the result matrix\n");
for (i=0; i<NRA; i++)
{
printf("\n");
for (j=0; j<NCB; j++)
printf("%6.2f ", c[i][j]);
}
printf ("\n");
}
/**************************** worker task ************************************/
if (taskid > MASTER)
{
mtype = FROM_MASTER;
MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&a, rows*NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&b, NCA*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
for (k=0; k<NCB; k++)
for (i=0; i<rows; i++)
{
c[i][k] = 0.0;
for (j=0; j<NCA; j++)
c[i][k] = c[i][k] + a[i][j] * b[j][k];
}
mtype = FROM_WORKER;
MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
MPI_Send(&c, rows*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
}
MPI_Finalize();
}

30
install/usr/share/swarmlab.io/sec/project/examples/mpi_hello_world.c

@ -0,0 +1,30 @@
#include <mpi.h>
#include <stdio.h>
int main(int argc, char** argv) {
// Initialize the MPI environment. The two arguments to MPI_Init are not
// currently used by MPI implementations, but are there in case future
// implementations might need the arguments.
MPI_Init(NULL, NULL);
// Get the number of processes
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
// Get the rank of the process
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
// Get the name of the processor
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
// Print off a hello world message
printf("Hello world from processor %s, rank %d out of %d processors\n",
processor_name, world_rank, world_size);
// Finalize the MPI environment. No more MPI calls can be made after this
MPI_Finalize();
}

58
install/usr/share/swarmlab.io/sec/project/examples/mv.c

@ -0,0 +1,58 @@
#include <stdio.h>
#include <mpi.h>
#define N 10
main(int argc, char **argv) {
int j,k,A_loc;
int rank,size,root;
int A[N];
int B_loc[N];
int C[N];
int B[N][N];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
root = 0;
/* Initialize matrices B and C */
if (rank == root)
{
for (k=0; k<N; k++)
for (j=0; j<N; j++)
B[k][j] = k+j;
for (k=0; k<N; k++)
C[k] = k;
}
/* Distribute matrix B by rows using MPI_Scatter */
MPI_Scatter(B, N, MPI_INT, B_loc, N, MPI_INT, root, MPI_COMM_WORLD);
/* Broadcast vector C to every process using MPI_Bcast */
MPI_Bcast(C, N, MPI_INT, root, MPI_COMM_WORLD);
/* Do the local multiplications and additions needed to compute this process's element of vector A */
A_loc = 0;
for(j=0; j<N; j++)
A_loc += C[j]*B_loc[j];
/* and gather the final results with a single MPI_Gather */
MPI_Gather(&A_loc, 1, MPI_INT, A, 1, MPI_INT, root, MPI_COMM_WORLD);
/* print the final result */
if (rank == 0)
{
for (k=0; k<N; k++)
printf("A[%d]=%d\n", k, A[k]);
}
MPI_Finalize();
}

33
install/usr/share/swarmlab.io/sec/project/examples/nblock.c

@ -0,0 +1,33 @@
#include "mpi.h"
#include <stdio.h>
int main(argc,argv)
int argc;
char *argv[]; {
int numtasks, rank, next, prev, buf[2], tag1=1, tag2=2;
MPI_Request reqs[4];
MPI_Status stats[4];
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
prev = rank-1;
next = rank+1;
if (rank == 0) prev = numtasks - 1;
if (rank == (numtasks - 1)) next = 0;
MPI_Irecv(&buf[0], 1, MPI_INT, prev, tag1, MPI_COMM_WORLD, &reqs[0]);
MPI_Irecv(&buf[1], 1, MPI_INT, next, tag2, MPI_COMM_WORLD, &reqs[1]);
MPI_Isend(&rank, 1, MPI_INT, prev, tag2, MPI_COMM_WORLD, &reqs[2]);
MPI_Isend(&rank, 1, MPI_INT, next, tag1, MPI_COMM_WORLD, &reqs[3]);
MPI_Waitall(4, reqs, stats);
printf("Process %d: Parelava ta stoixeia %d and %d\n", rank, buf[0], buf[1]);
MPI_Finalize();
}

55
install/usr/share/swarmlab.io/sec/project/examples/nsquare.c

@ -0,0 +1,55 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv) {
int my_rank;
int p,k,res,finres,a1,b1,num;
int source;
int target;
int tag1 = 50;
int tag2 = 60;
int endnum;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
if (my_rank == 0) {
printf("Dose plithos aritmon:\n");
scanf("%d", &endnum);
for (target = 1; target < p; target++)
MPI_Send(&endnum, 1, MPI_INT, target, tag1, MPI_COMM_WORLD);
}
else
MPI_Recv(&endnum, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
res = 0;
num = endnum/p;
a1 = (my_rank * num) + 1;
b1 = a1 + num - 1;
for (k=a1; k<=b1; k++)
res = res + (k*k);
if (my_rank != 0) {
MPI_Send(&res, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD);
}
else {
finres = res;
printf("\n Apotelesma of process %d: %d\n", my_rank, res);
for (source = 1; source < p; source++) {
MPI_Recv(&res, 1, MPI_INT, source, tag2, MPI_COMM_WORLD, &status);
finres = finres + res;
printf("\n Apotelesma of process %d: %d\n", source, res);
}
printf("\n\n\n Teliko Apotelesma: %d\n", finres);
}
MPI_Finalize();
}

66
install/usr/share/swarmlab.io/sec/project/examples/pack.c

@ -0,0 +1,66 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank, p, k, size;
float b;
int root, position;
char buffer[50];
int matrixA[100];
int loc_num;
int loc_matrix[100];
float loc_res[100];
float final_res[100];
int namelen;
char proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Get_processor_name(proc_name, &namelen);
if (my_rank == 0)
{
printf("YPOLOGISMOS THS PARASTASHS b * A \n\n");
printf("DOSE THN TIMH TOY B:\n");
scanf("%f", &b);
printf("DOSE TO MHKOS TOY PINAKA A:\n");
scanf("%d", &size);
printf("DOSE TA STOIXEIA TOY PINAKA A MHKOYS %d:\n", size);
for (k=0; k<size; k++)
scanf("%d", &matrixA[k]);
position = 0;
MPI_Pack(&size, 1, MPI_INT, buffer, 50, &position, MPI_COMM_WORLD);
MPI_Pack(&b, 1, MPI_FLOAT, buffer, 50, &position, MPI_COMM_WORLD);
}
root = 0;
MPI_Bcast(buffer, 50, MPI_PACKED, root, MPI_COMM_WORLD);
position = 0;
MPI_Unpack(buffer, 50, &position, &size, 1, MPI_INT, MPI_COMM_WORLD);
MPI_Unpack(buffer, 50, &position, &b, 1, MPI_FLOAT, MPI_COMM_WORLD);
loc_num = size/p;
root = 0;
MPI_Scatter(matrixA, loc_num, MPI_INT, loc_matrix, loc_num, MPI_INT, root, MPI_COMM_WORLD);
for (k=0; k<loc_num; k++)
loc_res[k] = b*loc_matrix[k];
printf("\n Process %d on %s : local results are : ", my_rank, proc_name);
for (k=0; k<loc_num; k++) printf("%f ", loc_res[k]);
printf("\n\n");
root = 0;
MPI_Gather(loc_res, loc_num, MPI_FLOAT, final_res, loc_num, MPI_FLOAT, root, MPI_COMM_WORLD); /* loc_res holds floats, so the send type must be MPI_FLOAT */
if (my_rank == 0)
{
printf("\n TELIKO APOTELESMA %f * A =\n", b);
for (k=0; k<size; k++) printf("%f ", final_res[k]);
printf("\n\n");
}
MPI_Finalize();
}

50
install/usr/share/swarmlab.io/sec/project/examples/pingpong.c

@ -0,0 +1,50 @@
#include <stdio.h>
#include "mpi.h"
int main(argc,argv)
int argc;
char *argv[];
{
int numtasks, rank, dest, source, tag = 1;
char msg1[15], msg2[15];
MPI_Status stat;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
sprintf(msg1, "Sample message");
if (rank == 0) {
dest = 1;
source = 1;
while (1) {
MPI_Send(msg1, 15, MPI_CHAR, dest, tag,
MPI_COMM_WORLD);
sprintf(msg1, "\0");
MPI_Recv(msg1, 15, MPI_CHAR, source, tag,
MPI_COMM_WORLD, &stat);
printf("Process %d Message = %s \n", rank, msg1);
sleep(2);
}
}
else if (rank == 1) {
dest = 0;
source = 0;
while (1) {
sprintf(msg2, "\0");
MPI_Recv(msg2, 15, MPI_CHAR, source, tag,
MPI_COMM_WORLD, &stat);
printf("Process %d Message = %s \n", rank, msg2);
sleep(2);
MPI_Send(msg2, 15, MPI_CHAR, dest, tag,
MPI_COMM_WORLD);
}
}
MPI_Finalize();
}

63
install/usr/share/swarmlab.io/sec/project/examples/scatter.c

@ -0,0 +1,63 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank;
int p, k;
int b, size;
int root;
int matrixA[100];
int loc_num;
int loc_matrix[100];
int loc_res[100];
int final_res[100];
int namelen;
char proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Get_processor_name(proc_name, &namelen);
if (my_rank == 0)
{
printf("YPOLOGISMOS THS PARASTASHS b * A \n\n");
printf("DOSE THN TIMH TOY B:\n");
scanf("%d", &b);
printf("DOSE TO MHKOS TOY PINAKA A:\n");
scanf("%d", &size);
printf("DOSE TA STOIXEIA TOY PINAKA A MHKOYS %d:\n", size);
for (k=0; k<size; k++)
scanf("%d", &matrixA[k]);
}
root = 0;
MPI_Bcast(&size, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&b, 1, MPI_INT, root, MPI_COMM_WORLD);
loc_num = size/p;
root = 0;
MPI_Scatter(matrixA, loc_num, MPI_INT, loc_matrix, loc_num, MPI_INT, root, MPI_COMM_WORLD);
for (k=0; k<loc_num; k++)
loc_res[k] = b*loc_matrix[k];
printf("\n Process %d on %s : local results are : ", my_rank, proc_name);
for (k=0; k<loc_num; k++) printf("%d ", loc_res[k]);
printf("\n\n");
root = 0;
MPI_Gather(loc_res, loc_num, MPI_INT, final_res, loc_num, MPI_INT, root, MPI_COMM_WORLD);
if (my_rank == 0)
{
printf("\n TELIKO APOTELESMA %d * A =\n", b);
for (k=0; k<size; k++) printf("%d ", final_res[k]);
printf("\n\n");
}
MPI_Finalize();
}

112
install/usr/share/swarmlab.io/sec/project/examples/scatterv.c

@ -0,0 +1,112 @@
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define MAXPROC 8 /* Max number of processes */
#define NAMELEN 80 /* Max length of machine name */
#define LENGTH 8 /* Size of matrix is LENGTH * LENGTH */
main(int argc, char* argv[]) {
int i, j, np, me, count;
const int nametag = 42; /* Tag value for sending name */
const int datatag = 43; /* Tag value for sending data */
const int root = 0; /* Root process in scatter */
MPI_Status status; /* Status object for receive */
char myname[NAMELEN]; /* Local host name string */
char hostname[MAXPROC][NAMELEN]; /* Received host names */
int x[LENGTH][LENGTH]; /* Send buffer */
int y[LENGTH]; /* Receive buffer */
int *sendcount, *displs; /* Arrays for sendcounts and displacements */
MPI_Init(&argc, &argv); /* Initialize MPI */
MPI_Comm_size(MPI_COMM_WORLD, &np); /* Get nr of processes */
MPI_Comm_rank(MPI_COMM_WORLD, &me); /* Get own identifier */
gethostname(myname, NAMELEN); /* Get host name */
/* Check that we have one process for each row in the matrix */
if (np != LENGTH) {
if (me == 0) {
printf("You have to use %d processes\n", LENGTH);
}
MPI_Finalize();
exit(0);
}
/* Allocate memory for the sendcount and displacements arrays */
sendcount = (int *)malloc(LENGTH*sizeof(int));
displs = (int *)malloc(LENGTH*sizeof(int));
/* Initialize sendcount and displacements arrays */
for (i=0; i<LENGTH; i++) {
sendcount[i] = i+1;
displs[i] = i*LENGTH;
}
if (me == 0) { /* Process 0 does this */
/* Initialize the matrix x with values 0 .. LENGTH*LENGTH-1 */
for (i=0; i<LENGTH; i++) {
for (j=0; j<LENGTH; j++) {
x[i][j] = i*LENGTH+j;
}
}
/* Print out the matrix before it is distributed */
printf("The matrix is\n");
for (i=0; i<LENGTH; i++) {
for (j=0; j<LENGTH; j++) {
printf("%4d ", x[i][j]);
}
printf("\n");
}
printf("\nProcess %d on host %s is scatteringing the lower triangular part of the matrix x to all %d processes\n\n", me, myname, np);
/* Scatter the lower triangular part of array x to all proceses, place it in y */
MPI_Scatterv(&x, sendcount, displs, MPI_INT, &y, LENGTH, MPI_INT, root, MPI_COMM_WORLD);
/* Process zero prints out own portion of the scattered array */
for (i=0; i<sendcount[me]; i++) {
printf("%4d", y[i]);
}
printf(" ");
printf(" from process %d on host %s\n", me, myname);
/* Receive messages with hostname and the scattered data */
/* from all other processes */
for (i=1; i<np; i++) {
MPI_Recv (&hostname[i], NAMELEN, MPI_CHAR, i, nametag, MPI_COMM_WORLD, \
&status);
MPI_Recv (&y, LENGTH, MPI_INT, i, datatag, MPI_COMM_WORLD, &status);
MPI_Get_count(&status, MPI_INT, &count); /* Get nr of elements in message */
/* Print out the elements we received from this process */
for (j=0; j<count; j++) {
printf("%4d", y[j]);
}
for (j=0; j<LENGTH-count; j++) { /* Format output to look like a matrix */
printf(" ");
}
printf(" from process %d on host %s\n", i, hostname[i]);
}
printf("Ready\n");
} else { /* all other processes do this */
/* Receive the scattered matrix from process 0, place it in array y */
MPI_Scatterv(&x, sendcount, displs, MPI_INT, &y, LENGTH, MPI_INT, root, MPI_COMM_WORLD);
/* Send own name back to process 0 */
MPI_Send (&myname, NAMELEN, MPI_CHAR, 0, nametag, MPI_COMM_WORLD);
/* Send the received array back to process 0 */
MPI_Send (&y, sendcount[me], MPI_INT, 0, datatag, MPI_COMM_WORLD);
}
MPI_Finalize();
}

61
install/usr/share/swarmlab.io/sec/project/examples/search.c

@ -0,0 +1,61 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank, size;
int source, dest;
int tag1= 50;
int tag2 = 60;
int tag3 =70;
int found = 0;
int other_found;
int k, code;
int data[10];
int namelen;
MPI_Status status;
char proc_name[MPI_MAX_PROCESSOR_NAME];
char other_proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(proc_name, &namelen);
for (k=0; k<10; k++)
data[k]=my_rank+k;
if (my_rank == 0)
{
printf("Dose ena kodiko anazitisis:\n");
scanf("%d", &code);
for (dest =1; dest<size; dest++)
MPI_Send(&code, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
}
else
{
MPI_Recv(&code, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
}
for (k=0; k<10; k++)
{ if (data[k] == code) found=1; }
if (my_rank != 0)
{ MPI_Send(&found, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD);
MPI_Send(proc_name, namelen+1, MPI_CHAR, 0, tag3, MPI_COMM_WORLD); }
if (my_rank == 0) {
for (source=1; source<size; source++) {
MPI_Recv(&other_found, 1, MPI_INT, source, tag2, MPI_COMM_WORLD, &status);
MPI_Recv(other_proc_name, 50, MPI_CHAR, source, tag3, MPI_COMM_WORLD, &status);
if (other_found)
printf("\n Code %d found in database of process %d on %s \n", code, source, other_proc_name);
}
if (found)
printf("\n Code %d found in database of process %d on %s \n", code, my_rank, proc_name);
}
MPI_Finalize();
}

60
install/usr/share/swarmlab.io/sec/project/examples/squares.c

@ -0,0 +1,60 @@
#include <stdio.h>
#include "mpi.h"
main(int argc, char** argv)
{
int my_rank;
int p,k,res,finres,num;
int source,target;
int tag1=50, tag2=60, tag3=70;
int plithos;
int data[100];
int data_loc[100];
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
if (my_rank == 0) {
printf("Dose plithos aritmon:\n");
scanf("%d", &plithos);
printf("Dose tous %d arithmous:\n", plithos);
for (k=0; k<plithos; k++)
scanf("%d", &data[k]);
for (target = 1; target < p; target++)
MPI_Send(&plithos, 1, MPI_INT, target, tag1, MPI_COMM_WORLD);
num = plithos/p; k=num;
for (target = 1; target < p; target++) {
MPI_Send(&data[k], num, MPI_INT, target, tag2, MPI_COMM_WORLD);
k+=num; }
for (k=0; k<num; k++)
data_loc[k]=data[k];
}
else {
MPI_Recv(&plithos, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
num = plithos/p;
MPI_Recv(&data_loc[0], num, MPI_INT, 0, tag2, MPI_COMM_WORLD, &status);
}
res = 0;
for (k=0; k<num; k++)
res = res + (data_loc[k]*data_loc[k]);
if (my_rank != 0) {
MPI_Send(&res, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD);
}
else {
finres = res;
printf("\n Apotelesma of process %d: %d\n", my_rank, res);
for (source = 1; source < p; source++) {
MPI_Recv(&res, 1, MPI_INT, source, tag3, MPI_COMM_WORLD, &status);
finres = finres + res;
printf("\n Apotelesma of process %d: %d\n", source, res);
}
printf("\n\n\n Teliko Apotelesma: %d\n", finres);
}
MPI_Finalize();
}

53
install/usr/share/swarmlab.io/sec/project/examples/srtest.c

@ -0,0 +1,53 @@
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#define BUFLEN 512
int main(int argc, char *argv[])
{
int myid, numprocs, next, namelen;
char buffer[BUFLEN], processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Status status;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Get_processor_name(processor_name,&namelen);
fprintf(stderr,"Process %d on %s\n", myid, processor_name);
strcpy(buffer,"hello there");
if (myid == numprocs-1)
next = 0;
else
next = myid+1;
if (myid == 0)
{
printf("%d sending '%s' \n",myid,buffer);
MPI_Send(buffer, strlen(buffer)+1, MPI_CHAR, next, 99, MPI_COMM_WORLD);
MPI_Send(buffer, strlen(buffer)+1, MPI_CHAR, MPI_PROC_NULL, 299, MPI_COMM_WORLD);
printf("%d receiving \n",myid);
MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99, MPI_COMM_WORLD,
&status);
printf("%d received '%s' \n",myid,buffer);
/* mpdprintf(001,"%d receiving \n",myid); */
}
else
{
printf("%d receiving \n",myid);
MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99, MPI_COMM_WORLD,
&status);
MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_PROC_NULL, 299, MPI_COMM_WORLD,
&status);
printf("%d received '%s' \n",myid,buffer);
/* mpdprintf(001,"%d receiving \n",myid); */
MPI_Send(buffer, strlen(buffer)+1, MPI_CHAR, next, 99, MPI_COMM_WORLD);
printf("%d sent '%s' \n",myid,buffer);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return (0);
}

66
install/usr/share/swarmlab.io/sec/project/examples/struct.c

@ -0,0 +1,66 @@
#include "mpi.h"
#include <stdio.h>
#define NELEM 25
int main(argc,argv)
int argc;
char *argv[];
{
int numtasks, rank, source=0, dest, tag=1, i;
typedef struct {
float x, y, z;
float velocity;
int n, type;
} Particle;
Particle p[NELEM], particles[NELEM];
MPI_Datatype particletype, oldtypes[2];
int blockcounts[2];
/* MPI_Aint type used to be consistent with syntax of */
/* MPI_Type_extent routine */
MPI_Aint offsets[2], extent;
MPI_Status stat;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
/* Setup description of the 4 MPI_FLOAT fields x, y, z, velocity */
offsets[0] = 0;
oldtypes[0] = MPI_FLOAT;
blockcounts[0] = 4;
/* Setup description of the 2 MPI_INT fields n, type */
/* Need to first figure offset by getting size of MPI_FLOAT */
MPI_Type_extent(MPI_FLOAT, &extent);
offsets[1] = 4 * extent;
oldtypes[1] = MPI_INT;
blockcounts[1] = 2;
/* Now define structured type and commit it */
MPI_Type_struct(2, blockcounts, offsets, oldtypes, &particletype);
MPI_Type_commit(&particletype);
/* Initialize the particle array and then send it to each task */
if (rank == 0) {
for (i=0; i<NELEM; i++) {
particles[i].x = i * 1.0;
particles[i].y = i * -1.0;
particles[i].z = i * 1.0;
particles[i].velocity = 0.25;
particles[i].n = i;
particles[i].type = i % 2;
}
for (i=0; i<numtasks; i++)
MPI_Send(particles, NELEM, particletype, i, tag, MPI_COMM_WORLD);
}
MPI_Recv(p, NELEM, particletype, source, tag, MPI_COMM_WORLD, &stat);
/* Print a sample of what was received */
printf("rank= %d %3.2f %3.2f %3.2f %3.2f %d %d\n", rank,p[3].x,
p[3].y,p[3].z,p[3].velocity,p[3].n,p[3].type);
MPI_Finalize(); }

186
install/usr/share/swarmlab.io/sec/project/examples/systest.c

@ -0,0 +1,186 @@
#include "mpi.h"
#include <stdio.h>
/* stdlib is needed for declaring malloc */
#include <stdlib.h>
#define MAX2(a,b) (((a)>(b)) ? (a) : (b))
int GlobalReadInteger( void );
void Hello( void );
void Ring( void );
/*
* void Stress( void );
* void Globals( void );
* */
int main(int argc, char *argv[])
{
int me, option, namelen, size;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD,&me);
MPI_Comm_size(MPI_COMM_WORLD,&size);
if (size < 2) {
fprintf(stderr, "systest requires at least 2 processes" );
MPI_Abort(MPI_COMM_WORLD,1);
}
MPI_Get_processor_name(processor_name,&namelen);
fprintf(stderr,"Process %d is alive on %s\n",
me, processor_name);
while (1) {
MPI_Barrier(MPI_COMM_WORLD);
again:
if (me == 0) {
/* Read user input for action */
/* (void) printf("\nOptions: 0=quit, 1=Hello, 2=Ring, 3=Stress, ");
* (void) printf("4=Globals : "); */
(void) printf("\nOptions: 0=quit, 1=Hello, 2=Ring : ");
(void) fflush(stdout);
}
option = GlobalReadInteger();
if ( (option < 0) || (option > 4) )
goto again;
switch (option) {
case 0:
MPI_Finalize();
return(0);
case 1:
Hello(); break;
case 2:
Ring(); break;
/*
* case 3:
* Stress(); break;
* case 4:
* Globals(); break;
* */
default:
fprintf(stderr,"systest: invalid option %d\n", option); break;
}
}
}
int GlobalReadInteger( void )
/*
* Process zero reads an integer from stdin and broadcasts
* to everyone else
* */
{
int me, value;
MPI_Comm_rank(MPI_COMM_WORLD, &me);
if (me == 0) {
if (scanf("%d", &value) != 1)
fprintf(stderr,"failed reading integer value from stdin\n");
}
MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
return value;
}
void Hello( void )
/*
* Everyone exchanges a hello message with everyone else.
* The hello message just comprises the sending and target nodes.
* */
{
int nproc, me;
int type = 1;
int buffer[2], node;
MPI_Status status;
MPI_Comm_rank(MPI_COMM_WORLD, &me);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
if (me == 0) {
printf("\nHello test ... show network integrity\n----------\n\n");
fflush(stdout);
}
for (node = 0; node<nproc; node++) {
if (node != me) {
buffer[0] = me;
buffer[1] = node;
MPI_Send(buffer, 2, MPI_INT, node, type, MPI_COMM_WORLD);
MPI_Recv(buffer, 2, MPI_INT, node, type, MPI_COMM_WORLD, &status);
if ( (buffer[0] != node) || (buffer[1] != me) ) {
(void) fprintf(stderr, "Hello: %d!=%d or %d!=%d\n",
buffer[0], node, buffer[1], me);
printf("Mismatch on hello process ids; node = %d\n", node);
}
printf("Hello from %d to %d\n", me, node);
fflush(stdout);
}
}
}
void Ring( void ) /* Time passing a message round a ring */
{
int nproc, me;
MPI_Status status;
int type = 4;
int left, right;
char *buffer;
int lenbuf, max_len;
double us_rate;
double start_ustime, used_ustime;
MPI_Comm_rank(MPI_COMM_WORLD, &me);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
left = (me + nproc - 1) % nproc;
right = (me + 1) % nproc;
/* Find out how big a message to use */
if (me == 0) {
(void) printf("\nRing test...time network performance\n---------\n\n");
(void) printf("Input maximum message size: ");
(void) fflush(stdout);
}
max_len = GlobalReadInteger();
if ( (max_len <= 0) || (max_len >= 4*1024*1024) )
max_len = 512*1024;
if ( (buffer = (char *) malloc((unsigned) max_len)) == (char *) NULL) {
printf("process %d could not allocate buffer of size %d\n",me,max_len);
MPI_Abort(MPI_COMM_WORLD,7777);
}
lenbuf = 1;
while (lenbuf <= max_len) {
start_ustime = MPI_Wtime();
if (me == 0) {
MPI_Send(buffer,lenbuf,MPI_CHAR,left, type,MPI_COMM_WORLD);
MPI_Recv(buffer,lenbuf,MPI_CHAR,right,type,MPI_COMM_WORLD,&status);
}
else {
MPI_Recv(buffer,lenbuf,MPI_CHAR,right,type,MPI_COMM_WORLD,&status);
MPI_Send(buffer,lenbuf,MPI_CHAR,left, type,MPI_COMM_WORLD);
}
used_ustime = MPI_Wtime() - start_ustime;
if (used_ustime > 0) /* rate is megabytes per second */
us_rate = (double)(nproc*lenbuf / (used_ustime*(double)1000000));
else
us_rate = 0.0;
if (me == 0) {
printf("len=%d bytes, used= %f sec., rate=%f Mbytes/sec\n",
lenbuf, used_ustime, us_rate);
}
lenbuf *= 2;
}
if (me == 0)
printf("clock resolution in seconds: %10.8f\n", MPI_Wtick());
free(buffer);
}

1
install/usr/share/swarmlab.io/sec/swarmlab-sec

@ -311,6 +311,7 @@ fi
/bin/mkdir -p $Wdir/project
/bin/cp -f $DIR/project/mpich-3.2.tar.gz $Wdir
/bin/cp -f $DIR/project/examples $Wdir/project
/bin/cp -f $DIR/project/hello_world.sh $Wdir/project
/bin/cp -f $DIR/$bootstrap $Wdir/$bootstrap
/bin/cp -f $DIR/$hostnames $Wdir/$hostnames
