TRACE
/*
trace.c
trace of an NxN matrix blockwise distributed on a PxP grid
usage:
trace size
size: linear dimension N of global array
*/
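/*
note: the number of MPI processes must be a perfect square P*P, and the
grid dimension P must divide the matrix size N (both conditions are
checked in Startup)
*/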
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
/*
* datatypes
*/
typedef struct {
    int dim;   /* linear dimension of the square process grid */
    int row;   /* row position of current task */
    int col;   /* column position of current task */
} Grid;
/*
* function declarations
*/
void Startup(int argc, char *argv[], Grid *grid, int *mysize_p);
void MakeMatrix(int **C, int N, Grid grid);
int PrintTrace(int *C, int size, Grid grid);
int main(int argc, char *argv[]) {
    Grid grid;       /* geometric information of a task */
    int *C;          /* local part of the matrix */
    int localsize;   /* size of the local submatrix */
    int trace;       /* global trace (significant at the root only) */

    /* start tasks and broadcast the problem data */
    Startup(argc, argv, &grid, &localsize);
    /* initialize the local submatrix */
    MakeMatrix(&C, localsize, grid);
    /* compute and print the trace of the matrix */
    trace = PrintTrace(C, localsize, grid);
    /* the root prints the global result */
    if ( (grid.col == 0) && (grid.row == 0) ) {   /* I'm the root */
        printf("************** Total trace = %d **************\n", trace);
    }
    /* clean up and exit */
    free(C);
    MPI_Finalize();
    return 0;
}
void Startup(int argc, char *argv[], Grid *grid, int *mysize_p) {
    /* initialize MPI, get matrix size and grid dimension, broadcast the data */
    int nproc;   /* number of processes */
    int size;    /* linear matrix size */
    int me;      /* rank in MPI_COMM_WORLD */

    MPI_Init(&argc, &argv);                       /* enroll in MPI */
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    grid->dim = (int) floor(sqrt(nproc) + 0.5);   /* get grid dimension */
    if (me == 0) {   /* I am the master */
        /* check nproc */
        if (nproc != grid->dim * grid->dim) {
            fprintf(stderr, "Number of processes has to be a square.\n");
            MPI_Abort(MPI_COMM_WORLD, -1);
            exit(1);
        }
        /* check arguments */
        if (argc != 2) {
            fprintf(stderr, "Usage: %s <matrix size>\n", argv[0]);
            MPI_Abort(MPI_COMM_WORLD, -1);
            exit(1);
        }
        /* get the matrix size and check it */
        size = atoi(argv[1]);
        if (size % grid->dim) {
            fprintf(stderr, "The grid dimension must divide the matrix size.\n");
            MPI_Abort(MPI_COMM_WORLD, -1);
            exit(1);
        }
    }
    /* broadcast the matrix size: the same collective call sends on the root */
    /* and receives on all other tasks                                       */
    MPI_Bcast(&size, 1, MPI_INT, 0, MPI_COMM_WORLD);
    /* compute my local info */
    *mysize_p = size / grid->dim;
    grid->row = me / grid->dim;
    grid->col = me % grid->dim;
}
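/*
example (numbers for illustration only): with nproc = 4 and size = 8 the
grid dimension is 2, every task owns a 4x4 block, and rank 3 is placed at
grid position (row, col) = (3/2, 3%2) = (1, 1)
*/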
void MakeMatrix(int **C, int N, Grid grid) {
    /* initialize the local submatrix of size NxN */
    /* globally:  Cglob(i,j) = k1 + k2*(i-j) + k3*i*j               */
    /*            with k1 = Nglob*(Nglob-1)*(2*Nglob-1)/6,          */
    /*                 k2 = Nglob*(Nglob-1)/2,  k3 = -Nglob         */
    /* locally:   iglob = i + row*N, jglob = j + col*N; substituting */
    /*            these into Cglob yields the constants c1, c2, c3  */
    int i, j;
    int Nglob;
    int k1, k2, k3;
    int c1, c2, c3;

    /* allocate matrix storage as a one-dimensional array */
    *C = (int *) malloc(N*N*sizeof(int));
    /* compute constants which describe the - global - matrix C */
    Nglob = N * grid.dim;
    k1 = (Nglob*(Nglob-1)*(2*Nglob-1))/6;
    k2 = (Nglob*(Nglob-1))/2;
    k3 = -Nglob;
    /* c1 collects the terms independent of (i,j), c2 and c3 the */
    /* coefficients of i and j, and k3 stays with the i*j term   */
    c1 = k1 + k2*N*(grid.row - grid.col) + k3*N*N*grid.row*grid.col;
    c2 = k2 + k3*N*grid.col;
    c3 = -k2 + k3*N*grid.row;
    /* initialize the matrix */
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            (*C)[N*i + j] = c1 + c2*i + c3*j + k3*i*j;
        }
    }
}
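/*
remark: on the global diagonal Cglob(i,i) = k1 + k3*i*i; since
k1 = sum_{i=0..Nglob-1} i*i and k3 = -Nglob, the exact global trace is
Nglob*k1 + k3*k1 = 0, so the program should always report a total of 0
*/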
int PrintTrace(int *C, int size, Grid grid) {
    /* computes and prints the trace of the matrix */
    /* called by all tasks, but only the "diagonal" tasks take part */
    int i, color;
    int local_trace;   /* local trace of matrix C */
    int trace = 0;     /* global trace of matrix C (received by the root only) */
    MPI_Comm diag_comm;

    /* construct a communicator containing only the diagonal processes:  */
    /* color 0 selects them, MPI_UNDEFINED excludes all others, and the  */
    /* key grid.row sorts the members by row (= column) index            */
    color = (grid.row == grid.col) ? 0 : MPI_UNDEFINED;
    MPI_Comm_split(MPI_COMM_WORLD, color, grid.row, &diag_comm);
    if (diag_comm == MPI_COMM_NULL) {
        return(-1);   /* task is not used for the computation of the trace */
    }
    /* compute local trace */
    local_trace = 0;
    for (i = 0; i < size; i++) {
        local_trace += C[i*size + i];
    }
    printf("*** local trace on (%d,%d): %d\n", grid.row, grid.col, local_trace);
    /* reduce the local values to the global trace;                   */
    /* the root is the task with rank 0 in the world and diag groups  */
    MPI_Reduce(&local_trace, &trace, 1, MPI_INT, MPI_SUM, 0, diag_comm);
    MPI_Comm_free(&diag_comm);
    return(trace);   /* significant only on the root */
}
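
With a typical MPI installation the example can be built and run along these
lines (wrapper and launcher names may differ; the math library is needed for
sqrt and floor):

  mpicc -o trace trace.c -lm
  mpirun -np 4 ./trace 8

As a quick cross-check, the same trace can be computed serially from the
diagonal formula Cglob(i,i) = k1 + k3*i*i used in MakeMatrix. The following
stand-alone sketch, independent of MPI, should print 0 for every N:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
    int N, i, k1, k3;
    long trace = 0;

    N = (argc == 2) ? atoi(argv[1]) : 8;   /* global matrix size */
    k1 = (N*(N-1)*(2*N-1))/6;              /* same constants as in MakeMatrix */
    k3 = -N;
    for (i = 0; i < N; i++) {
        trace += k1 + (long) k3*i*i;       /* Cglob(i,i) = k1 + k3*i*i */
    }
    printf("serial trace = %ld\n", trace);
    return 0;
}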

Peter Junglas 11.5.2000