PBS is now running on the beowulf. Please go here for instructions on using PBS. Usage is REQUIRED. Any job found running without a reservation will be killed without warning, and repeat offenders will be penalized.
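The usual pattern is to put the resource request in a short batch script and submit it with qsub. A minimal sketch, assuming a Torque/OpenPBS-style setup; the node counts, walltime, and program name are placeholders:

    #!/bin/sh
    #PBS -l nodes=2:ppn=2
    #PBS -l walltime=1:00:00
    cd $PBS_O_WORKDIR
    mpirun -np 4 -machinefile $PBS_NODEFILE ./a.out

Submitting this with qsub gives the job its own reservation, so it will not be killed.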
For running interactive jobs, you can use the script /share/getresv. For example, /share/getresv 2 will return a shell in a two-node allocation.
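A hypothetical session, assuming getresv drops you into a PBS interactive shell where $PBS_NODEFILE lists your reserved nodes:

    /share/getresv 2
    mpirun -np 4 -machinefile $PBS_NODEFILE ./hello
    exit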
Although we now have all 8 compute nodes available, giving us 16 processors, please refrain from using -np 16 unless you really need all 16 processors. Usually -np 4 is good enough.
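As a first test of your allocation, here is a simple MPI program that makes each process report its rank and the node it is running on: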
#include "mpi.h"
#include "unistd.h"
#include "stdio.h"
int main(int argc, char **argv) {
size_t len=256;
char *hostname = new char[len];
int size,rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
gethostname(hostname, len);
printf("Hi, I am %d of %d and my hostname is %s\n", rank, size, hostname);
MPI_Finalize();
}
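To build and run it, something like the following should work (the exact compiler wrapper on the beowulf may be named differently; mpiCC and the mpirun options are assumptions):

    mpiCC hello.cpp -o hello
    mpirun -np 4 hello

The next example uses OpenMP instead of MPI. It runs the power method for M iterations to estimate the largest eigenvalue of the N x N matrix with entries A(i,j) = 1/((i+j)(i+j+1)/2 + i + 1), parallelizing the matrix-vector product y = Ax across threads: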
#include <math.h>
#include <stdio.h>

#define N 16384
#define M 10

// Row i of the matrix A is generated on the fly:
// A(i,j) = 1/((i+j)*(i+j+1)/2 + i + 1), zero based.
double dotproduct(int i, double *x) {
    double temp = 0.0, denom;
    int j;
    for (j = 0; j < N; j++) {
        // zero based!!
        denom = (i+j)*(i+j+1)/2 + i+1;
        temp = temp + x[j]*(1/denom);
    }
    return temp;
}

int main() {
    double *x = new double[N];
    double *y = new double[N];
    double eig = sqrt((double)N);
    int i, k;

    // start from the unit vector x = (1,...,1)/sqrt(N)
    for (i = 0; i < N; i++) {
        x[i] = 1/eig;
    }
    for (k = 0; k < M; k++) {
        // compute y = Ax; the rows are independent, so split them
        // across threads (the loop index i is implicitly private)
        #pragma omp parallel for shared(y)
        for (i = 0; i < N; i++) {
            y[i] = dotproduct(i, x);
        }
        // the 2-norm of y is the current estimate of the largest eigenvalue
        eig = 0;
        for (i = 0; i < N; i++) {
            eig = eig + y[i]*y[i];
        }
        eig = sqrt(eig);
        printf("The largest eigenvalue after %2d iterations is %16.15e\n", k+1, eig);
        // normalize so x stays a unit vector
        for (i = 0; i < N; i++) {
            x[i] = y[i]/eig;
        }
    }
    delete[] x;
    delete[] y;
    return 0;
}
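A sketch of how you might compile and run it, assuming a compiler with OpenMP support (the flag is compiler-dependent; -fopenmp is the g++ spelling):

    g++ -fopenmp power.cpp -o power
    OMP_NUM_THREADS=4 ./power

Each iteration prints the current estimate, which converges as x lines up with the dominant eigenvector, so the printed values should settle down over the 10 iterations.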