MPI implementation of the iterative FFT algorithm, based on the pseudocode in 《并行计算——结构·算法·编程》 (Parallel Computing: Structures, Algorithms, Programming). Update 1.
Transfers within a node do not go through MPI: when both rows of a butterfly live in the same process, the value is copied directly between local buffers, and nonblocking MPI calls are posted only for rows owned by another rank (sketched below).
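A minimal sketch of just that dispatch, under the same block distribution the full program uses (psize rows per rank, rank self owning global rows off..off+psize-1). The function name exchange_row and the parameter partner are illustrative, not names from the program below:

/* Sketch (illustrative): exchange row k with its butterfly partner.
   Rows are block-distributed, psize per rank; c[] holds this rank's
   rows, r[] receives the partner values. */
#include <mpi.h>

typedef struct { double real; double img; } com;

void exchange_row(int k, int partner, int self, int psize,
                  com *c, com *r, MPI_Request *isReq, MPI_Request *reReq)
{
    int off = self * psize;                 /* first global row owned here */
    if (partner >= off && partner < off + psize) {
        r[k - off] = c[partner - off];      /* same node: plain copy, no MPI */
    } else {
        /* remote: nonblocking send/recv, tagged with the global row numbers */
        MPI_Issend(&c[k - off], 2, MPI_DOUBLE, partner / psize, k,
                   MPI_COMM_WORLD, &isReq[k - off]);
        MPI_Irecv(&r[k - off], 2, MPI_DOUBLE, partner / psize, partner,
                  MPI_COMM_WORLD, &reReq[k - off]);
    }
}

In the same-node branch no request is posted, so the caller has to skip the matching MPI_Wait, which is exactly what the second loop of the full program does.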
#include "mpi.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/stat.h> #include <memory.h> typedef struct { double real; double img; } com; double PI; int readBinary(char* file,void *buf,int fsize);//if named read causes override and miscall int writeBinary(char *file,com *array,int size); //don't use the omega array,not every node needs a whole copy of omega,not efficient static inline void cp(com f,com *t);//copy complex static inline void add(com a,com b,com* r); static inline void mul(com a,com b,com* r); static inline void sub(com a,com b,com* r); int br(int src,int size);//bit reverse void send(com *c,com *r,int s,int t); void show(com a) { printf("%.4f %.4f \n",a.real,a.img); } int main(int argc,char *argv[]) { if(argc<3) { printf("wtf\n"); return 1; } double st,et; PI=atan(1)*4; int self,size;//process id and total number of processes MPI_Init(&argc,&argv); st=MPI_Wtime(); MPI_Comm_rank(MPI_COMM_WORLD,&self); MPI_Comm_size(MPI_COMM_WORLD,&size); int fsize,n; void *buf; com *in; if(0==self) { //printf("start \n"); struct stat fstat; stat(argv[1],&fstat); fsize=fstat.st_size; buf=malloc(fsize); n=readBinary(argv[1],buf,fsize)/2;//n stands for total complex number in=(com*)malloc(n*sizeof(com)); memcpy(in,((int*)buf+1),n*sizeof(com)); free(buf); } MPI_Bcast(&n,1,MPI_INT,0,MPI_COMM_WORLD);//every thread should know the total size if(-1==n) { printf("error reading \n"); MPI_Finalize(); } int psize=n/size; //mpi communication obj MPI_Request isReq[psize]; MPI_Request reReq[psize]; MPI_Status s[psize]; //data //com a[psize];//input com w;//omega com r[psize];//recieve //com *r=(com*)malloc(psize*sizeof(com)); int l=log(n)/log(2); com c[psize];//temp //com *c=(com*)malloc(psize*sizeof(com)); int off=self*psize; for(int h=l-1;h>=0;--h) { //initialize data each round if(0==self) printf("test scatter "); MPI_Scatter(in,psize*2,MPI_DOUBLE,c,psize*2,MPI_DOUBLE,0,MPI_COMM_WORLD); if(0==self) printf("done \n"); //memcpy(c,a,psize*sizeof(com)); //calculate int p=pow(2,h); int q=n/p; int k; for(k=off;k<off+psize;++k) {//post all comunications first if(k%p==k%(2*p)) {//tag is always the sending row number //printf("self %d row %d dest row %d dest %d \n",self,k+self,k+p,(k+p)/psize); if(k+p<(self+1)*psize) {//same node r[k-off]=c[k+p-off]; //if(0==self) printf("inside up s\n"); } else { MPI_Issend(&c[k-off],2,MPI_DOUBLE,(k+p)/psize,k,MPI_COMM_WORLD,&isReq[k-off]); MPI_Irecv(&r[k-off],2,MPI_DOUBLE,(k+p)/psize,k+p,MPI_COMM_WORLD,&reReq[k-off]); } } else { if(k-p>=off) {//same node r[k-off]=c[k-p-off]; //if(0==self) printf("inside down s\n"); } else { MPI_Issend(&c[k-off],2,MPI_DOUBLE,(k-p)/psize,k,MPI_COMM_WORLD,&isReq[k-off]); MPI_Irecv(&r[k-off],2,MPI_DOUBLE,(k-p)/psize,k-p,MPI_COMM_WORLD,&reReq[k-off]); } } } for(k=off;k<off+psize;++k) {//wait and calculate if(k%p==k%(2*p)) { int time=p*(br(k,l)%q);//compute while recieving and sending w.real=cos(2*PI*time/n); w.img=sin(2*PI*time/n); if(k+p<(self+1)*psize) {//not same node //if(0==self) printf("inside up w\n"); } else { MPI_Wait(&reReq[k-off],&s[k-off]); MPI_Wait(&isReq[k-off],&s[k-off]); } mul(r[k-off],w,&r[k-off]); add(c[k-off],r[k-off],&c[k-off]); } else { int time=p*(br(k-p,l)%q);//compute while recieving and sending w.real=cos(2*PI*time/n); w.img=sin(2*PI*time/n); if(k-p>=off) {//not same node //if(0==self) printf("inside down w\n"); } else { MPI_Wait(&reReq[k-off],&s[k-off]); MPI_Wait(&isReq[k-off],&s[k-off]); } mul(c[k-off],w,&c[k-off]);//can't modify until sending comes to an end sub(r[k-off],c[k-off],&c[k-off]); } } 
        if (0 == self) {
            printf("loop %d \n", h);
        }
        MPI_Gather(c, 2 * psize, MPI_DOUBLE, in, 2 * psize, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        if (0 == self) {
            printf("gather done\n");
        }
    }
    MPI_Scatter(in, psize * 2, MPI_DOUBLE, c, psize * 2, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    /* bit-reverse all data into final order */
    int rs = 0;
    for (int k = off; k < off + psize; ++k) { /* post all communications first; the tag is always the sending row number */
        int brv = br(k, l);
        if (brv >= off && brv < off + psize) { /* same node */
            r[k - off] = c[brv - off];
        } else {
            MPI_Issend(&c[k - off], 2, MPI_DOUBLE, brv / psize, k, MPI_COMM_WORLD, &isReq[rs]);
            MPI_Irecv(&r[k - off], 2, MPI_DOUBLE, brv / psize, brv, MPI_COMM_WORLD, &reReq[rs++]);
        }
    }
    MPI_Waitall(rs, isReq, s);
    MPI_Waitall(rs, reReq, s);
    MPI_Gather(r, psize * 2, MPI_DOUBLE, in, psize * 2, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    if (0 == self) {
        writeBinary(argv[2], in, n);
        free(in);
    }
    et = MPI_Wtime();
    MPI_Finalize();
    printf("%f \n", et - st);
    return 0;
}

int readBinary(char *file, void *buf, int fsize) {
    FILE *in;
    if (!(in = fopen(file, "rb"))) {
        printf("can't open \n");
        return -1;
    }
    fread(buf, sizeof(char), fsize, in);
    int size = ((int *)buf)[0]; /* leading int holds the double count */
    fclose(in);
    return size;
}

int writeBinary(char *file, com *array, int size) {
    FILE *out;
    if (!(out = fopen(file, "wb"))) {
        printf("can't open \n");
        return -1;
    }
    int bsize = sizeof(int) + size * sizeof(com);
    void *buf = malloc(bsize);
    ((int *)buf)[0] = 2 * size; /* header: number of doubles */
    memcpy(((int *)buf + 1), array, size * sizeof(com));
    fwrite(buf, sizeof(char), bsize, out);
    free(buf);
    fclose(out);
    return 0;
}

static inline void cp(com f, com *t) {
    t->real = f.real;
    t->img = f.img;
}

static inline void add(com a, com b, com *c) {
    c->real = a.real + b.real;
    c->img = a.img + b.img;
}

static inline void mul(com a, com b, com *c) {
    c->real = a.real * b.real - a.img * b.img;
    c->img = a.real * b.img + a.img * b.real;
}

static inline void sub(com a, com b, com *c) {
    c->real = a.real - b.real;
    c->img = a.img - b.img;
}

int br(int src, int size) { /* reverse the low `size` bits of src */
    int tmp = src;
    int des = 0;
    for (int i = size - 1; i >= 0; --i) {
        des = ((tmp & 1) << i) | des;
        tmp = tmp >> 1;
    }
    return des;
}
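For testing, the on-disk format implied by readBinary and writeBinary is a leading int holding the number of doubles (twice the complex count), followed by the real/imaginary pairs. Here is a throwaway generator written against that assumption; the file name in.dat and the ramp test signal are arbitrary choices, not part of the original program:

/* Generator for the input format assumed by readBinary: a leading int
   with the double count (2 * number of complex values), then the
   real/imag pairs as raw doubles. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { double real; double img; } com;

int main(void) {
    int n = 8;                        /* power of two, multiple of the process count */
    com *a = malloc(n * sizeof(com));
    for (int i = 0; i < n; ++i) {     /* simple ramp as test data */
        a[i].real = (double)i;
        a[i].img  = 0.0;
    }
    FILE *out = fopen("in.dat", "wb");
    if (!out) return 1;
    int count = 2 * n;                /* header: number of doubles */
    fwrite(&count, sizeof(int), 1, out);
    fwrite(a, sizeof(com), n, out);
    fclose(out);
    free(a);
    return 0;
}

Build and run with something like mpicc fft.c -o fft -lm, then mpirun -np 4 ./fft in.dat out.dat. n must be a power of two and a multiple of the process count, since the program assumes psize = n/size divides evenly.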