  • Cloud Computing -- MPI

    [root@localhost mpi]# mpicc -c base.c
    [root@localhost mpi]# mpicc -o base base.o
    [root@localhost mpi]# mpirun -np 4 ./base

    mpicc cos.c -o cos -lm
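
    The first two commands above compile and link in separate steps, the third launches four processes, and the last compiles cos.c while linking the math library (-lm is only needed when the program calls math functions). Each numbered example below can be built and run the same way; the file names here are just placeholders:

        mpicc example.c -o example    # compile and link in one step
        mpirun -np 4 ./example        # launch 4 processes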

    I. Example Programs
    1. Hello world

    #include <stdio.h>
    #include "mpi.h"
    int main(int argc, char **argv){
        MPI_Init(&argc, &argv);      /* initialize the MPI environment */
        printf("Hello world.\n");
        MPI_Finalize();              /* shut down MPI */
        return 0;
    }

    2. Each process prints its own rank

    #include <stdio.h>
    #include "mpi.h"
    int main(int argc, char **argv){
        MPI_Comm comm = MPI_COMM_WORLD;
        int size, rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(comm, &size);   /* total number of processes */
        MPI_Comm_rank(comm, &rank);   /* rank of this process, 0..size-1 */
        printf("This is process %d of %d processes.\n", rank, size);
        MPI_Finalize();
        return 0;
    }

    3. Point-to-point: process 0 sends a message to process 1, and process 1 receives it

    #include <stdio.h>
    #include <mpi.h>
    #include <string.h>
    int main(int argc, char **argv){
        MPI_Comm comm = MPI_COMM_WORLD;
        MPI_Status status;
        int size, rank;
        char str[100];
        MPI_Init(&argc, &argv);
        MPI_Comm_size(comm, &size);   /* this example needs at least 2 processes */
        MPI_Comm_rank(comm, &rank);
        if (rank == 0) {
            strcpy(str, "hello world");
            printf("Process 0 sends \"%s\" to process 1.\n", str);
            MPI_Send(str, strlen(str) + 1, MPI_CHAR, 1, 99, comm);   /* +1 so the '\0' is sent too */
        }
        else if (rank == 1) {
            MPI_Recv(str, 100, MPI_CHAR, 0, 99, comm, &status);
            printf("Process 1 receives message \"%s\".\n", str);
        }
        MPI_Finalize();
        return 0;
    }
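
    If the receiver needs to know how much data actually arrived, or which rank and tag it came from, the MPI_Status filled in by MPI_Recv can be queried. A minimal sketch, meant to go right after the MPI_Recv call above:

        int nrecv;
        MPI_Get_count(&status, MPI_CHAR, &nrecv);   /* number of MPI_CHAR elements received */
        printf("received %d chars from rank %d with tag %d\n",
               nrecv, status.MPI_SOURCE, status.MPI_TAG);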

    4. Point-to-point: two processes send and receive messages back and forth (ping-pong a counter)

    #include <stdio.h>
    #include <mpi.h>
    #include <string.h>
    int main(int argc, char **argv){
        const int limit = 10;
        int rank, count = 0;
        MPI_Status status;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        while (count < limit){               /* run with exactly 2 processes */
            if (rank == 0){
                count++;
                MPI_Send(&count, 1, MPI_INT, 1, 10, MPI_COMM_WORLD);
                printf("0 sent %d to 1\n", count);
                MPI_Recv(&count, 1, MPI_INT, 1, 20, MPI_COMM_WORLD, &status);
                printf("0 received %d from 1\n", count);
            }
            else {
                MPI_Recv(&count, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, &status);
                printf("1 received %d from 0\n", count);
                count++;
                MPI_Send(&count, 1, MPI_INT, 0, 20, MPI_COMM_WORLD);
                printf("1 sent %d to 0\n", count);
            }
        }
        MPI_Finalize();
        return 0;
    }

    5. Point-to-point: pass a token around a ring of processes, 0 -> 1 -> 2 -> 0 (assuming three processes are launched)

    #include <stdio.h>
    #include <mpi.h>
    #include <string.h>
    int main(int argc, char **argv){
        int rank, size, token, source, dest;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        source = rank == 0 ? size - 1 : rank - 1;   /* left neighbour in the ring */
        dest = (rank + 1) % size;                   /* right neighbour in the ring */
        token = 100;
        if (rank == 0){
            /* rank 0 injects the token first, then waits for it to come back around */
            MPI_Ssend(&token, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);
            printf("Process %d sends token to %d.\n", rank, dest);
            MPI_Recv(&token, 1, MPI_INT, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("Process %d receives token from %d.\n", rank, source);
        }
        else {
            MPI_Recv(&token, 1, MPI_INT, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("Process %d receives token from %d.\n", rank, source);
            MPI_Ssend(&token, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);
            printf("Process %d sends token to %d.\n", rank, dest);
        }
        MPI_Finalize();
        return 0;
    }
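
    A related pattern is a simultaneous ring shift, in which every process sends to its right neighbour and receives from its left neighbour in a single call. MPI_Sendrecv does both at once and avoids the careful send/receive ordering used above; a minimal sketch reusing the source and dest computed in the example:

        int sendval = rank, recvval;
        MPI_Sendrecv(&sendval, 1, MPI_INT, dest,   0,
                     &recvval, 1, MPI_INT, source, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        /* recvval now holds the rank of the left neighbour */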

    6. Broadcast: assign values on process 0, then broadcast them; every process ends up with the same values as process 0

    #include <stdio.h>
    #include <mpi.h>
    #include <string.h>
    int main(int argc, char **argv){
        int arr[3], i, rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        if (rank == 0){
            for (i = 0; i < 3; i++)
                arr[i] = i + 1;              /* only the root fills the buffer */
        }
        MPI_Bcast(arr, 3, MPI_INT, 0, MPI_COMM_WORLD);   /* every rank must call MPI_Bcast */
        printf("Process %d receives: ", rank);
        for (i = 0; i < 3; i++)
            printf("%d ", arr[i]);
        putchar('\n');
        MPI_Finalize();
        return 0;
    }

    7. Gather: process 0 collects the values of all processes

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    int main(int argc, char **argv){
        int rank, size, sbuf[3], *rbuf = NULL, i;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        for (i = 0; i < 3; i++)
            sbuf[i] = rank * 10 + i;
        if (rank == 0)
            rbuf = (int*)malloc(sizeof(int) * 3 * size);   /* receive buffer is only needed at the root */
        MPI_Gather(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, 0, MPI_COMM_WORLD);
        if (rank == 0){
            printf("Process 0 receives: ");
            for (i = 0; i < size * 3; i++)
                printf("%d ", rbuf[i]);
            putchar('\n');
            free(rbuf);
        }
        MPI_Finalize();
        return 0;
    }
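
    If the processes contribute different numbers of elements, MPI_Gatherv takes an array of per-rank receive counts and displacements instead of a single recvcount. A minimal sketch, assuming rank r contributes r + 1 integers and there are at most 32 ranks:

        int nsend = rank + 1, sdata[32];
        int *counts = NULL, *displs = NULL, *rv = NULL;
        for (i = 0; i < nsend; i++) sdata[i] = rank;
        if (rank == 0){
            counts = (int*)malloc(size * sizeof(int));
            displs = (int*)malloc(size * sizeof(int));
            for (i = 0; i < size; i++){
                counts[i] = i + 1;                                    /* how many ints rank i sends */
                displs[i] = (i == 0) ? 0 : displs[i-1] + counts[i-1]; /* where rank i's data starts */
            }
            rv = (int*)malloc((displs[size-1] + counts[size-1]) * sizeof(int));
        }
        MPI_Gatherv(sdata, nsend, MPI_INT,
                    rv, counts, displs, MPI_INT, 0, MPI_COMM_WORLD);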

    8. Scatter: distribute process 0's values to all processes; each process receives a different chunk

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    int main(int argc, char **argv){
        int rank, size, *sbuf = NULL, rbuf[3], i;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        if (rank == 0){
            sbuf = (int*)malloc(sizeof(int) * 3 * size);   /* send buffer is only needed at the root */
            for (i = 0; i < size * 3; i++)
                sbuf[i] = i + 1;
        }
        MPI_Scatter(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, 0, MPI_COMM_WORLD);
        printf("Process %d receives: ", rank);
        for (i = 0; i < 3; i++)
            printf("%d ", rbuf[i]);
        putchar('\n');
        if (rank == 0)
            free(sbuf);
        MPI_Finalize();
        return 0;
    }

    9. Allgather: unlike gather, every process (not only the root) collects the values of all processes

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    int main(int argc, char **argv){
        int rank, size, sbuf[3], *rbuf, i;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        for (i = 0; i < 3; i++)
            sbuf[i] = rank * 10 + i;
        rbuf = (int*)malloc(sizeof(int) * 3 * size);   /* every rank needs the full receive buffer */
        MPI_Allgather(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, MPI_COMM_WORLD);
        printf("Process %d receives: ", rank);
        for (i = 0; i < size * 3; i++)
            printf("%d ", rbuf[i]);
        putchar('\n');
        free(rbuf);
        MPI_Finalize();
        return 0;
    }

    10. Alltoall: there are three tasks; the layout

        task1  task2  task3
          0      1      2
          3      4      5
          6      7      8
          9     10     11

    is transformed into:

        rank0  rank1  rank2  rank3
          0      3      6      9
          1      4      7     10
          2      5      8     11

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    int main(int argc, char **argv){
        int rank, size, *sbuf, *rbuf, i;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        sbuf = (int*)malloc(size * 3 * sizeof(int));
        rbuf = (int*)malloc(size * 3 * sizeof(int));
        for (i = 0; i < size * 3; i++)
            sbuf[i] = rank * 10 + i;
        printf("Before exchange, process %d has ", rank);
        for (i = 0; i < size * 3; i++)
            printf("%d ", sbuf[i]);
        putchar('\n');
        /* send 3 elements to every rank, receive 3 elements from every rank */
        MPI_Alltoall(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, MPI_COMM_WORLD);
        printf("After exchange, process %d has ", rank);
        for (i = 0; i < size * 3; i++)
            printf("%d ", rbuf[i]);
        putchar('\n');
        free(sbuf);
        free(rbuf);
        MPI_Finalize();
        return 0;
    }

    11. Reduce: sum each task (column); process 0 holds the result

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    int main(int argc, char **argv) {
        int size, rank, sbuf[3], rbuf[3], i;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        for (i = 0; i < 3; i++) sbuf[i] = rank * 10 + i;
        printf("Process %d has: ", rank);
        for (i = 0; i < 3; i++) printf("%d ", sbuf[i]);
        putchar('\n');
        /* element-wise sum across all ranks; only rank 0 receives the result */
        MPI_Reduce(sbuf, rbuf, 3, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0) {
            printf("Total sum = ");
            for (i = 0; i < 3; i++) printf("%d ", rbuf[i]);
            putchar('\n');
        }
        MPI_Finalize();
        return 0;
    }
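
    With three processes, for example, the send buffers are {0, 1, 2}, {10, 11, 12} and {20, 21, 22}, so rank 0 ends up printing the element-wise sums 30 33 36.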

    12. Reduce with MPI_MAXLOC: find the maximum of each column together with the rank that holds it; process 0 prints the result

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    #include <time.h>
    /* layout matches MPI_2INT: two consecutive ints (value, then location) */
    typedef struct { int val; int rank; } DATATYPE;
    int main(int argc, char **argv){
        int size, rank, i;
        DATATYPE sbuf[3], rbuf[3];
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        srand(time(NULL) + rank);            /* a different seed on every rank */
        printf("Process %d has ", rank);
        for (i = 0; i < 3; i++){
            sbuf[i].val = rand() % 100;
            sbuf[i].rank = rank;
            printf("%d ", sbuf[i].val);
        }
        putchar('\n');
        MPI_Reduce(sbuf, rbuf, 3, MPI_2INT, MPI_MAXLOC, 0, MPI_COMM_WORLD);
        if (rank == 0){
            printf("Max values and locations are: ");
            for (i = 0; i < 3; i++)
                printf("value = %d, location = %d; ", rbuf[i].val, rbuf[i].rank);
            putchar('\n');
        }
        MPI_Finalize();
        return 0;
    }

    13. Allreduce: sum each column; every process receives the result and prints it

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    int main(int argc, char **argv) {
        int size, rank, sbuf[3], rbuf[3], i;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        for (i = 0; i < 3; i++) sbuf[i] = rank * 10 + i;
        printf("Process %d has: ", rank);
        for (i = 0; i < 3; i++) printf("%d ", sbuf[i]);
        putchar('\n');
        /* like MPI_Reduce, but every rank receives the result */
        MPI_Allreduce(sbuf, rbuf, 3, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
        printf("Total sum at process %d = ", rank);
        for (i = 0; i < 3; i++) printf("%d ", rbuf[i]);
        putchar('\n');
        MPI_Finalize();
        return 0;
    }

    14. Scan: each process gets the column-wise sum of its own values and those of all lower-ranked processes (an inclusive prefix sum)

    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>
    int main(int argc, char **argv) {
        int size, rank, sbuf[3], rbuf[3], i;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        for (i = 0; i < 3; i++) sbuf[i] = rank * 10 + i;
        printf("Process %d has: ", rank);
        for (i = 0; i < 3; i++) printf("%d ", sbuf[i]);
        putchar('\n');
        /* inclusive prefix sum over ranks 0..rank, element-wise */
        MPI_Scan(sbuf, rbuf, 3, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
        printf("Process %d has results ", rank);
        for (i = 0; i < 3; i++) printf("%d ", rbuf[i]);
        putchar('\n');
        MPI_Finalize();
        return 0;
    }
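
    With three processes, for example, the send buffers are {0, 1, 2}, {10, 11, 12} and {20, 21, 22}; after MPI_Scan rank 0 holds {0, 1, 2}, rank 1 holds {10, 12, 14} and rank 2 holds {30, 33, 36}.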

    II. Functions
    1. Send

    MPI_Send(
    void* data,// starting address of the data to be sent
    int count,//number of elements to be sent (not bytes)
    MPI_Datatype datatype,//MPI datatype of each element
    int destination, //rank of destination process
    int tag,//message identifier (set by user)
    MPI_Comm comm) //MPI communicator of processors involved

    2. Recv

    MPI_Recv(
    void* data, //starting address of buffer to store message
    int count, //number of elements to be received (not bytes)
    MPI_Datatype datatype, //MPI datatype of each element
    int source,// rank of source process
    int tag,//message identifier (set by user)
    MPI_Comm comm, //MPI communicator of processors involved
    MPI_Status* status) //structure of information about the message
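
    The source and tag arguments may also be the wildcards MPI_ANY_SOURCE and MPI_ANY_TAG; the actual sender and tag are then read back from the status. A minimal sketch:

        int value;
        MPI_Status st;
        MPI_Recv(&value, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &st);
        printf("got %d from rank %d (tag %d)\n", value, st.MPI_SOURCE, st.MPI_TAG);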

    3. Bcast: broadcast the contents of one process to all processes

    int MPI_Bcast(
    void* buffer,//starting address of buffer
    int count, //number of entries in buffer
    MPI_Datatype datatype,//data type of buffer
    int root, //rank of broadcast root
    MPI_Comm comm)//communicator

    4. Gather: the root process collects the data of all processes

    int MPI_Gather(
    const void* sendbuf, //starting address of send buffer
    int sendcount, //number of elements in send buffer
    MPI_Datatype sendtype,//data type of send buffer elements
    void* recvbuf, //address of receive buffer (significant only at root)
    int recvcount,//number of elements for any single receive (significant only at root)
    MPI_Datatype recvtype,//data type of recv buffer elements(significant only at root)
    int root,//rank of receiving process
    MPI_Comm comm)//communicator

    5. Scatter: distribute the root process's data to all processes; each process receives a different chunk

    int MPI_Scatter (
    void * sendbuf , // pointer to send buffer
    int sendcount , // items to send per process
    MPI_Datatype sendtype , // type of send buffer data
    void * recvbuf , // pointer to receive buffer
    int recvcount , // number of items to receive
    MPI_Datatype recvtype , // type of receive buffer data
    int root , // rank of sending process
    MPI_Comm comm ) // MPI communicator to use

    6. Allgather: unlike Gather, not only the root but every process collects the data of all processes

    int MPI_Allgather (
    void * sendbuf , // pointer to send buffer
    int sendcount , // number of items to send
    MPI_Datatype sendtype , // type of send buffer data
    void * recvbuf , // pointer to receive buffer
    int recvcount , // items to receive per process
    MPI_Datatype recvtype , // type of receive buffer data
    MPI_Comm comm ) // MPI communicator to use

    7. Alltoall

    int MPI_Alltoall(
    const void *sendbuf, //starting address of send buffer
    int sendcount, //number of elements to send to each process
    MPI_Datatype sendtype, //data type of send buffer elements
    void *recvbuf, //address of receive buffer
    int recvcount, //number of elements received from any process
    MPI_Datatype recvtype, //data type of receive buffer elements
    MPI_Comm comm) //communicator

    8. Reduce

    MPI_Reduce(
    void* send_data, //address of send buffer
    void* recv_data, //address of receive buffer
    int count, //number of elements in send buffer
    MPI_Datatype datatype, //data type of elements of send buffer
    MPI_Op op, //reduce operation
    int root, //rank of root process
    MPI_Comm communicator) //communicator
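
    op is one of the predefined operations (MPI_SUM, MPI_MAX, MPI_MIN, MPI_PROD, MPI_MAXLOC, ...) or a user-defined operation registered with MPI_Op_create. A minimal sketch of a custom element-wise integer sum, just to show the shape of the API:

        void my_sum(void *in, void *inout, int *len, MPI_Datatype *dtype){
            int i, *a = (int*)in, *b = (int*)inout;
            for (i = 0; i < *len; i++) b[i] += a[i];   /* inout[i] = in[i] op inout[i] */
        }

        /* inside main, after the buffers are set up: */
        MPI_Op myop;
        MPI_Op_create(my_sum, 1, &myop);               /* 1 means the operation is commutative */
        MPI_Reduce(sbuf, rbuf, 3, MPI_INT, myop, 0, MPI_COMM_WORLD);
        MPI_Op_free(&myop);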

    9. Allreduce

    MPI_Allreduce(
    void* send_data, //address of send buffer
    void* recv_data,//address of receive buffer
    int count, //number of elements in send buffer
    MPI_Datatype datatype, //data type of elements of send buffer
    MPI_Op op, //reduce operation
    MPI_Comm communicator) //communicator

    10. Scan

    int MPI_Scan(
    const void* sendbuf,//address of send buffer
    void* recvbuf, //address of receive buffer
    int count, //number of elements in send buffer
    MPI_Datatype datatype, //data type of elements of send buffer
    MPI_Op op,//reduce operation
    MPI_Comm comm) //communicator

  • Original source: https://www.cnblogs.com/ximiaomiao/p/7003449.html