root / Pi / MPI / PiMPI.py @ 46
#!/usr/bin/env python
#
# PiMC model using mpi4py MPI implementation for Python
#
# CC BY-NC-SA 2011 : <emmanuel.quemener@ens-lyon.fr>
#
# Thanks to Lisandro Dalcin for MPI4PY :
# http://mpi4py.scipy.org/

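# Typical launch, as a sketch (assumes an MPI runtime such as Open MPI and the
# mpi4py package are installed; adjust the process count to the machine):
#   mpirun -np 4 python PiMPI.py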
import sys
from math import exp
from random import random
import time
# MPI library import
import mpi4py
from mpi4py import MPI

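# MainLoop draws 'iterations' points uniformly in the unit square and counts
# how many fall inside the quarter disc x*x+y*y<1: the expected ratio is pi/4,
# so pi can be estimated as 4.0*total/iterations.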
def MainLoop(iterations):

    total=0
    for i in xrange(iterations):
        # Random coordinates of the sample point
        x,y=random(),random()

        if ((x*x+y*y) < 1.0):
            total+=1

    return(total)

if __name__=='__main__':

    # MPI Init
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    # Number of MPI processes the computation is spread over (rank 0 included)
    NODES=comm.Get_size()

    # Above 4*10^7, MPI no longer wants to launch...
    Iterations=1000000000

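    # Work sharing: rank 0 (the master) splits Iterations across the NODES
    # ranks, sends each worker its share with tag 11, runs its own share,
    # then collects and sums the partial counts the workers send back.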
    # Partial counts are exchanged as plain Python integers through the
    # pickle-based lowercase send/recv calls (no explicit MPI datatypes)
    total=0
    if rank == 0:
        # Define iterations to send to each node
        if Iterations%NODES==0:
            iterations=Iterations/NODES
        else:
            iterations=Iterations/NODES+1
        print "%i iterations will be sent to each node" % iterations

        for i in range(1,NODES):

            print "Send from 0 to node %i" % i
            ToSend=iterations
            # Send MPI call to each node
            comm.send(ToSend, dest=i, tag=11)

        # Master does part of the job!
        total=MainLoop(iterations)
        print "Partial Result from master %i: %i" % (rank,total)

        print "Retrieve results..."
        for i in range(1,NODES):
            # Receive MPI call from each node
            Output=comm.recv(source=i,tag=11)
            print "Partial Result from %i: %s" % (i,Output)
            total+=Output

        print "Global Result: %i" % (total)
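        # The estimate of pi follows as 4.0*total/(iterations*NODES), since
        # every rank (master included) ran 'iterations' samples; this revision
        # only prints the raw counts.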
    else:
        # Slave applies the simulation to the set provided by the master
        # Receive MPI call with the input set
        ToReceive=comm.recv(source=0, tag=11)
        iterations=ToReceive
        print "Rank %i receives a job of %i iterations" % (rank,iterations)
        Output=MainLoop(iterations)

        comm.send(Output, dest=0, tag=11)
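
# Note: all exchanges above are blocking point-to-point calls; the master reads
# the worker results in rank order (1..NODES-1), so a slow worker only delays
# the reading of later results, not their computation.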