root / Pi / MPI / PiMPI.py @ 81
#!/usr/bin/env python
#
# PiMC model using mpi4py MPI implementation for Python
#
# CC BY-NC-SA 2011 : <emmanuel.quemener@ens-lyon.fr>
#
# Thanks to Lisandro Dalcin for MPI4PY :
# http://mpi4py.scipy.org/

import sys
from math import exp
from random import random
import time
# MPI library import
import mpi4py
from mpi4py import MPI

def MainLoop(iterations):
    # Count the random points of the unit square that fall inside the unit circle
    total=0
    for i in xrange(iterations):
        # Random coordinates in [0,1)x[0,1)
        x,y=random(),random()

        if ((x*x+y*y) < 1.0):
            total+=1

    return(total)

if __name__=='__main__':

    # MPI Init
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    # Number of MPI processes (the master, rank 0, is included in this count)
    NODES=comm.Get_size()

    # Above 4x10^7, MPI no longer wants to start...
    Iterations=1000000000

    # Python objects are exchanged, so the pickle-based send/recv calls are used
    total=0
    if rank == 0:
        # Define iterations to send to each node
        if Iterations%NODES==0:
            iterations=Iterations/NODES
        else:
            iterations=Iterations/NODES+1
        print "%i iterations will be sent to each node" % iterations

        for i in range(1,NODES):

            print "Send from 0 to node %i" % i
            ToSend=iterations
            # Send MPI call to each node
            comm.send(ToSend, dest=i, tag=11)

        # Master does its part of the job!
        total=MainLoop(iterations)
        print "Partial Result from master %i: %i" % (rank,total)

        print "Retrieve results..."
        for i in range(1,NODES):
            # Receive MPI call from each node
            Output=comm.recv(source=i,tag=11)
            print "Partial Result from %i: %s" % (i,Output)
            total+=Output

        print "Global Result: %i" % (total)
    else:
        # Slave applies the simulation to the set provided by the master
        # Receive MPI call with the input set
        ToReceive=comm.recv(source=0, tag=11)
        iterations=ToReceive
        print "Rank %i receives a job with %i iterations" % (rank,iterations)
        Output=MainLoop(iterations)

        comm.send(Output, dest=0, tag=11)
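
The script as committed reports only the raw hit count. Since each point is drawn uniformly in the unit square, the fraction falling inside the quarter circle converges to pi/4, so an estimate follows from pi ~ 4*hits/samples. A minimal sketch of that final step, reusing the variable names from the listing above (total, NODES, iterations) and not part of the original file, would sit right after the "Global Result" line on rank 0:

    # Hypothetical addition: every rank, master included, ran `iterations` samples
    samples = NODES * iterations
    pi_estimate = 4.0 * total / samples
    print("Pi estimate: %f" % pi_estimate)

The script is meant to be started through an MPI launcher, typically something like: mpiexec -n 4 python PiMPI.py. The exact launcher name and options depend on the MPI implementation installed.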
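
The explicit send/recv loops could also be written with mpi4py collectives; the following sketch (again an assumption, not the author's code) expresses the same exchange with bcast and reduce, run identically by every rank:

    # Master's per-rank workload is broadcast once, partial counts are summed on rank 0
    iterations = comm.bcast(Iterations // NODES + (Iterations % NODES != 0), root=0)
    hits = MainLoop(iterations)
    total = comm.reduce(hits, op=MPI.SUM, root=0)
    if rank == 0:
        print("Global Result: %i" % total)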