Batch building & testing #3

Merged

merged 56 commits on Jul 10, 2016

Changes from 1 commit

Commits (56)
0af8d09  automating a few things  (ocramz, Jul 8, 2016)
7965eca  trying out compose in travis  (ocramz, Jul 8, 2016)
a8b8a9d  typo  (ocramz, Jul 8, 2016)
2d4769c  typo  (ocramz, Jul 8, 2016)
f00556f  typo  (ocramz, Jul 8, 2016)
0accdb4  typo  (ocramz, Jul 8, 2016)
7462c6c  typo  (ocramz, Jul 8, 2016)
aa31067  trying out everything together  (ocramz, Jul 8, 2016)
a7ed3c4  trying out everything together  (ocramz, Jul 8, 2016)
1540474  trying out everything together  (ocramz, Jul 8, 2016)
d873eab  trying out everything together  (ocramz, Jul 8, 2016)
60d2240  trying out everything together  (ocramz, Jul 8, 2016)
0d18ac6  trying out everything together  (ocramz, Jul 8, 2016)
dbd1e58  trying out everything together  (ocramz, Jul 8, 2016)
93eac5f  typo  (ocramz, Jul 8, 2016)
607b21a  typo  (ocramz, Jul 8, 2016)
368d57c  typo  (ocramz, Jul 8, 2016)
b5fe7ce  typo  (ocramz, Jul 8, 2016)
246353b  typo  (ocramz, Jul 8, 2016)
7409059  typo  (ocramz, Jul 8, 2016)
e1086a7  typo  (ocramz, Jul 8, 2016)
7309665  typo  (ocramz, Jul 9, 2016)
030d41e  typo  (ocramz, Jul 9, 2016)
d89a42f  typo  (ocramz, Jul 9, 2016)
350e93a  typo  (ocramz, Jul 9, 2016)
0878595  typo  (ocramz, Jul 9, 2016)
57bbcef  absolute python script paths  (ocramz, Jul 9, 2016)
7592ed2  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
45bf7eb  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
a18ec49  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
aea6f48  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
89b0aae  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
a627ad3  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
a95236e  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
c40bb6f  all mpi4py tests in a single py file  (ocramz, Jul 9, 2016)
886af12  fixed matvec test output  (ocramz, Jul 9, 2016)
5d66fd3  fixed matvec test output  (ocramz, Jul 9, 2016)
d0bdf45  fixed matvec test output  (ocramz, Jul 9, 2016)
ef7a428  fixed matvec test output  (ocramz, Jul 9, 2016)
585bd07  fixed matvec test output  (ocramz, Jul 9, 2016)
a0a16bd  fixed all_tests.py  (ocramz, Jul 9, 2016)
c9683f5  fixed all_tests.py  (ocramz, Jul 9, 2016)
cb9d1e9  upd readme  (ocramz, Jul 9, 2016)
5d22b76  upd readme  (ocramz, Jul 9, 2016)
caec288  upd makefile  (ocramz, Jul 9, 2016)
174d4f7  upd readme  (ocramz, Jul 9, 2016)
cfa7d92  upd readme  (ocramz, Jul 9, 2016)
0cde14f  upd readme  (ocramz, Jul 9, 2016)
f6cee0a  upd readme  (ocramz, Jul 9, 2016)
78165e1  upd readme  (ocramz, Jul 9, 2016)
0ddbe43  upd readme  (ocramz, Jul 9, 2016)
2a80e7d  upd readme  (ocramz, Jul 9, 2016)
8ef570b  upd dockerfile  (ocramz, Jul 9, 2016)
a64cd19  upd dockerfile, using Phusion  (ocramz, Jul 9, 2016)
a872d59  rm run_tests.sh  (ocramz, Jul 9, 2016)
7bfce30  openmpi-checkpoint not present for phusion/baseimage  (ocramz, Jul 9, 2016)
all mpi4py tests in a single py file
ocramz committed Jul 9, 2016
commit 89b0aae11e4a900d8916be7bc34d149183cc1d39
162 changes: 162 additions & 0 deletions mpi4py_benchmarks/all_tests.py
@@ -0,0 +1,162 @@
"""
MVP : Demonstrating a MPI parallel Matrix-Vector Multiplication.
This code will run *iter* iterations of
v(t+1) = M * v(t)
where v is a vector of length *size* and M a dense size*size
matrix. *size* must be an integer multiple of comm.size.
v is initialized to be zero except of v[0] = 1.0
M is a "off-by-one" diagonal matrix M[i, i+1] = 1.0
In effect, after *iter* iterations, the vector v should look like
v[iter] = 1. (all others zero).
In this example every MPI process is responsible for calculating a
different portion of v. Every process only knows the stripe of M, that
is relevant for it's calculation. At the end of every iteration,
Allgather is used to distribute the partial vectors v to all other
processes.
"""

from __future__ import division

import numpy as np
from math import ceil, fabs
from mpi4py import MPI

#=============================================================================
# I/O Utilities

def pprint(msg="", end="\n", comm=MPI.COMM_WORLD):
    """Print for MPI parallel programs: only rank 0 prints *msg*."""
    if comm.rank == 0:
        print msg + end,

#=============================================================================
# Main

def mvp_main(size=10000, iter=2000):
    # size = 10000   # length of vector v
    # iter = 2000    # number of iterations to run

    counter = 0

    comm = MPI.COMM_WORLD

    pprint("============================================================================")
    pprint(" Running %d parallel MPI processes" % comm.size)

    my_size = size // comm.size   # Every process computes a vector of length *my_size*
    size = comm.size * my_size    # Make sure size is an integer multiple of comm.size
    my_offset = comm.rank * my_size

    # This is the complete vector
    vec = np.zeros(size)          # Every element zero...
    vec[0] = 1.0                  # ... except vec[0]

    # Create my (local) slice of the matrix
    my_M = np.zeros((my_size, size))
    for i in xrange(my_size):
        j = (my_offset + i - 1) % size
        my_M[i, j] = 1.0

    while counter < iter:
        comm.Barrier()                     ### Start stopwatch ###
        t_start = MPI.Wtime()

        for t in xrange(20):
            my_new_vec = np.inner(my_M, vec)

            comm.Allgather(
                [my_new_vec, MPI.DOUBLE],
                [vec, MPI.DOUBLE]
            )

        comm.Barrier()
        t_diff = MPI.Wtime() - t_start     ### Stop stopwatch ###

        # if fabs(vec[iter] - 1.0) > 0.01:
        #     pprint("!! Error: Wrong result!")

        pprint(" %d iterations of size %d in %5.2fs: %5.2f iterations per second" %
               (20, size, t_diff, 20 / t_diff))

        counter += 20
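
# ----------------------------------------------------------------------------
# Illustrative sketch: a serial, single-process version of the same
# "off-by-one" shift iteration, handy for checking the expected result on a
# small problem without launching MPI. The helper name _serial_shift_demo is
# introduced here for illustration only and is not part of the benchmark.

def _serial_shift_demo(size=8, iters=3):
    """After *iters* applications of M, the 1.0 should sit at v[iters]."""
    M = np.zeros((size, size))
    for i in xrange(size):
        M[i, (i - 1) % size] = 1.0    # M[i, i-1] = 1.0, so new_v[i] = v[i-1]
    v = np.zeros(size)
    v[0] = 1.0
    for _ in xrange(iters):
        v = np.inner(M, v)            # one step of v(t+1) = M * v(t)
    assert v[iters % size] == 1.0     # the 1.0 has moved *iters* places
    return v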




def osu_latency(
        BENCHMARK="MPI Latency Test",
        skip=1000,
        loop=10000,
        skip_large=10,
        loop_large=100,
        large_message_size=8192,
        MAX_MSG_SIZE=1 << 22,
        ):

    comm = MPI.COMM_WORLD
    myid = comm.Get_rank()
    numprocs = comm.Get_size()

    if numprocs != 2:
        if myid == 0:
            errmsg = "This test requires exactly two processes"
        else:
            errmsg = None
        raise SystemExit(errmsg)

    s_buf = allocate(MAX_MSG_SIZE)
    r_buf = allocate(MAX_MSG_SIZE)

    if myid == 0:
        print ('# %s' % (BENCHMARK,))
        print ('# %-8s%20s' % ("Size [B]", "Latency [us]"))

    message_sizes = [0] + [2**i for i in range(30)]
    for size in message_sizes:
        if size > MAX_MSG_SIZE:
            break
        if size > large_message_size:
            skip = skip_large
            loop = loop_large
        iterations = list(range(loop + skip))
        s_msg = [s_buf, size, MPI.BYTE]
        r_msg = [r_buf, size, MPI.BYTE]

        comm.Barrier()
        if myid == 0:
            for i in iterations:
                if i == skip:
                    t_start = MPI.Wtime()   # start timing after warm-up
                comm.Send(s_msg, 1, 1)
                comm.Recv(r_msg, 1, 1)
            t_end = MPI.Wtime()
        elif myid == 1:
            for i in iterations:
                comm.Recv(r_msg, 0, 1)
                comm.Send(s_msg, 0, 1)

        if myid == 0:
            # One-way latency in microseconds: *loop* timed round trips,
            # two messages per round trip.
            latency = (t_end - t_start) * 1e6 / (2 * loop)
            print ('%-10d%20.2f' % (size, latency))


def allocate(n):
    """Allocate an n-byte buffer: mmap if available, else numpy, else array."""
    try:
        import mmap
        return mmap.mmap(-1, n)
    except (ImportError, EnvironmentError):
        try:
            from numpy import zeros
            return zeros(n, 'B')
        except ImportError:
            from array import array
            return array('B', [0]) * n


if __name__ == '__main__':
    mvp_main()
    osu_latency()   # note: osu_latency requires exactly two MPI processes
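
A note on running the combined tests (a sketch, assuming an MPI launcher such as mpiexec and the mpi4py package are available; osu_latency exits unless exactly two ranks are used):

    mpiexec -n 2 python mpi4py_benchmarks/all_tests.py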