Commit 372cc968 authored by Roman Yurchak

Autopep8 on all benchmarks

parent ffe51346
@@ -118,7 +118,8 @@ test: all
lint:
flake8 src test tools pyodide_build benchmark/benchmark.py benchmark/plot_benchmark.py
flake8 src test tools pyodide_build benchmark/benchmark.py benchmark/plot_benchmark.py \
benchmark/benchmarks/
clang-format -output-replacements-xml src/*.c src/*.h src/*.js | (! grep '<replacement ')
......
@@ -66,7 +66,7 @@ def parse_numpy_benchmark(filename):
lines = []
with open(filename) as fp:
for line in fp:
m = re.match('^#(setup|run): (.*)$', line)
m = re.match('^#\s*(setup|run): (.*)$', line)
if m:
line = '{} = {!r}\n'.format(m.group(1), m.group(2))
lines.append(line)
......
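The `\s*` added to the pattern above is what keeps this parser working after autopep8 rewrites `#setup:` headers as `# setup:`. A minimal sketch of the behaviour (illustrative, not part of this commit):

import re

# Both the old '#setup:' and the autopep8-style '# setup:' forms match.
for line in ('#setup: import numpy as np', '# run: fft(a)'):
    m = re.match(r'^#\s*(setup|run): (.*)$', line)
    if m:
        print('{} = {!r}'.format(m.group(1), m.group(2)))
# prints: setup = 'import numpy as np'
# prints: run = 'fft(a)'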
From https://github.com/serge-sans-paille/numpy-benchmarks
From https: // github.com / serge - sans - paille / numpy - benchmarks
#setup: import numpy as np ; N = 50 ; X, Y = np.random.randn(100,N), np.random.randn(40,N)
#run: allpairs_distances(X, Y)
# setup: import numpy as np ; N = 50 ; X, Y = np.random.randn(100,N), np.random.randn(40,N) # noqa
# run: allpairs_distances(X, Y)
#pythran export allpairs_distances(float64[][], float64[][])
# pythran export allpairs_distances(float64[][], float64[][])
import numpy as np
@@ -11,4 +11,4 @@ def allpairs_distances(A, B):
"""
A2 = np.einsum('ij,ij->i', A, A)
B2 = np.einsum('ij,ij->i', B, B)
return A2[:, None] + B2[None, :] - 2*np.dot(A, B.T)
return A2[:, None] + B2[None, :] - 2 * np.dot(A, B.T)
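The einsum formulation relies on the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y applied to every pair of rows. A quick sanity check against the naive loop (illustrative, not part of this commit):

import numpy as np

X, Y = np.random.randn(5, 3), np.random.randn(4, 3)
fast = (np.einsum('ij,ij->i', X, X)[:, None]
        + np.einsum('ij,ij->i', Y, Y)[None, :] - 2 * np.dot(X, Y.T))
slow = np.array([[np.sum((x - y) ** 2) for y in Y] for x in X])
assert np.allclose(fast, slow)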
#setup: import numpy as np ; N = 50 ; X, Y = np.random.randn(30,N), np.random.randn(20,N)
#run: allpairs_distances_loops(X, Y)
# setup: import numpy as np ; N = 50 ; X, Y = np.random.randn(30,N), np.random.randn(20,N) # noqa
# run: allpairs_distances_loops(X, Y)
#pythran export allpairs_distances_loops(float64[][], float64[][])
# pythran export allpairs_distances_loops(float64[][], float64[][])
import numpy as np
def allpairs_distances_loops(X,Y):
result = np.zeros( (X.shape[0], Y.shape[0]), X.dtype)
for i in range(X.shape[0]):
for j in range(Y.shape[0]):
result[i,j] = np.sum( (X[i,:] - Y[j,:]) ** 2)
return result
def allpairs_distances_loops(X, Y):
result = np.zeros((X.shape[0], Y.shape[0]), X.dtype)
for i in range(X.shape[0]):
for j in range(Y.shape[0]):
result[i, j] = np.sum((X[i, :] - Y[j, :]) ** 2)
return result
#setup: N = 5000 ; import numpy as np ; t0, p0, t1, p1 = np.random.randn(N), np.random.randn(N), np.random.randn(N), np.random.randn(N)
#run: arc_distance(t0, p0, t1, p1)
# setup: N = 5000 ; import numpy as np ; t0, p0, t1, p1 = np.random.randn(N), np.random.randn(N), np.random.randn(N), np.random.randn(N) # noqa
# run: arc_distance(t0, p0, t1, p1)
#pythran export arc_distance(float64 [], float64[], float64[], float64[])
# pythran export arc_distance(float64 [], float64[], float64[], float64[])
import numpy as np
@@ -10,7 +10,7 @@ def arc_distance(theta_1, phi_1, theta_2, phi_2):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
temp = (np.sin((theta_2-theta_1)/2)**2
+ np.cos(theta_1) * np.cos(theta_2) * np.sin((phi_2-phi_1)/2)**2)
distance_matrix = 2 * (np.arctan2(np.sqrt(temp), np.sqrt(1-temp)))
temp = (np.sin((theta_2 - theta_1) / 2)**2 + np.cos(theta_1)
* np.cos(theta_2) * np.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (np.arctan2(np.sqrt(temp), np.sqrt(1 - temp)))
return distance_matrix
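`temp` here is the haversine term, and `2 * arctan2(sqrt(temp), sqrt(1 - temp))` converts it back to a central angle. A tiny check (illustrative, not part of this commit): coincident points must be at distance zero.

import numpy as np

t, p = np.array([0.3]), np.array([1.2])
temp = (np.sin((t - t) / 2) ** 2
        + np.cos(t) * np.cos(t) * np.sin((p - p) / 2) ** 2)
assert np.allclose(2 * np.arctan2(np.sqrt(temp), np.sqrt(1 - temp)), 0.0)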
#setup: n=100 ; import numpy as np; db = np.array(np.random.randint(2, size=(n, 4)), dtype=bool)
#run: check_mask(db)
#from: http://stackoverflow.com/questions/34500913/numba-slower-for-numpy-bitwise-and-on-boolean-arrays
# setup: n=100 ; import numpy; db = numpy.random.randint(2, size=(n, 4), dtype='bool') # noqa
# run: check_mask(db)
# from:
# http://stackoverflow.com/questions/34500913/numba-slower-for-numpy-bitwise-and-on-boolean-arrays
#pythran export check_mask(bool[][])
# pythran export check_mask(bool[][])
import numpy as np
def check_mask(db, mask=[1, 0, 1]):
out = np.zeros(db.shape[0],dtype=bool)
out = np.zeros(db.shape[0], dtype=bool)
for idx, line in enumerate(db):
target, vector = line[0], line[1:]
if (mask == np.bitwise_and(mask, vector)).all():
......
#from: http://stackoverflow.com/questions/13815719/creating-grid-with-numpy-performance
#pythran export create_grid(float [])
#setup: import numpy as np ; N = 800 ; x = np.arange(0,1,1./N)
#run: create_grid(x)
# http://stackoverflow.com/questions/13815719/creating-grid-with-numpy-performance
# pythran export create_grid(float [])
# setup: import numpy as np ; N = 800 ; x = np.arange(0,1,1./N)
# run: create_grid(x)
import numpy as np
def create_grid(x):
N = x.shape[0]
z = np.zeros((N, N, 3))
z[:,:,0] = x.reshape(-1,1)
z[:,:,1] = x
fast_grid = z.reshape(N*N, 3)
z[:, :, 0] = x.reshape(-1, 1)
z[:, :, 1] = x
fast_grid = z.reshape(N * N, 3)
return fast_grid
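The two broadcast assignments build the same coordinate grid that `np.meshgrid` with `indexing='ij'` would produce; a quick check (illustrative, not part of this commit):

import numpy as np

x = np.arange(0, 1, 1. / 4)
N = x.shape[0]
z = np.zeros((N, N, 3))
z[:, :, 0] = x.reshape(-1, 1)   # varies along rows
z[:, :, 1] = x                  # varies along columns
gx, gy = np.meshgrid(x, x, indexing='ij')
assert np.array_equal(z[:, :, 0], gx) and np.array_equal(z[:, :, 1], gy)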
#from: http://stackoverflow.com/questions/20799403/improving-performance-of-cronbach-alpha-code-python-numpy
#pythran export cronbach(float [][])
#setup: import numpy as np ; N = 600 ; items = np.random.rand(N,N)
#run: cronbach(items)
# http://stackoverflow.com/questions/20799403/improving-performance-of-cronbach-alpha-code-python-numpy
# pythran export cronbach(float [][])
# setup: import numpy as np ; N = 600 ; items = np.random.rand(N,N)
# run: cronbach(items)
def cronbach(itemscores):
itemvars = itemscores.var(axis=1, ddof=1)
tscores = itemscores.sum(axis=0)
nitems = len(itemscores)
return nitems / (nitems-1) * (1 - itemvars.sum() / tscores.var(ddof=1))
return nitems / (nitems - 1) * (1 - itemvars.sum() / tscores.var(ddof=1))
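This is the standard Cronbach alpha formula, alpha = N/(N-1) * (1 - sum of item variances / variance of total scores). As a sanity check (illustrative, not part of this commit), two perfectly correlated items give alpha exactly 1:

import numpy as np

v = np.random.rand(100)
items = np.vstack([v, v])            # two identical "items"
itemvars = items.var(axis=1, ddof=1)
tscores = items.sum(axis=0)
n = len(items)
alpha = n / (n - 1) * (1 - itemvars.sum() / tscores.var(ddof=1))
assert np.isclose(alpha, 1.0)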
#setup: import numpy as np;lx,ly=(2**6,2**6);u=np.zeros([lx,ly],dtype=np.double);u[lx//2,ly//2]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double)
#run: diffusion(u,tempU,100)
# setup: import numpy as np;lx,ly=(2**6,2**6);u=np.zeros([lx,ly],dtype=np.double);u[lx//2,ly//2]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double) # noqa
# run: diffusion(u,tempU,100)
#pythran export diffusion(float [][], float [][], int)
import numpy as np
# pythran export diffusion(float [][], float [][], int)
def diffusion(u, tempU, iterNum):
......
#setup: import numpy as np ; grid_shape = (512, 512) ; grid = np.zeros(grid_shape) ; block_low = int(grid_shape[0] * .4) ; block_high = int(grid_shape[0] * .5) ; grid[block_low:block_high, block_low:block_high] = 0.005
#run: evolve(grid, 0.1)
#from: High Performance Python by Micha Gorelick and Ian Ozsvald, http://shop.oreilly.com/product/0636920028963.do
# setup: import numpy as np ; grid_shape = (512, 512) ; grid = np.zeros(grid_shape) ; block_low = int(grid_shape[0] * .4) ; block_high = int(grid_shape[0] * .5) ; grid[block_low:block_high, block_low:block_high] = 0.005 # noqa
# run: evolve(grid, 0.1)
# from: High Performance Python by Micha Gorelick and Ian Ozsvald,
# http://shop.oreilly.com/product/0636920028963.do
#pythran export evolve(float64[][], float)
# pythran export evolve(float64[][], float)
import numpy as np
......
#from: http://stackoverflow.com/questions/19367488/converting-function-to-numbapro-cuda
#setup: N = 10 ; import numpy ; a = numpy.random.rand(N,N)
#run: fdtd(a,10)
# http://stackoverflow.com/questions/19367488/converting-function-to-numbapro-cuda
# setup: N = 10 ; import numpy ; a = numpy.random.rand(N,N)
# run: fdtd(a,10)
#pythran export fdtd(float[][], int)
# pythran export fdtd(float[][], int)
import numpy as np
def fdtd(input_grid, steps):
grid = input_grid.copy()
old_grid = np.zeros_like(input_grid)
@@ -19,17 +20,17 @@ def fdtd(input_grid, steps):
for x in range(l_x):
for y in range(l_y):
grid[x,y] = 0.0
if 0 < x+1 < l_x:
grid[x,y] += old_grid[x+1,y]
if 0 < x-1 < l_x:
grid[x,y] += old_grid[x-1,y]
if 0 < y+1 < l_y:
grid[x,y] += old_grid[x,y+1]
if 0 < y-1 < l_y:
grid[x,y] += old_grid[x,y-1]
grid[x,y] /= 2.0
grid[x,y] -= previous_grid[x,y]
grid[x, y] = 0.0
if 0 < x + 1 < l_x:
grid[x, y] += old_grid[x + 1, y]
if 0 < x - 1 < l_x:
grid[x, y] += old_grid[x - 1, y]
if 0 < y + 1 < l_y:
grid[x, y] += old_grid[x, y + 1]
if 0 < y - 1 < l_y:
grid[x, y] += old_grid[x, y - 1]
grid[x, y] /= 2.0
grid[x, y] -= previous_grid[x, y]
return grid
#setup: N = 2**11 ; import numpy ; a = numpy.array(numpy.random.rand(N), dtype=complex)
#run: fft(a)
# setup: N = 2**11 ; import numpy ; a = numpy.array(numpy.random.rand(N), dtype=complex) # noqa
# run: fft(a)
#pythran export fft(complex [])
# pythran export fft(complex [])
import numpy as np
def fft(x):
return np.fft.fft(x)  # np.fft is a module; the callable is np.fft.fft
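For reference (illustrative, not part of this commit), `np.fft.fft` of a constant signal puts all the energy in bin 0:

import numpy as np

a = np.ones(8, dtype=complex)
out = np.fft.fft(a)
assert np.isclose(out[0], 8) and np.allclose(out[1:], 0)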
#from http://stackoverflow.com/questions/26823312/numba-or-cython-acceleration-in-reaction-diffusion-algorithm
#setup: pass
#run: grayscott(40, 0.16, 0.08, 0.04, 0.06)
# http://stackoverflow.com/questions/26823312/numba-or-cython-acceleration-in-reaction-diffusion-algorithm
# setup: pass
# run: grayscott(40, 0.16, 0.08, 0.04, 0.06)
#pythran export grayscott(int, float, float, float, float)
# pythran export grayscott(int, float, float, float, float)
import numpy as np
def grayscott(counts, Du, Dv, F, k):
n = 100
U = np.zeros((n+2,n+2), dtype=np.float32)
V = np.zeros((n+2,n+2), dtype=np.float32)
u, v = U[1:-1,1:-1], V[1:-1,1:-1]
U = np.zeros((n + 2, n + 2), dtype=np.float32)
V = np.zeros((n + 2, n + 2), dtype=np.float32)
u, v = U[1:-1, 1:-1], V[1:-1, 1:-1]
r = 20
u[:] = 1.0
U[n//2-r:n//2+r,n//2-r:n//2+r] = 0.50
V[n//2-r:n//2+r,n//2-r:n//2+r] = 0.25
u += 0.15*np.random.random((n,n))
v += 0.15*np.random.random((n,n))
U[n // 2 - r:n // 2 + r, n // 2 - r:n // 2 + r] = 0.50
V[n // 2 - r:n // 2 + r, n // 2 - r:n // 2 + r] = 0.25
u += 0.15 * np.random.random((n, n))
v += 0.15 * np.random.random((n, n))
for i in range(counts):
Lu = ( U[0:-2,1:-1] +
U[1:-1,0:-2] - 4*U[1:-1,1:-1] + U[1:-1,2:] +
U[2: ,1:-1] )
Lv = ( V[0:-2,1:-1] +
V[1:-1,0:-2] - 4*V[1:-1,1:-1] + V[1:-1,2:] +
V[2: ,1:-1] )
uvv = u*v*v
u += Du*Lu - uvv + F*(1 - u)
v += Dv*Lv + uvv - (F + k)*v
Lu = (U[0:-2, 1:-1] +
U[1:-1, 0:-2] - 4 * U[1:-1, 1:-1] + U[1:-1, 2:] +
U[2:, 1:-1])
Lv = (V[0:-2, 1:-1] +
V[1:-1, 0:-2] - 4 * V[1:-1, 1:-1] + V[1:-1, 2:] +
V[2:, 1:-1])
uvv = u * v * v
u += Du * Lu - uvv + F * (1 - u)
v += Dv * Lv + uvv - (F + k) * v
return V
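The `Lu`/`Lv` expressions are the 5-point Laplacian stencil evaluated on the interior of the grid. A quick property check (illustrative, not part of this commit): the stencil vanishes on a linear ramp.

import numpy as np

U = np.arange(25, dtype=np.float32).reshape(5, 5)   # U[i, j] = 5*i + j
Lu = (U[0:-2, 1:-1] + U[1:-1, 0:-2] - 4 * U[1:-1, 1:-1]
      + U[1:-1, 2:] + U[2:, 1:-1])
assert np.allclose(Lu, 0)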
#from: http://stackoverflow.com/questions/4651683/numpy-grouping-using-itertools-groupby-performance
#setup: import numpy as np ; N = 350000 ; values = np.array(np.random.randint(0,3298,size=N),dtype='u4') ; values.sort()
#run: grouping(values)
# http://stackoverflow.com/questions/4651683/numpy-grouping-using-itertools-groupby-performance
# setup: import numpy as np ; N = 350000 ; values = np.array(np.random.randint(0,3298,size=N),dtype='u4') ; values.sort() # noqa
# run: grouping(values)
# pythran export grouping(uint32 [])
#pythran export grouping(uint32 [])
def grouping(values):
import numpy as np
......
#from: http://continuum.io/blog/numba_performance
#setup: N = 10 ; import numpy as np ; image = np.random.rand(N, N, 3) ; state = np.zeros((N, N, 2)) ; state_next = np.empty_like(state) ; state[0, 0, 0] = state[0, 0, 1] = 1
#run: growcut(image, state, state_next, 2)
# from: http://continuum.io/blog/numba_performance
# setup: N = 10 ; import numpy as np ; image = np.random.rand(N, N, 3) ; state = np.zeros((N, N, 2)) ; state_next = np.empty_like(state) ; state[0, 0, 0] = state[0, 0, 1] = 1 # noqa
# run: growcut(image, state, state_next, 2)
#pythran export growcut(float[][][], float[][][], float[][][], int)
# pythran export growcut(float[][][], float[][][], float[][][], int)
import math
import numpy as np
def window_floor(idx, radius):
if radius > idx:
return 0
@@ -18,6 +19,7 @@ def window_ceil(idx, ceil, radius):
else:
return idx + radius
def growcut(image, state, state_next, window_radius):
changes = 0
sqrt_3 = math.sqrt(3.0)
@@ -32,16 +34,16 @@ def growcut(image, state, state_next, window_radius):
defense_strength = state[i, j, 1]
for jj in range(window_floor(j, window_radius),
window_ceil(j+1, width, window_radius)):
window_ceil(j + 1, width, window_radius)):
for ii in range(window_floor(i, window_radius),
window_ceil(i+1, height, window_radius)):
window_ceil(i + 1, height, window_radius)):
if (ii != i and jj != j):
d = image[i, j, 0] - image[ii, jj, 0]
s = d * d
for k in range(1, 3):
d = image[i, j, k] - image[ii, jj, k]
s += d * d
gval = 1.0 - math.sqrt(s)/sqrt_3
gval = 1.0 - math.sqrt(s) / sqrt_3
attack_strength = gval * state[ii, jj, 1]
......
#from: parakeet testbed
#setup: import numpy as np ; M, N = 512, 512 ; I = np.random.randn(M,N)
#run: harris(I)
#pythran export harris(float64[][])
import numpy as np
# from: parakeet testbed
# setup: import numpy as np ; M, N = 512, 512 ; I = np.random.randn(M,N)
# run: harris(I)
# pythran export harris(float64[][])
def harris(I):
m,n = I.shape
dx = (I[1:, :] - I[:m-1, :])[:, 1:]
dy = (I[:, 1:] - I[:, :n-1])[1:, :]
m, n = I.shape
dx = (I[1:, :] - I[:m - 1, :])[:, 1:]
dy = (I[:, 1:] - I[:, :n - 1])[1:, :]
#
# At each point we build a matrix
# of derivative products
# M =
# | A = dx^2 C = dx * dy |
# | C = dy * dx B = dy * dy |
#
# and the score at that point is:
# det(M) - k*trace(M)^2
#
A = dx * dx
B = dy * dy
C = dx * dy
tr = A + B
det = A * B - C * C
k = 0.05
return det - k * tr * tr
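The score computed elementwise here matches det(M) - k*trace(M)^2 for the 2x2 structure matrix described in the comment; a scalar check (illustrative, not part of this commit):

import numpy as np

A, B, C, k = 2.0, 3.0, 1.0, 0.05
M = np.array([[A, C], [C, B]])
assert np.isclose((A * B - C * C) - k * (A + B) ** 2,
                  np.linalg.det(M) - k * np.trace(M) ** 2)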
#from: http://wiki.scipy.org/Cookbook/Theoretical_Ecology/Hastings_and_Powell
#setup: import numpy as np ; y = np.random.rand(3) ; args = np.random.rand(7)
#run: hasting(y, *args)
# from: http://wiki.scipy.org/Cookbook/Theoretical_Ecology/Hastings_and_Powell
# setup: import numpy as np ; y = np.random.rand(3) ; args = np.random.rand(7)
# run: hasting(y, *args)
#pythran export hasting(float [], float, float, float, float, float, float, float)
# pythran export hasting(float [], float, float, float, float, float,
# float, float)
import numpy as np
def hasting(y, t, a1, a2, b1, b2, d1, d2):
yprime = np.empty((3,))
yprime[0] = y[0] * (1. - y[0]) - a1*y[0]*y[1]/(1. + b1 * y[0])
yprime[1] = a1*y[0]*y[1] / (1. + b1 * y[0]) - a2 * y[1]*y[2] / (1. + b2 * y[1]) - d1 * y[1]
yprime[2] = a2*y[1]*y[2]/(1. + b2*y[1]) - d2*y[2]
yprime[0] = y[0] * (1. - y[0]) - a1 * y[0] * y[1] / (1. + b1 * y[0])
yprime[1] = a1 * y[0] * y[1] / (1. + b1 * y[0]) - \
a2 * y[1] * y[2] / (1. + b2 * y[1]) - d1 * y[1]
yprime[2] = a2 * y[1] * y[2] / (1. + b2 * y[1]) - d2 * y[2]
return yprime
#setup: import numpy ; a = numpy.array([ [i/10., i/10., i/20.] for i in range(44440)], dtype=numpy.double)
#run: hyantes(0, 0, 90, 90, 1, 100, 80, 80, a)
# setup: import numpy ; a = numpy.array([ [i/10., i/10., i/20.] for i in range(44440)], dtype=numpy.double) # noqa
# run: hyantes(0, 0, 90, 90, 1, 100, 80, 80, a)
#pythran export hyantes(float, float, float, float, float, float, int, int, float[][])
# pythran export hyantes(float, float, float, float, float, float, int,
# int, float[][])
import numpy as np
def hyantes(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t):
X,Y = t.shape
pt = np.zeros((X,Y))
X, Y = t.shape
pt = np.zeros((X, Y))
for i in range(X):
for j in range(Y):
for k in t:
tmp = 6368.* np.arccos( np.cos(xmin+step*i)*np.cos( k[0] ) * np.cos((ymin+step*j)-k[1])+ np.sin(xmin+step*i)*np.sin(k[0]))
tmp = (6368. * np.arccos(np.cos(xmin + step * i)
* np.cos(k[0]) * np.cos((ymin + step * j) - k[1])
+ np.sin(xmin + step * i) * np.sin(k[0])))
if tmp < range_:
pt[i,j]+=k[2] / (1+tmp)
pt[i, j] += k[2] / (1 + tmp)
return pt
#setup: N=10
#run: julia(1., 1., N, 1.5, 10., 1e4)
# setup: N=10
# run: julia(1., 1., N, 1.5, 10., 1e4)
#pythran export julia(float, float, int, float, float, float)
# pythran export julia(float, float, int, float, float, float)
import numpy as np
from time import time
def kernel(zr, zi, cr, ci, lim, cutoff):
''' Computes the number of iterations `n` such that
|z_n| > `lim`, where `z_n = z_{n-1}**2 + c`.
'''
count = 0
while ((zr*zr + zi*zi) < (lim*lim)) and count < cutoff:
while ((zr * zr + zi * zi) < (lim * lim)) and count < cutoff:
zr, zi = zr * zr - zi * zi + cr, 2 * zr * zi + ci
count += 1
return count
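Two boundary cases pin down the kernel's behaviour (illustrative, not part of this commit): c = 0 starting from z = 0 never escapes, while a start far outside the escape radius returns immediately.

assert kernel(0., 0., 0., 0., 10., 100) == 100   # hits the cutoff
assert kernel(100., 0., 0., 0., 10., 100) == 0   # |z| already > lim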
def julia(cr, ci, N, bound=1.5, lim=1000., cutoff=1e6):
''' Pure Python calculation of the Julia set for a given `c`. No NumPy
array operations are used.
@@ -23,5 +24,5 @@ def julia(cr, ci, N, bound=1.5, lim=1000., cutoff=1e6):
grid_x = np.linspace(-bound, bound, N)
for i, x in enumerate(grid_x):
for j, y in enumerate(grid_x):
julia[i,j] = kernel(x, y, cr, ci, lim, cutoff)
julia[i, j] = kernel(x, y, cr, ci, lim, cutoff)
return julia
#from: http://stackoverflow.com/questions/7741878/how-to-apply-numpy-linalg-norm-to-each-row-of-a-matrix/7741976#7741976
#setup: import numpy as np ; N = 1000; x = np.random.rand(N,N)
#run: l2norm(x)
# http://stackoverflow.com/questions/7741878/how-to-apply-numpy-linalg-norm-to-each-row-of-a-matrix/7741976#7741976
# setup: import numpy as np ; N = 1000; x = np.random.rand(N,N)
# run: l2norm(x)
#pythran export l2norm(float64[][])
# pythran export l2norm(float64[][])
import numpy as np
......
#from: https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py
#setup: import numpy as np ; shape = (3,2,3,2) ; x = np.arange(36, dtype=np.float64).reshape(*shape)
#run: local_maxima(x)
# https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py
# setup: import numpy as np ; shape = (3,2,3,2) ; x = np.arange(36, dtype=np.float64).reshape(*shape) # noqa
# run: local_maxima(x)
#pythran export local_maxima(float [][][][])
# pythran export local_maxima(float [][][][])
import numpy as np
@@ -11,12 +11,12 @@ def wrap(pos, offset, bound):
def clamp(pos, offset, bound):
return min(bound-1, max(0, pos+offset))
return min(bound - 1, max(0, pos + offset))
def reflect(pos, offset, bound):
idx = pos+offset
return min(2*(bound-1)-idx, max(idx, -idx))
idx = pos + offset
return min(2 * (bound - 1) - idx, max(idx, -idx))
def local_maxima(data, mode=wrap):
@@ -25,7 +25,7 @@ def local_maxima(data, mode=wrap):
for pos in np.ndindex(data.shape):
myval = data[pos]
for offset in np.ndindex(wsize):
neighbor_idx = tuple(mode(p, o-w//2, w)
neighbor_idx = tuple(mode(p, o - w // 2, w)
for (p, o, w) in zip(pos, offset, wsize))
result[pos] &= (data[neighbor_idx] <= myval)
return result
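The three boundary modes all map an out-of-range neighbour index back into [0, bound); a sketch assuming the usual modular definition of `wrap`, whose body is elided from this hunk (illustrative, not part of this commit):

def wrap(pos, offset, bound):       # assumed definition: modular wrap
    return (pos + offset) % bound

def clamp(pos, offset, bound):
    return min(bound - 1, max(0, pos + offset))

def reflect(pos, offset, bound):
    idx = pos + offset
    return min(2 * (bound - 1) - idx, max(idx, -idx))

assert wrap(4, 2, 5) == 1 and clamp(4, 2, 5) == 4 and reflect(4, 2, 5) == 2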
#setup: import numpy as np ; N = 100000 ; a = np.random.random(N); b = 0.1; c =1.1
#run: log_likelihood(a, b, c)
#from: http://arogozhnikov.github.io/2015/09/08/SpeedBenchmarks.html
# setup: import numpy as np ; N = 100000 ; a = np.random.random(N); b = 0.1; c =1.1 # noqa
# run: log_likelihood(a, b, c)
# from: http://arogozhnikov.github.io/2015/09/08/SpeedBenchmarks.html
import numpy
#pythran export log_likelihood(float64[], float64, float64)
# pythran export log_likelihood(float64[], float64, float64)
def log_likelihood(data, mean, sigma):
s = (data - mean) ** 2 / (2 * (sigma ** 2))
......
#setup: import numpy as np ; N = 500000 ; X, Y = np.random.rand(N), np.random.rand(N)
#run: lstsqr(X, Y)
#from: http://nbviewer.ipython.org/github/rasbt/One-Python-benchmark-per-day/blob/master/ipython_nbs/day10_fortran_lstsqr.ipynb
# setup: import numpy as np ; N = 500000 ; X, Y = np.random.rand(N), np.random.rand(N) # noqa
# run: lstsqr(X, Y)
# from:
# http://nbviewer.ipython.org/github/rasbt/One-Python-benchmark-per-day/blob/master/ipython_nbs/day10_fortran_lstsqr.ipynb
#pythran export lstsqr(float[], float[])
# pythran export lstsqr(float[], float[])
import numpy as np
@@ -11,9 +12,8 @@ def lstsqr(x, y):
x_avg = np.average(x)
y_avg = np.average(y)
dx = x - x_avg
dy = y - y_avg
var_x = np.sum(dx**2)
cov_xy = np.sum(dx * (y - y_avg))
slope = cov_xy / var_x
y_interc = y_avg - slope*x_avg
y_interc = y_avg - slope * x_avg
return (slope, y_interc)
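These are the closed-form simple linear regression estimates, slope = cov(x, y) / var(x) and intercept = mean(y) - slope * mean(x). On exactly linear data they recover the true parameters (illustrative, not part of this commit):

import numpy as np

x = np.arange(10.0)
y = 3.0 * x + 2.0
dx = x - np.average(x)
slope = np.sum(dx * (y - np.average(y))) / np.sum(dx ** 2)
assert np.allclose([slope, np.average(y) - slope * np.average(x)], [3.0, 2.0])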
#setup: import numpy as np; image = np.zeros((64, 32), dtype = np.uint8)
#run: mandel(-2.0, 1.0, -1.0, 1.0, image, 20)
# setup: import numpy as np; image = np.zeros((64, 32), dtype = np.uint8)
# run: mandel(-2.0, 1.0, -1.0, 1.0, image, 20)
#pythran export mandel(float, float, float, float, uint8[][], int)
# pythran export mandel(float, float, float, float, uint8[][], int)
def kernel(x, y, max_iters):
@@ -13,8 +13,8 @@ def kernel(x, y, max_iters):
c = complex(x, y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
z = z * z + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return i
return max_iters
......
#from http://stackoverflow.com/questions/7799977/numpy-vs-cython-speed
#pythran export multiple_sum(float[][])
#setup: import numpy as np ; r = np.random.rand(100,100)
#run: multiple_sum(r)
# from http://stackoverflow.com/questions/7799977/numpy-vs-cython-speed
# pythran export multiple_sum(float[][])
# setup: import numpy as np ; r = np.random.rand(100,100)
# run: multiple_sum(r)
import numpy as np
......
#from: http://jakevdp.github.com/blog/2012/08/24/numba-vs-cython/
#setup: import numpy as np ; X = np.linspace(0,10,200).reshape(20,10)
#run: pairwise_loop(X)
# from: http://jakevdp.github.com/blog/2012/08/24/numba-vs-cython/
# setup: import numpy as np ; X = np.linspace(0,10,200).reshape(20,10)
# run: pairwise_loop(X)
#pythran export pairwise_loop(float [][])
# pythran export pairwise_loop(float [][])
import numpy as np
def pairwise_loop(X):
M, N = X.shape
D = np.empty((M,M))
D = np.empty((M, M))
for i in range(M):
for j in range(M):
d = 0.0
for k in range(N):
tmp = X[i,k] - X[j,k]
tmp = X[i, k] - X[j, k]
d += tmp * tmp
D[i,j] = np.sqrt(d)
D[i, j] = np.sqrt(d)
return D
#setup: import numpy as np ; N = 20 ; x = y = z = np.arange(0., N, 0.1) ; L = 4 ; periodic = True
#run: periodic_dist(x, x, x, L,periodic, periodic, periodic)
# setup: import numpy as np ; N = 20 ; x = y = z = np.arange(0., N, 0.1) ; L = 4 ; periodic = True # noqa
# run: periodic_dist(x, x, x, L,periodic, periodic, periodic)
# pythran export periodic_dist(float [], float[], float[], int, bool,
# bool, bool)
import numpy as np
#pythran export periodic_dist(float [], float[], float[], int, bool, bool, bool)
import numpy as np
def periodic_dist(x, y, z, L, periodicX, periodicY, periodicZ):
" ""Computes distances between all particles and places the result in a matrix such that the ij th matrix entry corresponds to the distance between particle i and j"" "
"""Computes distances between all particles and places the result
in a matrix such that the ij th matrix entry corresponds to the
distance between particle i and j"""
N = len(x)
xtemp = np.tile(x,(N,1))
xtemp = np.tile(x, (N, 1))
dx = xtemp - xtemp.T
ytemp = np.tile(y,(N,1))
ytemp = np.tile(y, (N, 1))
dy = ytemp - ytemp.T
ztemp = np.tile(z,(N,1))
ztemp = np.tile(z, (N, 1))
dz = ztemp - ztemp.T
# Particles 'feel' each other across the periodic boundaries
if periodicX:
dx[dx>L/2]=dx[dx > L/2]-L
dx[dx<-L/2]=dx[dx < -L/2]+L
dx[dx > L / 2] = dx[dx > L / 2] - L
dx[dx < -L / 2] = dx[dx < -L / 2] + L
if periodicY:
dy[dy>L/2]=dy[dy>L/2]-L
dy[dy<-L/2]=dy[dy<-L/2]+L
dy[dy > L / 2] = dy[dy > L / 2] - L
dy[dy < -L / 2] = dy[dy < -L / 2] + L
if periodicZ:
dz[dz>L/2]=dz[dz>L/2]-L
dz[dz<-L/2]=dz[dz<-L/2]+L
dz[dz > L / 2] = dz[dz > L / 2] - L
dz[dz < -L / 2] = dz[dz < -L / 2] + L
# Total Distances
d = np.sqrt(dx**2+dy**2+dz**2)
d = np.sqrt(dx**2 + dy**2 + dz**2)
# Mark zero entries with negative 1 to avoid divergences
d[d==0] = -1
d[d == 0] = -1
return d, dx, dy, dz
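The `dx > L/2` / `dx < -L/2` folding is the minimum-image convention: each separation is replaced by its nearest periodic image. A small check (illustrative, not part of this commit):

import numpy as np

L = 1.0
dx = np.array([0.9, -0.9, 0.3])
dx[dx > L / 2] -= L
dx[dx < -L / 2] += L
assert np.allclose(dx, [-0.1, 0.1, 0.3])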
#from: http://stackoverflow.com/questions/14553331/how-to-improve-numpy-performance-in-this-short-code
#pythran export repeating(float[], int)
#setup: import numpy as np ; a = np.random.rand(10000)
#run: repeating(a, 20)
# http://stackoverflow.com/questions/14553331/how-to-improve-numpy-performance-in-this-short-code
# pythran export repeating(float[], int)
# setup: import numpy as np ; a = np.random.rand(10000)
# run: repeating(a, 20)
import numpy as np
def repeating(x, nvar_y):
nvar_x = x.shape[0]
y = np.empty(nvar_x*(1+nvar_y))
y = np.empty(nvar_x * (1 + nvar_y))
y[0:nvar_x] = x[0:nvar_x]
y[nvar_x:] = np.repeat(x,nvar_y)
y[nvar_x:] = np.repeat(x, nvar_y)
return y
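The output is `x` followed by each element of `x` repeated `nvar_y` times, since `np.repeat` repeats elementwise rather than tiling. For example (illustrative, not part of this commit):

import numpy as np

x = np.array([1.0, 2.0])
y = np.empty(x.shape[0] * (1 + 3))
y[:x.shape[0]] = x
y[x.shape[0]:] = np.repeat(x, 3)
assert np.array_equal(y, [1, 2, 1, 1, 1, 2, 2, 2])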
#from: http://stackoverflow.com/questions/16541618/perform-a-reverse-cumulative-sum-on-a-numpy-array
#pythran export reverse_cumsum(float[])
#setup: import numpy as np ; r = np.random.rand(1000000)
#run: reverse_cumsum(r)
# http://stackoverflow.com/questions/16541618/perform-a-reverse-cumulative-sum-on-a-numpy-array
# pythran export reverse_cumsum(float[])
# setup: import numpy as np ; r = np.random.rand(1000000)
# run: reverse_cumsum(r)
import numpy as np
def reverse_cumsum(x):
return np.cumsum(x[::-1])[::-1]
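Reversing, accumulating, and reversing again yields suffix sums (illustrative, not part of this commit):

import numpy as np

assert np.array_equal(np.cumsum(np.array([1, 2, 3])[::-1])[::-1], [6, 5, 3])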
#setup: import numpy as np; r = np.arange(1000000, dtype=float)
#run: rosen(r)
# setup: import numpy as np; r = np.arange(1000000, dtype=float)
# run: rosen(r)
import numpy as np
#pythran export rosen(float[])
# pythran export rosen(float[])
def rosen(x):
......
#from: https://groups.google.com/forum/#!topic/parakeet-python/p-flp2kdE4U
#setup: import numpy as np ;d = 10 ;re = 5 ;params = (d, re, np.ones((2*d, d+1, re)), np.ones((d, d+1, re)), np.ones((d, 2*d)), np.ones((d, 2*d)), np.ones((d+1, re, d)), np.ones((d+1, re, d)), 1)
#run: slowparts(*params)
# from: https://groups.google.com/forum/#!topic/parakeet-python/p-flp2kdE4U
# setup: import numpy as np ;d = 10 ;re = 5 ;params = (d, re, np.ones((2*d, d+1, re)), np.ones((d, d+1, re)), np.ones((d, 2*d)), np.ones((d, 2*d)), np.ones((d+1, re, d)), np.ones((d+1, re, d)), 1) # noqa
# run: slowparts(*params)
#pythran export slowparts(int, int, float [][][], float [][][], float [][], float [][], float [][][], float [][][], int)
# pythran export slowparts(int, int, float [][][], float [][][], float
# [][], float [][], float [][][], float [][][], int)
from numpy import zeros, power, tanh
def slowparts(d, re, preDz, preWz, SRW, RSW, yxV, xyU, resid):
""" computes the linear algebra intensive part of the gradients of the grae
"""
fprime = lambda x: 1 - power(tanh(x), 2)
def fprime(x): return 1 - power(tanh(x), 2)
partialDU = zeros((d+1, re, 2*d, d))
for k in range(2*d):
partialDU = zeros((d + 1, re, 2 * d, d))
for k in range(2 * d):
for i in range(d):
partialDU[:,:,k,i] = fprime(preDz[k]) * fprime(preWz[i]) * (SRW[i,k] + RSW[i,k]) * yxV[:,:,i]
partialDU[:, :, k, i] = (
fprime(preDz[k]) * fprime(preWz[i])
* (SRW[i, k] + RSW[i, k]) * yxV[:, :, i])
return partialDU
#setup: import numpy as np ; N = 1000 ; a = np.arange(0,1,N)
#run: smoothing(a, .4)
#from: http://www.parakeetpython.com/
# setup: import numpy as np ; N = 1000 ; a = np.arange(0,1,N)
# run: smoothing(a, .4)
# from: http://www.parakeetpython.com/
# pythran export smoothing(float[], float)
#pythran export smoothing(float[], float)
def smoothing(x, alpha):
"""
Exponential smoothing of a time series
For x = 10**6 floats
- Python runtime: 9 seconds
- Parakeet runtime: .01 seconds
"""
s = x.copy()
for i in range(1, len(x)):
s[i] = alpha * x[i] + (1 - alpha) * s[i-1]
s[i] = alpha * x[i] + (1 - alpha) * s[i - 1]
return s
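The two extremes of `alpha` bound the smoother's behaviour (illustrative, not part of this commit): `alpha = 1` reproduces the input, `alpha = 0` holds the first value forever.

import numpy as np

x = np.array([1.0, 5.0, 2.0])
for alpha, expected in ((1.0, [1.0, 5.0, 2.0]), (0.0, [1.0, 1.0, 1.0])):
    s = x.copy()
    for i in range(1, len(x)):
        s[i] = alpha * x[i] + (1 - alpha) * s[i - 1]
    assert np.allclose(s, expected)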
#from: http://stackoverflow.com/questions/2196693/improving-numpy-performance
#pythran export specialconvolve(uint32 [][])
#setup: import numpy as np ; r = np.arange(100*10000, dtype=np.uint32).reshape(1000,1000)
#run: specialconvolve(r)
# from: http://stackoverflow.com/questions/2196693/improving-numpy-performance
# pythran export specialconvolve(uint32 [][])
# setup: import numpy as np ; r = np.arange(100*10000, dtype=np.uint32).reshape(1000,1000) # noqa
# run: specialconvolve(r)
def specialconvolve(a):
# sorry, you must pad the input yourself
rowconvol = a[1:-1,:] + a[:-2,:] + a[2:,:]
colconvol = rowconvol[:,1:-1] + rowconvol[:,:-2] + rowconvol[:,2:] - 9*a[1:-1,1:-1]
rowconvol = a[1:-1, :] + a[:-2, :] + a[2:, :]
colconvol = rowconvol[:, 1:-1] + rowconvol[:, :-2] + \
rowconvol[:, 2:] - 9 * a[1:-1, 1:-1]
return colconvol
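The two separable passes compute the 3x3 neighbourhood sum minus nine times the centre, so a constant image maps to zero (illustrative, not part of this commit):

import numpy as np

a = np.full((5, 5), 7, dtype=np.int64)
rowconvol = a[1:-1, :] + a[:-2, :] + a[2:, :]
out = (rowconvol[:, 1:-1] + rowconvol[:, :-2]
       + rowconvol[:, 2:] - 9 * a[1:-1, 1:-1])
assert np.allclose(out, 0)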
#from: http://stackoverflow.com/questions/17112550/python-and-numba-for-vectorized-functions
#setup: import numpy as np ; N = 100000 ; a, b, c = np.random.rand(N), np.random.rand(N), np.random.rand(N)
#run: vibr_energy(a, b, c)
# from: http://stackoverflow.com/questions/17112550/python-and-numba-for-vectorized-functions # noqa
# setup: import numpy as np ; N = 100000 ; a, b, c = np.random.rand(N), np.random.rand(N), np.random.rand(N) # noqa
# run: vibr_energy(a, b, c)
#pythran export vibr_energy(float64[], float64[], float64[])
# pythran export vibr_energy(float64[], float64[], float64[])
import numpy
......
#from https://github.com/sklam/numba-example-wavephysics
#setup: N=100
#run: wave(N)
# from https://github.com/sklam/numba-example-wavephysics
# setup: N=100
# run: wave(N)
import numpy as np
from math import ceil
def physics(masspoints, dt, plunk, which):
ppos = masspoints[1]
cpos = masspoints[0]
N = cpos.shape[0]
# apply hooke's law
HOOKE_K = 2100000.
DAMPING = 0.0001
MASS = .01
force = np.zeros((N, 2))
for i in range(1, N):
dx, dy = cpos[i] - cpos[i - 1]
dist = np.sqrt(dx**2 + dy**2)
assert dist != 0
fmag = -HOOKE_K * dist
cosine = dx / dist
sine = dy / dist
fvec = np.array([fmag * cosine, fmag * sine])
force[i - 1] -= fvec
force[i] += fvec
force[0] = force[-1] = 0, 0
force[which][1] += plunk
accel = force / MASS
# verlet integration
npos = (2 - DAMPING) * cpos - (1 - DAMPING) * ppos + accel * (dt**2)
masspoints[1] = cpos
masspoints[0] = npos
#pythran export wave(int)
# pythran export wave(int)
def wave(PARTICLE_COUNT):
SUBDIVISION = 300
FRAMERATE = 60
@@ -50,5 +52,5 @@ def wave(PARTICLE_COUNT):
masspoints[:, :, 1] = height / 2
f = 15
plunk_pos = count // 2
physics( masspoints, 1./ (SUBDIVISION * FRAMERATE), f, plunk_pos)
physics(masspoints, 1. / (SUBDIVISION * FRAMERATE), f, plunk_pos)
return masspoints[0, count // 2]
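The position update in `physics` is damped Verlet integration; with no damping and zero acceleration it reduces to linear extrapolation, npos = 2*cpos - ppos (illustrative, not part of this commit):

import numpy as np

cpos, ppos, accel = np.array([2.0]), np.array([1.0]), 0.0
npos = (2 - 0.0) * cpos - (1 - 0.0) * ppos + accel * (1.0 ** 2)
assert np.allclose(npos, [3.0])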