Commit 906462a3 authored by Kirill Smelkov

X neotest: Move cluster / node out from benchmark name to label in environment

This should make inter-host and inter-cluster comparisons of benchmarks more straightforward.
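
For example (the numbers below are made up for illustration), a zwrk result
that was previously reported with the host baked into the benchmark name

    Benchmarkdeco/fs1/zwrk.go-1 16 2218.0 req/s 450.877 latency-µs/object

is now reported under a host-independent name, with the node emitted once as
a label in the environment section:

    node:    deco
    Benchmarkfs1-zwrk.go·1 16 2218.0 req/s 450.877 latency-µs/object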
parent 321dfcc7
@@ -46,9 +46,9 @@ class SeriesSet(OrderedDict):
self.unit = unit
-_n_re = re.compile(r'.*(-\d+)$')
+_n_re = re.compile(ur'.*(·\d+)$')
-# seriesof extracts "-<n>" series from benchmark B.
+# seriesof extracts "·<n>" series from benchmark B.
#
# all values must have the same unit.
#
@@ -59,9 +59,9 @@ def seriesof(B):
for name in Bn:
m = _n_re.match(name)
if m is None:
-continue # no -<n>
+continue # no ·<n>
-name_ = name[:m.start(1)] # without -<n>
+name_ = name[:m.start(1)] # without ·<n>
n = m.group(1)
n = n[1:]
n = int(n)
@@ -83,7 +83,7 @@ def seriesof(B):
return S
-# plotseries makes plot of benchmark series how they change by "-<n>"
+# plotseries makes plot of benchmark series how they change by "·<n>"
#
# S should be {} name -> BenchSeries.
#
@@ -91,7 +91,7 @@ def seriesof(B):
def plotseries(labkey, S):
plt.title("XXX ZODB server handling read requests")
-# order plots (and thus their order in legend automatically) by value at "-1"
+# order plots (and thus their order in legend automatically) by value at "·1"
namev = S.keys()
namev.sort(key = lambda _: S[_].series[0][1].avg, reverse=True)
@@ -141,8 +141,8 @@ def main():
if S is None:
continue # nothing found
-# XXX hack just fs1 works very fast and makes seeing other variants hard
-del S['deco/fs1/zwrk.go']
+# working directly with fs1 is very fast and makes seeing other variants hard.
+del S['fs1-zwrk.go']
# only show !log for neo/py as this are faster
for k in S.keys():
......
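(Aside: a minimal sketch of the "·<n>" parsing that seriesof performs above,
assuming Python 3; split_name is a hypothetical helper, not part of this patch.)

    import re

    # same pattern as in the patch, minus the Python 2 ur'' literal prefix
    _n_re = re.compile(r'.*(·\d+)$')

    def split_name(name):
        # "fs1-zwrk.go·4" -> ("fs1-zwrk.go", 4); None when there is no ·<n> suffix
        m = _n_re.match(name)
        if m is None:
            return None
        return name[:m.start(1)], int(m.group(1)[1:])

    assert split_name('fs1-zwrk.go·4') == ('fs1-zwrk.go', 4)
    assert split_name('pystone') is None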
@@ -903,6 +903,8 @@ _nrunpar() {
# bench_cpu - microbenchmark CPU
bench_cpu() {
echo -ne "node:\t"; xhostname
echo "cluster:"
nrun sh -c "python -m test.pystone |tail -1 |sed -e \
\"s|^This machine benchmarks at \([0-9.]\+\) pystones/second$|Benchmark`xhostname`/pystone 1 \1 pystone/s|\""
@@ -926,6 +928,8 @@ bench_cpu() {
# bench_disk - benchmark direct (uncached) and cached random reads
bench_disk() {
echo -ne "node:\t"; xhostname
echo "cluster:"
# ioping2bench <topic> - converts timings from ioping to std benchmark
ioping2bench() {
# min/avg/max/mdev = 102.2 us / 138.6 us / 403.3 us / 12.2 us
@@ -943,7 +947,7 @@ Benchmark$1-avg 1 \\3 \\4/op\
for size in $sizev; do
echo -e "\n*** disk: random direct (no kernel cache) $size-read latency"
nrun ioping -D -i 0ms -s $size -S 1024M -w $benchtime -q -k . |\
ioping2bench "`xhostname`/disk/randread/direct/$size"
ioping2bench "disk/randread/direct/$size"
done
@@ -955,7 +959,7 @@ Benchmark$1-avg 1 \\3 \\4/op\
for size in $sizev; do
echo -e "\n*** disk: random cached $size-read latency"
nrun ioping -C -i 0ms -s $size -S 1024M -w $benchtime -q -k . |\
ioping2bench "`xhostname`/disk/randread/pagecache/$size"
ioping2bench "disk/randread/pagecache/$size"
done
}
@@ -969,6 +973,10 @@ hostof() {
bench_net() {
url=$1
peer=`hostof $url`
+shortpeer=`echo $peer |sed -e 's/\./ /' |awk '{print $1}'` # name.domain -> name
+echo "node:"
+echo -e "cluster:\t`xhostname`-$shortpeer"
echo -e "\n*** link latency:"
@@ -991,13 +999,13 @@ Benchmark$1-avg 1 \\2 ms/op\
echo -e "\n# `xhostname`$peer (ping ${size}B)"
{ $profile sudo -n ping -i0 -w 3 -s $size -q $peer || \
echo "# skipped -> enable ping in sudo for `whoami`@`xhostname`"; } | \
-ping2bench `xhostname`-$peer/pingrtt/${size}B
+ping2bench pingrtt-/${size}B
echo -e "\n# $peer`xhostname` (ping ${size}B)"
# TODO profile remotely
on $url "sudo -n ping -i0 -w3 -s ${size} -q \$(echo \${SSH_CONNECTION%% *}) || \
echo \\\"# skipped -> enable ping in sudo for \`whoami\`@\`xhostname\`\\\"" | \
ping2bench $peer-`xhostname`/pingrtt/${size}B
echo \\\"# skipped -> enable ping in sudo for \`whoami\`@${peer}\\\"" | \
ping2bench -pingrtt/${size}B
done
# TODO
@@ -1027,25 +1035,25 @@ Benchmark$1-avg 1 \\2 ms/op\
echo -e "\n# `xhostname`$peer (lat_tcp.c ${size}B -> lat_tcp.c -s)"
# TODO profile remotely
on $url "nohup lat_tcp -s </dev/null >/dev/null 2>/dev/null &"
-nrun lat_tcp -m $size $peer | lattcp2bench "`xhostname`-$peer/tcprtt(c-c)/${size}B"
+nrun lat_tcp -m $size $peer | lattcp2bench "tcprtt(c_c)-/${size}B"
lat_tcp -S $peer
echo -e "\n# `xhostname`$peer (lat_tcp.c ${size}B -> lat_tcp.go -s)"
# TODO profile remotely
on $url "nohup lat_tcp_go -s </dev/null >/dev/null 2>/dev/null &"
-nrun lat_tcp -m $size $peer | lattcp2bench "`xhostname`-$peer/tcprtt(c-go)/${size}B"
+nrun lat_tcp -m $size $peer | lattcp2bench "tcprtt(c_go)-/${size}B"
lat_tcp -S $peer
echo -e "\n# $peer`xhostname` (lat_tcp.c ${size}B -> lat_tcp.c -s)"
lat_tcp -s
# TODO profile remotely
-nrun on $url "lat_tcp -m $size \${SSH_CONNECTION%% *}" | lattcp2bench "$peer-`xhostname`/tcprtt(c-c)/${size}B"
+nrun on $url "lat_tcp -m $size \${SSH_CONNECTION%% *}" | lattcp2bench "-tcprtt(c_c)/${size}B"
lat_tcp -S localhost
echo -e "\n# $peer`xhostname` (lat_tcp.c ${size}B -> lat_tcp.go -s)"
lat_tcp_go -s 2>/dev/null &
# TODO profile remotely
-nrun on $url "lat_tcp -m $size \${SSH_CONNECTION%% *}" | lattcp2bench "$peer-`xhostname`/tcprtt(c-go)/${size}B"
+nrun on $url "lat_tcp -m $size \${SSH_CONNECTION%% *}" | lattcp2bench "-tcprtt(c_go)/${size}B"
lat_tcp -S localhost
done
}
@@ -1061,10 +1069,10 @@ zbench() {
zhashok=$3
# nrun time demo-zbigarray read $url
-nrun tzodb.py zhash --check=$zhashok --bench=$topic/%s --$zhashfunc $url
+nrun tzodb.py zhash --check=$zhashok --bench=$topic-%s --$zhashfunc $url
# XXX running processes in parallel is deprecated in favour of zwrk.
# echo -e "\n# ${Npar} clients in parallel"
-# nrunpar tzodb.py zhash --check=$zhashok --bench=$topic/%s-P$Npar --$zhashfunc $url
+# nrunpar tzodb.py zhash --check=$zhashok --bench=$topic-%s·P$Npar --$zhashfunc $url
echo
zbench_go $url $topic $zhashok
}
@@ -1074,16 +1082,16 @@ zbench_go() {
url=$1
topic=$2
zhashok=$3
-nrun tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic/%s -$zhashfunc $url
-nrun tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic/%s -$zhashfunc -useprefetch $url
+nrun tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic-%s -$zhashfunc $url
+nrun tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic-%s -$zhashfunc -useprefetch $url
# XXX running processes in parallel is deprecated in favour of zwrk.
# echo -e "\n# ${Npar} clients in parallel"
-# nrunpar tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic/%s-P$Npar -$zhashfunc $url
+# nrunpar tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic-%s·P$Npar -$zhashfunc $url
for i in ${Nparv}; do
echo -e "\n# $i clients in parallel"
-nrun tzodb_go -log_dir=$log zwrk -nclient $i -check=$zhashok -bench=$topic/%s -$zhashfunc $url
+nrun tzodb_go -log_dir=$log zwrk -nclient $i -check=$zhashok -bench=$topic-%s -$zhashfunc $url
done
}
@@ -1110,35 +1118,38 @@ cmd_zbench-local() {
# zodb part of cmd_bench-local
zbench_local() {
echo "node:"
echo -ne "cluster:\t"; xhostname
gen_data
zhashok=`cat $ds/zhash.ok`
echo -e "\n*** FileStorage"
-zbench $fs1/data.fs `xhostname`/fs1 $zhashok
+zbench $fs1/data.fs fs1 $zhashok
echo -e "\n*** ZEO"
Zpy $fs1/data.fs
Zpy_job=$!
-zbench zeo://$Zbind `xhostname`/zeo/py/fs1 $zhashok
+zbench zeo://$Zbind zeo/py/fs1 $zhashok
kill $Zpy_job
wait $Zpy_job
echo -e "\n*** NEO/py sqlite"
NEOpylite
-zbench neo://$neocluster@$Mbind `xhostname`/neo/py/sqlite $zhashok
+zbench neo://$neocluster@$Mbind neo/py/sqlite $zhashok
xneoctl set cluster stopping
wait
# XXX JM asked to also have NEO/py with logging disabled
echo -e "\n*** NEO/py sqlite (logging disabled)"
X_NEOPY_LOG_SKIP=y NEOpylite
-zbench neo://$neocluster@$Mbind "`xhostname`/neo/py(!log)/sqlite" $zhashok
+zbench neo://$neocluster@$Mbind "neo/py(!log)/sqlite" $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/py sql"
NEOpysql
-zbench neo://$neocluster@$Mbind `xhostname`/neo/py/sql $zhashok
+zbench neo://$neocluster@$Mbind neo/py/sql $zhashok
xneoctl set cluster stopping
xmysql -e "SHUTDOWN"
wait
@@ -1146,32 +1157,32 @@ zbench_local() {
# XXX JM asked to also have NEO/py with logging disabled
echo -e "\n*** NEO/py sql (logging disabled)"
X_NEOPY_LOG_SKIP=y NEOpysql
-zbench neo://$neocluster@$Mbind "`xhostname`/neo/py(!log)/sql" $zhashok
+zbench neo://$neocluster@$Mbind "neo/py(!log)/sql" $zhashok
xneoctl set cluster stopping
xmysql -e "SHUTDOWN"
wait
echo -e "\n*** NEO/go fs1"
NEOgofs1
-zbench neo://$neocluster@$Mbind `xhostname`/neo/go/fs1 $zhashok
+zbench neo://$neocluster@$Mbind neo/go/fs1 $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/go fs1 (sha1 disabled)"
X_NEOGO_SHA1_SKIP=y NEOgofs1
-X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "`xhostname`/neo/go/fs1(!sha1)" $zhashok
+X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "neo/go/fs1(!sha1)" $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/go sqlite"
NEOgolite
-zbench neo://$neocluster@$Mbind `xhostname`/neo/go/sqlite $zhashok
+zbench neo://$neocluster@$Mbind neo/go/sqlite $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/go sqlite (sha1 disabled)"
X_NEOGO_SHA1_SKIP=y NEOgolite
-X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "`xhostname`/neo/go/sqlite(!sha1)" $zhashok
+X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "neo/go/sqlite(!sha1)" $zhashok
xneoctl set cluster stopping
wait
}
@@ -1221,32 +1232,35 @@ zbench_cluster() {
url=$1
peer=`hostof $url`
echo "node:"
echo -e "cluster:\t`xhostname`-$peer"
gen_data
zhashok=`cat $ds/zhash.ok`
echo -e "\n*** ZEO"
Zpy $fs1/data.fs
Zpy_job=$!
-on $url ./neotest zbench-client zeo://$Zbind "`xhostname`-$peer/zeo/py/fs1" $zhashok
+on $url ./neotest zbench-client zeo://$Zbind zeo/py/fs1 $zhashok
kill $Zpy_job
wait $Zpy_job
echo -e "\n*** NEO/py sqlite"
NEOpylite
-on $url ./neotest zbench-client neo://$neocluster@$Mbind "`xhostname`-$peer/neo/py/sqlite" $zhashok
+on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/py/sqlite $zhashok
xneoctl set cluster stopping
wait
# XXX JM asked to also have NEO/py with logging disabled
echo -e "\n*** NEO/py sqlite (logging disabled)"
X_NEOPY_LOG_SKIP=y NEOpylite
-on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"`xhostname`-$peer/neo/py(!log)/sqlite\\\"" $zhashok
+on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"neo/py(!log)/sqlite\\\"" $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/py sql"
NEOpysql
-on $url ./neotest zbench-client neo://$neocluster@$Mbind "`xhostname`-$peer/neo/py/sql" $zhashok
+on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/py/sql $zhashok
xneoctl set cluster stopping
xmysql -e "SHUTDOWN"
wait
@@ -1254,32 +1268,32 @@ zbench_cluster() {
# XXX JM asked to also have NEO/py with logging disabled
echo -e "\n*** NEO/py sql (logging disabled)"
X_NEOPY_LOG_SKIP=y NEOpysql
-on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"`xhostname`-$peer/neo/py(!log)/sql\\\"" $zhashok
+on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"neo/py(!log)/sql\\\"" $zhashok
xneoctl set cluster stopping
xmysql -e "SHUTDOWN"
wait
echo -e "\n*** NEO/go fs"
NEOgofs1
-on $url ./neotest zbench-client neo://$neocluster@$Mbind "`xhostname`-$peer/neo/go/fs1" $zhashok
+on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/go/fs1 $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/go fs1 (sha1 disabled)"
X_NEOGO_SHA1_SKIP=y NEOgofs1
-on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"`xhostname`-$peer/neo/go/fs1(!sha1)\\\"" $zhashok
+on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"neo/go/fs1(!sha1)\\\"" $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/go sqlite"
NEOgolite
-on $url ./neotest zbench-client neo://$neocluster@$Mbind "`xhostname`-$peer/neo/go/sqlite" $zhashok
+on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/go/sqlite $zhashok
xneoctl set cluster stopping
wait
echo -e "\n*** NEO/go sqlite (sha1 disabled)"
X_NEOGO_SHA1_SKIP=y NEOgolite
-on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"`xhostname`-$peer/neo/go/sqlite(!sha1)\\\"" $zhashok
+on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"neo/go/sqlite(!sha1)\\\"" $zhashok
xneoctl set cluster stopping
wait
}
......
@@ -414,7 +414,7 @@ func zwrk(ctx context.Context, url string, nwrk int, h hasher, bench, check string
latavg := float64(nwrk) * tavg
rps := float64(r.N) / r.T.Seconds()
topic := fmt.Sprintf(bench, "zwrk.go")
fmt.Printf("Benchmark%s-%d %d\t%.1f req/s %.3f latency-µs/object\n",
fmt.Printf("Benchmark%s·%d %d\t%.1f req/s %.3f latency-µs/object\n",
topic, nwrk, r.N, rps, latavg)
return nil
}
......
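(Aside: with node / cluster now emitted as labels, a consumer of benchmark
output has to pick up the label lines together with the Benchmark lines.
A minimal sketch of such a reader, assuming Python 3; read_bench is a
hypothetical helper, not part of this patch, and the sample numbers are made up.)

    import re

    _label_re = re.compile(r'^(\w+):\s*(.*)$')    # e.g. "node:<TAB>deco"
    _bench_re = re.compile(r'^Benchmark(\S+?)(·\d+)?\s+(\d+)\s+(.*)$')

    def read_bench(lines):
        # yield (labels, name, nclients, rest) for every Benchmark line,
        # with the most recently seen node:/cluster: labels attached
        labels = {}
        for line in lines:
            m = _label_re.match(line)
            if m is not None and m.group(1) in ('node', 'cluster'):
                labels[m.group(1)] = m.group(2)
                continue
            m = _bench_re.match(line)
            if m is not None:
                n = int(m.group(2)[1:]) if m.group(2) else 1
                yield dict(labels), m.group(1), n, m.group(4)

    out = 'node:\tdeco\nBenchmarkfs1-zwrk.go·16 16\t2218.0 req/s 450.877 latency-µs/object\n'
    for rec in read_bench(out.splitlines()):
        print(rec)  # ({'node': 'deco'}, 'fs1-zwrk.go', 16, '2218.0 req/s 450.877 latency-µs/object')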