simplebench: move results_to_text() into separate file
author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
           Wed, 21 Oct 2020 14:58:55 +0000 (17:58 +0300)
committer  Max Reitz <mreitz@redhat.com>
           Fri, 18 Dec 2020 11:35:55 +0000 (12:35 +0100)
Let's keep the view part separate: this way it will be easier to improve it
in the following commits.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201021145859.11201-18-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
scripts/simplebench/bench-example.py
scripts/simplebench/bench_write_req.py
scripts/simplebench/results_to_text.py [new file with mode: 0644]
scripts/simplebench/simplebench.py

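With this change, callers pull the table renderer from its own module instead of from simplebench. A minimal sketch of the new usage pattern follows; bench_func, the environment and case lists, and the returned {'seconds': ...} dict are simplified placeholders for illustration, not the real benchmark code:

    # Sketch of the new import/usage pattern after this commit.
    # bench_func, test_envs and test_cases are hypothetical stand-ins;
    # the {'seconds': ...} return value assumes the dict shape that
    # simplebench.bench_one() consumes.
    import simplebench
    from results_to_text import results_to_text

    def bench_func(env, case):
        # A real benchmark would run a block job here and report its
        # duration (or an error); this stub only illustrates the shape.
        return {'seconds': 1.0}

    test_envs = [{'id': 'env-A'}, {'id': 'env-B'}]
    test_cases = [{'id': 'case-1'}]

    result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
    # Rendering now lives in results_to_text.py rather than simplebench.py.
    print(results_to_text(result))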
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
index f24cf22fe9ff9c14296d488335b02ea43a8288d5..d9c7f7bc17e71b5838d418b67fed8f14afb8d263 100644 (file)
@@ -19,6 +19,7 @@
 #
 
 import simplebench
+from results_to_text import results_to_text
 from bench_block_job import bench_block_copy, drv_file, drv_nbd
 
 
@@ -77,4 +78,4 @@ test_envs = [
 ]
 
 result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
-print(simplebench.results_to_text(result))
+print(results_to_text(result))
diff --git a/scripts/simplebench/bench_write_req.py b/scripts/simplebench/bench_write_req.py
index e175bcd7a49a32d74018958ee5749c680aa7c04b..da601ea2fe508615fb63e07210032f6f4b4a4344 100755 (executable)
@@ -26,6 +26,7 @@ import sys
 import os
 import subprocess
 import simplebench
+from results_to_text import results_to_text
 
 
 def bench_func(env, case):
@@ -167,4 +168,4 @@ if __name__ == '__main__':
 
     result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
                                initial_run=False)
-    print(simplebench.results_to_text(result))
+    print(results_to_text(result))
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
new file mode 100644 (file)
index 0000000..58d909f
--- /dev/null
@@ -0,0 +1,48 @@
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+def result_to_text(result):
+    """Return text representation of bench_one() returned dict."""
+    if 'average' in result:
+        s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
+        if 'n-failed' in result:
+            s += '\n({} failed)'.format(result['n-failed'])
+        return s
+    else:
+        return 'FAILED'
+
+
+def results_to_text(results):
+    """Return text representation of bench() returned dict."""
+    from tabulate import tabulate
+
+    dim = None
+    tab = [[""] + [c['id'] for c in results['envs']]]
+    for case in results['cases']:
+        row = [case['id']]
+        for env in results['envs']:
+            res = results['tab'][case['id']][env['id']]
+            if dim is None:
+                dim = res['dimension']
+            else:
+                assert dim == res['dimension']
+            row.append(result_to_text(res))
+        tab.append(row)
+
+    return f'All results are in {dim}\n\n' + tabulate(tab)
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
index aa74b78a046b074618dbffce97f275f02ab564e8..f61513af9022d79fae4127d956e0a3e9209851b1 100644 (file)
@@ -79,17 +79,6 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     return result
 
 
-def result_to_text(result):
-    """Return text representation of bench_one() returned dict."""
-    if 'average' in result:
-        s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
-        if 'n-failed' in result:
-            s += '\n({} failed)'.format(result['n-failed'])
-        return s
-    else:
-        return 'FAILED'
-
-
 def bench(test_func, test_envs, test_cases, *args, **vargs):
     """Fill benchmark table
 
@@ -125,23 +114,3 @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
 
     print('Done')
     return results
-
-
-def results_to_text(results):
-    """Return text representation of bench() returned dict."""
-    from tabulate import tabulate
-
-    dim = None
-    tab = [[""] + [c['id'] for c in results['envs']]]
-    for case in results['cases']:
-        row = [case['id']]
-        for env in results['envs']:
-            res = results['tab'][case['id']][env['id']]
-            if dim is None:
-                dim = res['dimension']
-            else:
-                assert dim == res['dimension']
-            row.append(result_to_text(res))
-        tab.append(row)
-
-    return f'All results are in {dim}\n\n' + tabulate(tab)
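For reference, a hand-built example of the dict that the moved results_to_text() consumes, matching the keys the code above reads ('envs', 'cases', 'tab', 'dimension', 'average', 'stdev', 'n-failed'). The ids and numbers are made up for illustration, and rendering requires the external tabulate package to be installed:

    # Hypothetical results dict, shaped like the output of simplebench.bench().
    from results_to_text import results_to_text

    results = {
        'envs': [{'id': 'backend-A'}, {'id': 'backend-B'}],
        'cases': [{'id': 'small-io'}],
        'tab': {
            'small-io': {
                'backend-A': {'dimension': 'seconds',
                              'average': 12.34, 'stdev': 0.56},
                # A cell without 'average' is rendered as FAILED.
                'backend-B': {'dimension': 'seconds'},
            }
        }
    }

    # Prints "All results are in seconds" followed by the tabulated table.
    print(results_to_text(results))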