diff options
author | Antoine Prouvost <AntoinePrv@users.noreply.github.com> | 2020-09-10 04:57:30 -0400 |
---|---|---|
committer | GitHub <noreply@github.com> | 2020-09-10 09:57:30 +0100 |
commit | 12e85b2eeb6df095b1366a44d8e8464599b5e1b8 (patch) | |
tree | c6b6b133c8d16dfe1277af924cbe14c6fd60f1d8 /bindings/python/google_benchmark/example.py | |
parent | beb360d03e2a1a2673d9c2cf408c13b69fdb5627 (diff) | |
download | google-benchmark-12e85b2eeb6df095b1366a44d8e8464599b5e1b8.tar.gz google-benchmark-12e85b2eeb6df095b1366a44d8e8464599b5e1b8.zip |
Bind more State methods/attributes to Python (#1037)
* Bind Counter to Python
* Bind State methods to Python
* Bind state.counters to Python
* Import _benchmark.Counter
* Add Python example of state usage
Co-authored-by: Dominic Hamon <dominichamon@users.noreply.github.com>
Diffstat (limited to 'bindings/python/google_benchmark/example.py')
-rw-r--r-- | bindings/python/google_benchmark/example.py | 54 |
1 files changed, 51 insertions, 3 deletions
"""Python benchmarking examples for google_benchmark.

Demonstrates the State methods bound to Python: pausing/resuming timing,
skipping with an error, manual timing, and custom counters.
(Reconstructed as clean source from the commit diff of
bindings/python/google_benchmark/example.py.)
"""
import random
import time

import google_benchmark as benchmark
from google_benchmark import Counter


@benchmark.register
def sum_million(state):
    """Benchmark summing the first million integers each iteration."""
    while state:
        sum(range(1_000_000))


@benchmark.register
def pause_timing(state):
    """Pause timing every iteration."""
    while state:
        # Construct a list of random ints every iteration without timing it.
        state.pause_timing()
        random_list = [random.randint(0, 100) for _ in range(100)]
        state.resume_timing()
        # Time only the in-place sorting algorithm.
        random_list.sort()


@benchmark.register
def skipped(state):
    """Show how to mark a benchmark as skipped with an error message."""
    if True:  # Test some predicate here.
        state.skip_with_error("some error")
        return  # NOTE: You must explicitly return, or benchmark will continue.
    ...  # Benchmark code would be here.


@benchmark.register
def manual_timing(state):
    """Report a manually measured duration via set_iteration_time()."""
    while state:
        # Manually count Python CPU time.
        start = time.perf_counter()  # perf_counter_ns() in Python 3.7+
        # Something to benchmark.
        time.sleep(0.01)
        end = time.perf_counter()
        state.set_iteration_time(end - start)


@benchmark.register
def custom_counters(state):
    """Collect a custom metric using benchmark.Counter."""
    num_foo = 0.0
    while state:
        # Benchmark some code here.
        pass
        # Collect some custom metric named foo.
        num_foo += 0.13

    # Automatic Counter from numbers.
    state.counters["foo"] = num_foo
    # Set a counter as a rate.
    state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
    # Set a counter as an inverse of rate.
    state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
    # Set a counter as a thread-average quantity.
    state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
    # There's also a combined flag:
    state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)


if __name__ == "__main__":
    benchmark.main()