perftester 0.6.3__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- perftester/__init__.py +16 -17
- perftester/__main__.py +178 -178
- perftester/perftester.py +113 -95
- {perftester-0.6.3.dist-info → perftester-0.8.0.dist-info}/METADATA +475 -558
- perftester-0.8.0.dist-info/RECORD +7 -0
- {perftester-0.6.3.dist-info → perftester-0.8.0.dist-info}/WHEEL +1 -1
- {perftester-0.6.3.dist-info → perftester-0.8.0.dist-info}/top_level.txt +0 -0
- perftester-0.6.3.dist-info/LICENSE +0 -21
- perftester-0.6.3.dist-info/RECORD +0 -9
- perftester-0.6.3.dist-info/entry_points.txt +0 -3
perftester/perftester.py
CHANGED
@@ -17,6 +17,8 @@ memory_profiler.memory_usage() and pympler.asizeof.asizeof() are provided in
 the same units. If you want to recalculate the data to MiB, you can divide the
 memory by perftester.MiB_TO_MB_FACTOR.
 
+WARNING: Calculating memory can take quite some time when the
+
 For the sake of pretty-printing the benchmarks, perftester comes with a pp
 function, which rounds all numbers to four significant digits and prints
 the object using pprint.pprint:
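
Note: the module docstring above pins down the unit convention: perftester reports MB, memory_profiler reports MiB, and MiB_TO_MB_FACTOR (1.048576) converts between them. A minimal sketch of the conversion the docstring describes (the 25.7 value is illustrative, not from the package):

    import perftester as pt

    mb_value = 25.7  # a memory result reported by perftester, in MB
    mib_value = mb_value / pt.MiB_TO_MB_FACTOR  # back to MiB, per the docstring
    print(round(mib_value, 3))  # 24.509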
@@ -32,13 +34,20 @@ You can change this behavior, however:
 Let's return to previous settings:
 >>> pt.config.digits_for_printing = 4
 """
+import warnings
+from pympler.asizeof import asizeof
+with warnings.catch_warnings():
+    warnings.simplefilter("ignore")
+    STARTING_MEMORY = asizeof(all=True)
+
 import builtins
 import copy
+import inspect
+import gc
 import os
 import rounder
 import sys
 import timeit
-import warnings
 
 from collections import namedtuple
 from collections.abc import Callable
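
Note: 0.8.0 now takes a whole-session memory snapshot at import time (STARTING_MEMORY), with pympler's sizing warnings silenced. A standalone sketch of the same pattern, assuming only pympler is installed:

    import warnings

    from pympler.asizeof import asizeof

    # asizeof(all=True) sizes every gc-tracked object and may warn about
    # objects it cannot size reliably; the filter silences those warnings.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        baseline = asizeof(all=True)  # total size in bytes

    print(f"baseline: {baseline / 1024 / 1024:.1f} MiB")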
@@ -46,47 +55,50 @@ from easycheck import (
     check_argument,
     check_if,
     check_if_not,
-    check_instance,
+    check_type,
     check_if_paths_exist,
-    assert_instance,
+    assert_instance,  # required for doctests
 )
 from functools import wraps
 from memory_profiler import memory_usage
 from pathlib import Path
 from pprint import pprint
-from pympler.asizeof import asizeof
 from statistics import mean
 
 
 MiB_TO_MB_FACTOR = 1.048576
 
 
+class IncorrectUseOfMEMLOGSError(Exception):
+    """MEMLOGS was used incorrectly."""
+
+
 class CLIPathError(Exception):
     """Exception class to be used for the CLI perftester app."""
 
 
 class LogFilePathError(Exception):
-    """
+    """Incorrect path provided for a log file."""
 
 
 class LackingLimitsError(Exception):
-    """
+    """No limits has been set for test."""
 
 
 class IncorrectArgumentError(Exception):
-    """
+    """Function arguments are incorrect."""
 
 
 class TimeTestError(Exception):
-    """
+    """The time test has not passed."""
 
 
 class MemoryTestError(Exception):
-    """
+    """The memory usage test has not passed."""
 
 
 class FunctionError(Exception):
-    """
+    """The tested code has thrown an error."""
 
 
 # Configuration
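
Note: each exception class now carries a one-line docstring, and IncorrectUseOfMEMLOGSError is new in 0.8.0. These exceptions are what failed perftester checks raise, so they are the natural thing to catch in a test suite. A minimal sketch (sum_to_n and the 1e-9 limit are illustrative, and time_test is assumed to mirror the memory_usage_test signature shown later in this diff):

    import perftester as pt

    def sum_to_n(n):
        return sum(range(n))

    try:
        # An absurdly tight limit, chosen to force a failure.
        pt.time_test(sum_to_n, raw_limit=1e-9, n=100_000)
    except pt.TimeTestError as err:
        print(f"Time test failed: {err}")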
@@ -143,7 +155,7 @@ class Config:
 
     @digits_for_printing.setter
     def digits_for_printing(self, value):
-        check_instance(
+        check_type(
             value,
             int,
             message=f"Argument value must be an int, not {type(value).__name__}",
@@ -156,7 +168,7 @@ class Config:
 
     @log_to_file.setter
     def log_to_file(self, value):
-        check_instance(
+        check_type(
             value,
             bool,
             message=f"Argument value must be an int, not {type(value).__name__}",
@@ -224,7 +236,7 @@ class Config:
         """
         whiches = ("time", "memory")
         items = ("number", "repeat")
-        check_instance(
+        check_type(
            func,
            Callable,
            IncorrectArgumentError,
@@ -297,7 +309,9 @@ class Config:
             memory_usage((self.benchmark_function, (), {}))
             for _ in range(self.defaults["memory"]["repeat"])
         ]
-        self.memory_benchmark = MiB_TO_MB_FACTOR * min(max(r) for r in memory_results)
+        self.memory_benchmark = MiB_TO_MB_FACTOR * min(
+            max(r) for r in memory_results
+        )
 
     def set_defaults(
         self, which, number=None, repeat=None, Number=None, Repeat=None
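
Note: the reflowed call spells out how the built-in memory benchmark is computed: each run's peak (max) is taken, the smallest peak across runs wins, and the MiB figure is scaled to MB. Worked through with invented numbers:

    MiB_TO_MB_FACTOR = 1.048576

    # Three repeats, each a list of MiB samples from memory_usage():
    memory_results = [
        [20.0, 21.5, 21.0],  # peak 21.5
        [20.1, 21.2, 20.9],  # peak 21.2  <- smallest peak wins
        [20.0, 21.8, 21.1],  # peak 21.8
    ]
    benchmark = MiB_TO_MB_FACTOR * min(max(r) for r in memory_results)
    print(round(benchmark, 2))  # 21.2 MiB -> 22.23 MB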
@@ -371,7 +385,7 @@ class Config:
 
     def _check_args(self, func, which, number, repeat):
         """Check instances of arguments func, which, number and repeat."""
-        check_instance(
+        check_type(
            func,
            Callable,
            IncorrectArgumentError,
@@ -405,7 +419,7 @@ class Config:
         if repeat is not None:
             if int(repeat) == repeat:
                 repeat = int(repeat)
-        check_instance(
+        check_type(
             number,
             (int, None),
             IncorrectArgumentError,
@@ -414,7 +428,7 @@ class Config:
                 f"{type(number).__name__}"
             ),
         )
-        check_instance(
+        check_type(
             repeat,
             (int, None),
             IncorrectArgumentError,
@@ -617,7 +631,7 @@ def memory_usage_test(
     When you use Repeat, it has a higher priority than the corresponding
     setting from config.settings, and it will be used. This is used in this
     single call only, and so it does not overwrite the config settings.
-
+
     WARNING: Unlike memory_profiler.memory_usage(), which reports memory in MiB,
     perftester provides data in MB.
 
@@ -661,7 +675,7 @@ def memory_usage_test(
     True
     >>> memory_usage_test(sum1, raw_limit=first_run['max']*2, n=100_000)
     """
-    check_instance(
+    check_type(
         func,
         Callable,
         IncorrectArgumentError,
@@ -686,15 +700,15 @@ def memory_usage_test(
         ),
     )
     if relative_limit is not None:
-
+        relative_got_memory = results["max"] / config.memory_benchmark
         check_if(
-
+            relative_got_memory <= relative_limit,
             handle_with=MemoryTestError,
             message=(
                 f"Memory test not passed for function {func.__name__}:\n"
                 f"relative memory limit = {relative_limit}\n"
                 f"maximum obtained relative memory usage = "
-                f"{rounder.signif(
+                f"{rounder.signif(relative_got_memory, config.digits_for_printing)}"
             ),
         )
     config.full_traceback()
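
Note: the rewritten block names the intermediate value: relative_got_memory is the tested function's peak memory divided by config.memory_benchmark, and the test fails when it exceeds relative_limit. In use (sum1 is the same illustrative function as in the doctests above):

    import perftester as pt

    def sum1(n):
        return sum(range(n))

    # Fail if sum1 uses more than 1.2x the memory of the built-in
    # benchmark function; n is forwarded to sum1.
    pt.memory_usage_test(sum1, relative_limit=1.2, n=100_000)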
@@ -714,7 +728,7 @@ def memory_usage_benchmark(func, *args, Repeat=None, **kwargs):
     single call only, and so it does not overwrite the config settings.
 
     The function returns a dict that you can pretty-print using function pp().
-
+
     WARNING: Unlike memory_profiler.memory_usage(), which reports memory in MiB,
     perftester provides data in MB.
 
@@ -733,16 +747,13 @@ def memory_usage_benchmark(func, *args, Repeat=None, **kwargs):
     >>> f_bench.keys()
     dict_keys(['raw_results', 'relative_results', 'mean_result_per_run', 'max_result_per_run', 'max_result_per_run_relative', 'mean', 'max', 'max_relative'])
     """
-
+    check_type(func, Callable, message="Argument func must be a callable.")
     _add_func_to_config(func)
 
     n = Repeat or config.settings[func]["memory"]["repeat"]
 
     try:
-        memory_results = [
-            memory_usage((func, args, kwargs))
-            for i in range(n)
-        ]
+        memory_results = [memory_usage((func, args, kwargs)) for i in range(n)]
     except Exception as e:
         raise FunctionError(
             f"The tested function raised {type(e).__name__}: {str(e)}"
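
Note: memory_usage_benchmark() runs memory_profiler.memory_usage() Repeat times (or the configured repeat) and wraps any exception raised by the tested code in FunctionError. A short usage sketch (build_list is illustrative):

    import perftester as pt

    def build_list(n):
        return list(range(n))

    bench = pt.memory_usage_benchmark(build_list, Repeat=3, n=100_000)
    pt.pp(bench)  # in 0.8.0, pp() first prints "Memory data are printed in MB."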
@@ -752,10 +763,7 @@ def memory_usage_benchmark(func, *args, Repeat=None, **kwargs):
         for j, _ in enumerate(result):
             memory_results[i][j] *= MiB_TO_MB_FACTOR
 
-    memory_results_mean = [
-        mean(this_result)
-        for this_result in memory_results
-    ]
+    memory_results_mean = [mean(this_result) for this_result in memory_results]
     memory_results_max = [max(this_result) for this_result in memory_results]
     overall_mean = mean(memory_results_mean)
     # We take the min of the max values
@@ -765,7 +773,7 @@ def memory_usage_benchmark(func, *args, Repeat=None, **kwargs):
     for i, result in enumerate(relative_results):
         for j, r in enumerate(result):
             relative_results[i][j] = r / config.memory_benchmark
-
+
     return {
         "raw_results": memory_results,
         "relative_results": relative_results,
@@ -821,7 +829,7 @@ def time_benchmark(func, *args, Number=None, Repeat=None, **kwargs):
     True
 
     """
-
+    check_type(func, Callable, message="Argument func must be a callable.")
     _add_func_to_config(func)
 
     try:
@@ -861,8 +869,33 @@ def pp(*args):
     >>> pp(dict(a=.12121212, b=23.234234234), ["system failure", 345345.345])
     {'a': 0.1212, 'b': 23.23}
     ['system failure', 345300.0]
+    >>> t = time_benchmark(lambda: 0, Number=1, Repeat=1)
+    >>> pp(t)
+    Time data are printed in seconds.
+    {'max': ...,
+     'mean': ...,
+     'min': ...,
+     'min_relative': ...,
+     'raw_times': [...],
+     'raw_times_relative': [...]}
+    >>> m = memory_usage_benchmark(lambda: 0)
+    >>> pp(m)
+    Memory data are printed in MB.
+    {'max': ...,
+     'max_relative': ...,
+     'max_result_per_run': [...],
+     'max_result_per_run_relative': [...],
+     'mean': ...,
+     'mean_result_per_run': [...],
+     'raw_results': [[..., ..., ...]],
+     'relative_results': [[..., ..., ...]]}
     """
     for arg in args:
+        is_benchmark = _check_if_benchmarks(arg)
+        if is_benchmark == "time benchmark":
+            print("Time data are printed in seconds.")
+        elif is_benchmark == "memory benchmark":
+            print("Memory data are printed in MB.")
         pprint(
             rounder.signif_object(
                 arg, digits=config.digits_for_printing, use_copy=True
@@ -870,6 +903,52 @@ def pp(*args):
         )
 
 
+def _check_if_benchmarks(obj):
+    """Check if obj comes from time or memory benchmarks.
+
+    >>> _check_if_benchmarks(10)
+    >>> _check_if_benchmarks("10")
+    >>> _check_if_benchmarks([10, ])
+    >>> _check_if_benchmarks((10, ))
+    >>> _check_if_benchmarks({10, 20})
+    >>> _check_if_benchmarks(dict(x=10, y=20))
+    >>> t = time_benchmark(lambda: 0, Number=1, Repeat=1)
+    >>> m = memory_usage_benchmark(lambda: 0)
+    >>> _check_if_benchmarks(t)
+    'time benchmark'
+    >>> _check_if_benchmarks(m)
+    'memory benchmark'
+    """
+    time_keys = {
+        "min",
+        "min_relative",
+        "raw_times",
+        "raw_times_relative",
+        "mean",
+        "max",
+    }
+    memory_keys = {
+        "raw_results",
+        "relative_results",
+        "mean_result_per_run",
+        "max_result_per_run",
+        "max_result_per_run_relative",
+        "mean",
+        "max",
+        "max_relative",
+    }
+    try:
+        if obj.keys() == time_keys:
+            return "time benchmark"
+    except AttributeError:
+        pass
+    try:
+        if obj.keys() == memory_keys:
+            return "memory benchmark"
+    except AttributeError:
+        return None
+
+
 def _add_func_to_config(func):
     if func not in config.settings.keys():
         config.settings[func] = {}
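
Note: _check_if_benchmarks() identifies benchmark dicts purely by their key sets: dict.keys() returns a set-like view that compares equal to a plain set, and non-mappings raise AttributeError, hence the try/except. The trick in isolation:

    time_keys = {"min", "mean", "max"}
    d = {"max": 0.1, "min": 0.01, "mean": 0.05}
    print(d.keys() == time_keys)  # True: a keys view compares equal to a set
    print(d.keys() == {"min"})    # False: the whole key set must match
    # Objects without .keys() raise AttributeError, which the function
    # catches in order to return None for non-benchmark arguments.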
@@ -882,67 +961,6 @@ def _add_func_to_config(func):
     )
 
 
-# Full memory measurement
-
-builtins.__dict__["MEMLOGS"] = []
-
-
-MemLog = namedtuple("MemLog", "ID memory")
-
-
-def MEMPRINT():
-    """Pretty-print MEMLOGS."""
-    for i, memlog in enumerate(MEMLOGS):  # type: ignore
-        ID = memlog.ID if memlog.ID else ""
-        print(
-            f"{i: < 4} "
-            f"{round(memlog.memory / 1024/1024, 1): <6} → "
-            f"{ID}"
-        )
-
-
-def MEMPOINT(ID=None):
-    """Global function to measure full memory and log it into MEMLOGS.
-
-    The function is available from any module of a session. It logs into
-    MEMLOGS, also available from any module.
-
-    Memory is collected using pympler.asizeof.asizeof(), and reported in
-    bytes. So, the function measures the size of all current gc objects,
-    including module, global and stack frame objects, minus the size
-    of `MEMLOGS`.
-    """
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore")
-        MEMLOGS.append(
-            MemLog(  # type: ignore
-                ID, (asizeof(all=True) - asizeof(MEMLOGS))
-            )  # type: ignore
-        )
-
-
-def MEMTRACE(func, ID_before=None, ID_after=None):
-    """Decorator to log memory before and after running a function."""
-
-    @wraps(func)
-    def inner(*args, **kwargs):
-        before = ID_before if ID_before else f"Before {func.__name__}()"
-        MEMPOINT(before)
-        f = func(*args, **kwargs)
-        after = ID_after if ID_after else f"After {func.__name__}()"
-        MEMPOINT(after)
-        return f
-
-    return inner
-
-
-builtins.__dict__["MEMPOINT"] = MEMPOINT
-builtins.__dict__["MEMPRINT"] = MEMPRINT
-builtins.__dict__["MEMTRACE"] = MEMTRACE
-
-MEMPOINT("perftester import")
-
-
 if __name__ == "__main__":
     import doctest
 
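
Note: this final hunk removes the module-level MEMLOGS machinery (MEMLOGS, MemLog, MEMPRINT, MEMPOINT, MEMTRACE) that 0.6.3 injected into builtins at import time. Given the new IncorrectUseOfMEMLOGSError class added earlier in the diff, the feature appears to have been reworked rather than dropped, though this view does not show its new home. Code that relied on the 0.6.3 globals can keep an equivalent local logger; a sketch using only pympler (lower-case names chosen to avoid the old builtins):

    import warnings
    from collections import namedtuple

    from pympler.asizeof import asizeof

    MemLog = namedtuple("MemLog", "ID memory")
    memlogs = []

    def mempoint(ID=None):
        # Size of all gc-tracked objects, in bytes, minus the log itself.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            memlogs.append(MemLog(ID, asizeof(all=True) - asizeof(memlogs)))

    mempoint("start")
    data = list(range(100_000))
    mempoint("after building data")
    for i, log in enumerate(memlogs):
        print(f"{i:<4}{round(log.memory / 1024 / 1024, 1):<8}{log.ID or ''}")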