perftester 0.5.1__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- perftester/__init__.py +2 -1
- perftester/perftester.py +57 -0
- {perftester-0.5.1.dist-info → perftester-0.6.0.dist-info}/METADATA +107 -28
- perftester-0.6.0.dist-info/RECORD +12 -0
- perftester-0.5.1.dist-info/RECORD +0 -12
- {perftester-0.5.1.dist-info → perftester-0.6.0.dist-info}/LICENSE +0 -0
- {perftester-0.5.1.dist-info → perftester-0.6.0.dist-info}/WHEEL +0 -0
- {perftester-0.5.1.dist-info → perftester-0.6.0.dist-info}/entry_points.txt +0 -0
- {perftester-0.5.1.dist-info → perftester-0.6.0.dist-info}/top_level.txt +0 -0
perftester/__init__.py
CHANGED
perftester/perftester.py
CHANGED
|
@@ -26,6 +26,7 @@ You can change this behavior, however:
|
|
|
26
26
|
Let's return to previous settings:
|
|
27
27
|
>>> pt.config.digits_for_printing = 4
|
|
28
28
|
"""
|
|
29
|
+
import builtins
|
|
29
30
|
import copy
|
|
30
31
|
import os
|
|
31
32
|
import rounder
|
|
@@ -42,9 +43,11 @@ from easycheck import (
|
|
|
42
43
|
check_if_paths_exist,
|
|
43
44
|
assert_instance,
|
|
44
45
|
)
|
|
46
|
+
from functools import wraps
|
|
45
47
|
from memory_profiler import memory_usage
|
|
46
48
|
from pathlib import Path
|
|
47
49
|
from pprint import pprint
|
|
50
|
+
from pympler.asizeof import asizeof
|
|
48
51
|
from statistics import mean
|
|
49
52
|
|
|
50
53
|
|
|
@@ -842,6 +845,60 @@ def _add_func_to_config(func):
|
|
|
842
845
|
)
|
|
843
846
|
|
|
844
847
|
|
|
848
|
+
# Full memory measurement
|
|
849
|
+
|
|
850
|
+
builtins.__dict__["MEMLOGS"] = []
|
|
851
|
+
|
|
852
|
+
|
|
853
|
+
MemLog = namedtuple("MemLog", "ID memory")
|
|
854
|
+
|
|
855
|
+
|
|
856
|
+
def MEMPRINT():
|
|
857
|
+
"""Pretty-print MEMLOGS."""
|
|
858
|
+
for i, memlog in enumerate(MEMLOGS): # type: ignore
|
|
859
|
+
ID = memlog.ID if memlog.ID else ""
|
|
860
|
+
print(f"{i: < 4} "
|
|
861
|
+
f"{round(memlog.memory / 1024/1024, 1): <6} → "
|
|
862
|
+
f"{ID}")
|
|
863
|
+
|
|
864
|
+
|
|
865
|
+
def MEMPOINT(ID=None):
|
|
866
|
+
"""Global function to measure full memory and log it into MEMLOGS.
|
|
867
|
+
|
|
868
|
+
The function is available from any module of a session. It logs into
|
|
869
|
+
MEMLOGS, also available from any module.
|
|
870
|
+
|
|
871
|
+
Memory is collected using pympler.asizeof.asizeof(), and reported in
|
|
872
|
+
bytes. So, the function measures the size of all current gc objects,
|
|
873
|
+
including module, global and stack frame objects, minus the size
|
|
874
|
+
of `MEMLOGS`.
|
|
875
|
+
"""
|
|
876
|
+
MEMLOGS.append(MemLog( # type: ignore
|
|
877
|
+
ID,
|
|
878
|
+
(asizeof(all=True) - asizeof(MEMLOGS))) # type: ignore
|
|
879
|
+
)
|
|
880
|
+
|
|
881
|
+
|
|
882
|
+
def MEMTRACE(func, ID_before=None, ID_after=None):
|
|
883
|
+
"""Decorator to log memory before and after running a function."""
|
|
884
|
+
@wraps(func)
|
|
885
|
+
def inner(*args, **kwargs):
|
|
886
|
+
before = ID_before if ID_before else f"Before {func.__name__}()"
|
|
887
|
+
MEMPOINT(before)
|
|
888
|
+
f = func(*args, **kwargs)
|
|
889
|
+
after = ID_after if ID_after else f"After {func.__name__}()"
|
|
890
|
+
MEMPOINT(after)
|
|
891
|
+
return f
|
|
892
|
+
return inner
|
|
893
|
+
|
|
894
|
+
|
|
895
|
+
builtins.__dict__["MEMPOINT"] = MEMPOINT
|
|
896
|
+
builtins.__dict__["MEMPRINT"] = MEMPRINT
|
|
897
|
+
builtins.__dict__["MEMTRACE"] = MEMTRACE
|
|
898
|
+
|
|
899
|
+
MEMPOINT("perftester import")
|
|
900
|
+
|
|
901
|
+
|
|
845
902
|
if __name__ == "__main__":
|
|
846
903
|
import doctest
|
|
847
904
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: perftester
|
|
3
|
-
Version: 0.
|
|
3
|
+
Version: 0.6.0
|
|
4
4
|
Summary: Lightweight performance testing in Python
|
|
5
5
|
Home-page: https://github.com/nyggus/perftester
|
|
6
6
|
Author: Nyggus
|
|
@@ -14,6 +14,7 @@ Requires-Python: >=3.8
|
|
|
14
14
|
Description-Content-Type: text/markdown
|
|
15
15
|
Requires-Dist: easycheck
|
|
16
16
|
Requires-Dist: memory-profiler
|
|
17
|
+
Requires-Dist: pympler
|
|
17
18
|
Requires-Dist: rounder
|
|
18
19
|
Provides-Extra: dev
|
|
19
20
|
Requires-Dist: black ; extra == 'dev'
|
|
@@ -31,13 +32,12 @@ pip install perftester
|
|
|
31
32
|
|
|
32
33
|
The package has three external dependencies: [`memory_profiler`](https://pypi.org/project/memory-profiler/) ([repo](https://github.com/pythonprofilers/memory_profiler)), [`easycheck`](https://pypi.org/project/easycheck/) ([repo](https://github.com/nyggus/easycheck)), and [`rounder`](https://pypi.org/project/rounder/) ([repo](https://github.com/nyggus/rounder)).
|
|
33
34
|
|
|
34
|
-
> `perftester` is still under heavy testing. If you find anything that does not work as intended, please let me know via nyggus
|
|
35
|
+
> `perftester` is still under heavy testing. If you find anything that does not work as intended, please let me know via nyggus `<at>` gmail.com.
|
|
35
36
|
|
|
36
37
|
## Pre-introduction: TL;DR
|
|
37
38
|
|
|
38
39
|
At the most basic level, using `perftester` is simple. It offers you two functions for benchmarking (one for execution time and one for memory), and two functions for performance testing (likewise). Read below for a very short introduction of them. If you want to learn more, however, do not stop there, but read on.
|
|
39
40
|
|
|
40
|
-
|
|
41
41
|
### Benchmarking
|
|
42
42
|
|
|
43
43
|
You have `time_benchmark()` and `memory_benchmark()` functions:
|
|
@@ -47,6 +47,7 @@ import perftester as pt
|
|
|
47
47
|
def foo(x, n): return [x] * n
|
|
48
48
|
pt.time_benchmark(foo, x=129, n=100)
|
|
49
49
|
```
|
|
50
|
+
|
|
50
51
|
and this will print the results of the time benchmark, with raw results similar to those that `timeit.repeat()` returns, but unlike it, `pt.time_benchmark()` returns mean raw time per function run, not overall; in addition, you will see some summaries of the results.
|
|
51
52
|
|
|
52
53
|
The above call did actually run `timeit.repeat()` function, with the default configuration of `Number=100_000` and `Repeat=5`. If you want to change any of these, you can use arguments `Number` and `Repeat`, correspondingly:
|
|
@@ -101,7 +102,7 @@ The API of `perftester` testinf functions is similar to that of benchmarking fun
|
|
|
101
102
|
>>> pt.memory_usage_test(foo, raw_limit=25, x=129, n=100)
|
|
102
103
|
|
|
103
104
|
# A relative test
|
|
104
|
-
>>> pt.memory_usage_test(foo, relative_limit=1.
|
|
105
|
+
>>> pt.memory_usage_test(foo, relative_limit=1.2, x=129, n=100)
|
|
105
106
|
|
|
106
107
|
```
|
|
107
108
|
|
|
@@ -122,16 +123,17 @@ That's all in this short introduction. If you're interested in more advanced use
|
|
|
122
123
|
|
|
123
124
|
## Introduction
|
|
124
125
|
|
|
125
|
-
|
|
126
126
|
`perftester` is a lightweight package for simple performance testing in Python. Here, performance refers to execution time and memory usage, so performance testing means testing if a function performs quickly enough and does not use too much RAM. In addition, the module offers you simple functions for straightforward benchmarking, in terms of both execution time and memory.
|
|
127
127
|
|
|
128
128
|
Under the hood, `perftester` is a wrapper around two functions from other modules:
|
|
129
|
+
|
|
129
130
|
* `perftester.time_benchmark()` and `perftester.time_test()` use `timeit.repeat()`
|
|
130
131
|
* `perftester.memory_usage_benchmark()` and `perftester.memory_usage_test()` use `memory_profiler.memory_usage()`
|
|
131
132
|
|
|
132
133
|
What `perftester` offers is a testing framework with as simple syntax as possible.
|
|
133
134
|
|
|
134
135
|
You can use `perftester` in three main ways:
|
|
136
|
+
|
|
135
137
|
* in an interactive session, for simple benchmarking of functions;
|
|
136
138
|
* as part of another testing framework, like `doctest` or `pytest`s; and
|
|
137
139
|
* as an independent testing framework.
|
|
@@ -140,7 +142,6 @@ The first way is a different type of use from the other two. I use it to learn t
|
|
|
140
142
|
|
|
141
143
|
When it comes to actual testing, it's difficult to say which of the last two ways is better or more convenient: it may depend on how many performance tests you have, and how much time they take. If the tests do not take more than a couple of seconds, then you can combine them with unit tests. But if they take much time, you should likely make them independent of unit tests, and run them from time to time.
|
|
142
144
|
|
|
143
|
-
|
|
144
145
|
## Using `perftester`
|
|
145
146
|
|
|
146
147
|
### Use it as a separate testing framework
|
|
@@ -161,10 +162,9 @@ Read more about using perftester that way [here](docs/use_perftester_as_CLI.md).
|
|
|
161
162
|
|
|
162
163
|
> There is no best approach, but remember to choose one that suits your needs.
|
|
163
164
|
|
|
164
|
-
|
|
165
165
|
### Use `perftester` inside `pytest`
|
|
166
166
|
|
|
167
|
-
This is a very simple approach, perhaps the simplest one: When you use `pytest`, you can simply add `perftester` testing functions to `pytest` testing functions, and that way both frameworks will be combined, or rather the `pytest` framework will run `perftester` tests. The amount of additional work is minimal.
|
|
167
|
+
This is a very simple approach, perhaps the simplest one: When you use `pytest`, you can simply add `perftester` testing functions to `pytest` testing functions, and that way both frameworks will be combined, or rather the `pytest` framework will run `perftester` tests. The amount of additional work is minimal.
|
|
168
168
|
|
|
169
169
|
For instance, you can write the following test function:
|
|
170
170
|
|
|
@@ -197,19 +197,16 @@ If you now run `pytest` and the test passes, nothing will happen — just like w
|
|
|
197
197
|
|
|
198
198
|
This is the easiest way to use `perftester`. Its only drawback is that if the performance tests take much time, `pytest` will also take much time, something usually to be avoided. You can then do some `pytest` tricks to not run `perftester` tests, and run them only when you want — or you can simply use the above-described command-line `perftester` framework for performance testing.
|
|
199
199
|
|
|
200
|
-
|
|
201
200
|
### Use `perftester` inside `doctest`
|
|
202
201
|
|
|
203
|
-
In the same way, you can use `perftester` in `doctest`. You will find plenty of examples in the documentation here, and in the [tests/ folder](tests/).
|
|
202
|
+
In the same way, you can use `perftester` in `doctest`. You will find plenty of examples in the documentation here, and in the [tests/ folder](tests/).
|
|
204
203
|
|
|
205
|
-
> A great fan of `doctest`ing, I do **not** recommend using `perftester` in docstrings. For me, `doctest`s in docstrings should clarify things and explain how functions work, and adding a performance test to a function's docstring would decrease readability.
|
|
204
|
+
> A great fan of `doctest`ing, I do **not** recommend using `perftester` in docstrings. For me, `doctest`s in docstrings should clarify things and explain how functions work, and adding a performance test to a function's docstring would decrease readability.
|
|
206
205
|
|
|
207
206
|
The best way, thus, is to write performance tests as separate `doctest` files, dedicated to performance testing. You can collect such files in a shell script that runs performance tests.
|
|
208
207
|
|
|
209
|
-
|
|
210
208
|
## Basic use of `perftester`
|
|
211
209
|
|
|
212
|
-
|
|
213
210
|
### Simple benchmarking
|
|
214
211
|
|
|
215
212
|
To create a performance test for a function, you likely need to know how it behaves. You can run two simple benchmarking functions, `pt.memory_usage_benchmark()` and `pt.time_benchmark()`, which will run time and memory benchmarks, respectively. First, we will decrease `number` (passed to `timeit.repeat`), in order to shorten the benchmarks (which here serve as `doctest`s):
|
|
@@ -273,7 +270,6 @@ True
|
|
|
273
270
|
|
|
274
271
|
For time tests, we have the `pt.time_test()` function. First, a raw time test:
|
|
275
272
|
|
|
276
|
-
|
|
277
273
|
```python
|
|
278
274
|
>>> pt.time_test(f, raw_limit=2e-05, n=100)
|
|
279
275
|
|
|
@@ -302,8 +298,7 @@ We also can combine both:
|
|
|
302
298
|
|
|
303
299
|
```
|
|
304
300
|
|
|
305
|
-
You can read about relative testing below, [in section](#raw-and-relative-performance-testing).
|
|
306
|
-
|
|
301
|
+
You can read about relative testing below, [in section](#raw-and-relative-performance-testing).
|
|
307
302
|
|
|
308
303
|
### Memory testing
|
|
309
304
|
|
|
@@ -311,15 +306,15 @@ Memory tests use `pt.memory_usage_test()` function, which is used in the same wa
|
|
|
311
306
|
|
|
312
307
|
```python
|
|
313
308
|
>>> pt.memory_usage_test(f, raw_limit=27, n=100) # test on raw memory
|
|
314
|
-
>>> pt.memory_usage_test(f, relative_limit=1.
|
|
315
|
-
>>> pt.memory_usage_test(f, raw_limit=27, relative_limit=1.
|
|
309
|
+
>>> pt.memory_usage_test(f, relative_limit=1.2, n=100)  # relative memory test
|
|
310
|
+
>>> pt.memory_usage_test(f, raw_limit=27, relative_limit=1.2, n=100) # both
|
|
316
311
|
|
|
317
312
|
```
|
|
318
313
|
|
|
319
314
|
In a memory usage test, a function is called only once. You can change that — but do that only if you have solid reasons — using, for example, `pt.config.set(f, "time", "repeat", 2)`, which will set this setting for the function in the configuration (so it will be used for all next calls for function `f()`). You can also do it just once (so, without saving the setting in `pt.config.settings`), using the `Repeat` argument:
|
|
320
315
|
|
|
321
316
|
```python
|
|
322
|
-
>>> pt.memory_usage_test(f, raw_limit=27, relative_limit=1.
|
|
317
|
+
>>> pt.memory_usage_test(f, raw_limit=27, relative_limit=1.2, n=100, Repeat=100)
|
|
323
318
|
|
|
324
319
|
```
|
|
325
320
|
|
|
@@ -327,7 +322,6 @@ In a memory usage test, a function is called only once. You can change that —
|
|
|
327
322
|
|
|
328
323
|
Of course, memory tests do not have to be very useful for functions that do not have to allocate too much memory, but as you will see in other documentation files in `perftester`, some functions do use a lot of memory, and such tests do make quite a lot of sense for them.
|
|
329
324
|
|
|
330
|
-
|
|
331
325
|
## Configuration: `pt.config`
|
|
332
326
|
|
|
333
327
|
The whole configuration is stored in the `pt.config` object, which you can easily change. Here's a short example of how you can use it:
|
|
@@ -365,7 +359,6 @@ and so on. You can also change settings in each testing file itself, preferably
|
|
|
365
359
|
|
|
366
360
|
When you use `perftester` in an interactive session, you update `pt.config` in a normal way, in the session. And when you use `perftester` inside `pytest`, you can do it in conftest.py and in each testing function.
|
|
367
361
|
|
|
368
|
-
|
|
369
362
|
## Output
|
|
370
363
|
|
|
371
364
|
If a test fails, you will see something like this:
|
|
@@ -396,7 +389,6 @@ You can locate where a particular test failed, using the module, `perftester_` f
|
|
|
396
389
|
|
|
397
390
|
> Like in `pytest`, a recommended approach is to use one performance test per `perftester_` function. This can save you some time and trouble, but also this will ensure that all tests will be run.
|
|
398
391
|
|
|
399
|
-
|
|
400
392
|
#### Summary output
|
|
401
393
|
|
|
402
394
|
At the end, you will see a simple summary of the results, something like this:
|
|
@@ -417,7 +409,6 @@ perftester_for_testing.perftester_f2_time_and_memory
|
|
|
417
409
|
perftester_for_testing.perftester_f_2
|
|
418
410
|
```
|
|
419
411
|
|
|
420
|
-
|
|
421
412
|
## Relative tests against another function
|
|
422
413
|
|
|
423
414
|
In the basic use, when you choose a relative benchmark, you compare the performance of your function with that of a built-in (empty) function `pt.config.benchmark_function()`. In most cases, this is what you need. Sometimes, however, you may wish to benchmark against another function. For instance, you may want to build your own function that does the same thing as a Python built-in function, and you want to test (and show) that your function performs better. There are two ways of achieving this:
|
|
@@ -425,7 +416,6 @@ In the basic use, when you choose a relative benchmark, you compare the performa
|
|
|
425
416
|
* you can use a simple trick; [see here](benchmarking_against_another_function.md);
|
|
426
417
|
* you can overwrite the built-in benchmark functions; [see here](change_benchmarking_function.md).
|
|
427
418
|
|
|
428
|
-
|
|
429
419
|
## Raw and relative performance testing
|
|
430
420
|
|
|
431
421
|
Surely, any performance tests are strongly environment-dependent, so you need to remember that when writing and conducting any performance tests. `perftester`, however, offers a solution to this: You can define tests based on
|
|
@@ -441,6 +431,99 @@ You can of course combine both types of tests, and you can do it in a very simpl
|
|
|
441
431
|
|
|
442
432
|
> Warning! Relative results can be different between operating systems.
|
|
443
433
|
|
|
434
|
+
## Tracing full memory usage
|
|
435
|
+
|
|
436
|
+
Currently, `perftester` contains a beta version (under heavy testing) of a new feature that can be used to trace full memory usage of a Python program.
|
|
437
|
+
|
|
438
|
+
> Warning: Backward compatibility of this feature is not guaranteed! It does not affect the main functionality of `perftester`, however, so its backward compatibility should be kept.
|
|
439
|
+
|
|
440
|
+
The feature works in the following way. When you import `perftester` — but you need to do it with `import perftester`, not via importing particular objects — you will be able to see new objects in the global space. One of them is `MEMLOGS`:
|
|
441
|
+
|
|
442
|
+
```python-repl
|
|
443
|
+
>>> import perftester
|
|
444
|
+
>>> MEMLOGS[0].ID
|
|
445
|
+
'perftester import'
|
|
446
|
+
|
|
447
|
+
```
|
|
448
|
+
|
|
449
|
+
Right after the import, it contains a single measurement point, logged during the import itself. When you start tracing memory using `perftester`, this list will collect the subsequent measurements. You can measure them in two ways. One is via a `MEMPOINT()` function, and another via a `MEMTRACE` decorator. They, too, are in the global scope, so you can use them in any module inside a session in which `perftester` was already imported.
|
|
450
|
+
|
|
451
|
+
The `MEMLOGS` list will contain elements being instances of `MemLog`, which is a `collections.namedtuple` data type, with two attributes: `"ID"` and `"memory"`. This data type is imported with `perftester`, so if you want to use it, you can reach it as `perftester.MemLog`. You don't have to use it, though. Since it's a named tuple, you can treat it as a regular tuple.
|
|
452
|
+
|
|
453
|
+
#### What sort of memory is measured?
|
|
454
|
+
|
|
455
|
+
The feature uses `pympler.asizeof.asizeof(all=True)` to measure the size of all current gc objects, including module, global and stack frame objects, minus the size of `MEMLOGS`. The memory is logged in bytes; `MEMPRINT()` converts it to MB when printing.
|
|
456
|
+
|
|
457
|
+
#### Using `MEMPOINT()`
|
|
458
|
+
|
|
459
|
+
`MEMPOINT()` creates a point of full-memory measurement. It will be appended into `MEMLOGS`.
|
|
460
|
+
|
|
461
|
+
```python-repl
|
|
462
|
+
>>> import perftester
|
|
463
|
+
>>> def foo(n):
|
|
464
|
+
... x = [i for i in range(n)]
|
|
465
|
+
... MEMPOINT()
|
|
466
|
+
... return x
|
|
467
|
+
>>> _ = foo(100)
|
|
468
|
+
>>> _ = foo(1_000_000)
|
|
469
|
+
>>> len(MEMLOGS)
|
|
470
|
+
3
|
|
471
|
+
>>> MEMLOGS[2].memory > MEMLOGS[1].memory
|
|
472
|
+
True
|
|
473
|
+
|
|
474
|
+
```
|
|
475
|
+
|
|
476
|
+
The last test checks whether the second measurement — that is, from the function with `n` of a million — uses more memory than the function using `n` of a hundred. Makes sense, and indeed the test passes.
|
|
477
|
+
|
|
478
|
+
When creating a point, you can use an ID, for instance, `MEMPOINT("from sth() function")`.
|
|
479
|
+
|
|
480
|
+
`MEMPOINT()` can be used to create a point anywhere inside the code. Nevertheless, if you want to trace memory for a function, you can use a `MEMTRACE` decorator:
|
|
481
|
+
|
|
482
|
+
```python-repl
|
|
483
|
+
>>> @MEMTRACE
|
|
484
|
+
... def bar(n):
|
|
485
|
+
... return [i for i in range(n)]
|
|
486
|
+
>>> _ = bar(1_000_000)
|
|
487
|
+
>>> MEMLOGS[-2].memory < MEMLOGS[-1].memory
|
|
488
|
+
True
|
|
489
|
+
|
|
490
|
+
```
|
|
491
|
+
|
|
492
|
+
The decorator creates two points: one right before running the function and another right after it returns.
|
|
493
|
+
|
|
494
|
+
The last line tests whether memory before running the function is smaller than that after running it — and given so big `n`, it should be.
|
|
495
|
+
|
|
496
|
+
Look here:
|
|
497
|
+
|
|
498
|
+
```python-repl
|
|
499
|
+
>>> @MEMTRACE
|
|
500
|
+
... def bar(n):
|
|
501
|
+
... x = [i for i in range(n)]
|
|
502
|
+
... y = [i/3 for i in x]
|
|
503
|
+
... z = [i/3 for i in y]
|
|
504
|
+
... MEMPOINT("with x, y, z")
|
|
505
|
+
... del x
|
|
506
|
+
... MEMPOINT("without x")
|
|
507
|
+
... del y
|
|
508
|
+
... MEMPOINT("without x and y")
|
|
509
|
+
... del z
|
|
510
|
+
... MEMPOINT("without x and y and z")
|
|
511
|
+
... return
|
|
512
|
+
>>> _ = bar(100_000)
|
|
513
|
+
>>> MEMLOGS[-3].memory > MEMLOGS[-2].memory > MEMLOGS[-1].memory
|
|
514
|
+
True
|
|
515
|
+
|
|
516
|
+
```
|
|
517
|
+
|
|
518
|
+
### Print `MEMLOGS`
|
|
519
|
+
|
|
520
|
+
You can do whatever you want with `MEMLOGS`. However, when you want to see this object nicely printed, use the `MEMPRINT()` function, available from the global scope, too. You will see the results printed in a pretty way, with memory provided in MB.
|
|
521
|
+
|
|
522
|
+
### Why the global scope?
|
|
523
|
+
|
|
524
|
+
Since this feature of `perftester` is to be used to debug memory use from various modules, it'd be inconvenient to import the required objects in all these modules. That's why for the moment, the required objects are kept in the global scope — but this can change in future versions.
|
|
525
|
+
|
|
526
|
+
If you have any comments about this, please share them via Issues of the package's repository.
|
|
444
527
|
|
|
445
528
|
## Other tools
|
|
446
529
|
|
|
@@ -452,26 +535,22 @@ Of course, Python comes with various powerful tools for profiling, benchmarking
|
|
|
452
535
|
|
|
453
536
|
In fact, `perftester` is just a simple wrapper around `timeit` and `memory_profiler`, since `perftester` itself does not come with its own solutions. It simply uses these functions and offers an easy-to-use API to benchmark and test memory and time performance.
|
|
454
537
|
|
|
455
|
-
|
|
456
538
|
## Manipulating the traceback
|
|
457
539
|
|
|
458
540
|
The default behavior of `perftester` is to **not** include the full traceback when a test does not pass. This is because when running performance tests, you're not interested in finding bugs, and this is what traceback is for. Instead, you want to see which test did not pass and how.
|
|
459
541
|
|
|
460
542
|
> This behavior will not affect any other function than the two `perftester` testing functions: `pt.time_test()` and `pt.memory_usage_test()`. If you want to use this behavior for other functions, too, you can use `pt.config.cut_traceback()`; to reverse, use `pt.config.full_traceback()`.
|
|
461
543
|
|
|
462
|
-
|
|
463
544
|
## Caveats
|
|
464
545
|
|
|
465
546
|
* `perftester` does not work with multiple threads or processes.
|
|
466
547
|
* `perftester` is still in a beta version and so is still under testing.
|
|
467
548
|
* Watch out when you're running the same test in different operating systems. Even relative tests can differ from OS to OS.
|
|
468
549
|
|
|
469
|
-
|
|
470
550
|
## Operating systems
|
|
471
551
|
|
|
472
552
|
The package is developed in Linux (actually, under WSL) and checked in Windows 10, so it works in both these environments.
|
|
473
553
|
|
|
474
|
-
|
|
475
554
|
## Support
|
|
476
555
|
|
|
477
556
|
Any contribution will be welcome. You can submit an issue in the [repository](https://github.com/nyggus/perftester). You can also create your own pull requests.
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
perftester/__init__.py,sha256=hKwAZUH1tVe9lA82sIE6nxjhJCqB2AcoSk6hqDeNTA8,284
|
|
2
|
+
perftester/__main__.py,sha256=aX_J60lLY2yoz5jXtNoqTnAE_wm_s4llQHeR3z2Mx68,5119
|
|
3
|
+
perftester/perftester.py,sha256=Tb1_JtqcsWClgHFF87nUG59idg5haj8eYCeu-5WfOEM,33256
|
|
4
|
+
perftester/tmp.py,sha256=jqCDGCRO5fv7Uv7cJLu4PNmnaUMIApuu3nT_2zQwWqQ,1657
|
|
5
|
+
perftester/tmp_working.py,sha256=7ub5M6PFFfhSTCp_a-YQOZSFD05VNxa8Y7tDnEcBZzk,820
|
|
6
|
+
perftester/understand.py,sha256=H70Yjt3MPSB8rSjEi88Rwik15ZaVKn7OFBizC5H72NA,15670
|
|
7
|
+
perftester-0.6.0.dist-info/LICENSE,sha256=mZFAdfuYFAyBYiir4m3CTQu151mpXbMbh7Mm5M1bZAE,1063
|
|
8
|
+
perftester-0.6.0.dist-info/METADATA,sha256=ikxGNZAp9O2tNRcbugbN20mV-QgH48LSW02CR8Vwor4,28718
|
|
9
|
+
perftester-0.6.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
|
|
10
|
+
perftester-0.6.0.dist-info/entry_points.txt,sha256=gM6Vf1BEeLY-1X9IQlO1TPAO3lJ5vToKfnJHT4MruIk,57
|
|
11
|
+
perftester-0.6.0.dist-info/top_level.txt,sha256=i1-4oWlkta2MsNKlZwJCibhn7aBexQfxncoPy2a6dfA,11
|
|
12
|
+
perftester-0.6.0.dist-info/RECORD,,
|
|
@@ -1,12 +0,0 @@
|
|
|
1
|
-
perftester/__init__.py,sha256=x9O2UW8vfTuF7uKzPUeZ397FEzjcv42X2H26je9cyOI,273
|
|
2
|
-
perftester/__main__.py,sha256=aX_J60lLY2yoz5jXtNoqTnAE_wm_s4llQHeR3z2Mx68,5119
|
|
3
|
-
perftester/perftester.py,sha256=q8fj5hl0vW5rmB5FmdVqKZ5Vk43SkMgsC8tLX8m77Gw,31559
|
|
4
|
-
perftester/tmp.py,sha256=jqCDGCRO5fv7Uv7cJLu4PNmnaUMIApuu3nT_2zQwWqQ,1657
|
|
5
|
-
perftester/tmp_working.py,sha256=7ub5M6PFFfhSTCp_a-YQOZSFD05VNxa8Y7tDnEcBZzk,820
|
|
6
|
-
perftester/understand.py,sha256=H70Yjt3MPSB8rSjEi88Rwik15ZaVKn7OFBizC5H72NA,15670
|
|
7
|
-
perftester-0.5.1.dist-info/LICENSE,sha256=mZFAdfuYFAyBYiir4m3CTQu151mpXbMbh7Mm5M1bZAE,1063
|
|
8
|
-
perftester-0.5.1.dist-info/METADATA,sha256=gieG4jHDVbnwmfPwh_RB8jRlWnHPvjtiQ06SmfP1lgM,24670
|
|
9
|
-
perftester-0.5.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
|
|
10
|
-
perftester-0.5.1.dist-info/entry_points.txt,sha256=gM6Vf1BEeLY-1X9IQlO1TPAO3lJ5vToKfnJHT4MruIk,57
|
|
11
|
-
perftester-0.5.1.dist-info/top_level.txt,sha256=i1-4oWlkta2MsNKlZwJCibhn7aBexQfxncoPy2a6dfA,11
|
|
12
|
-
perftester-0.5.1.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|