pytest-split 0.9.0__tar.gz → 0.11.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pytest_split-0.9.0 → pytest_split-0.11.0}/PKG-INFO +9 -8
- {pytest_split-0.9.0 → pytest_split-0.11.0}/README.md +1 -1
- {pytest_split-0.9.0 → pytest_split-0.11.0}/pyproject.toml +8 -8
- pytest_split-0.11.0/src/pytest_split/algorithms.py +186 -0
- {pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/cli.py +1 -5
- {pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/ipynb_compatibility.py +2 -4
- {pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/plugin.py +3 -5
- pytest_split-0.9.0/src/pytest_split/algorithms.py +0 -162
- {pytest_split-0.9.0 → pytest_split-0.11.0}/LICENSE +0 -0
- {pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/__init__.py +0 -0
- {pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/py.typed +0 -0
{pytest_split-0.9.0 → pytest_split-0.11.0}/PKG-INFO

@@ -1,28 +1,29 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: pytest-split
-Version: 0.9.0
+Version: 0.11.0
 Summary: Pytest plugin which splits the test suite to equally sized sub suites based on test execution time.
-Home-page: https://jerry-git.github.io/pytest-split
 License: MIT
+License-File: LICENSE
 Keywords: pytest,plugin,split,tests
 Author: Jerry Pussinen
 Author-email: jerry.pussinen@gmail.com
-Requires-Python: >=3.
+Requires-Python: >=3.10,<4.0
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
-Requires-Dist: pytest (>=5,<
+Requires-Dist: pytest (>=5,<10)
 Project-URL: Documentation, https://jerry-git.github.io/pytest-split
+Project-URL: Homepage, https://jerry-git.github.io/pytest-split
 Project-URL: Repository, https://github.com/jerry-git/pytest-split
 Description-Content-Type: text/markdown
@@ -124,7 +125,7 @@ The `least_duration` algorithm walks the list of tests and assigns each test to
 * Clone this repository
 * Requirements:
   * [Poetry](https://python-poetry.org/)
-  * Python 3.
+  * Python 3.10+
 * Create a virtual environment and install the dependencies

 ```sh
{pytest_split-0.9.0 → pytest_split-0.11.0}/README.md

@@ -96,7 +96,7 @@ The `least_duration` algorithm walks the list of tests and assigns each test to
 * Clone this repository
 * Requirements:
   * [Poetry](https://python-poetry.org/)
-  * Python 3.
+  * Python 3.10+
 * Create a virtual environment and install the dependencies

 ```sh
{pytest_split-0.9.0 → pytest_split-0.11.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pytest-split"
-version = "0.9.0"
+version = "0.11.0"
 description = "Pytest plugin which splits the test suite to equally sized sub suites based on test execution time."
 authors = [
     "Jerry Pussinen <jerry.pussinen@gmail.com>",

@@ -18,11 +18,11 @@ classifiers = [
     "Operating System :: OS Independent",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.8",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
+    "Programming Language :: Python :: 3.14",
     "Topic :: Software Development :: Libraries :: Python Modules",
     "Typing :: Typed",
 ]

@@ -32,11 +32,11 @@ packages = [{ include = 'pytest_split', from = 'src' }]


 [tool.poetry.dependencies]
-python = ">=3.
-pytest = "^5 | ^6 | ^7 | ^8"
+python = ">=3.10, <4.0"
+pytest = "^5 | ^6 | ^7 | ^8 | ^9"


-[tool.poetry.dev
+[tool.poetry.group.dev.dependencies]
 importlib-metadata = "==4.11.*"
 mkdocstrings = {version = ">=0.18", extras = ["python"]}
 mkdocs-material = "*"

@@ -60,7 +60,7 @@ slowest-tests = "pytest_split.cli:list_slowest_tests"
 pytest-split = "pytest_split.plugin"

 [tool.black]
-target-version = ["
+target-version = ["py310", "py311", "py312", "py313", "py314"]
 include = '\.pyi?$'

 [tool.pytest.ini_options]

@@ -92,7 +92,7 @@ disallow_untyped_calls = false


 [tool.ruff]
-target-version = "
+target-version = "py310" # The lowest supported version

 [tool.ruff.lint]
 # By default, enable all the lint rules.
pytest_split-0.11.0/src/pytest_split/algorithms.py (new file)

@@ -0,0 +1,186 @@
+import enum
+import heapq
+from abc import ABC, abstractmethod
+from operator import itemgetter
+from typing import TYPE_CHECKING, NamedTuple
+
+if TYPE_CHECKING:
+    from _pytest import nodes
+
+
+class TestGroup(NamedTuple):
+    selected: "list[nodes.Item]"
+    deselected: "list[nodes.Item]"
+    duration: float
+
+
+class AlgorithmBase(ABC):
+    """Abstract base class for the algorithm implementations."""
+
+    @abstractmethod
+    def __call__(
+        self, splits: int, items: "list[nodes.Item]", durations: "dict[str, float]"
+    ) -> "list[TestGroup]":
+        pass
+
+    def __hash__(self) -> int:
+        return hash(self.__class__.__name__)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, AlgorithmBase):
+            return NotImplemented
+        return self.__class__.__name__ == other.__class__.__name__
+
+
+class LeastDurationAlgorithm(AlgorithmBase):
+    """
+    Split tests into groups by runtime.
+    It walks the test items, starting with the test with largest duration.
+    It assigns the test with the largest runtime to the group with the smallest duration sum.
+
+    The algorithm sorts the items by their duration. Since the sorting algorithm is stable, ties will be broken by
+    maintaining the original order of items. It is therefore important that the order of items be identical on all nodes
+    that use this plugin. Due to issue #25 this might not always be the case.
+
+    :param splits: How many groups we're splitting in.
+    :param items: Test items passed down by Pytest.
+    :param durations: Our cached test runtimes. Assumes contains timings only of relevant tests
+    :return:
+        List of groups
+    """
+
+    def __call__(
+        self, splits: int, items: "list[nodes.Item]", durations: "dict[str, float]"
+    ) -> "list[TestGroup]":
+        items_with_durations = _get_items_with_durations(items, durations)
+
+        # add index of item in list
+        items_with_durations_indexed = [
+            (*tup, i) for i, tup in enumerate(items_with_durations)
+        ]
+
+        # Sort by name to ensure it's always the same order
+        items_with_durations_indexed = sorted(
+            items_with_durations_indexed, key=lambda tup: str(tup[0])
+        )
+
+        # sort in ascending order
+        sorted_items_with_durations = sorted(
+            items_with_durations_indexed, key=lambda tup: tup[1], reverse=True
+        )
+
+        selected: list[list[tuple[nodes.Item, int]]] = [[] for _ in range(splits)]
+        deselected: list[list[nodes.Item]] = [[] for _ in range(splits)]
+        duration: list[float] = [0 for _ in range(splits)]
+
+        # create a heap of the form (summed_durations, group_index)
+        heap: list[tuple[float, int]] = [(0, i) for i in range(splits)]
+        heapq.heapify(heap)
+        for item, item_duration, original_index in sorted_items_with_durations:
+            # get group with smallest sum
+            summed_durations, group_idx = heapq.heappop(heap)
+            new_group_durations = summed_durations + item_duration
+
+            # store assignment
+            selected[group_idx].append((item, original_index))
+            duration[group_idx] = new_group_durations
+            for i in range(splits):
+                if i != group_idx:
+                    deselected[i].append(item)
+
+            # store new duration - in case of ties it sorts by the group_idx
+            heapq.heappush(heap, (new_group_durations, group_idx))
+
+        groups = []
+        for i in range(splits):
+            # sort the items by their original index to maintain relative ordering
+            # we don't care about the order of deselected items
+            s = [
+                item
+                for item, original_index in sorted(selected[i], key=lambda tup: tup[1])
+            ]
+            group = TestGroup(
+                selected=s, deselected=deselected[i], duration=duration[i]
+            )
+            groups.append(group)
+        return groups
+
+
+class DurationBasedChunksAlgorithm(AlgorithmBase):
+    """
+    Split tests into groups by runtime.
+    Ensures tests are split into non-overlapping groups.
+    The original list of test items is split into groups by finding boundary indices i_0, i_1, i_2
+    and creating group_1 = items[0:i_0], group_2 = items[i_0, i_1], group_3 = items[i_1, i_2], ...
+
+    :param splits: How many groups we're splitting in.
+    :param items: Test items passed down by Pytest.
+    :param durations: Our cached test runtimes. Assumes contains timings only of relevant tests
+    :return: List of TestGroup
+    """
+
+    def __call__(
+        self, splits: int, items: "list[nodes.Item]", durations: "dict[str, float]"
+    ) -> "list[TestGroup]":
+        items_with_durations = _get_items_with_durations(items, durations)
+        time_per_group = sum(map(itemgetter(1), items_with_durations)) / splits
+
+        selected: list[list[nodes.Item]] = [[] for i in range(splits)]
+        deselected: list[list[nodes.Item]] = [[] for i in range(splits)]
+        duration: list[float] = [0 for i in range(splits)]
+
+        group_idx = 0
+        for item, item_duration in items_with_durations:
+            if duration[group_idx] >= time_per_group:
+                group_idx += 1
+
+            selected[group_idx].append(item)
+            for i in range(splits):
+                if i != group_idx:
+                    deselected[i].append(item)
+            duration[group_idx] += item_duration
+
+        return [
+            TestGroup(
+                selected=selected[i], deselected=deselected[i], duration=duration[i]
+            )
+            for i in range(splits)
+        ]
+
+
+def _get_items_with_durations(
+    items: "list[nodes.Item]", durations: "dict[str, float]"
+) -> "list[tuple[nodes.Item, float]]":
+    durations = _remove_irrelevant_durations(items, durations)
+    avg_duration_per_test = _get_avg_duration_per_test(durations)
+    items_with_durations = [
+        (item, durations.get(item.nodeid, avg_duration_per_test)) for item in items
+    ]
+    return items_with_durations
+
+
+def _get_avg_duration_per_test(durations: "dict[str, float]") -> float:
+    if durations:
+        avg_duration_per_test = sum(durations.values()) / len(durations)
+    else:
+        # If there are no durations, give every test the same arbitrary value
+        avg_duration_per_test = 1
+    return avg_duration_per_test
+
+
+def _remove_irrelevant_durations(
+    items: "list[nodes.Item]", durations: "dict[str, float]"
+) -> "dict[str, float]":
+    # Filtering down durations to relevant ones ensures the avg isn't skewed by irrelevant data
+    test_ids = [item.nodeid for item in items]
+    durations = {name: durations[name] for name in test_ids if name in durations}
+    return durations
+
+
+class Algorithms(enum.Enum):
+    duration_based_chunks = DurationBasedChunksAlgorithm()
+    least_duration = LeastDurationAlgorithm()
+
+    @staticmethod
+    def names() -> "list[str]":
+        return [x.name for x in Algorithms]
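The docstrings in the new `algorithms.py` describe the two strategies in prose. The sketch below replays the same logic on bare numbers, outside pytest, so the difference is visible at a glance; the durations and the two standalone helper functions are illustrative stand-ins, not part of the package.

```python
import heapq

# Hypothetical per-test runtimes, standing in for the cached durations file.
fake_durations = {"test_a": 8.0, "test_b": 5.0, "test_c": 4.0, "test_d": 3.0, "test_e": 1.0}


def least_duration_sketch(splits: int, durations: dict[str, float]) -> list[list[str]]:
    """Greedy heap assignment, mirroring LeastDurationAlgorithm on plain names."""
    heap = [(0.0, i) for i in range(splits)]  # (summed duration, group index)
    heapq.heapify(heap)
    groups: list[list[str]] = [[] for _ in range(splits)]
    # Longest tests first, so a large test never lands late on an already-full group.
    for name, duration in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
        total, idx = heapq.heappop(heap)  # group with the smallest sum so far
        groups[idx].append(name)
        heapq.heappush(heap, (total + duration, idx))
    return groups


def duration_based_chunks_sketch(splits: int, durations: dict[str, float]) -> list[list[str]]:
    """Contiguous chunks, mirroring DurationBasedChunksAlgorithm: collection order is kept."""
    time_per_group = sum(durations.values()) / splits
    groups: list[list[str]] = [[] for _ in range(splits)]
    totals = [0.0] * splits
    idx = 0
    for name, duration in durations.items():  # original (collection) order
        # Guard added in this sketch so the index cannot run past the last group.
        if totals[idx] >= time_per_group and idx < splits - 1:
            idx += 1
        groups[idx].append(name)
        totals[idx] += duration
    return groups


print(least_duration_sketch(2, fake_durations))
# [['test_a', 'test_d'], ['test_b', 'test_c', 'test_e']]
print(duration_based_chunks_sketch(2, fake_durations))
# [['test_a', 'test_b'], ['test_c', 'test_d', 'test_e']]
```

With these numbers the greedy heap split balances the totals (11 s vs 10 s), while the chunked split keeps the original ordering at the cost of balance (13 s vs 8 s), which matches the trade-off the two docstrings describe.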
{pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/cli.py

@@ -1,9 +1,5 @@
 import argparse
 import json
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from typing import Dict


 def list_slowest_tests() -> None:

@@ -28,7 +24,7 @@ def list_slowest_tests() -> None:
     return _list_slowest_tests(json.load(args.durations_path), args.count)


-def _list_slowest_tests(durations: "
+def _list_slowest_tests(durations: "dict[str, float]", count: int) -> None:
     slowest_tests = tuple(
         sorted(durations.items(), key=lambda item: item[1], reverse=True)
     )[:count]
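For orientation on what `_list_slowest_tests` receives: the durations argument is the parsed JSON mapping of test nodeids to runtimes, and the slowest-N report is just a reverse sort over it, as the unchanged context lines above show. A minimal sketch with made-up data:

```python
import json

# Hypothetical contents of a durations file (nodeid -> seconds); not real pytest-split output.
durations_json = """
{
    "tests/test_api.py::test_login": 4.2,
    "tests/test_api.py::test_ping": 0.1,
    "tests/test_db.py::test_migration": 9.7
}
"""

durations = json.loads(durations_json)
count = 2
slowest = sorted(durations.items(), key=lambda item: item[1], reverse=True)[:count]
for nodeid, seconds in slowest:
    print(f"{seconds:.1f}s  {nodeid}")
# 9.7s  tests/test_db.py::test_migration
# 4.2s  tests/test_api.py::test_login
```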
{pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/ipynb_compatibility.py

@@ -1,8 +1,6 @@
 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
-    from typing import List
-
     from pytest_split.algorithms import TestGroup



@@ -45,8 +43,8 @@ def ensure_ipynb_compatibility(group: "TestGroup", items: list) -> None:  # type


 def _find_sibiling_ipynb_cells(
-    ipynb_node_id: str, item_node_ids: "
-) -> "
+    ipynb_node_id: str, item_node_ids: "list[str]"
+) -> "list[str]":
     """
     Returns all sibling IPyNb cells given an IPyNb cell nodeid.
     """
{pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/plugin.py

@@ -10,8 +10,6 @@ from pytest_split import algorithms
 from pytest_split.ipynb_compatibility import ensure_ipynb_compatibility

 if TYPE_CHECKING:
-    from typing import Dict, List, Optional, Union
-
     from _pytest import nodes
     from _pytest.config import Config
     from _pytest.config.argparsing import Parser

@@ -77,7 +75,7 @@ def pytest_addoption(parser: "Parser") -> None:


 @pytest.hookimpl(tryfirst=True)
-def pytest_cmdline_main(config: "Config") -> "
+def pytest_cmdline_main(config: "Config") -> "int | ExitCode | None":
     """
     Validate options.
     """

@@ -153,7 +151,7 @@ class PytestSplitPlugin(Base):

     @hookimpl(trylast=True)
     def pytest_collection_modifyitems(
-        self, config: "Config", items: "
+        self, config: "Config", items: "list[nodes.Item]"
     ) -> None:
         """
         Collect and select the tests we want to run, and deselect the rest.

@@ -193,7 +191,7 @@ class PytestSplitCachePlugin(Base):
         https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L308
         """
         terminal_reporter = self.config.pluginmanager.get_plugin("terminalreporter")
-        test_durations:
+        test_durations: dict[str, float] = {}

         for test_reports in terminal_reporter.stats.values():  # type: ignore[union-attr]
             for test_report in test_reports:
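The `PytestSplitCachePlugin` hunk stops just before the loop that fills `test_durations`; the diff only shows that the accumulator is now annotated with the built-in `dict[str, float]`. As rough orientation, aggregating per-test runtimes from reports can look like the sketch below, which uses invented tuples in place of pytest's report objects and is not the plugin's actual code:

```python
# Invented stand-ins for pytest report objects: (nodeid, phase, duration in seconds).
reports = [
    ("tests/test_api.py::test_login", "setup", 0.25),
    ("tests/test_api.py::test_login", "call", 3.5),
    ("tests/test_api.py::test_login", "teardown", 0.25),
    ("tests/test_api.py::test_ping", "call", 0.5),
]

test_durations: dict[str, float] = {}
for nodeid, _phase, duration in reports:
    # Sum setup/call/teardown so a test's cost reflects its full runtime.
    test_durations[nodeid] = test_durations.get(nodeid, 0.0) + duration

print(test_durations)
# {'tests/test_api.py::test_login': 4.0, 'tests/test_api.py::test_ping': 0.5}
```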
pytest_split-0.9.0/src/pytest_split/algorithms.py (removed file)

@@ -1,162 +0,0 @@
-import enum
-import functools
-import heapq
-from operator import itemgetter
-from typing import TYPE_CHECKING, NamedTuple
-
-if TYPE_CHECKING:
-    from typing import Dict, List, Tuple
-
-    from _pytest import nodes
-
-
-class TestGroup(NamedTuple):
-    selected: "List[nodes.Item]"
-    deselected: "List[nodes.Item]"
-    duration: float
-
-
-def least_duration(
-    splits: int, items: "List[nodes.Item]", durations: "Dict[str, float]"
-) -> "List[TestGroup]":
-    """
-    Split tests into groups by runtime.
-    It walks the test items, starting with the test with largest duration.
-    It assigns the test with the largest runtime to the group with the smallest duration sum.
-
-    The algorithm sorts the items by their duration. Since the sorting algorithm is stable, ties will be broken by
-    maintaining the original order of items. It is therefore important that the order of items be identical on all nodes
-    that use this plugin. Due to issue #25 this might not always be the case.
-
-    :param splits: How many groups we're splitting in.
-    :param items: Test items passed down by Pytest.
-    :param durations: Our cached test runtimes. Assumes contains timings only of relevant tests
-    :return:
-        List of groups
-    """
-    items_with_durations = _get_items_with_durations(items, durations)
-
-    # add index of item in list
-    items_with_durations_indexed = [
-        (*tup, i) for i, tup in enumerate(items_with_durations)
-    ]
-
-    # Sort by name to ensure it's always the same order
-    items_with_durations_indexed = sorted(
-        items_with_durations_indexed, key=lambda tup: str(tup[0])
-    )
-
-    # sort in ascending order
-    sorted_items_with_durations = sorted(
-        items_with_durations_indexed, key=lambda tup: tup[1], reverse=True
-    )
-
-    selected: List[List[Tuple[nodes.Item, int]]] = [[] for _ in range(splits)]
-    deselected: List[List[nodes.Item]] = [[] for _ in range(splits)]
-    duration: List[float] = [0 for _ in range(splits)]
-
-    # create a heap of the form (summed_durations, group_index)
-    heap: List[Tuple[float, int]] = [(0, i) for i in range(splits)]
-    heapq.heapify(heap)
-    for item, item_duration, original_index in sorted_items_with_durations:
-        # get group with smallest sum
-        summed_durations, group_idx = heapq.heappop(heap)
-        new_group_durations = summed_durations + item_duration
-
-        # store assignment
-        selected[group_idx].append((item, original_index))
-        duration[group_idx] = new_group_durations
-        for i in range(splits):
-            if i != group_idx:
-                deselected[i].append(item)
-
-        # store new duration - in case of ties it sorts by the group_idx
-        heapq.heappush(heap, (new_group_durations, group_idx))
-
-    groups = []
-    for i in range(splits):
-        # sort the items by their original index to maintain relative ordering
-        # we don't care about the order of deselected items
-        s = [
-            item for item, original_index in sorted(selected[i], key=lambda tup: tup[1])
-        ]
-        group = TestGroup(selected=s, deselected=deselected[i], duration=duration[i])
-        groups.append(group)
-    return groups
-
-
-def duration_based_chunks(
-    splits: int, items: "List[nodes.Item]", durations: "Dict[str, float]"
-) -> "List[TestGroup]":
-    """
-    Split tests into groups by runtime.
-    Ensures tests are split into non-overlapping groups.
-    The original list of test items is split into groups by finding boundary indices i_0, i_1, i_2
-    and creating group_1 = items[0:i_0], group_2 = items[i_0, i_1], group_3 = items[i_1, i_2], ...
-
-    :param splits: How many groups we're splitting in.
-    :param items: Test items passed down by Pytest.
-    :param durations: Our cached test runtimes. Assumes contains timings only of relevant tests
-    :return: List of TestGroup
-    """
-    items_with_durations = _get_items_with_durations(items, durations)
-    time_per_group = sum(map(itemgetter(1), items_with_durations)) / splits
-
-    selected: List[List[nodes.Item]] = [[] for i in range(splits)]
-    deselected: List[List[nodes.Item]] = [[] for i in range(splits)]
-    duration: List[float] = [0 for i in range(splits)]
-
-    group_idx = 0
-    for item, item_duration in items_with_durations:
-        if duration[group_idx] >= time_per_group:
-            group_idx += 1
-
-        selected[group_idx].append(item)
-        for i in range(splits):
-            if i != group_idx:
-                deselected[i].append(item)
-        duration[group_idx] += item_duration
-
-    return [
-        TestGroup(selected=selected[i], deselected=deselected[i], duration=duration[i])
-        for i in range(splits)
-    ]
-
-
-def _get_items_with_durations(
-    items: "List[nodes.Item]", durations: "Dict[str, float]"
-) -> "List[Tuple[nodes.Item, float]]":
-    durations = _remove_irrelevant_durations(items, durations)
-    avg_duration_per_test = _get_avg_duration_per_test(durations)
-    items_with_durations = [
-        (item, durations.get(item.nodeid, avg_duration_per_test)) for item in items
-    ]
-    return items_with_durations
-
-
-def _get_avg_duration_per_test(durations: "Dict[str, float]") -> float:
-    if durations:
-        avg_duration_per_test = sum(durations.values()) / len(durations)
-    else:
-        # If there are no durations, give every test the same arbitrary value
-        avg_duration_per_test = 1
-    return avg_duration_per_test
-
-
-def _remove_irrelevant_durations(
-    items: "List[nodes.Item]", durations: "Dict[str, float]"
-) -> "Dict[str, float]":
-    # Filtering down durations to relevant ones ensures the avg isn't skewed by irrelevant data
-    test_ids = [item.nodeid for item in items]
-    durations = {name: durations[name] for name in test_ids if name in durations}
-    return durations
-
-
-class Algorithms(enum.Enum):
-    # values have to wrapped inside functools to avoid them being considered method definitions
-    duration_based_chunks = functools.partial(duration_based_chunks)
-    least_duration = functools.partial(least_duration)
-
-    @staticmethod
-    def names() -> "List[str]":
-        return [x.name for x in Algorithms]
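The comment on the removed `Algorithms` enum explains why its values had to be wrapped in `functools.partial`: a plain function assigned in an enum body is a descriptor, so the enum machinery treats it as a method rather than a member. The snippet below illustrates that behaviour with made-up names; it is not package code.

```python
import enum
import functools


def double(x: int) -> int:
    return x * 2


class FunctionMembers(enum.Enum):
    as_function = double  # a function is a descriptor: treated as a method, not a member
    as_partial = functools.partial(double)  # wrapped value survives as a member (the 0.9.0 workaround)


print([member.name for member in FunctionMembers])
# ['as_partial'] on the interpreters 0.9.0 targeted; the bare function never becomes a member
print(FunctionMembers.as_partial.value(21))  # 42
```

Instances of `AlgorithmBase` are ordinary callable objects rather than descriptors, which is presumably why 0.11.0 can store `DurationBasedChunksAlgorithm()` and `LeastDurationAlgorithm()` directly as enum values and drop the `functools.partial` wrapping.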
{pytest_split-0.9.0 → pytest_split-0.11.0}/LICENSE: file without changes
{pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/__init__.py: file without changes
{pytest_split-0.9.0 → pytest_split-0.11.0}/src/pytest_split/py.typed: file without changes