pytest-plugins 0.1.2__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pytest_plugins-0.1.2/pytest_plugins.egg-info → pytest_plugins-0.1.4}/PKG-INFO +6 -11
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/README.md +5 -10
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/add_better_report.py +47 -31
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/fail2skip.py +5 -4
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/max_fail_streak.py +16 -20
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4/pytest_plugins.egg-info}/PKG-INFO +6 -11
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/setup.py +1 -1
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/LICENSE +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/MANIFEST.in +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pyproject.toml +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/__init__.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/helper.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/models/__init__.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/models/base_class_test.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/models/execution_data.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/models/status.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/models/test_data.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/pytest_helper.py +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins.egg-info/SOURCES.txt +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins.egg-info/dependency_links.txt +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins.egg-info/entry_points.txt +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins.egg-info/requires.txt +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins.egg-info/top_level.txt +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/requirements.txt +0 -0
- {pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/setup.cfg +0 -0
{pytest_plugins-0.1.2/pytest_plugins.egg-info → pytest_plugins-0.1.4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pytest-plugins
-Version: 0.1.2
+Version: 0.1.4
 Summary: A Python package for managing pytest plugins.
 Home-page: https://github.com/aviz92/pytest-plugins
 Author: Avi Zaguri
@@ -35,9 +35,10 @@ It includes improved `conftest.py` fixtures, automated test result reporting, de
 
 ---
 
-## Features
+## 🚀 Features
 - ✅ **`pytest-better-report`**: Enhanced test result tracking and structured JSON reporting.
 - ✅ **`pytest-maxfail-streak`**: Stop test execution after a configurable number of consecutive failures.
+- ✅ **`pytest-fail2skip`**: Automatically skip tests that fail due to known issues, preventing unnecessary test runs.
 
 ---
 
@@ -47,22 +48,16 @@ It includes improved `conftest.py` fixtures, automated test result reporting, de
 - pytest --better-report-enable --pr-number=123
 - pytest-maxfail-streak
 - pytest --maxfail-streak-enable --maxfail-streak=3
+- pytest-fail2skip
+- pytest --fail2skip-enable # must add `@pytest.mark.fail2skip` decorator to the test function
 
 or use the `pytest.ini` configuration file to set default values for these plugins.
 
 ```ini
 [pytest]
-addopts = --better-report-enable --maxfail-streak-enable --maxfail-streak=3
+addopts = --better-report-enable --pr-number=123 --maxfail-streak-enable --maxfail-streak=3 --fail2skip-enable
 ```
 
-or
-
-```ini
-[pytest]
-addopts = --better-report-enable --pr-number=123 --maxfail-streak-enable --maxfail-streak=3
-```
-
-
 ---
 
 ## 🤝 Contributing
{pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/README.md

@@ -4,9 +4,10 @@ It includes improved `conftest.py` fixtures, automated test result reporting, de
 
 ---
 
-## Features
+## 🚀 Features
 - ✅ **`pytest-better-report`**: Enhanced test result tracking and structured JSON reporting.
 - ✅ **`pytest-maxfail-streak`**: Stop test execution after a configurable number of consecutive failures.
+- ✅ **`pytest-fail2skip`**: Automatically skip tests that fail due to known issues, preventing unnecessary test runs.
 
 ---
 
@@ -16,22 +17,16 @@ It includes improved `conftest.py` fixtures, automated test result reporting, de
 - pytest --better-report-enable --pr-number=123
 - pytest-maxfail-streak
 - pytest --maxfail-streak-enable --maxfail-streak=3
+- pytest-fail2skip
+- pytest --fail2skip-enable # must add `@pytest.mark.fail2skip` decorator to the test function
 
 or use the `pytest.ini` configuration file to set default values for these plugins.
 
 ```ini
 [pytest]
-addopts = --better-report-enable --maxfail-streak-enable --maxfail-streak=3
+addopts = --better-report-enable --pr-number=123 --maxfail-streak-enable --maxfail-streak=3 --fail2skip-enable
 ```
 
-or
-
-```ini
-[pytest]
-addopts = --better-report-enable --pr-number=123 --maxfail-streak-enable --maxfail-streak=3
-```
-
-
 ---
 
 ## 🤝 Contributing
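Both README copies note that `--fail2skip-enable` only affects tests carrying the `@pytest.mark.fail2skip` marker; a minimal sketch of a test opting in (the test name and body are illustrative, not taken from the package):

```python
import pytest


@pytest.mark.fail2skip  # opt this test into the fail2skip plugin
def test_known_issue_workaround():
    # Illustrative body: per the README, when this assertion fails and
    # --fail2skip-enable is set, the plugin reports the test as skipped
    # instead of failing the run.
    assert sum([1, 2, 3]) == 6
```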
{pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/add_better_report.py

@@ -21,7 +21,7 @@ test_results = {}
 logger = logging.getLogger('pytest_plugins.add_better_report')
 
 
-def pytest_addoption(parser: Parser):
+def pytest_addoption(parser: Parser) -> None:
     parser.addoption(
         "--better-report-enable",
         action="store_true",
@@ -36,7 +36,7 @@ def pytest_addoption(parser: Parser):
     )
 
 
-def pytest_configure(config: Config):
+def pytest_configure(config: Config) -> None:
     if flag_is_enabled(config=config, flag_name="--better-report-enable"):
         config._better_report_enabled = True
     else:
@@ -44,7 +44,6 @@ def pytest_configure(config: Config):
 
 
 def pytest_sessionstart(session: Session) -> None:
-    """Called before the test session starts."""
     global_interface['session'] = session  # Store the session object in the global interface
 
     execution_results["execution_info"] = ExecutionData(
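These hunks only add return-type annotations to the standard option-registration hooks; for reference, a minimal self-contained sketch of the `pytest_addoption` / `pytest_configure` pattern the plugins use in a conftest.py (the `--my-flag-enable` option name is made up for illustration, not part of this package):

```python
# conftest.py — sketch of the pytest_addoption / pytest_configure flag pattern
from _pytest.config import Config, Parser


def pytest_addoption(parser: Parser) -> None:
    # Register a boolean command-line flag, as the plugins above do.
    parser.addoption(
        "--my-flag-enable",
        action="store_true",
        default=False,
        help="Enable the example behaviour.",
    )


def pytest_configure(config: Config) -> None:
    # Read the flag back once at configure time and stash it on the config object.
    config._my_flag_enabled = config.getoption("--my-flag-enable")
```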
@@ -52,12 +51,11 @@ def pytest_sessionstart(session: Session) -> None:
         execution_status=ExecutionStatus.STARTED,
         execution_start_time=datetime.now(timezone.utc).isoformat(),
     )
+    logger.debug("Better report: Test session started")
 
 
 @pytest.hookimpl(tryfirst=True)
 def pytest_collection_modifyitems(items: list[Function]) -> None:
-    """ This hook is called after the collection has been performed, but before the tests are executed """
-
     for item in items:
         test_name = get_test_name_without_parameters(item=item)
         test_full_name = get_test_full_name(item=item)
@@ -77,55 +75,75 @@ def pytest_collection_modifyitems(items: list[Function]) -> None:
 @pytest.fixture(scope="session", autouse=True)
 def session_setup_teardown() -> Generator[None, Any, None]:
     yield
+    exec_info = execution_results.get("execution_info")
+    if not exec_info:
+        logger.error("Execution info missing at session teardown")
+        return
 
     # update execution end time
-
+    exec_info.execution_end_time = datetime.now(timezone.utc).isoformat()
 
     # update execution duration time
-
-
-
-    (
+    try:
+        start_obj = datetime.fromisoformat(exec_info.execution_start_time)
+        end_obj = datetime.fromisoformat(exec_info.execution_end_time)
+        exec_info.execution_duration_sec = (end_obj - start_obj).total_seconds()
+    except Exception as e:
+        logger.error(f"Error computing execution duration: {e}")
+        exec_info.execution_duration_sec = None
+
 
     # update execution status
-
-
-
+    exec_info.execution_status = (
+        ExecutionStatus.PASSED if all(t.test_status == ExecutionStatus.PASSED for t in test_results.values())
+        else ExecutionStatus.FAILED
+    )
 
-
+    exec_info.test_list = list(test_results.keys())
 
-
-
+    output_dir = Path('results_output')
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    save_as_json(path=output_dir / 'execution_results.json', data=execution_results, default=serialize_data)
+    save_as_json(path=output_dir / 'test_results.json', data=test_results, default=serialize_data)
+    logger.info("Better report: Execution results saved")
 
 
 @pytest.fixture(autouse=True)
-def
+def save_test_results(request: FixtureRequest) -> None:
     test_item = request.node
-
-    # log the test results after each test.
     test_full_name = get_test_full_name(item=test_item)
-
+    if test_full_name in test_results:
+        logger.debug(f'Test Results: \n{json.dumps(test_results[test_full_name], indent=4, default=serialize_data)}')
+    else:
+        logger.warning(f"Test {test_full_name} missing in test_results during report")
 
 
 def pytest_runtest_teardown(item: Function) -> None:
-    """This runs after each test."""
-
     test_full_name = get_test_full_name(item=item)
     test_item = test_results[test_full_name]
+    if not test_item:
+        logger.warning(f"Test {test_full_name} missing in test_results during teardown")
+        return
 
     test_item.test_end_time = datetime.now(timezone.utc).isoformat()
     if test_item.test_start_time:  # Add test duration only if start time is set
-
-
-
+        try:
+            start_obj = datetime.fromisoformat(test_item.test_start_time)
+            end_obj = datetime.fromisoformat(test_item.test_end_time)
+            test_item.test_duration_sec = (end_obj - start_obj).total_seconds()
+        except Exception as e:
+            logger.error(f"Error computing test duration for {test_full_name}: {e}")
 
 
 @pytest.hookimpl(tryfirst=True)
 def pytest_runtest_makereport(item: Function, call: Any) -> None:
-    """ This hook is called after each test is run """
-
     if call.when == "call":
-
+        test_full_name = get_test_full_name(item=item)
+        test_item = test_results.get(test_full_name)
+        if not test_item:
+            logger.warning(f"Test {test_full_name} missing in test_results during makereport")
+            return
 
         test_item.test_status = ExecutionStatus.PASSED if call.excinfo is None else ExecutionStatus.FAILED
 
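The rewritten teardown logic derives durations by parsing the ISO-8601 strings it already stores; a standalone sketch of that computation outside pytest (the timestamps are illustrative):

```python
from datetime import datetime, timezone

# Illustrative start/end timestamps in the same ISO format the plugin stores.
start_iso = datetime.now(timezone.utc).isoformat()
end_iso = datetime.now(timezone.utc).isoformat()

# Same approach as the new code: parse both strings back and subtract.
start_obj = datetime.fromisoformat(start_iso)
end_obj = datetime.fromisoformat(end_iso)
duration_sec = (end_obj - start_obj).total_seconds()
print(f"duration: {duration_sec:.6f}s")
```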
@@ -143,10 +161,8 @@ def pytest_runtest_makereport(item: Function, call: Any) -> None:
 
 
 def pytest_sessionfinish(session: Session) -> None:
-    """Called after the whole test session finishes."""
-
     exit_status_code = session.session.exitstatus
     logger.info(f'Test session finished with exit status: {exit_status_code}')
     if exit_status_code != 0:
-        failed_tests = [v for
+        failed_tests = [v for v in test_results.values() if v.test_status == ExecutionStatus.FAILED]
         logger.debug(f'Failed tests: {json.dumps(failed_tests, indent=4, default=serialize_data)}')
{pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/fail2skip.py

@@ -1,8 +1,9 @@
 import logging
+from typing import Any
+
 import pytest
 from _pytest.config import Config, Parser
-from _pytest.
-from _pytest.outcomes import Skipped
+from _pytest.python import Function
 
 from pytest_plugins.add_better_report import test_results
 from pytest_plugins.models import ExecutionStatus
@@ -27,8 +28,8 @@ def pytest_configure(config: Config) -> None:
     config._fail2skip_enabled = config.getoption("--fail2skip-enable")
 
 
-@pytest.hookimpl(hookwrapper=True)
-def pytest_runtest_makereport(item, call):
+@pytest.hookimpl(hookwrapper=True, tryfirst=True)
+def pytest_runtest_makereport(item: Function, call: Any):
     outcome = yield
     report = outcome.get_result()
     if (
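fail2skip's hook is now a wrapper registered with `tryfirst=True`; for reference, a minimal sketch of the wrapping `pytest_runtest_makereport` pattern in a conftest.py (the debug logging is illustrative and not the plugin's behaviour):

```python
# conftest.py — sketch of a wrapping pytest_runtest_makereport hook
import logging

import pytest

logger = logging.getLogger(__name__)


@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item, call):
    # Code before the yield runs before the report for this phase is built.
    outcome = yield
    report = outcome.get_result()
    # After the yield, the finished TestReport can be inspected (or mutated).
    if report.when == "call":
        logger.debug("test %s finished with outcome %s", item.nodeid, report.outcome)
```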
{pytest_plugins-0.1.2 → pytest_plugins-0.1.4}/pytest_plugins/max_fail_streak.py

@@ -9,11 +9,11 @@ from pytest_plugins.add_better_report import test_results
 from pytest_plugins.models import ExecutionStatus
 from pytest_plugins.pytest_helper import get_test_full_name, flag_is_enabled
 
-logger = logging.getLogger('pytest_plugins.
+logger = logging.getLogger('pytest_plugins.max_fail_streak')
 global_interface = {}
 
 
-def pytest_addoption(parser: Parser):
+def pytest_addoption(parser: Parser) -> None:
     parser.addoption(
         "--maxfail-streak-enable",
         action="store_true",
@@ -28,7 +28,7 @@ def pytest_addoption(parser: Parser):
     )  # for using maxfail not streak, you can use the built-in pytest option `--maxfail`
 
 
-def pytest_configure(config: Config):
+def pytest_configure(config: Config) -> None:
     if flag_is_enabled(config=config, flag_name="--maxfail-streak-enable"):
         config._max_fail_streak_enabled = True
     else:
@@ -36,39 +36,35 @@ def pytest_configure(config: Config):
 
 
 def pytest_sessionstart(session: Session) -> None:
-    """Called before the test session starts."""
     _max_fail_streak = session.config.getoption("--maxfail-streak")
     global_interface['max_fail_streak'] = int(_max_fail_streak) if _max_fail_streak else None
     global_interface['fail_streak'] = 0
 
 
 def pytest_runtest_setup(item: Function) -> None:
-
-
-
-        global_interface['fail_streak'] >= global_interface['max_fail_streak']
-    ):
+    max_streak = global_interface['max_fail_streak']
+    fail_streak = global_interface['fail_streak']
+    if max_streak and fail_streak >= max_streak:
         _skip_message = 'Skipping test due to maximum consecutive failures reached.'
 
-
-        test_results[
+        test_name = get_test_full_name(item=item)
+        test_results[test_name].test_status = ExecutionStatus.SKIPPED
+        test_results[test_name].exception_message = {
            "exception_type": "MaxFailStreakReached",
            "message": _skip_message,
        }
+        logger.info(f"Skipping test {test_name} because fail streak {fail_streak} reached max {max_streak}")
        pytest.skip(_skip_message)
 
-    if getattr(item.cls, 'component', None):
-        logger.debug(f"Test class {item.cls.__name__} has parameter 'component' with value: {item.cls.component}")
 
-
-def pytest_runtest_logreport(report: TestReport):
+def pytest_runtest_logreport(report: TestReport) -> None:
     if report.when == "call":
         global_interface['fail_streak'] = global_interface['fail_streak'] + 1 if report.failed else 0
 
-
-
-
-    ):
+    max_streak = global_interface['max_fail_streak']
+    fail_streak = global_interface['fail_streak']
+    if max_streak and fail_streak >= max_streak:
         logger.error(
-            f'Maximum consecutive test failures reached: {global_interface["max_fail_streak"]}.
+            f'Maximum consecutive test failures reached: {global_interface["max_fail_streak"]}. '
+            f'Stopping execution.'
         )
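The plugin's streak tracking reduces to a counter that increments on a failed call report and resets on any pass; a standalone sketch of that logic with an illustrative pass/fail sequence:

```python
# Standalone illustration of the consecutive-failure counter used above.
max_fail_streak = 3
fail_streak = 0

# True means "this test failed"; the sequence is illustrative only.
for failed in [True, True, False, True, True, True, True]:
    fail_streak = fail_streak + 1 if failed else 0
    if max_fail_streak and fail_streak >= max_fail_streak:
        print(f"Maximum consecutive test failures reached: {max_fail_streak}. Stopping execution.")
        break
```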
{pytest_plugins-0.1.2 → pytest_plugins-0.1.4/pytest_plugins.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pytest-plugins
-Version: 0.1.2
+Version: 0.1.4
 Summary: A Python package for managing pytest plugins.
 Home-page: https://github.com/aviz92/pytest-plugins
 Author: Avi Zaguri
@@ -35,9 +35,10 @@ It includes improved `conftest.py` fixtures, automated test result reporting, de
 
 ---
 
-## Features
+## 🚀 Features
 - ✅ **`pytest-better-report`**: Enhanced test result tracking and structured JSON reporting.
 - ✅ **`pytest-maxfail-streak`**: Stop test execution after a configurable number of consecutive failures.
+- ✅ **`pytest-fail2skip`**: Automatically skip tests that fail due to known issues, preventing unnecessary test runs.
 
 ---
 
@@ -47,22 +48,16 @@ It includes improved `conftest.py` fixtures, automated test result reporting, de
 - pytest --better-report-enable --pr-number=123
 - pytest-maxfail-streak
 - pytest --maxfail-streak-enable --maxfail-streak=3
+- pytest-fail2skip
+- pytest --fail2skip-enable # must add `@pytest.mark.fail2skip` decorator to the test function
 
 or use the `pytest.ini` configuration file to set default values for these plugins.
 
 ```ini
 [pytest]
-addopts = --better-report-enable --maxfail-streak-enable --maxfail-streak=3
+addopts = --better-report-enable --pr-number=123 --maxfail-streak-enable --maxfail-streak=3 --fail2skip-enable
 ```
 
-or
-
-```ini
-[pytest]
-addopts = --better-report-enable --pr-number=123 --maxfail-streak-enable --maxfail-streak=3
-```
-
-
 ---
 
 ## 🤝 Contributing