testtrain-pytest 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- testtrain_pytest-0.1.0/PKG-INFO +118 -0
- testtrain_pytest-0.1.0/README.md +102 -0
- testtrain_pytest-0.1.0/pyproject.toml +34 -0
- testtrain_pytest-0.1.0/setup.cfg +4 -0
- testtrain_pytest-0.1.0/src/testtrain_pytest/__init__.py +420 -0
- testtrain_pytest-0.1.0/src/testtrain_pytest.egg-info/PKG-INFO +118 -0
- testtrain_pytest-0.1.0/src/testtrain_pytest.egg-info/SOURCES.txt +14 -0
- testtrain_pytest-0.1.0/src/testtrain_pytest.egg-info/dependency_links.txt +1 -0
- testtrain_pytest-0.1.0/src/testtrain_pytest.egg-info/entry_points.txt +2 -0
- testtrain_pytest-0.1.0/src/testtrain_pytest.egg-info/requires.txt +4 -0
- testtrain_pytest-0.1.0/src/testtrain_pytest.egg-info/top_level.txt +1 -0
- testtrain_pytest-0.1.0/tests/test_allure_descriptions.py +137 -0
- testtrain_pytest-0.1.0/tests/test_allure_steps.py +243 -0
- testtrain_pytest-0.1.0/tests/test_allure_tags.py +140 -0
- testtrain_pytest-0.1.0/tests/test_allure_titles.py +134 -0
- testtrain_pytest-0.1.0/tests/test_reporting_lifecycle.py +99 -0
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: testtrain-pytest
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Testtrain Pytest Plugin — Real-time test result reporting.
|
|
5
|
+
License: GPL-3.0-only
|
|
6
|
+
Classifier: Framework :: Pytest
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Requires-Python: >=3.8
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
Requires-Dist: pytest>=7.0.0
|
|
13
|
+
Requires-Dist: requests
|
|
14
|
+
Requires-Dist: allure-pytest
|
|
15
|
+
Requires-Dist: pytest-mock>=3.14.1
|
|
16
|
+
|
|
17
|
+
# testtrain-pytest
|
|
18
|
+
|
|
19
|
+
Testtrain Pytest Plugin — Real-time test result reporting.
|
|
20
|
+
|
|
21
|
+
Sends each test result to the Testtrain platform API immediately after the test finishes, enabling real-time visibility into your test runs.
|
|
22
|
+
|
|
23
|
+
## Installation
|
|
24
|
+
|
|
25
|
+
You can install `testtrain-pytest` via pip:
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
pip install testtrain-pytest
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
Alternatively, you can install the development version from GitHub:
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
pip install git+https://github.com/njxqlus/testtrain-pytest.git
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Configuration
|
|
38
|
+
|
|
39
|
+
The plugin supports the following settings:
|
|
40
|
+
- **Run ID** (Mandatory): The UUID of an existing test run in Testtrain.
|
|
41
|
+
- **Auth Token** (Mandatory): Your bearer authentication token.
|
|
42
|
+
- **URL** (Optional): The platform base URL. Defaults to `https://testtrain.io`.
|
|
43
|
+
|
|
44
|
+
You can configure these using environment variables, command-line arguments, or your `pytest.ini` file.
|
|
45
|
+
|
|
46
|
+
### Option 1: Environment Variables (Recommended)
|
|
47
|
+
|
|
48
|
+
Set these in your shell before running pytest. This is standard for CI/CD environments.
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
export TESTTRAIN_RUN_ID="your-run-uuid"
|
|
52
|
+
export TESTTRAIN_AUTH_TOKEN="your-token"
|
|
53
|
+
export TESTTRAIN_URL="https://testtrain.io" # Optional
|
|
54
|
+
pytest
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
> [!TIP]
|
|
58
|
+
> If you want to use a `.env` file, you should install `pytest-dotenv` separately as this plugin does not load `.env` files automatically.
|
|
59
|
+
|
|
60
|
+
### Option 2: Command Line Arguments
|
|
61
|
+
|
|
62
|
+
Pass them directly to the `pytest` command.
|
|
63
|
+
|
|
64
|
+
```bash
|
|
65
|
+
pytest --testtrain-run-id=your-run-uuid --testtrain-auth-token=your-token --testtrain-url=https://custom.testtrain.io
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Option 3: Configuration File (`pytest.ini` or `pyproject.toml`)
|
|
69
|
+
|
|
70
|
+
Add them to your project's configuration file.
|
|
71
|
+
|
|
72
|
+
**pytest.ini**:
|
|
73
|
+
```ini
|
|
74
|
+
[pytest]
|
|
75
|
+
testtrain_run_id = your-run-uuid
|
|
76
|
+
testtrain_auth_token = your-token
|
|
77
|
+
testtrain_url = https://testtrain.io
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
**pyproject.toml**:
|
|
81
|
+
```toml
|
|
82
|
+
[tool.pytest.ini_options]
|
|
83
|
+
testtrain_run_id = "your-run-uuid"
|
|
84
|
+
testtrain_auth_token = "your-token"
|
|
85
|
+
testtrain_url = "https://testtrain.io"
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
## Usage
|
|
89
|
+
|
|
90
|
+
Once configured, the plugin works automatically. If the required configuration is missing, the plugin will remain inactive and won't affect your tests.
|
|
91
|
+
|
|
92
|
+
### Allure Integration
|
|
93
|
+
|
|
94
|
+
To capture Allure metadata (like custom titles and labels), you must run your tests with the Allure plugin enabled:
|
|
95
|
+
|
|
96
|
+
```bash
|
|
97
|
+
pytest --alluredir=allure-results
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
Without the `--alluredir` flag, Allure metadata will not be available to the Testtrain plugin during the test run.
|
|
101
|
+
|
|
102
|
+
## Running Tests
|
|
103
|
+
|
|
104
|
+
To verify that the plugin correctly handles Allure fields and reports to Testtrain, you can run the provided test suite. These tests use `pytester` to simulate real test runs and verify that the plugin sends the correct data without making real network requests.
|
|
105
|
+
|
|
106
|
+
### Prerequisites
|
|
107
|
+
|
|
108
|
+
Install the development dependencies and set up the environment:
|
|
109
|
+
|
|
110
|
+
```bash
|
|
111
|
+
uv sync
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
### Run Allure Reporting Tests
|
|
115
|
+
|
|
116
|
+
```bash
|
|
117
|
+
uv run pytest -v -p pytester tests/
|
|
118
|
+
```
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
# testtrain-pytest
|
|
2
|
+
|
|
3
|
+
Testtrain Pytest Plugin — Real-time test result reporting.
|
|
4
|
+
|
|
5
|
+
Sends each test result to the Testtrain platform API immediately after the test finishes, enabling real-time visibility into your test runs.
|
|
6
|
+
|
|
7
|
+
## Installation
|
|
8
|
+
|
|
9
|
+
You can install `testtrain-pytest` via pip:
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
pip install testtrain-pytest
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
Alternatively, you can install the development version from GitHub:
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
pip install git+https://github.com/njxqlus/testtrain-pytest.git
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Configuration
|
|
22
|
+
|
|
23
|
+
The plugin supports the following settings:
|
|
24
|
+
- **Run ID** (Mandatory): The UUID of an existing test run in Testtrain.
|
|
25
|
+
- **Auth Token** (Mandatory): Your bearer authentication token.
|
|
26
|
+
- **URL** (Optional): The platform base URL. Defaults to `https://testtrain.io`.
|
|
27
|
+
|
|
28
|
+
You can configure these using environment variables, command-line arguments, or your `pytest.ini` file.
|
|
29
|
+
|
|
30
|
+
### Option 1: Environment Variables (Recommended)
|
|
31
|
+
|
|
32
|
+
Set these in your shell before running pytest. This is standard for CI/CD environments.
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
export TESTTRAIN_RUN_ID="your-run-uuid"
|
|
36
|
+
export TESTTRAIN_AUTH_TOKEN="your-token"
|
|
37
|
+
export TESTTRAIN_URL="https://testtrain.io" # Optional
|
|
38
|
+
pytest
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
> [!TIP]
|
|
42
|
+
> If you want to use a `.env` file, you should install `pytest-dotenv` separately as this plugin does not load `.env` files automatically.
|
|
43
|
+
|
|
44
|
+
### Option 2: Command Line Arguments
|
|
45
|
+
|
|
46
|
+
Pass them directly to the `pytest` command.
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
pytest --testtrain-run-id=your-run-uuid --testtrain-auth-token=your-token --testtrain-url=https://custom.testtrain.io
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### Option 3: Configuration File (`pytest.ini` or `pyproject.toml`)
|
|
53
|
+
|
|
54
|
+
Add them to your project's configuration file.
|
|
55
|
+
|
|
56
|
+
**pytest.ini**:
|
|
57
|
+
```ini
|
|
58
|
+
[pytest]
|
|
59
|
+
testtrain_run_id = your-run-uuid
|
|
60
|
+
testtrain_auth_token = your-token
|
|
61
|
+
testtrain_url = https://testtrain.io
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
**pyproject.toml**:
|
|
65
|
+
```toml
|
|
66
|
+
[tool.pytest.ini_options]
|
|
67
|
+
testtrain_run_id = "your-run-uuid"
|
|
68
|
+
testtrain_auth_token = "your-token"
|
|
69
|
+
testtrain_url = "https://testtrain.io"
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
## Usage
|
|
73
|
+
|
|
74
|
+
Once configured, the plugin works automatically. If the required configuration is missing, the plugin will remain inactive and won't affect your tests.
|
|
75
|
+
|
|
76
|
+
### Allure Integration
|
|
77
|
+
|
|
78
|
+
To capture Allure metadata (like custom titles and labels), you must run your tests with the Allure plugin enabled:
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
pytest --alluredir=allure-results
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
Without the `--alluredir` flag, Allure metadata will not be available to the Testtrain plugin during the test run.
|
|
85
|
+
|
|
86
|
+
## Running Tests
|
|
87
|
+
|
|
88
|
+
To verify that the plugin correctly handles Allure fields and reports to Testtrain, you can run the provided test suite. These tests use `pytester` to simulate real test runs and verify that the plugin sends the correct data without making real network requests.
|
|
89
|
+
|
|
90
|
+
### Prerequisites
|
|
91
|
+
|
|
92
|
+
Install the development dependencies and set up the environment:
|
|
93
|
+
|
|
94
|
+
```bash
|
|
95
|
+
uv sync
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
### Run Allure Reporting Tests
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
uv run pytest -v -p pytester tests/
|
|
102
|
+
```
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "testtrain-pytest"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Testtrain Pytest Plugin — Real-time test result reporting."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = {text = "GPL-3.0-only"}
|
|
11
|
+
requires-python = ">=3.8"
|
|
12
|
+
dependencies = [
|
|
13
|
+
"pytest>=7.0.0",
|
|
14
|
+
"requests",
|
|
15
|
+
"allure-pytest",
|
|
16
|
+
"pytest-mock>=3.14.1",
|
|
17
|
+
]
|
|
18
|
+
classifiers = [
|
|
19
|
+
"Framework :: Pytest",
|
|
20
|
+
"Programming Language :: Python :: 3",
|
|
21
|
+
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
|
|
22
|
+
"Operating System :: OS Independent",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
[project.entry-points."pytest11"]
|
|
26
|
+
testtrain = "testtrain_pytest"
|
|
27
|
+
|
|
28
|
+
[tool.setuptools.packages.find]
|
|
29
|
+
where = ["src"]
|
|
30
|
+
|
|
31
|
+
[dependency-groups]
|
|
32
|
+
dev = [
|
|
33
|
+
"ruff>=0.15.8",
|
|
34
|
+
]
|
|
@@ -0,0 +1,420 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
from datetime import datetime, timezone
|
|
4
|
+
|
|
5
|
+
import pytest
|
|
6
|
+
import requests
|
|
7
|
+
|
|
8
|
+
# Map pytest outcomes to API states
|
|
9
|
+
_STATE_MAP = {
|
|
10
|
+
"passed": "passed",
|
|
11
|
+
"failed": "failed",
|
|
12
|
+
"skipped": "skipped",
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def pytest_addoption(parser):
    """Register the Testtrain CLI flags and their matching INI settings."""
    group = parser.getgroup("testtrain", "Testtrain reporting")

    # Command-line flags, registered in a data-driven loop.
    cli_flags = [
        ("--testtrain-url", "Platform base URL (default: https://testtrain.io)"),
        ("--testtrain-run-id", "UUID of an existing testrun"),
        ("--testtrain-auth-token", "Bearer auth token"),
        (
            "--testtrain-create-tag",
            "Create tags if they do not exist on the platform (default: true)",
        ),
    ]
    for flag, description in cli_flags:
        group.addoption(flag, help=description)

    # INI settings (allows putting these in pytest.ini or pyproject.toml).
    ini_settings = [
        ("testtrain_url", "Platform base URL"),
        ("testtrain_run_id", "UUID of an existing testrun"),
        ("testtrain_auth_token", "Bearer auth token"),
        ("testtrain_create_tag", "Create tags if they do not exist on the platform"),
    ]
    for key, description in ini_settings:
        parser.addini(key, help=description)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# Module-level handle to the active pytest Config. Set in pytest_configure so
# that pytest_runtest_logreport (which only receives a report object) can
# reach the resolved Testtrain settings.
_PLUGIN_CONFIG = None
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Resolve plugin settings and stash them (plus bookkeeping dicts) on config."""
    global _PLUGIN_CONFIG
    _PLUGIN_CONFIG = config

    def _setting(cli_flag, ini_key, env_var, default=None):
        # Resolution priority: CLI > Config File > Environment Variable > Default.
        return (
            config.getoption(cli_flag)
            or config.getini(ini_key)
            or os.getenv(env_var)
            or default
        )

    url = _setting(
        "--testtrain-url", "testtrain_url", "TESTTRAIN_URL", "https://testtrain.io"
    )
    run_id = _setting("--testtrain-run-id", "testtrain_run_id", "TESTTRAIN_RUN_ID")
    auth_token = _setting(
        "--testtrain-auth-token", "testtrain_auth_token", "TESTTRAIN_AUTH_TOKEN"
    )
    create_tag = _setting(
        "--testtrain-create-tag", "testtrain_create_tag", "TESTTRAIN_CREATE_TAG", "true"
    )

    # Values that the other hooks read back from the config object.
    config._testtrain_url = url.rstrip("/")
    config._testtrain_run_id = run_id
    config._testtrain_auth_token = auth_token
    config._testtrain_create_tag = str(create_tag).lower() == "true"
    # Reporting is active only when both mandatory settings are present.
    config._testtrain_enabled = bool(run_id and auth_token)

    # Per-test lifecycle bookkeeping used by the runtest hooks.
    config._test_start_times = {}
    config._test_meta_stash = {}
    config._test_outcome_stash = {}
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def pytest_sessionstart(session):
    """Print the plugin's reporting status once, on the controller process only."""
    config = session.config

    # xdist workers carry 'workerinput'; only the controller should print.
    if hasattr(config, "workerinput"):
        return

    if config._testtrain_enabled:
        print(f"\n🚀 Testtrain: reporting to {config._testtrain_url}")
        print(f" Testrun ID: {config._testtrain_run_id}\n")
        return

    missing = [
        env_name
        for env_name, value in (
            ("TESTTRAIN_RUN_ID", config._testtrain_run_id),
            ("TESTTRAIN_AUTH_TOKEN", config._testtrain_auth_token),
        )
        if not value
    ]

    # Stay quiet when the plugin is plainly unconfigured (both values absent
    # and no explicit --testtrain-url); warn only on partial configuration.
    if len(missing) < 2 or config.getoption("--testtrain-url"):
        print(f"\n⚠️ Testtrain: reporting disabled. Missing: {', '.join(missing)}")
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """Stamp the test's wall-clock start time before its setup phase runs."""
    cfg = item.config
    if not cfg._testtrain_enabled:
        return
    cfg._test_start_times[item.nodeid] = _utc_now_iso()
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item, call):
    """
    Capture metadata and outcome across all phases and attach to teardown for reporting.

    Runs as a hookwrapper around every phase (setup/call/teardown) of each
    test. It accumulates the overall outcome in config._test_outcome_stash
    and, on the final phase, bundles everything into a ("testtrain_data",
    dict) entry on report.user_properties, which pytest_runtest_logreport
    later reads and POSTs to the platform.
    """
    try:
        # Let pytest build the TestReport for this phase first.
        outcome = yield
        report = outcome.get_result()

        if not getattr(item.config, "_testtrain_enabled", False):
            return

        # 1. Capture metadata for the current phase
        _extract_metadata(item)

        # 2. Accumulate overall test state
        # A test starts out "passed"; later phases can only downgrade it.
        if item.nodeid not in item.config._test_outcome_stash:
            item.config._test_outcome_stash[item.nodeid] = {
                "outcome": "passed",
                "longrepr": None,
                "reported": False,
            }

        stash = item.config._test_outcome_stash[item.nodeid]
        if report.failed:
            # Prefer body/setup failures over teardown failures
            # (only skip the overwrite when already failed AND this is teardown).
            if stash["outcome"] != "failed" or report.when != "teardown":
                stash["outcome"] = "failed"
                stash["longrepr"] = report.longreprtext
        elif report.skipped and stash["outcome"] == "passed":
            stash["outcome"] = "skipped"

        # 3. Attach data to the report for final delivery.
        # We report on teardown, OR on setup if skipped/failed (teardown won't run or we want early info).
        # We ensure only one report is ever sent via 'reported' flag.
        should_report = False
        if report.when == "teardown":
            should_report = True
        elif report.when == "setup" and (report.skipped or report.failed):
            should_report = True

        if should_report and not stash["reported"]:
            stash["reported"] = True
            current_meta = getattr(item.config, "_test_meta_stash", {}).get(
                item.nodeid, {}
            )
            # Best-effort Allure snapshot (title/steps); empty when Allure is off.
            allure_data = _get_allure_result_data()

            data = {
                "start_time": getattr(item.config, "_test_start_times", {}).get(
                    item.nodeid
                ),
                "finished_at": _utc_now_iso(),
                "meta": current_meta,
                "allure_title": allure_data.get("name"),
                "allure_steps": allure_data.get("steps"),
                "name": item.nodeid,
                "outcome": stash["outcome"],
                "longrepr": stash["longrepr"],
            }

            # NOTE(review): delivery relies on user_properties traveling with
            # the report to where pytest_runtest_logreport runs — confirm for
            # xdist setups.
            if not hasattr(report, "user_properties"):
                report.user_properties = []
            report.user_properties.append(("testtrain_data", data))
    except Exception as e:
        # Never let a reporting bug fail the user's tests; surface it instead.
        print(f"\n ⚠️ Testtrain internal error: {e}")
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def pytest_runtest_logreport(report):
    """Deliver one finished test's result to the Testtrain API.

    Runs on the controller only, reports each test exactly once, retries
    transient failures, and aborts the session via ``pytest.exit`` on
    unrecoverable errors so no results are silently lost.
    """
    config = _PLUGIN_CONFIG
    if not config or not getattr(config, "_testtrain_enabled", False):
        return

    # xdist workers forward reports to the controller; only it should POST.
    if hasattr(config, "workerinput"):
        return

    # We report on 'teardown' for most tests, or on 'setup' when it skipped or
    # failed (the test body never ran). Together with the 'reported' flag set
    # by pytest_runtest_makereport this yields exactly one report per test.
    is_final_phase = report.when == "teardown" or (
        report.when == "setup" and (report.skipped or report.failed)
    )
    if not is_final_phase:
        return

    # Extract bundled data from user_properties safely
    data = {}
    for prop in getattr(report, "user_properties", []):
        if isinstance(prop, tuple) and len(prop) == 2 and prop[0] == "testtrain_data":
            data = prop[1]
            break

    if not data:
        return

    finished_at = data.get("finished_at") or _utc_now_iso()
    started_at = data.get("start_time") or finished_at
    meta = data.get("meta") or {}
    computed_name = data.get("allure_title") or data.get("name") or report.nodeid
    description = meta.get("allure_description")
    # Unknown outcomes are reported as 'failed' so they are never hidden.
    state = _STATE_MAP.get(data.get("outcome"), "failed")

    # Capture Allure tags
    tags = [
        label.get("value")
        for label in meta.get("allure_labels", [])
        if label.get("name") == "tag"
    ]

    test_entry = {
        "testrunId": config._testtrain_run_id,
        "name": computed_name,
        "nodeId": report.nodeid,
        "state": state,
        "startedAt": started_at,
        "finishedAt": finished_at,
        "description": description,
        "defects": meta.get("allure_links", []),
        "tags": tags,
        "create_tag_if_not_exists": config._testtrain_create_tag,
        "output": data.get("longrepr") or "",
    }

    if data.get("allure_steps"):
        test_entry["steps"] = data.get("allure_steps")

    max_retries = 3
    for attempt in range(max_retries + 1):
        try:
            resp = requests.post(
                f"{config._testtrain_url}/api/tests",
                json={"tests": [test_entry]},
                headers={
                    "Authorization": f"Bearer {config._testtrain_auth_token}",
                    "Content-Type": "application/json",
                },
                timeout=10,
            )
        except Exception as e:
            # Network-level error: retry, then abort the whole run.
            if attempt < max_retries:
                time.sleep(10)
                continue
            pytest.exit(
                f"\n❌ Testtrain: Connection error during reporting after {max_retries + 1} attempts: {e}\n Aborting to ensure no results are lost."
            )

        if resp.ok:
            break

        # Derive the best error message without assuming the body is JSON.
        # Fix: resp.json() raises on non-JSON error bodies (e.g. an HTML 502
        # page), which previously masked the real HTTP error.
        error_msg = resp.text
        if resp.content:
            try:
                error_msg = resp.json().get("message", resp.text)
            except (ValueError, AttributeError):
                error_msg = resp.text

        if 400 <= resp.status_code < 500:
            # Client errors will not succeed on retry: abort immediately.
            pytest.exit(
                f"\n❌ Testtrain: Failed to send test result (Status {resp.status_code}).\n Error: {error_msg}\n Aborting to ensure no results are lost."
            )

        # Server-side error: retry, then abort.
        if attempt < max_retries:
            time.sleep(10)
            continue
        pytest.exit(
            f"\n❌ Testtrain: Failed to send test result after {max_retries + 1} attempts (Status {resp.status_code}).\n Error: {error_msg}\n Aborting to ensure no results are lost."
        )
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
def _utc_now_iso() -> str:
|
|
283
|
+
"""Return current UTC time in ISO format with Z suffix."""
|
|
284
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
def _get_allure_result_data() -> dict:
|
|
288
|
+
"""Attempts to extract the current test's Allure data (name, steps)."""
|
|
289
|
+
res = {"name": None, "steps": None}
|
|
290
|
+
try:
|
|
291
|
+
import allure_commons
|
|
292
|
+
|
|
293
|
+
plugins = allure_commons.plugin_manager.get_plugins()
|
|
294
|
+
listener = next(
|
|
295
|
+
(
|
|
296
|
+
p
|
|
297
|
+
for p in plugins
|
|
298
|
+
if type(p).__name__ == "AllureListener"
|
|
299
|
+
or (
|
|
300
|
+
hasattr(p, "allure_logger") and hasattr(p.allure_logger, "get_test")
|
|
301
|
+
)
|
|
302
|
+
),
|
|
303
|
+
None,
|
|
304
|
+
)
|
|
305
|
+
if not listener:
|
|
306
|
+
# Fallback for some environments where the listener might be hidden or named differently
|
|
307
|
+
for p in plugins:
|
|
308
|
+
if hasattr(p, "allure_logger"):
|
|
309
|
+
listener = p
|
|
310
|
+
break
|
|
311
|
+
if listener:
|
|
312
|
+
test_result = listener.allure_logger.get_test(None)
|
|
313
|
+
if test_result:
|
|
314
|
+
if test_result.name:
|
|
315
|
+
res["name"] = str(test_result.name)
|
|
316
|
+
if test_result.steps:
|
|
317
|
+
res["steps"] = [_map_allure_step(s) for s in test_result.steps]
|
|
318
|
+
except (ImportError, Exception):
|
|
319
|
+
pass
|
|
320
|
+
return res
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
def _map_allure_step(step) -> dict:
    """Recursively convert an Allure StepResult into the Testtrain step schema."""
    from allure_commons.model2 import Status

    # Combine failure message and traceback (if any) into one output string.
    output = None
    details = step.statusDetails
    if details:
        output = "\n".join(part for part in (details.message, details.trace) if part)

    mapped = {
        "name": str(step.name) if step.name else "step",
        "is_failed": step.status in (Status.FAILED, Status.BROKEN),
        "duration": int(step.stop - step.start) if step.stop and step.start else 0,
    }
    if output:
        mapped["output"] = output
    if step.steps:
        mapped["steps"] = [_map_allure_step(child) for child in step.steps]

    return mapped
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
def _extract_metadata(item):
|
|
351
|
+
"""Internal helper to pull Allure and Pytest markers."""
|
|
352
|
+
try:
|
|
353
|
+
if not hasattr(item.config, "_test_meta_stash"):
|
|
354
|
+
item.config._test_meta_stash = {}
|
|
355
|
+
if item.nodeid not in item.config._test_meta_stash:
|
|
356
|
+
item.config._test_meta_stash[item.nodeid] = {
|
|
357
|
+
"markers": [],
|
|
358
|
+
"allure_labels": [],
|
|
359
|
+
"allure_links": [],
|
|
360
|
+
"allure_description": None,
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
stash = item.config._test_meta_stash[item.nodeid]
|
|
364
|
+
|
|
365
|
+
markers = []
|
|
366
|
+
for m in item.iter_markers():
|
|
367
|
+
markers.append({"name": m.name, "args": [str(a) for a in m.args]})
|
|
368
|
+
stash["markers"] = markers
|
|
369
|
+
|
|
370
|
+
allure_links = []
|
|
371
|
+
seen_urls = set()
|
|
372
|
+
for mark in item.iter_markers(name="allure_link"):
|
|
373
|
+
if mark.kwargs.get("link_type") == "issue":
|
|
374
|
+
url = mark.args[0] if mark.args else ""
|
|
375
|
+
if url not in seen_urls:
|
|
376
|
+
issue = {"url": url}
|
|
377
|
+
if mark.kwargs.get("name"):
|
|
378
|
+
issue["name"] = str(mark.kwargs["name"])
|
|
379
|
+
allure_links.append(issue)
|
|
380
|
+
seen_urls.add(url)
|
|
381
|
+
|
|
382
|
+
for mark in item.iter_markers(name="issue"):
|
|
383
|
+
url = str(mark.args[0]) if mark.args else ""
|
|
384
|
+
if url not in seen_urls:
|
|
385
|
+
issue = {"url": url}
|
|
386
|
+
if mark.kwargs.get("name"):
|
|
387
|
+
issue["name"] = str(mark.kwargs["name"])
|
|
388
|
+
allure_links.append(issue)
|
|
389
|
+
seen_urls.add(url)
|
|
390
|
+
stash["allure_links"] = allure_links
|
|
391
|
+
|
|
392
|
+
try:
|
|
393
|
+
import allure_commons
|
|
394
|
+
|
|
395
|
+
listener = next(
|
|
396
|
+
(
|
|
397
|
+
p
|
|
398
|
+
for p in allure_commons.plugin_manager.get_plugins()
|
|
399
|
+
if type(p).__name__ == "AllureListener"
|
|
400
|
+
),
|
|
401
|
+
None,
|
|
402
|
+
)
|
|
403
|
+
if listener:
|
|
404
|
+
res = listener.allure_logger.get_test(None)
|
|
405
|
+
if res:
|
|
406
|
+
allure_labels = [
|
|
407
|
+
{
|
|
408
|
+
"name": str(getattr(label, "name", "")),
|
|
409
|
+
"value": str(getattr(label, "value", "")),
|
|
410
|
+
}
|
|
411
|
+
for label in getattr(res, "labels", [])
|
|
412
|
+
]
|
|
413
|
+
stash["allure_labels"] = allure_labels
|
|
414
|
+
if res.description:
|
|
415
|
+
stash["allure_description"] = str(res.description)
|
|
416
|
+
except Exception:
|
|
417
|
+
pass
|
|
418
|
+
|
|
419
|
+
except Exception:
|
|
420
|
+
pass
|