testtrain-pytest 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,420 @@
1
+ import os
2
+ import time
3
+ from datetime import datetime, timezone
4
+
5
+ import pytest
6
+ import requests
7
+
8
+ # Map pytest outcomes to API states
9
+ _STATE_MAP = {
10
+ "passed": "passed",
11
+ "failed": "failed",
12
+ "skipped": "skipped",
13
+ }
14
+
15
+
16
def pytest_addoption(parser):
    """Register Testtrain command-line flags and matching INI settings.

    Every setting is exposed twice: as a ``--testtrain-*`` CLI flag and as a
    ``testtrain_*`` INI option usable from pytest.ini / pyproject.toml.
    """
    group = parser.getgroup("testtrain", "Testtrain reporting")

    # Command line options, registered from a table to keep flag/help pairs together.
    cli_options = [
        ("--testtrain-url", "Platform base URL (default: https://testtrain.io)"),
        ("--testtrain-run-id", "UUID of an existing testrun"),
        ("--testtrain-auth-token", "Bearer auth token"),
        (
            "--testtrain-create-tag",
            "Create tags if they do not exist on the platform (default: true)",
        ),
    ]
    for flag, help_text in cli_options:
        group.addoption(flag, help=help_text)

    # INI settings (allows putting these in pytest.ini or pyproject.toml)
    ini_settings = [
        ("testtrain_url", "Platform base URL"),
        ("testtrain_run_id", "UUID of an existing testrun"),
        ("testtrain_auth_token", "Bearer auth token"),
        ("testtrain_create_tag", "Create tags if they do not exist on the platform"),
    ]
    for ini_name, help_text in ini_settings:
        parser.addini(ini_name, help=help_text)
38
+
39
+
40
# Module-level handle to the pytest Config object, assigned in pytest_configure.
# pytest_runtest_logreport has no config parameter, so it reads settings here.
_PLUGIN_CONFIG = None
41
+
42
+
43
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Initialize configuration.

    Resolves each setting with priority CLI > config file > environment
    variable > default, then stashes the normalized values (and per-test
    bookkeeping dicts) on the pytest config object for later hooks.
    """
    global _PLUGIN_CONFIG
    _PLUGIN_CONFIG = config

    def _resolve(flag, ini_name, env_var, default=None):
        # First truthy source wins, mirroring the documented priority order.
        return (
            config.getoption(flag)
            or config.getini(ini_name)
            or os.getenv(env_var)
            or default
        )

    url = _resolve(
        "--testtrain-url", "testtrain_url", "TESTTRAIN_URL", "https://testtrain.io"
    )
    run_id = _resolve("--testtrain-run-id", "testtrain_run_id", "TESTTRAIN_RUN_ID")
    auth_token = _resolve(
        "--testtrain-auth-token", "testtrain_auth_token", "TESTTRAIN_AUTH_TOKEN"
    )
    create_tag = _resolve(
        "--testtrain-create-tag", "testtrain_create_tag", "TESTTRAIN_CREATE_TAG", "true"
    )

    # Store normalized values on the config object for later hooks to access.
    config._testtrain_url = url.rstrip("/")
    config._testtrain_run_id = run_id
    config._testtrain_auth_token = auth_token
    config._testtrain_create_tag = str(create_tag).lower() == "true"
    # Reporting only activates when both mandatory settings are present.
    config._testtrain_enabled = bool(run_id and auth_token)

    # Per-test lifecycle tracking, keyed by nodeid.
    config._test_start_times = {}
    config._test_meta_stash = {}
    config._test_outcome_stash = {}
87
+
88
+
89
def pytest_sessionstart(session):
    """Inform user about reporting status at start of session.

    Prints a banner when reporting is enabled. When disabled, the warning is
    only shown if the user appears to have partially configured the plugin
    (at least one of the two mandatory settings is present, or an explicit
    URL flag was passed); a completely unconfigured run stays silent so the
    plugin is unobtrusive when unused.
    """
    config = session.config

    # pytest-xdist workers carry `workerinput`; only the controller process
    # should print, to avoid one banner per worker.
    if hasattr(config, "workerinput"):
        return

    if config._testtrain_enabled:
        print(f"\n🚀 Testtrain: reporting to {config._testtrain_url}")
        print(f" Testrun ID: {config._testtrain_run_id}\n")
    else:
        missing = []
        if not config._testtrain_run_id:
            missing.append("TESTTRAIN_RUN_ID")
        if not config._testtrain_auth_token:
            missing.append("TESTTRAIN_AUTH_TOKEN")

        # len(missing) < 2 means at least one mandatory value was supplied,
        # i.e. the user intended to enable reporting — worth warning about.
        if len(missing) < 2 or config.getoption("--testtrain-url"):
            print(f"\n⚠️ Testtrain: reporting disabled. Missing: {', '.join(missing)}")
108
+
109
+
110
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """Record the test's start timestamp so the final report can include it."""
    cfg = item.config
    if not cfg._testtrain_enabled:
        return
    cfg._test_start_times[item.nodeid] = _utc_now_iso()
115
+
116
+
117
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item, call):
    """
    Capture metadata and outcome across all phases and attach to teardown for reporting.

    Runs as a hookwrapper, so it fires once per phase (setup/call/teardown).
    It accumulates a single per-test outcome in ``config._test_outcome_stash``
    and, exactly once per test, bundles everything needed for delivery into
    ``report.user_properties`` under the key "testtrain_data" — consumed later
    by ``pytest_runtest_logreport``. Any internal error is printed and
    swallowed so this plugin can never break the user's test run.
    """
    try:
        outcome = yield
        report = outcome.get_result()

        if not getattr(item.config, "_testtrain_enabled", False):
            return

        # 1. Capture metadata for the current phase (markers, allure labels/links).
        _extract_metadata(item)

        # 2. Accumulate overall test state; default to "passed" until a phase
        #    says otherwise.
        if item.nodeid not in item.config._test_outcome_stash:
            item.config._test_outcome_stash[item.nodeid] = {
                "outcome": "passed",
                "longrepr": None,
                "reported": False,
            }

        stash = item.config._test_outcome_stash[item.nodeid]
        if report.failed:
            # Prefer body/setup failures over teardown failures
            if stash["outcome"] != "failed" or report.when != "teardown":
                stash["outcome"] = "failed"
                stash["longrepr"] = report.longreprtext
        elif report.skipped and stash["outcome"] == "passed":
            # A skip only counts if nothing has already failed.
            stash["outcome"] = "skipped"

        # 3. Attach data to the report for final delivery.
        # We report on teardown, OR on setup if skipped/failed (teardown won't run or we want early info).
        # We ensure only one report is ever sent via 'reported' flag.
        should_report = False
        if report.when == "teardown":
            should_report = True
        elif report.when == "setup" and (report.skipped or report.failed):
            should_report = True

        if should_report and not stash["reported"]:
            stash["reported"] = True
            # getattr guards keep this safe even if pytest_configure was skipped.
            current_meta = getattr(item.config, "_test_meta_stash", {}).get(
                item.nodeid, {}
            )
            allure_data = _get_allure_result_data()

            data = {
                "start_time": getattr(item.config, "_test_start_times", {}).get(
                    item.nodeid
                ),
                "finished_at": _utc_now_iso(),
                "meta": current_meta,
                "allure_title": allure_data.get("name"),
                "allure_steps": allure_data.get("steps"),
                "name": item.nodeid,
                "outcome": stash["outcome"],
                "longrepr": stash["longrepr"],
            }

            if not hasattr(report, "user_properties"):
                report.user_properties = []
            report.user_properties.append(("testtrain_data", data))
    except Exception as e:
        # Never let plugin bookkeeping fail the test run — print and move on.
        print(f"\n ⚠️ Testtrain internal error: {e}")
183
+
184
+
185
def pytest_runtest_logreport(report):
    """Send results to Testtrain after the phase completes.

    Consumes the "testtrain_data" bundle attached by pytest_runtest_makereport
    and POSTs a single test entry to the platform. Network errors and 5xx
    responses are retried up to ``max_retries`` times with a 10s pause; 4xx
    responses and exhausted retries abort the session via pytest.exit() so
    that no results are silently lost.
    """
    config = _PLUGIN_CONFIG
    if not config or not getattr(config, "_testtrain_enabled", False):
        return

    # Under pytest-xdist, only the controller process reports.
    if hasattr(config, "workerinput"):
        return

    # We report on 'teardown' for most tests.
    # However, for skipped tests, teardown might not run or we want to report early.
    # To ensure exactly one report, we report on teardown, OR on setup if it skipped/failed.
    is_teardown = report.when == "teardown"
    is_early_exit = report.when == "setup" and (report.skipped or report.failed)
    if not (is_teardown or is_early_exit):
        return

    # Extract bundled data from user_properties safely
    data = {}
    for prop in getattr(report, "user_properties", []):
        if isinstance(prop, tuple) and len(prop) == 2 and prop[0] == "testtrain_data":
            data = prop[1]
            break

    if not data:
        return

    finished_at = data.get("finished_at") or _utc_now_iso()
    started_at = data.get("start_time") or finished_at
    meta = data.get("meta") or {}
    computed_name = data.get("allure_title") or data.get("name") or report.nodeid
    description = meta.get("allure_description")
    # Unknown outcomes map to "failed" so they are never silently dropped.
    state = _STATE_MAP.get(data.get("outcome"), "failed")

    # Capture Allure tags
    tags = []
    for label in meta.get("allure_labels", []):
        if label.get("name") == "tag":
            tags.append(label.get("value"))

    test_entry = {
        "testrunId": config._testtrain_run_id,
        "name": computed_name,
        "nodeId": report.nodeid,
        "state": state,
        "startedAt": started_at,
        "finishedAt": finished_at,
        "description": description,
        "defects": meta.get("allure_links", []),
        "tags": tags,
        "create_tag_if_not_exists": config._testtrain_create_tag,
        "output": data.get("longrepr") or "",
    }

    if data.get("allure_steps"):
        test_entry["steps"] = data.get("allure_steps")

    max_retries = 3
    for attempt in range(max_retries + 1):
        try:
            resp = requests.post(
                f"{config._testtrain_url}/api/tests",
                json={"tests": [test_entry]},
                headers={
                    "Authorization": f"Bearer {config._testtrain_auth_token}",
                    "Content-Type": "application/json",
                },
                timeout=10,
            )
        except requests.RequestException as e:
            # BUGFIX: catch only network-level errors here. The previous broad
            # `except Exception` also caught the Exit raised by pytest.exit()
            # below (pytest's Exit subclasses Exception), turning a deliberate
            # 4xx abort into pointless retries and a misleading
            # "Connection error" message.
            if attempt < max_retries:
                time.sleep(10)
                continue
            pytest.exit(
                f"\n❌ Testtrain: Connection error during reporting after {max_retries + 1} attempts: {e}\n Aborting to ensure no results are lost."
            )

        if resp.ok:
            break

        # Build a readable error message, tolerating non-JSON error bodies —
        # previously resp.json() on an HTML/plain-text body raised inside the
        # handler and masked the real HTTP error.
        if resp.content:
            try:
                error_msg = resp.json().get("message", resp.text)
            except (ValueError, AttributeError):
                error_msg = resp.text
        else:
            error_msg = resp.text

        if 400 <= resp.status_code < 500:
            # Client errors are not retryable — abort immediately.
            pytest.exit(
                f"\n❌ Testtrain: Failed to send test result (Status {resp.status_code}).\n Error: {error_msg}\n Aborting to ensure no results are lost."
            )

        # Server error: back off and retry, then give up loudly.
        if attempt < max_retries:
            time.sleep(10)
            continue
        pytest.exit(
            f"\n❌ Testtrain: Failed to send test result after {max_retries + 1} attempts (Status {resp.status_code}).\n Error: {error_msg}\n Aborting to ensure no results are lost."
        )
280
+
281
+
282
+ def _utc_now_iso() -> str:
283
+ """Return current UTC time in ISO format with Z suffix."""
284
+ return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
285
+
286
+
287
+ def _get_allure_result_data() -> dict:
288
+ """Attempts to extract the current test's Allure data (name, steps)."""
289
+ res = {"name": None, "steps": None}
290
+ try:
291
+ import allure_commons
292
+
293
+ plugins = allure_commons.plugin_manager.get_plugins()
294
+ listener = next(
295
+ (
296
+ p
297
+ for p in plugins
298
+ if type(p).__name__ == "AllureListener"
299
+ or (
300
+ hasattr(p, "allure_logger") and hasattr(p.allure_logger, "get_test")
301
+ )
302
+ ),
303
+ None,
304
+ )
305
+ if not listener:
306
+ # Fallback for some environments where the listener might be hidden or named differently
307
+ for p in plugins:
308
+ if hasattr(p, "allure_logger"):
309
+ listener = p
310
+ break
311
+ if listener:
312
+ test_result = listener.allure_logger.get_test(None)
313
+ if test_result:
314
+ if test_result.name:
315
+ res["name"] = str(test_result.name)
316
+ if test_result.steps:
317
+ res["steps"] = [_map_allure_step(s) for s in test_result.steps]
318
+ except (ImportError, Exception):
319
+ pass
320
+ return res
321
+
322
+
323
def _map_allure_step(step) -> dict:
    """Recursively map Allure StepResult to Testtrain step format."""
    from allure_commons.model2 import Status

    # Fold the status message and trace (when present) into one output string.
    output = None
    if step.statusDetails:
        pieces = []
        if step.statusDetails.message:
            pieces.append(step.statusDetails.message)
        if step.statusDetails.trace:
            pieces.append(step.statusDetails.trace)
        output = "\n".join(pieces)

    step_name = str(step.name) if step.name else "step"
    has_failed = step.status in (Status.FAILED, Status.BROKEN)
    elapsed = int(step.stop - step.start) if step.stop and step.start else 0

    mapped = {
        "name": step_name,
        "is_failed": has_failed,
        "duration": elapsed,
    }
    if output:
        mapped["output"] = output
    if step.steps:
        # Child steps are mapped with the same shape, recursively.
        mapped["steps"] = [_map_allure_step(child) for child in step.steps]

    return mapped
348
+
349
+
350
def _extract_metadata(item):
    """Internal helper to pull Allure and Pytest markers.

    Populates ``item.config._test_meta_stash[item.nodeid]`` with:
      - "markers": every pytest marker on the item (name + stringified args)
      - "allure_links": de-duplicated issue links, collected from both
        ``allure_link`` markers with link_type="issue" and plain ``issue`` markers
      - "allure_labels" / "allure_description": read from the live Allure
        listener when the allure plugin is active
    Entirely best-effort: any failure leaves previously stashed values intact.
    """
    try:
        # Lazily create the stash so this is safe even if pytest_configure
        # did not run for some reason.
        if not hasattr(item.config, "_test_meta_stash"):
            item.config._test_meta_stash = {}
        if item.nodeid not in item.config._test_meta_stash:
            item.config._test_meta_stash[item.nodeid] = {
                "markers": [],
                "allure_labels": [],
                "allure_links": [],
                "allure_description": None,
            }

        stash = item.config._test_meta_stash[item.nodeid]

        markers = []
        for m in item.iter_markers():
            markers.append({"name": m.name, "args": [str(a) for a in m.args]})
        stash["markers"] = markers

        # Collect issue links, de-duplicating by URL across both marker styles.
        allure_links = []
        seen_urls = set()
        for mark in item.iter_markers(name="allure_link"):
            if mark.kwargs.get("link_type") == "issue":
                url = mark.args[0] if mark.args else ""
                if url not in seen_urls:
                    issue = {"url": url}
                    if mark.kwargs.get("name"):
                        issue["name"] = str(mark.kwargs["name"])
                    allure_links.append(issue)
                    seen_urls.add(url)

        for mark in item.iter_markers(name="issue"):
            url = str(mark.args[0]) if mark.args else ""
            if url not in seen_urls:
                issue = {"url": url}
                if mark.kwargs.get("name"):
                    issue["name"] = str(mark.kwargs["name"])
                allure_links.append(issue)
                seen_urls.add(url)
        stash["allure_links"] = allure_links

        # Best-effort pull of labels/description from the live Allure listener;
        # silently skipped when allure is not installed or inactive.
        try:
            import allure_commons

            listener = next(
                (
                    p
                    for p in allure_commons.plugin_manager.get_plugins()
                    if type(p).__name__ == "AllureListener"
                ),
                None,
            )
            if listener:
                res = listener.allure_logger.get_test(None)
                if res:
                    allure_labels = [
                        {
                            "name": str(getattr(label, "name", "")),
                            "value": str(getattr(label, "value", "")),
                        }
                        for label in getattr(res, "labels", [])
                    ]
                    stash["allure_labels"] = allure_labels
                    if res.description:
                        stash["allure_description"] = str(res.description)
        except Exception:
            pass

    except Exception:
        # Metadata extraction must never fail the surrounding hook.
        pass
@@ -0,0 +1,118 @@
1
+ Metadata-Version: 2.4
2
+ Name: testtrain-pytest
3
+ Version: 0.1.0
4
+ Summary: Testtrain Pytest Plugin — Real-time test result reporting.
5
+ License: GPL-3.0-only
6
+ Classifier: Framework :: Pytest
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
9
+ Classifier: Operating System :: OS Independent
10
+ Requires-Python: >=3.8
11
+ Description-Content-Type: text/markdown
12
+ Requires-Dist: pytest>=7.0.0
13
+ Requires-Dist: requests
14
+ Requires-Dist: allure-pytest
15
+ Requires-Dist: pytest-mock>=3.14.1
16
+
17
+ # testtrain-pytest
18
+
19
+ Testtrain Pytest Plugin — Real-time test result reporting.
20
+
21
+ Sends each test result to the Testtrain platform API immediately after the test finishes, enabling real-time visibility into your test runs.
22
+
23
+ ## Installation
24
+
25
+ You can install `testtrain-pytest` via pip:
26
+
27
+ ```bash
28
+ pip install testtrain-pytest
29
+ ```
30
+
31
+ Alternatively, you can install the development version from GitHub:
32
+
33
+ ```bash
34
+ pip install git+https://github.com/njxqlus/testtrain-pytest.git
35
+ ```
36
+
37
+ ## Configuration
38
+
39
+ The plugin supports the following settings:
40
+ - **Run ID** (Mandatory): The UUID of an existing test run in Testtrain.
41
+ - **Auth Token** (Mandatory): Your bearer authentication token.
42
+ - **URL** (Optional): The platform base URL. Defaults to `https://testtrain.io`.
43
+
44
+ You can configure these using environment variables, command-line arguments, or your `pytest.ini` file.
45
+
46
+ ### Option 1: Environment Variables (Recommended)
47
+
48
+ Set these in your shell before running pytest. This is standard for CI/CD environments.
49
+
50
+ ```bash
51
+ export TESTTRAIN_RUN_ID="your-run-uuid"
52
+ export TESTTRAIN_AUTH_TOKEN="your-token"
53
+ export TESTTRAIN_URL="https://testtrain.io" # Optional
54
+ pytest
55
+ ```
56
+
57
+ > [!TIP]
58
+ > If you want to use a `.env` file, you should install `pytest-dotenv` separately as this plugin does not load `.env` files automatically.
59
+
60
+ ### Option 2: Command Line Arguments
61
+
62
+ Pass them directly to the `pytest` command.
63
+
64
+ ```bash
65
+ pytest --testtrain-run-id=your-run-uuid --testtrain-auth-token=your-token --testtrain-url=https://custom.testtrain.io
66
+ ```
67
+
68
+ ### Option 3: Configuration File (`pytest.ini` or `pyproject.toml`)
69
+
70
+ Add them to your project's configuration file.
71
+
72
+ **pytest.ini**:
73
+ ```ini
74
+ [pytest]
75
+ testtrain_run_id = your-run-uuid
76
+ testtrain_auth_token = your-token
77
+ testtrain_url = https://testtrain.io
78
+ ```
79
+
80
+ **pyproject.toml**:
81
+ ```toml
82
+ [tool.pytest.ini_options]
83
+ testtrain_run_id = "your-run-uuid"
84
+ testtrain_auth_token = "your-token"
85
+ testtrain_url = "https://testtrain.io"
86
+ ```
87
+
88
+ ## Usage
89
+
90
+ Once configured, the plugin works automatically. If the required configuration is missing, the plugin will remain inactive and won't affect your tests.
91
+
92
+ ### Allure Integration
93
+
94
+ To capture Allure metadata (like custom titles and labels), you must run your tests with the Allure plugin enabled:
95
+
96
+ ```bash
97
+ pytest --alluredir=allure-results
98
+ ```
99
+
100
+ Without the `--alluredir` flag, Allure metadata will not be available to the Testtrain plugin during the test run.
101
+
102
+ ## Running Tests
103
+
104
+ To verify that the plugin correctly handles Allure fields and reports to Testtrain, you can run the provided test suite. These tests use `pytester` to simulate real test runs and verify that the plugin sends the correct data without making real network requests.
105
+
106
+ ### Prerequisites
107
+
108
+ Install the development dependencies and set up the environment:
109
+
110
+ ```bash
111
+ uv sync
112
+ ```
113
+
114
+ ### Run Allure Reporting Tests
115
+
116
+ ```bash
117
+ uv run pytest -v -p pytester tests/
118
+ ```
@@ -0,0 +1,6 @@
1
+ testtrain_pytest/__init__.py,sha256=m3b0ZeL25O464AiFnFLYBtYfLUiD32IEvDKaEk_-nPE,14657
2
+ testtrain_pytest-0.1.0.dist-info/METADATA,sha256=f72-PpecU5d7JOGPTbFgclv_rmhfwretcXDcudadDW4,3415
3
+ testtrain_pytest-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
4
+ testtrain_pytest-0.1.0.dist-info/entry_points.txt,sha256=VyaX74Lae2PPxzIzFUzDvMyGPfvHubX8R6-hN2GDQ80,40
5
+ testtrain_pytest-0.1.0.dist-info/top_level.txt,sha256=5bXtZ61UEz69dihzgXjzcELJchkCEZxZWvqGZqtLJVA,17
6
+ testtrain_pytest-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [pytest11]
2
+ testtrain = testtrain_pytest
@@ -0,0 +1 @@
1
+ testtrain_pytest