qastudio-pytest 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qastudio_pytest/__init__.py +13 -0
- qastudio_pytest/api_client.py +379 -0
- qastudio_pytest/models.py +259 -0
- qastudio_pytest/plugin.py +497 -0
- qastudio_pytest/utils.py +284 -0
- qastudio_pytest-1.0.4.dist-info/METADATA +310 -0
- qastudio_pytest-1.0.4.dist-info/RECORD +11 -0
- qastudio_pytest-1.0.4.dist-info/WHEEL +5 -0
- qastudio_pytest-1.0.4.dist-info/entry_points.txt +2 -0
- qastudio_pytest-1.0.4.dist-info/licenses/LICENSE +21 -0
- qastudio_pytest-1.0.4.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,497 @@
|
|
|
1
|
+
"""pytest plugin for QAStudio.dev integration."""
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
from typing import Any, List, Optional
|
|
5
|
+
import pytest
|
|
6
|
+
|
|
7
|
+
from .api_client import QAStudioAPIClient, APIError
|
|
8
|
+
from .models import ReporterConfig, TestResult, TestRunSummary
|
|
9
|
+
from .utils import (
|
|
10
|
+
batch_list,
|
|
11
|
+
format_duration,
|
|
12
|
+
generate_test_run_name,
|
|
13
|
+
validate_config,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class QAStudioPlugin:
    """pytest plugin for reporting test results to QAStudio.dev.

    Collects a TestResult for every executed test, submits the results to the
    QAStudio API in batches when the session finishes, optionally uploads file
    attachments, and finally completes the test run with an aggregate summary.
    """

    # Shared stash key (pytest >= 7) under which fixtures/tests can store a
    # List[str] of attachment paths for the plugin to pick up.  Created lazily
    # so the module still imports on pytest < 7, which has no stash API.
    _attachment_stash_key: Any = None

    def __init__(self, config: ReporterConfig):
        """Initialize the plugin with configuration.

        Args:
            config: Validated reporter configuration.
        """
        self.config = config
        self.api_client = QAStudioAPIClient(config)
        self.test_run_id: Optional[str] = None
        self.results: List[TestResult] = []
        self.start_time: float = 0
        self.session_duration: float = 0

        # Outcome counters, updated as test reports come in.
        self.total_tests = 0
        self.passed_tests = 0
        self.failed_tests = 0
        self.skipped_tests = 0
        self.error_tests = 0

    @classmethod
    def get_attachment_stash_key(cls) -> Any:
        """Return the shared StashKey used to hand attachment paths to the plugin.

        Bug fix: the previous implementation created a brand-new StashKey on
        every lookup.  Stash keys compare by identity, so a per-call key could
        never match anything a fixture had stored, and stashed attachments
        were silently ignored.  Requires pytest >= 7.
        """
        if cls._attachment_stash_key is None:
            from pytest import StashKey

            cls._attachment_stash_key = StashKey[List[str]]()
        return cls._attachment_stash_key

    @pytest.hookimpl(tryfirst=True)
    def pytest_sessionstart(self, session: Any) -> None:
        """
        Called before test session starts.

        Creates a new test run in QAStudio.dev, or reuses the configured
        existing test run ID.
        """
        self.start_time = time.time()
        self._log("QAStudio.dev Reporter initialized")
        self._log(f"Environment: {self.config.environment}")

        try:
            if self.config.create_test_run and not self.config.test_run_id:
                # Create new test run
                test_run_name = self.config.test_run_name or generate_test_run_name()
                response = self.api_client.create_test_run(
                    name=test_run_name,
                    description=self.config.test_run_description,
                )
                self.test_run_id = response.get("id")
                self._log(f"Created test run: {self.test_run_id}")
            else:
                # Use existing test run ID
                self.test_run_id = self.config.test_run_id
                self._log(f"Using existing test run: {self.test_run_id}")

        except APIError as e:
            self._handle_error("Failed to create test run", e)

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(self, item: Any, call: Any) -> Any:
        """
        Called to create a test report for each test phase.

        Records the 'call' phase (actual test execution) plus any setup-phase
        report that did not pass: skipped tests (skip markers) and setup
        errors never reach the 'call' phase, so without this they would never
        be counted or reported even though dedicated counters exist for them.
        """
        outcome = yield
        report = outcome.get_result()

        # 'call' is the real execution; a non-passed 'setup' report means the
        # test was skipped or errored before its body ever ran.
        record = report.when == "call" or (
            report.when == "setup" and report.outcome != "passed"
        )
        if record:
            try:
                result = TestResult.from_pytest_report(item, report, self.config)

                # Collect attachments if enabled
                if self.config.upload_attachments:
                    result.attachment_paths = self._collect_attachments(item)

                self.results.append(result)

                # Update counters
                self.total_tests += 1
                status = result.status.value
                if status == "passed":
                    self.passed_tests += 1
                elif status == "failed":
                    self.failed_tests += 1
                elif status == "skipped":
                    self.skipped_tests += 1
                elif status == "error":
                    self.error_tests += 1

                self._log(
                    f"Test completed: {item.name} - {result.status.value} ({result.duration:.2f}s)"
                )

            except Exception as e:
                # Never let reporting problems break the user's test run.
                self._log(f"Error processing test result: {e}")

    @pytest.hookimpl(trylast=True)
    def pytest_sessionfinish(self, session: Any) -> None:
        """
        Called after all tests have finished.

        Submits results to QAStudio.dev, completes the test run with a
        summary, and always closes the API client — including on the early
        "no test run ID" exit, which previously leaked the client.
        """
        self.session_duration = time.time() - self.start_time

        self._log("Test session completed")
        self._log(
            f"Total: {self.total_tests}, "
            f"Passed: {self.passed_tests}, "
            f"Failed: {self.failed_tests}, "
            f"Skipped: {self.skipped_tests}, "
            f"Errors: {self.error_tests}"
        )
        self._log(f"Duration: {format_duration(self.session_duration)}")

        try:
            if not self.test_run_id:
                self._log("No test run ID available, skipping result submission")
                return

            # Submit test results in batches
            self._submit_results()

            # Complete the test run
            summary = TestRunSummary(
                total=self.total_tests,
                passed=self.passed_tests,
                failed=self.failed_tests,
                skipped=self.skipped_tests,
                errors=self.error_tests,
                duration=self.session_duration,
            )

            self.api_client.complete_test_run(self.test_run_id, summary)
            self._log("Results submitted successfully to QAStudio.dev")

        except APIError as e:
            self._handle_error("Failed to submit results", e)
        finally:
            # Always release the HTTP client, even when nothing was submitted.
            self.api_client.close()

    def _submit_results(self) -> None:
        """Submit collected results in batches and upload their attachments."""
        if not self.results:
            self._log("No results to submit")
            return

        batches = batch_list(self.results, self.config.batch_size)
        self._log(f"Submitting {len(self.results)} results in {len(batches)} batch(es)")

        for i, batch in enumerate(batches, 1):
            try:
                self._log(f"Submitting batch {i}/{len(batches)} ({len(batch)} results)")
                response = self.api_client.submit_test_results(
                    self.test_run_id,  # type: ignore
                    batch,
                )

                # Store server-assigned result IDs so attachments can be
                # linked to the right result.
                if response and "results" in response:
                    for result, result_data in zip(batch, response["results"]):
                        result.result_id = result_data.get("id")

                # Upload attachments if enabled
                if self.config.upload_attachments:
                    for result in batch:
                        if result.result_id and result.attachment_paths:
                            self._upload_attachments(result)

            except APIError as e:
                self._handle_error(f"Failed to submit batch {i}", e)

    def _collect_attachments(self, item: Any) -> List[str]:
        """
        Collect attachment file paths for a test.

        Looks for attachments in:
        1. The item's stash (pytest >= 7), under the shared stash key
        2. A per-test subdirectory of the configured attachments directory

        Args:
            item: pytest test item

        Returns:
            List of file paths to attach
        """
        import os
        import glob

        attachments: List[str] = []

        # pytest >= 7: fixtures can stash paths under the shared key.  Using
        # the shared key (instead of a fresh one per call) is what makes the
        # lookup actually find stored values.
        if hasattr(item, "stash"):
            attachments.extend(item.stash.get(self.get_attachment_stash_key(), []))

        # Check custom attachments directory
        if self.config.attachments_dir:
            # Brackets from parametrized test IDs are not filesystem-friendly.
            test_name = item.name.replace("[", "_").replace("]", "_")
            attachment_dir = os.path.join(self.config.attachments_dir, test_name)

            if os.path.exists(attachment_dir):
                # Find common attachment types
                patterns = [
                    "*.png",
                    "*.jpg",
                    "*.jpeg",
                    "*.gif",
                    "*.mp4",
                    "*.webm",
                    "*.txt",
                    "*.log",
                    "*.zip",
                ]
                for pattern in patterns:
                    attachments.extend(glob.glob(os.path.join(attachment_dir, pattern)))

        return attachments

    def _upload_attachments(self, result: TestResult) -> None:
        """
        Upload attachments for a test result.

        Args:
            result: TestResult with attachment_paths and result_id
        """
        if not result.result_id or not result.attachment_paths:
            return

        import os

        self._log(f"Uploading {len(result.attachment_paths)} attachment(s) for {result.title}")

        # Map file extensions to QAStudio attachment types.
        type_by_ext = {
            ".png": "screenshot",
            ".jpg": "screenshot",
            ".jpeg": "screenshot",
            ".gif": "screenshot",
            ".mp4": "video",
            ".webm": "video",
            ".avi": "video",
            ".mov": "video",
            ".log": "log",
            ".txt": "log",
        }

        for file_path in result.attachment_paths:
            try:
                ext = os.path.splitext(file_path)[1].lower()
                filename = os.path.basename(file_path)
                attachment_type = type_by_ext.get(ext)
                # Zip archives are only treated as traces when the file name
                # actually mentions "trace" (e.g. Playwright trace.zip).
                if attachment_type is None and ext == ".zip" and "trace" in filename.lower():
                    attachment_type = "trace"

                self.api_client.upload_attachment(
                    test_result_id=result.result_id,
                    file_path=file_path,
                    attachment_type=attachment_type,
                )

                self._log(f"  Uploaded: {filename}")

            except APIError as e:
                self._handle_error(f"Failed to upload attachment {file_path}", e)
            except Exception as e:
                self._log(f"  Error uploading {file_path}: {e}")

    def _log(self, message: str) -> None:
        """Print a namespaced message if verbose mode is enabled."""
        if self.config.verbose:
            print(f"[QAStudio] {message}")

    def _handle_error(self, message: str, error: Exception) -> None:
        """Handle an API error: log it in silent mode, otherwise re-raise.

        Args:
            message: Human-readable context for the failure.
            error: The underlying exception (chained as the cause on raise).
        """
        error_msg = f"{message}: {str(error)}"

        if self.config.silent:
            # Best-effort reporting: never break the user's test session.
            print(f"[QAStudio] ERROR: {error_msg}")
        else:
            raise Exception(error_msg) from error
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
def pytest_addoption(parser: Any) -> None:
    """Add command line and ini options for QAStudio configuration.

    Bug fix: several flags were declared with ``action="store_true"`` AND
    ``default=True``, making them no-ops — there was no way to turn them off
    from the command line.  Paired ``--qastudio-no-*`` options (sharing the
    same dest with ``action="store_false"``) now provide the "off" switch
    while keeping every existing option and default intact.
    """
    group = parser.getgroup("qastudio", "QAStudio.dev integration")

    group.addoption(
        "--qastudio-api-url",
        action="store",
        dest="qastudio_api_url",
        help="QAStudio.dev API URL",
    )

    group.addoption(
        "--qastudio-api-key",
        action="store",
        dest="qastudio_api_key",
        help="QAStudio.dev API key",
    )

    group.addoption(
        "--qastudio-project-id",
        action="store",
        dest="qastudio_project_id",
        help="QAStudio.dev project ID",
    )

    group.addoption(
        "--qastudio-environment",
        action="store",
        dest="qastudio_environment",
        default="default",
        help="Test environment name (default: default)",
    )

    group.addoption(
        "--qastudio-test-run-id",
        action="store",
        dest="qastudio_test_run_id",
        help="Existing test run ID (skip creation)",
    )

    group.addoption(
        "--qastudio-test-run-name",
        action="store",
        dest="qastudio_test_run_name",
        help="Custom test run name",
    )

    group.addoption(
        "--qastudio-verbose",
        action="store_true",
        dest="qastudio_verbose",
        help="Enable verbose logging",
    )

    group.addoption(
        "--qastudio-silent",
        action="store_true",
        dest="qastudio_silent",
        default=True,
        help="Fail silently on API errors (default: True)",
    )
    # Off-switch: store_false on the same dest.
    group.addoption(
        "--qastudio-no-silent",
        action="store_false",
        dest="qastudio_silent",
        help="Raise on API errors instead of failing silently",
    )

    group.addoption(
        "--qastudio-include-error-snippet",
        action="store_true",
        dest="qastudio_include_error_snippet",
        default=True,
        help="Include error code snippet (default: True)",
    )
    group.addoption(
        "--qastudio-no-error-snippet",
        action="store_false",
        dest="qastudio_include_error_snippet",
        help="Do not include error code snippet",
    )

    group.addoption(
        "--qastudio-include-error-location",
        action="store_true",
        dest="qastudio_include_error_location",
        default=True,
        help="Include precise error location (default: True)",
    )
    group.addoption(
        "--qastudio-no-error-location",
        action="store_false",
        dest="qastudio_include_error_location",
        help="Do not include precise error location",
    )

    group.addoption(
        "--qastudio-include-test-steps",
        action="store_true",
        dest="qastudio_include_test_steps",
        default=True,
        help="Include test execution steps (default: True)",
    )
    group.addoption(
        "--qastudio-no-test-steps",
        action="store_false",
        dest="qastudio_include_test_steps",
        help="Do not include test execution steps",
    )

    group.addoption(
        "--qastudio-include-console-output",
        action="store_true",
        dest="qastudio_include_console_output",
        default=False,
        help="Include console output (default: False)",
    )

    group.addoption(
        "--qastudio-upload-attachments",
        action="store_true",
        dest="qastudio_upload_attachments",
        default=True,
        help="Upload test attachments (default: True)",
    )
    group.addoption(
        "--qastudio-no-upload-attachments",
        action="store_false",
        dest="qastudio_upload_attachments",
        help="Do not upload test attachments",
    )

    group.addoption(
        "--qastudio-attachments-dir",
        action="store",
        dest="qastudio_attachments_dir",
        help="Directory containing test attachments",
    )

    # Add pytest.ini configuration
    parser.addini("qastudio_api_url", "QAStudio.dev API URL")
    parser.addini("qastudio_api_key", "QAStudio.dev API key")
    parser.addini("qastudio_project_id", "QAStudio.dev project ID")
    parser.addini("qastudio_environment", "Test environment name")
    parser.addini("qastudio_test_run_id", "Existing test run ID")
    parser.addini("qastudio_test_run_name", "Custom test run name")
    parser.addini("qastudio_test_run_description", "Test run description")
    parser.addini("qastudio_create_test_run", "Create new test run (true/false)")
    parser.addini("qastudio_batch_size", "Results batch size")
    parser.addini("qastudio_silent", "Fail silently on API errors (true/false)")
    parser.addini("qastudio_verbose", "Enable verbose logging (true/false)")
    parser.addini("qastudio_max_retries", "Maximum API retry attempts")
    parser.addini("qastudio_timeout", "API request timeout in seconds")
    parser.addini(
        "qastudio_include_error_snippet",
        "Include error code snippet (true/false)",
    )
    parser.addini(
        "qastudio_include_error_location",
        "Include precise error location (true/false)",
    )
    parser.addini(
        "qastudio_include_test_steps",
        "Include test execution steps (true/false)",
    )
    parser.addini(
        "qastudio_include_console_output",
        "Include console output (true/false)",
    )
    parser.addini(
        "qastudio_upload_attachments",
        "Upload test attachments (true/false)",
    )
    parser.addini(
        "qastudio_attachments_dir",
        "Directory containing test attachments",
    )
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
def pytest_configure(config: Any) -> None:
    """
    Configure the pytest plugin.

    Registers a QAStudioPlugin instance when an API key is available via
    ini file, command line, or the QASTUDIO_API_KEY environment variable;
    without a key the plugin stays inactive.
    """
    import os

    # Minimum required config: an API key from any supported source.
    api_key = (
        config.getini("qastudio_api_key")
        or config.getoption("--qastudio-api-key", default=None)
        or os.environ.get("QASTUDIO_API_KEY")
    )
    if not api_key:
        # No API key provided, skip plugin registration
        return

    try:
        # Build and validate the reporter configuration.
        reporter_config = ReporterConfig.from_pytest_config(config)
        validate_config(reporter_config)

        manager = config.pluginmanager
        registered = manager.get_plugin("qastudio")
        if isinstance(registered, QAStudioPlugin):
            # A live plugin instance is already active — nothing to do.
            return
        if registered is not None:
            # The module itself was registered via entry point; swap it out
            # for a configured instance.
            manager.unregister(registered)

        manager.register(QAStudioPlugin(reporter_config), "qastudio")

        # Register custom markers so pytest --strict-markers accepts them.
        for marker in (
            "qastudio_id(id): Link test to QAStudio test case ID",
            "qastudio_priority(level): Set test priority (low/medium/high/critical)",
            "qastudio_tags(*tags): Add tags to test case",
        ):
            config.addinivalue_line("markers", marker)

    except Exception as e:
        print(f"[QAStudio] Failed to initialize plugin: {e}")
        raise
|