alita-sdk 0.3.379__py3-none-any.whl → 0.3.627__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +156 -0
- alita_sdk/cli/agent_loader.py +245 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3113 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/testcases/__init__.py +94 -0
- alita_sdk/cli/testcases/data_generation.py +119 -0
- alita_sdk/cli/testcases/discovery.py +96 -0
- alita_sdk/cli/testcases/executor.py +84 -0
- alita_sdk/cli/testcases/logger.py +85 -0
- alita_sdk/cli/testcases/parser.py +172 -0
- alita_sdk/cli/testcases/prompts.py +91 -0
- alita_sdk/cli/testcases/reporting.py +125 -0
- alita_sdk/cli/testcases/setup.py +108 -0
- alita_sdk/cli/testcases/test_runner.py +282 -0
- alita_sdk/cli/testcases/utils.py +39 -0
- alita_sdk/cli/testcases/validation.py +90 -0
- alita_sdk/cli/testcases/workflow.py +196 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +1 -1
- alita_sdk/configurations/ado.py +141 -20
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +91 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +329 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/artifact.py +3 -3
- alita_sdk/runtime/clients/client.py +388 -46
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +8 -21
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +157 -39
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
- alita_sdk/runtime/langchain/document_loaders/constants.py +40 -19
- alita_sdk/runtime/langchain/langraph_agent.py +405 -84
- alita_sdk/runtime/langchain/utils.py +106 -7
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +31 -0
- alita_sdk/runtime/toolkits/application.py +29 -10
- alita_sdk/runtime/toolkits/artifact.py +20 -11
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +783 -0
- alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +251 -6
- alita_sdk/runtime/toolkits/tools.py +356 -69
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +10 -3
- alita_sdk/runtime/tools/application.py +27 -6
- alita_sdk/runtime/tools/artifact.py +511 -28
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +67 -35
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +148 -46
- alita_sdk/runtime/tools/llm.py +1003 -128
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +8 -5
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -4
- alita_sdk/runtime/tools/sandbox.py +65 -48
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +9 -3
- alita_sdk/runtime/tools/vectorstore_base.py +70 -14
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/serialization.py +155 -0
- alita_sdk/runtime/utils/streamlit.py +40 -13
- alita_sdk/runtime/utils/toolkit_utils.py +30 -9
- alita_sdk/runtime/utils/utils.py +36 -0
- alita_sdk/tools/__init__.py +134 -35
- alita_sdk/tools/ado/repos/__init__.py +51 -32
- alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
- alita_sdk/tools/ado/test_plan/__init__.py +25 -9
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
- alita_sdk/tools/ado/utils.py +1 -18
- alita_sdk/tools/ado/wiki/__init__.py +25 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
- alita_sdk/tools/ado/work_item/__init__.py +26 -13
- alita_sdk/tools/ado/work_item/ado_wrapper.py +73 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +11 -8
- alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +271 -84
- alita_sdk/tools/bitbucket/__init__.py +17 -11
- alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
- alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
- alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +10 -7
- alita_sdk/tools/cloud/azure/__init__.py +10 -7
- alita_sdk/tools/cloud/gcp/__init__.py +10 -7
- alita_sdk/tools/cloud/k8s/__init__.py +10 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +11 -8
- alita_sdk/tools/code_indexer_toolkit.py +82 -22
- alita_sdk/tools/confluence/__init__.py +22 -16
- alita_sdk/tools/confluence/api_wrapper.py +107 -30
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +493 -30
- alita_sdk/tools/figma/__init__.py +58 -11
- alita_sdk/tools/figma/api_wrapper.py +1235 -143
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +14 -15
- alita_sdk/tools/github/github_client.py +224 -100
- alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
- alita_sdk/tools/github/schemas.py +14 -5
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/github/tool_prompts.py +9 -22
- alita_sdk/tools/gitlab/__init__.py +16 -11
- alita_sdk/tools/gitlab/api_wrapper.py +218 -48
- alita_sdk/tools/gitlab_org/__init__.py +10 -9
- alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
- alita_sdk/tools/google/bigquery/__init__.py +13 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +11 -8
- alita_sdk/tools/google_places/api_wrapper.py +1 -1
- alita_sdk/tools/jira/__init__.py +17 -10
- alita_sdk/tools/jira/api_wrapper.py +92 -41
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +12 -4
- alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1368 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +38 -25
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +10 -9
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
- alita_sdk/tools/qtest/__init__.py +31 -11
- alita_sdk/tools/qtest/api_wrapper.py +2135 -86
- alita_sdk/tools/rally/__init__.py +10 -9
- alita_sdk/tools/rally/api_wrapper.py +1 -1
- alita_sdk/tools/report_portal/__init__.py +12 -8
- alita_sdk/tools/salesforce/__init__.py +10 -8
- alita_sdk/tools/servicenow/__init__.py +17 -15
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +10 -7
- alita_sdk/tools/sharepoint/api_wrapper.py +129 -38
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +10 -7
- alita_sdk/tools/slack/api_wrapper.py +2 -2
- alita_sdk/tools/sql/__init__.py +12 -9
- alita_sdk/tools/testio/__init__.py +10 -7
- alita_sdk/tools/testrail/__init__.py +11 -10
- alita_sdk/tools/testrail/api_wrapper.py +1 -1
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/content_parser.py +103 -18
- alita_sdk/tools/utils/text_operations.py +410 -0
- alita_sdk/tools/utils/tool_prompts.py +79 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +30 -13
- alita_sdk/tools/xray/__init__.py +13 -9
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +10 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +11 -7
- alita_sdk/tools/zephyr_essential/__init__.py +10 -7
- alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
- alita_sdk/tools/zephyr_essential/client.py +2 -2
- alita_sdk/tools/zephyr_scale/__init__.py +11 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
- alita_sdk/tools/zephyr_squad/__init__.py +10 -7
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +154 -8
- alita_sdk-0.3.627.dist-info/RECORD +468 -0
- alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.379.dist-info/RECORD +0 -360
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
alita_sdk/tools/qtest/api_wrapper.py

@@ -3,17 +3,21 @@ import json
 import logging
 import re
 from traceback import format_exc
-from typing import Any, Optional
+from typing import Any, Optional, Generator, Literal

+import requests
 import swagger_client
+from langchain_core.documents import Document
 from langchain_core.tools import ToolException
 from pydantic import Field, PrivateAttr, model_validator, create_model, SecretStr
 from sklearn.feature_extraction.text import strip_tags
-from swagger_client import TestCaseApi, SearchApi, PropertyResource, ModuleApi
+from swagger_client import TestCaseApi, SearchApi, PropertyResource, ModuleApi, ProjectApi, FieldApi
 from swagger_client.rest import ApiException

-from ..
-from ..utils.
+from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ..utils.available_tools_decorator import extend_with_parent_available_tools
+from ..utils.content_parser import parse_file_content, file_extension_by_chunker
+from ...runtime.utils.utils import IndexerKeywords

 QTEST_ID = "QTest Id"

@@ -26,21 +30,34 @@ If generated data was used, put appropriate note to the test case description fi
 ### CRITERIA
 1. The structure should be as in EXAMPLE.
 2. Case and spaces for field names should be exactly the same as in NOTES.
-3. Extra fields are allowed.
+3. Extra fields are allowed and will be mapped to project's custom fields if they exist.
 4. "{QTEST_ID}" is required to update, change or replace values in test case.
 5. Do not provide "Id" and "{QTEST_ID}" to create test case.
-6
+6. "Steps" is a list of test step objects with fields "Test Step Number", "Test Step Description", "Test Step Expected Result".
+7. For updates, provide ONLY the fields you want to change. Omitted fields will remain unchanged.

 ### NOTES
-Id: Unique identifier (e.g., TC-123).
-QTest id: Unique identifier (e.g., 4626964).
-Name: Brief title.
-Description: Short purpose.
-Type: 'Manual'
-Status:
-Priority:
-Test Type:
-Precondition:
+Id: Unique identifier (e.g., TC-123). Read-only.
+QTest id: Unique identifier (e.g., 4626964). Required for updates.
+Name: Brief title of the test case.
+Description: Short description of the test purpose.
+Type: Type of test (e.g., 'Manual', 'Automation - UTAF').
+Status: Current status (e.g., 'New', 'In Progress', 'Completed').
+Priority: Priority level (e.g., 'High', 'Medium', 'Low').
+Test Type: Category of test (e.g., 'Functional', 'Regression', 'Smoke').
+Precondition: Prerequisites for the test, formatted as: <Step1> <Step2> Leave blank if none.
+Steps: Array of test steps with Description and Expected Result.
+
+**Multi-select fields**: For fields that allow multiple values (e.g., Team, Assigned To etc.), you can provide:
+- Single value: "Team": "Epam"
+- Multiple values: "Team": ["Epam", "EJ"]
+
+**Clearing/Unsetting fields**: To clear a field value (unassign, set to empty/blank):
+- Use `null` in JSON: "Priority": null
+- Works for multi-select fields, user assignments, etc. (Note: single-select dropdowns have API limitations)
+- Example: {{"QTest Id": "4626964", "Assigned To": null, "Review status": null}}
+
+**For Updates**: Include only the fields you want to modify. The system will validate property values against project configuration.

 ### EXAMPLE
 {{
@@ -53,6 +70,7 @@ Precondition: List prerequisites in one cell, formatted as: <Step1> <Step2> Leav
 "Priority": "",
 "Test Type": "Functional",
 "Precondition": "<ONLY provided by user precondition>",
+"Team": ["Epam", "EJ"],
 "Steps": [
 {{ "Test Step Number": 1, "Test Step Description": "Navigate to url", "Test Step Expected Result": "Page content is loaded"}},
 {{ "Test Step Number": 2, "Test Step Description": "Click 'Login'", "Test Step Expected Result": "Form is expanded"}},
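For illustration only, a minimal payload sketch that follows the update conventions introduced above (partial update, multi-select values as a list, null to clear). The field names "Team" and "Assigned To" are taken from the examples in the prompt text and exist only if the target project defines them:

```python
# Illustrative update payload (not part of the package diff); field names must
# exist in the target project's configuration.
update_payload = {
    "QTest Id": "4626964",     # required to update an existing test case
    "Priority": "High",        # single-select: exact value name from the project
    "Team": ["Epam", "EJ"],    # multi-select: list of values
    "Assigned To": None,       # None/null clears the field (multi-select/user fields)
    # omitted fields (Name, Steps, ...) remain unchanged on update
}
```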
@@ -63,6 +81,73 @@ Precondition: List prerequisites in one cell, formatted as: <Step1> <Step2> Leav
 Json object
 """

+# DQL Syntax Documentation - reusable across all DQL-based search tools
+DQL_SYNTAX_DOCS = """
+CRITICAL: USE SINGLE QUOTES ONLY - DQL does not support double quotes!
+- ✓ CORRECT: Description ~ 'Forgot Password'
+- ✗ WRONG: Description ~ "Forgot Password"
+
+LIMITATION - CANNOT SEARCH BY LINKED OBJECTS:
+- ✗ Searching by linked requirements, test cases, defects is NOT supported
+- Use dedicated find_*_by_*_id tools for relationship queries
+
+SEARCHABLE FIELDS:
+- Direct fields: Id, Name, Description, Status, Type, Priority, etc.
+- Custom fields: Use exact field name from project configuration
+- Date fields: MUST use ISO DateTime format (e.g., '2024-01-01T00:00:00.000Z')
+
+ENTITY-SPECIFIC NOTES:
+- test-logs: Only support 'Execution Start Date' and 'Execution End Date' queries
+- builds/test-cycles: Also support 'Created Date' and 'Last Modified Date'
+- defects: Can use 'Affected Release/Build' and 'Fixed Release/Build'
+
+SYNTAX RULES:
+1. ALL string values MUST use single quotes (never double quotes)
+2. Field names with spaces MUST be in single quotes: 'Created Date' > '2024-01-01T00:00:00.000Z'
+3. Use ~ for 'contains', !~ for 'not contains': Description ~ 'login'
+4. Use 'is not empty' for non-empty check: Name is 'not empty'
+5. Operators: =, !=, <, >, <=, >=, in, ~, !~
+
+EXAMPLES:
+- Id = 'TC-123' or Id = 'RQ-15' or Id = 'DF-100' (depending on entity type)
+- Description ~ 'Forgot Password'
+- Status = 'New' and Priority = 'High'
+- Name ~ 'login'
+- 'Created Date' > '2024-01-01T00:00:00.000Z'
+- 'Execution Start Date' > '2024-01-01T00:00:00.000Z' (for test-logs)
+"""
+
+# Supported object types for DQL search (based on QTest Search API documentation)
+# Note: Prefixes are configurable per-project but these are standard defaults
+# Modules (MD) are NOT searchable via DQL - use get_modules tool instead
+# Test-logs have NO prefix - they are internal records accessed via test runs
+
+# Entity types with ID prefixes (can be looked up by ID like TC-123)
+QTEST_OBJECT_TYPES = {
+    # Core test management entities
+    'test-cases': {'prefix': 'TC', 'name': 'Test Case', 'description': 'Test case definitions with steps'},
+    'test-runs': {'prefix': 'TR', 'name': 'Test Run', 'description': 'Execution instances of test cases'},
+    'defects': {'prefix': 'DF', 'name': 'Defect', 'description': 'Bugs/issues found during testing'},
+    'requirements': {'prefix': 'RQ', 'name': 'Requirement', 'description': 'Requirements to be tested'},
+
+    # Test organization entities
+    'test-suites': {'prefix': 'TS', 'name': 'Test Suite', 'description': 'Collections of test runs'},
+    'test-cycles': {'prefix': 'CL', 'name': 'Test Cycle', 'description': 'Test execution cycles'},
+
+    # Release management entities
+    'releases': {'prefix': 'RL', 'name': 'Release', 'description': 'Software releases'},
+    'builds': {'prefix': 'BL', 'name': 'Build', 'description': 'Builds within releases'},
+}
+
+# Entity types searchable via DQL but without ID prefixes
+# These can be searched by specific fields only, not by ID
+QTEST_SEARCHABLE_ONLY_TYPES = {
+    'test-logs': {
+        'name': 'Test Log',
+        'description': "Execution logs. Only date queries supported (Execution Start Date, Execution End Date). For specific log details, use test run's 'Latest Test Log' field."
+    },
+}
+
 logger = logging.getLogger(__name__)

 QtestDataQuerySearch = create_model(
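As an aside, a hypothetical helper (not part of the diff) showing how the prefix table added above can resolve an entity ID such as 'RQ-15' to its DQL object type; it assumes the standard default prefixes noted in the comments:

```python
# Hypothetical helper built on QTEST_OBJECT_TYPES above; prefixes are the standard
# defaults and, as the comments note, may be reconfigured per project.
def resolve_object_type(entity_id: str) -> str:
    prefix = entity_id.split('-', 1)[0].upper()
    for object_type, meta in QTEST_OBJECT_TYPES.items():
        if meta['prefix'] == prefix:
            return object_type
    raise ValueError(f"No DQL-searchable entity type uses prefix '{prefix}' "
                     f"(test-logs and modules have no ID prefix)")

# resolve_object_type('RQ-15')  -> 'requirements'
# resolve_object_type('DF-100') -> 'defects'
```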
@@ -82,8 +167,16 @@ QtestCreateTestCase = create_model(

 QtestLinkTestCaseToJiraRequirement = create_model(
     "QtestLinkTestCaseToJiraRequirement",
-    requirement_external_id=(str, Field("Qtest requirement external id which represent jira issue id linked to Qtest as a requirement e.g. SITEPOD-4038")),
-    json_list_of_test_case_ids=(str, Field("""List of the test case ids to be linked to particular requirement.
+    requirement_external_id=(str, Field(description="Qtest requirement external id which represent jira issue id linked to Qtest as a requirement e.g. SITEPOD-4038")),
+    json_list_of_test_case_ids=(str, Field(description="""List of the test case ids to be linked to particular requirement.
+    Create a list of the test case ids in the following format '["TC-123", "TC-234", "TC-456"]' which represents json array as a string.
+    It should be capable to be extracted directly by python json.loads method."""))
+)
+
+QtestLinkTestCaseToQtestRequirement = create_model(
+    "QtestLinkTestCaseToQtestRequirement",
+    requirement_id=(str, Field(description="QTest internal requirement ID in format RQ-123")),
+    json_list_of_test_case_ids=(str, Field(description="""List of the test case ids to be linked to particular requirement.
     Create a list of the test case ids in the following format '["TC-123", "TC-234", "TC-456"]' which represents json array as a string.
     It should be capable to be extracted directly by python json.loads method."""))
 )
@@ -118,7 +211,52 @@ GetModules = create_model(

 )

-class QtestApiWrapper(BaseToolApiWrapper):
+GetAllTestCasesFieldsForProject = create_model(
+    "GetAllTestCasesFieldsForProject",
+    force_refresh=(Optional[bool],
+                   Field(description="Set to true to reload field definitions from API if project configuration has changed (new fields added, dropdown values modified). Default: false (uses cached data).",
+                         default=False)),
+)
+
+FindTestCasesByRequirementId = create_model(
+    "FindTestCasesByRequirementId",
+    requirement_id=(str, Field(description="QTest requirement ID in format RQ-123. This will find all test cases linked to this requirement.")),
+    include_details=(Optional[bool], Field(description="If true, returns full test case details. If false (default), returns Id, QTest Id, Name, and Description fields.", default=False)),
+)
+
+FindRequirementsByTestCaseId = create_model(
+    "FindRequirementsByTestCaseId",
+    test_case_id=(str, Field(description="Test case ID in format TC-123. This will find all requirements linked to this test case.")),
+)
+
+FindTestRunsByTestCaseId = create_model(
+    "FindTestRunsByTestCaseId",
+    test_case_id=(str, Field(description="Test case ID in format TC-123. This will find all test runs associated with this test case.")),
+)
+
+FindDefectsByTestRunId = create_model(
+    "FindDefectsByTestRunId",
+    test_run_id=(str, Field(description="Test run ID in format TR-123. This will find all defects associated with this test run.")),
+)
+
+# Generic search model for any entity type
+GenericDqlSearch = create_model(
+    "GenericDqlSearch",
+    object_type=(str, Field(description="Entity type to search: 'test-cases', 'test-runs', 'defects', 'requirements', 'test-suites', 'test-cycles', 'test-logs', 'releases', or 'builds'. Note: test-logs only support date queries; modules are NOT searchable - use get_modules tool.")),
+    dql=(str, Field(description="QTest Data Query Language (DQL) query string")),
+)
+
+# Generic find by ID model - only for entities with ID prefixes (NOT test-logs)
+FindEntityById = create_model(
+    "FindEntityById",
+    entity_id=(str, Field(description="Entity ID with prefix: TC-123 (test case), RQ-15 (requirement), DF-100 (defect), TR-39 (test run), TS-5 (test suite), CL-3 (test cycle), RL-1 (release), or BL-2 (build). Note: test-logs and modules do NOT have ID prefixes.")),
+)
+
+NoInput = create_model(
+    "NoInput"
+)
+
+class QtestApiWrapper(NonCodeIndexerToolkit):
     base_url: str
     qtest_project_id: int
     qtest_api_token: SecretStr
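For orientation, example argument sets an agent might pass against the schemas above; the values are illustrative and follow the DQL rules documented earlier in this diff:

```python
# Illustrative tool arguments matching the pydantic schemas above (values made up).
generic_dql_search_args = {
    "object_type": "requirements",
    "dql": "Name ~ 'login' and Status = 'New'",  # single quotes only, per DQL_SYNTAX_DOCS
}

find_entity_by_id_args = {"entity_id": "DF-100"}  # prefixed IDs only (not test-logs/modules)

find_test_cases_by_requirement_args = {
    "requirement_id": "RQ-123",
    "include_details": False,  # default: return Id, QTest Id, Name, Description
}
```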
@@ -126,18 +264,20 @@ class QtestApiWrapper(BaseToolApiWrapper):
     page: int = 1
     no_of_tests_shown_in_dql_search: int = 10
     _client: Any = PrivateAttr()
-
-
-
-
-
-        if 'project_id' in values:
-            values['qtest_project_id'] = values.pop('project_id')
-        return values
+    _field_definitions_cache: Optional[dict] = PrivateAttr(default=None)
+    _modules_cache: Optional[list] = PrivateAttr(default=None)
+    _chunking_tool: Optional[str] = PrivateAttr(default=None)
+    _extract_images: bool = PrivateAttr(default=False)
+    _image_prompt: Optional[str] = PrivateAttr(default=None)

     @model_validator(mode='before')
     @classmethod
     def validate_toolkit(cls, values):
+        # Handle project_id alias
+        # There is no such alias and this alias is breaking the scheduled indexing setting to qtest project id the value of the elitea project id.
+        # if 'project_id' in values:
+        #     values['qtest_project_id'] = values.pop('project_id')
+
         try:
             import swagger_client  # noqa: F401
         except ImportError:
@@ -146,15 +286,22 @@ class QtestApiWrapper(BaseToolApiWrapper):
                 "`pip install git+https://github.com/Roman-Mitusov/qtest-api.git`"
             )

-
-
-
+        cls.llm = values.get('llm')
+        # Call parent validator to set up embeddings and vectorstore params
+        return super().validate_toolkit(values)
+
+    @model_validator(mode='after')
+    def setup_qtest_client(self):
+        """Initialize QTest swagger client after model validation."""
+        import swagger_client
+
+        if self.qtest_api_token:
             configuration = swagger_client.Configuration()
-            configuration.host =
-            configuration.api_key['Authorization'] =
+            configuration.host = self.base_url
+            configuration.api_key['Authorization'] = self.qtest_api_token.get_secret_value()
             configuration.api_key_prefix['Authorization'] = 'Bearer'
-
-            return
+            self._client = swagger_client.ApiClient(configuration)
+        return self

     def __instantiate_test_api_instance(self) -> TestCaseApi:
         # Instantiate the TestCaseApi instance according to the qtest api documentation and swagger client
@@ -163,33 +310,263 @@ class QtestApiWrapper(BaseToolApiWrapper):
     def __instantiate_module_api_instance(self) -> ModuleApi:
         return swagger_client.ModuleApi(self._client)

+    def __instantiate_fields_api_instance(self) -> FieldApi:
+        return swagger_client.FieldApi(self._client)
+
+    def __get_field_definitions_cached(self) -> dict:
+        """Get field definitions with session-level caching.
+
+        Field definitions are cached for the lifetime of this wrapper instance.
+        If project field configuration changes, call refresh_field_definitions_cache()
+        to reload the definitions.
+
+        Returns:
+            dict: Field definitions mapping
+        """
+        if self._field_definitions_cache is None:
+            self._field_definitions_cache = self.__get_project_field_definitions()
+        return self._field_definitions_cache
+
+    def refresh_field_definitions_cache(self) -> dict:
+        """Manually refresh the field definitions cache.
+
+        Call this method if project field configuration has been updated
+        (new fields added, dropdown values changed, etc.) and you need to
+        reload the definitions within the same agent session.
+
+        Returns:
+            dict: Freshly loaded field definitions
+        """
+        self._field_definitions_cache = None
+        return self.__get_field_definitions_cached()
+
+    def __map_properties_to_api_format(self, test_case_data: dict, field_definitions: dict,
+                                       base_properties: list = None) -> list:
+        """
+        Convert user-friendly property names/values to QTest API PropertyResource format.
+
+        Args:
+            test_case_data: Dict with property names as keys (e.g., {"Status": "New", "Priority": "High"})
+            field_definitions: Output from __get_project_field_definitions()
+            base_properties: Existing properties from a test case (for updates, optional)
+
+        Returns:
+            list[PropertyResource]: Properties ready for API submission
+
+        Raises:
+            ValueError: If any field names are unknown or values are invalid (shows ALL errors)
+        """
+        # Start with base properties or empty dict
+        props_dict = {}
+        if base_properties:
+            for prop in base_properties:
+                field_name = prop.get('field_name')
+                if field_name:
+                    props_dict[field_name] = {
+                        'field_id': prop['field_id'],
+                        'field_name': field_name,
+                        'field_value': prop['field_value'],
+                        'field_value_name': prop.get('field_value_name')
+                    }
+
+        # Collect ALL validation errors before raising
+        validation_errors = []
+
+        # Map incoming properties from test_case_data
+        for field_name, field_value in test_case_data.items():
+            # Skip non-property fields (these are handled separately)
+            if field_name in ['Name', 'Description', 'Precondition', 'Steps', 'Id', QTEST_ID]:
+                continue
+
+            # Skip empty string values (don't update these fields)
+            if field_value == '':
+                continue
+
+            # Handle None value - this means "clear/unset this field"
+            if field_value is None:
+                # Validate field exists before attempting to clear
+                if field_name not in field_definitions:
+                    validation_errors.append(
+                        f"❌ Unknown field '{field_name}' - not defined in project configuration"
+                    )
+                    continue
+
+                field_def = field_definitions[field_name]
+                field_id = field_def['field_id']
+                is_multiple = field_def.get('multiple', False)
+                has_allowed_values = bool(field_def.get('values'))  # True = dropdown, False = text
+
+                if not has_allowed_values:
+                    # TEXT FIELD: can clear with empty string
+                    props_dict[field_name] = {
+                        'field_id': field_id,
+                        'field_name': field_name,
+                        'field_value': '',
+                        'field_value_name': ''
+                    }
+                elif is_multiple:
+                    # MULTI-SELECT: can clear using empty array "[]"
+                    props_dict[field_name] = {
+                        'field_id': field_id,
+                        'field_name': field_name,
+                        'field_value': "[]",
+                        'field_value_name': None
+                    }
+                else:
+                    # SINGLE-SELECT: QTest API limitation - cannot clear to empty
+                    # Note: Users CAN clear these fields from UI, but API doesn't expose this capability
+                    validation_errors.append(
+                        f"⚠️ Cannot clear single-select field '{field_name}' - this is a QTest API limitation "
+                        f"(clearing is possible from UI but not exposed via API). "
+                        f"Please select an alternative value instead. "
+                        f"Available values: {', '.join(field_def.get('values', {}).keys()) or 'none'}"
+                    )
+                continue
+
+            # Validate field exists in project - STRICT validation
+            if field_name not in field_definitions:
+                validation_errors.append(
+                    f"❌ Unknown field '{field_name}' - not defined in project configuration"
+                )
+                continue  # Skip to next field, keep collecting errors
+
+            field_def = field_definitions[field_name]
+            field_id = field_def['field_id']
+            data_type = field_def.get('data_type')
+            is_multiple = field_def.get('multiple', False)
+
+            # Normalize field_value to list for consistent processing
+            # Multi-select fields can receive: "value", ["value1", "value2"], or ["value1"]
+            # Single-select fields: "value" only
+            if is_multiple:
+                # Convert to list if not already
+                values_to_process = field_value if isinstance(field_value, list) else [field_value]
+            else:
+                # Single-select: keep as single value
+                values_to_process = [field_value]
+
+            # Validate value(s) for dropdown fields (only if field has allowed values)
+            if field_def['values']:
+                # Field has allowed values (dropdown/combobox/user fields) - validate strictly
+                value_ids = []
+                value_names = []
+
+                for single_value in values_to_process:
+                    if single_value not in field_def['values']:
+                        available = ", ".join(sorted(field_def['values'].keys()))
+                        validation_errors.append(
+                            f"❌ Invalid value '{single_value}' for field '{field_name}'. "
+                            f"Allowed values: {available}"
+                        )
+                        continue  # Skip this value, but continue validating others
+
+                    # Valid value - add to lists
+                    value_ids.append(field_def['values'][single_value])
+                    value_names.append(single_value)
+
+                # If all values were invalid, skip this field
+                if not value_ids:
+                    continue
+
+                # Format based on field type and value count
+                if is_multiple and len(value_ids) == 1:
+                    # Single value in multi-select field: bracketed string "[419950]"
+                    # This includes single user assignment: "[626983]"
+                    field_value_id = f"[{value_ids[0]}]"
+                    field_value_name = f"[{value_names[0]}]" if data_type == 5 else value_names[0]
+                elif is_multiple:
+                    # Multiple values in multi-select: bracketed string with comma-separated IDs
+                    ids_str = ",".join(str(vid) for vid in value_ids)
+                    field_value_id = f"[{ids_str}]"
+                    field_value_name = ", ".join(value_names)
+                else:
+                    # Regular single-select dropdown: plain ID
+                    field_value_id = value_ids[0]
+                    field_value_name = value_names[0]
+            else:
+                # Text field or field without restricted values - use value directly
+                # No validation needed - users can write anything (by design)
+                field_value_id = field_value
+                field_value_name = field_value if isinstance(field_value, str) else None
+
+            # Update or add property (only if no errors for this field)
+            props_dict[field_name] = {
+                'field_id': field_id,
+                'field_name': field_name,
+                'field_value': field_value_id,
+                'field_value_name': field_value_name
+            }
+
+        # If ANY validation errors found, raise comprehensive error with all issues
+        if validation_errors:
+            available_fields = ", ".join(sorted(field_definitions.keys()))
+            error_msg = (
+                f"Found {len(validation_errors)} validation error(s) in test case properties:\n\n" +
+                "\n".join(validation_errors) +
+                f"\n\n📋 Available fields for this project: {available_fields}\n\n"
+                f"💡 Tip: Use 'get_all_test_cases_fields_for_project' tool to see all fields with their allowed values."
+            )
+            raise ValueError(error_msg)
+
+        # Convert to PropertyResource list, filtering out special fields
+        result = []
+        for field_name, prop_data in props_dict.items():
+            if field_name in ['Shared', 'Projects Shared to']:
+                continue
+            result.append(PropertyResource(
+                field_id=prop_data['field_id'],
+                field_name=prop_data['field_name'],
+                field_value=prop_data['field_value'],
+                field_value_name=prop_data.get('field_value_name')
+            ))
+
+        return result
+
     def __build_body_for_create_test_case(self, test_cases_data: list[dict],
                                           folder_to_place_test_cases_to: str = '') -> list:
-
+        # Get field definitions for property mapping (cached for session)
+        field_definitions = self.__get_field_definitions_cached()
+
         modules = self._parse_modules()
         parent_id = ''.join(str(module['module_id']) for module in modules if
                             folder_to_place_test_cases_to and module['full_module_name'] == folder_to_place_test_cases_to)
-
-        for prop in initial_project_properties:
-            if prop.get('field_name', '') == 'Shared' or prop.get('field_name', '') == 'Projects Shared to':
-                continue
-            props.append(PropertyResource(field_id=prop['field_id'], field_name=prop['field_name'],
-                                          field_value_name=prop.get('field_value_name', None),
-                                          field_value=prop['field_value']))
+
         bodies = []
         for test_case in test_cases_data:
+            # Map properties from user format to API format
+            props = self.__map_properties_to_api_format(test_case, field_definitions)
+
             body = swagger_client.TestCaseWithCustomFieldResource(properties=props)
-
-
-
+
+            # Handle core fields: Name, Description, Precondition
+            # These are set if explicitly provided in the input
+            # None or empty string means "clear this field" (except Name which is required)
+            if 'Name' in test_case:
+                # Name is required - use 'Untitled' as fallback if null/empty
+                name_value = test_case['Name']
+                body.name = name_value if name_value else 'Untitled'
+
+            if 'Precondition' in test_case:
+                # Allow clearing with None or empty string
+                body.precondition = test_case['Precondition'] if test_case['Precondition'] is not None else ''
+
+            if 'Description' in test_case:
+                # Allow clearing with None or empty string
+                body.description = test_case['Description'] if test_case['Description'] is not None else ''
+
             if parent_id:
                 body.parent_id = parent_id
-
-
-
-
-
-
+
+            # Only set test_steps if Steps are provided in the input
+            # This prevents overwriting existing steps during partial updates
+            if 'Steps' in test_case and test_case['Steps'] is not None:
+                test_steps_resources = []
+                for step in test_case['Steps']:
+                    test_steps_resources.append(
+                        swagger_client.TestStepResource(description=step.get('Test Step Description'),
+                                                        expected=step.get('Test Step Expected Result')))
+                body.test_steps = test_steps_resources
+
             bodies.append(body)
         return bodies

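To make the wire formats produced by __map_properties_to_api_format concrete, a short sketch with an invented field definition; the bracketed-ID conventions are the ones described in the code comments above:

```python
# Invented field definition and the field_value shapes described in the comments above.
team_field = {'field_id': 101, 'multiple': True, 'data_type': None,
              'values': {'Epam': 419950, 'EJ': 419951}}

# "Team": "Epam"          -> field_value "[419950]"         (single value in a multi-select)
# "Team": ["Epam", "EJ"]  -> field_value "[419950,419951]"  (multiple values, comma-separated IDs)
# "Team": None            -> field_value "[]"               (clear a multi-select)
# single-select dropdowns -> plain ID, and cannot be cleared via the API
```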
@@ -206,7 +583,273 @@ class QtestApiWrapper(BaseToolApiWrapper):
                 Exception: \n {stacktrace}""")
         return modules

+    def __get_field_definitions_from_properties_api(self) -> dict:
+        """
+        Fallback method: Get field definitions using /properties and /properties-info APIs.
+
+        These APIs don't require Field Management permission and are available to all users.
+        Requires 2 API calls + 1 search to get a test case ID.
+
+        Returns:
+            dict: Same structure as __get_project_field_definitions()
+        """
+        logger.info(
+            "Using properties API fallback (no Field Management permission). "
+            "This requires getting a template test case first."
+        )
+
+        # Step 1: Get any test case ID to query properties
+        search_instance = swagger_client.SearchApi(self._client)
+        body = swagger_client.ArtifactSearchParams(
+            object_type='test-cases',
+            fields=['*'],
+            query=''  # Empty query returns all test cases
+        )
+
+        try:
+            # Search for any test case - just need one
+            response = search_instance.search_artifact(
+                self.qtest_project_id,
+                body,
+                page_size=1,
+                page=1
+            )
+        except ApiException as e:
+            stacktrace = format_exc()
+            logger.error(f"Failed to find test case for properties API: {stacktrace}")
+            raise ValueError(
+                f"Cannot find any test case to query field definitions. "
+                f"Please create at least one test case in project {self.qtest_project_id}"
+            ) from e
+
+        if not response or not response.get('items') or len(response['items']) == 0:
+            raise ValueError(
+                f"No test cases found in project {self.qtest_project_id}. "
+                f"Please create at least one test case to retrieve field definitions."
+            )
+
+        test_case_id = response['items'][0]['id']
+        logger.info(f"Using test case ID {test_case_id} to retrieve field definitions")
+
+        # Step 2: Call /properties API
+        headers = {
+            "Authorization": f"Bearer {self.qtest_api_token.get_secret_value()}"
+        }
+
+        properties_url = f"{self.base_url}/api/v3/projects/{self.qtest_project_id}/test-cases/{test_case_id}/properties"
+        properties_info_url = f"{self.base_url}/api/v3/projects/{self.qtest_project_id}/test-cases/{test_case_id}/properties-info"
+
+        try:
+            # Get properties with current values and field metadata
+            props_response = requests.get(
+                properties_url,
+                headers=headers,
+                params={'calledBy': 'testcase_properties'}
+            )
+            props_response.raise_for_status()
+            properties_data = props_response.json()
+
+            # Get properties-info with data types and allowed values
+            info_response = requests.get(properties_info_url, headers=headers)
+            info_response.raise_for_status()
+            info_data = info_response.json()
+
+        except requests.exceptions.RequestException as e:
+            stacktrace = format_exc()
+            logger.error(f"Failed to call properties API: {stacktrace}")
+            raise ValueError(
+                f"Unable to retrieve field definitions using properties API. "
+                f"Error: {stacktrace}"
+            ) from e
+
+        # Step 3: Build field mapping by merging both responses
+        field_mapping = {}
+
+        # Create lookup by field ID from properties-info
+        metadata_by_id = {item['id']: item for item in info_data['metadata']}
+
+        # Data type mapping to determine 'multiple' flag
+        MULTI_SELECT_TYPES = {
+            'UserListDataType',
+            'MultiSelectionDataType',
+            'CheckListDataType'
+        }
+
+        USER_FIELD_TYPES = {'UserListDataType'}
+
+        # System fields to exclude (same as in property mapping)
+        excluded_fields = {'Shared', 'Projects Shared to'}
+
+        for prop in properties_data:
+            field_name = prop.get('name')
+            field_id = prop.get('id')
+
+            if not field_name or field_name in excluded_fields:
+                continue
+
+            # Get metadata for this field
+            metadata = metadata_by_id.get(field_id, {})
+            data_type_str = metadata.get('data_type')
+
+            # Determine data_type number (5 for user fields, None for others)
+            data_type = 5 if data_type_str in USER_FIELD_TYPES else None
+
+            # Determine if multi-select
+            is_multiple = data_type_str in MULTI_SELECT_TYPES
+
+            field_mapping[field_name] = {
+                'field_id': field_id,
+                'required': prop.get('required', False),
+                'data_type': data_type,
+                'multiple': is_multiple,
+                'values': {}
+            }
+
+            # Map allowed values from metadata
+            allowed_values = metadata.get('allowed_values', [])
+            for allowed_val in allowed_values:
+                value_text = allowed_val.get('value_text')
+                value_id = allowed_val.get('id')
+                if value_text and value_id:
+                    field_mapping[field_name]['values'][value_text] = value_id
+
+        logger.info(
+            f"Retrieved {len(field_mapping)} field definitions using properties API. "
+            f"This method works for all users without Field Management permission."
+        )
+
+        return field_mapping
+
+    def __get_project_field_definitions(self) -> dict:
+        """
+        Get structured field definitions for test cases in the project.
+
+        Returns:
+            dict: Mapping of field names to their IDs and allowed values.
+            Example: {
+                'Status': {
+                    'field_id': 12345,
+                    'required': True,
+                    'values': {'New': 1, 'In Progress': 2, 'Completed': 3}
+                },
+                'Priority': {
+                    'field_id': 12346,
+                    'required': False,
+                    'values': {'High': 1, 'Medium': 2, 'Low': 3}
+                }
+            }
+        """
+        fields_api = self.__instantiate_fields_api_instance()
+        qtest_object = 'test-cases'
+
+        try:
+            fields = fields_api.get_fields(self.qtest_project_id, qtest_object)
+        except ApiException as e:
+            # Check if permission denied (403) - use fallback
+            if e.status == 403:
+                logger.warning(
+                    "get_fields permission denied (Field Management permission required). "
+                    "Using properties API fallback..."
+                )
+                return self.__get_field_definitions_from_properties_api()
+
+            # Other API errors
+            stacktrace = format_exc()
+            logger.error(f"Exception when calling FieldAPI->get_fields:\n {stacktrace}")
+            raise ValueError(
+                f"Unable to get test case fields for project {self.qtest_project_id}. Exception: \n {stacktrace}")
+
+        # Build structured mapping
+        field_mapping = {}
+        for field in fields:
+            field_name = field.label
+            field_mapping[field_name] = {
+                'field_id': field.id,
+                'required': getattr(field, 'required', False),
+                'data_type': getattr(field, 'data_type', None),  # 5 = user field
+                'multiple': getattr(field, 'multiple', False),  # True = multi-select, needs array format
+                'values': {}
+            }
+
+            # Map allowed values if field has them (dropdown/combobox/user fields)
+            # Only include active values (is_active=True)
+            if hasattr(field, 'allowed_values') and field.allowed_values:
+                for allowed_value in field.allowed_values:
+                    # Skip inactive values (deleted/deprecated options)
+                    if hasattr(allowed_value, 'is_active') and not allowed_value.is_active:
+                        continue
+
+                    # AllowedValueResource has 'label' for the display name and 'value' for the ID
+                    # Note: 'value' is the field_value, not 'id'
+                    # For user fields (data_type=5), label is user name and value is user ID
+                    value_label = allowed_value.label
+                    value_id = allowed_value.value
+                    field_mapping[field_name]['values'][value_label] = value_id
+
+        return field_mapping
+
+    def __format_field_info_for_display(self, field_definitions: dict) -> str:
+        """
+        Format field definitions in human-readable format for LLM.
+
+        Args:
+            field_definitions: Output from __get_project_field_definitions()
+
+        Returns:
+            Formatted string with field information
+        """
+        output = [f"Available Test Case Fields for Project {self.qtest_project_id}:\n"]
+
+        for field_name, field_info in sorted(field_definitions.items()):
+            required_marker = " (Required)" if field_info.get('required') else ""
+            has_values = bool(field_info.get('values'))
+            is_multiple = field_info.get('multiple', False)
+
+            # Determine field type label
+            if not has_values:
+                type_label = "Text"
+            elif is_multiple:
+                type_label = "Multi-select"
+            else:
+                type_label = "Single-select"
+
+            output.append(f"\n{field_name} ({type_label}{required_marker}):")
+
+            if has_values:
+                for value_name, value_id in sorted(field_info['values'].items()):
+                    output.append(f"  - {value_name}")
+            else:
+                output.append("  Free text input. Set to null to clear.")
+
+        output.append("\n\n--- Field Type Guide ---")
+        output.append("\nText fields: Use null to clear, provide string value to set.")
+        output.append("\nSingle-select: Provide exact value name from the list above. Cannot be cleared via API.")
+        output.append("\nMulti-select: Provide value as array [\"val1\", \"val2\"]. Use null to clear.")
+        return '\n'.join(output)
+
+    def get_all_test_cases_fields_for_project(self, force_refresh: bool = False) -> str:
+        """
+        Get formatted information about available test case fields and their values.
+        This method is exposed as a tool for LLM to query field information.
+
+        Args:
+            force_refresh: If True, reload field definitions from API instead of using cache.
+                           Use this if project configuration has changed (new fields added,
+                           dropdown values modified, etc.).
+
+        Returns:
+            Formatted string with field names and allowed values
+        """
+        if force_refresh:
+            self.refresh_field_definitions_cache()
+        field_defs = self.__get_field_definitions_cached()
+        return self.__format_field_info_for_display(field_defs)
+
     def _parse_modules(self) -> list[dict]:
+        """Get parsed modules list with caching for the session."""
+        if self._modules_cache is not None:
+            return self._modules_cache
+
         modules = self.__get_all_modules_for_project()
         result = []

@@ -227,6 +870,7 @@ class QtestApiWrapper(BaseToolApiWrapper):
         for module in modules:
             parse_module(module)

+        self._modules_cache = result
         return result

     def __execute_single_create_test_case_request(self, test_case_api_instance: TestCaseApi, body,
@@ -243,32 +887,75 @@ class QtestApiWrapper(BaseToolApiWrapper):
|
|
|
243
887
|
raise ToolException(
|
|
244
888
|
f"Unable to create test case in project - {self.qtest_project_id} with the following content:\n{test_case_content}.\n\n Stacktrace was {stacktrace}") from e
|
|
245
889
|
|
|
890
|
+
def __format_property_value(self, prop: dict) -> Any:
|
|
891
|
+
"""Format property value for display, detecting field type from response structure.
|
|
892
|
+
|
|
893
|
+
Detection rules based on API response patterns:
|
|
894
|
+
- Text field: field_value_name is empty/None
|
|
895
|
+
- Multi-select: field_value_name starts with '[' and ends with ']'
|
|
896
|
+
- Single-select: field_value_name is plain text (no brackets)
|
|
897
|
+
|
|
898
|
+
Args:
|
|
899
|
+
prop: Property dict from API response with field_value and field_value_name
|
|
900
|
+
|
|
901
|
+
Returns:
|
|
902
|
+
Formatted value: list for multi-select, string for others
|
|
903
|
+
"""
|
|
904
|
+
field_value = prop.get('field_value') or ''
|
|
905
|
+
field_value_name = prop.get('field_value_name')
|
|
906
|
+
|
|
907
|
+
# Text field: no field_value_name, use field_value directly
|
|
908
|
+
if not field_value_name:
|
|
909
|
+
return field_value
|
|
910
|
+
|
|
911
|
+
# Multi-select: field_value_name is bracketed like '[value1, value2]'
|
|
912
|
+
if isinstance(field_value_name, str) and field_value_name.startswith('[') and field_value_name.endswith(']'):
|
|
913
|
+
inner = field_value_name[1:-1].strip() # Remove brackets
|
|
914
|
+
if inner:
|
|
915
|
+
return [v.strip() for v in inner.split(',')]
|
|
916
|
+
return [] # Empty multi-select
|
|
917
|
+
|
|
918
|
+
# Single-select: plain text value
|
|
919
|
+
return field_value_name
|
|
920
|
+
|
|
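A minimal sketch of the detection rules implemented above, using made-up property dicts of the shape the docstring describes:

    # Hypothetical property dicts shaped like the qTest API response described above.
    text_prop   = {'field_name': 'Notes',  'field_value': 'free text', 'field_value_name': None}
    single_prop = {'field_name': 'Status', 'field_value': '201',       'field_value_name': 'New'}
    multi_prop  = {'field_name': 'Tags',   'field_value': '[1, 2]',    'field_value_name': '[smoke, regression]'}
    # Per the rules: text -> 'free text', single-select -> 'New', multi-select -> ['smoke', 'regression'].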
     def __parse_data(self, response_to_parse: dict, parsed_data: list, extract_images: bool=False, prompt: str=None):
         import html
+
+        # PERMISSION-FREE: Parse properties directly from API response
+        # No get_fields call needed - works for all users
+
         for item in response_to_parse['items']:
+            # Start with core fields (always present)
             parsed_data_row = {
                 'Id': item['pid'],
+                'Name': item['name'],
                 'Description': html.unescape(strip_tags(item['description'])),
                 'Precondition': html.unescape(strip_tags(item['precondition'])),
-                'Name': item['name'],
                 QTEST_ID: item['id'],
                 'Steps': list(map(lambda step: {
                     'Test Step Number': step[0] + 1,
                     'Test Step Description': self._process_image(step[1]['description'], extract_images, prompt),
                     'Test Step Expected Result': self._process_image(step[1]['expected'], extract_images, prompt)
                 }, enumerate(item['test_steps']))),
-                'Status': ''.join([properties['field_value_name'] for properties in item['properties']
-                                   if properties['field_name'] == 'Status']),
-                'Automation': ''.join([properties['field_value_name'] for properties in item['properties']
-                                       if properties['field_name'] == 'Automation']),
-                'Type': ''.join([properties['field_value_name'] for properties in item['properties']
-                                 if properties['field_name'] == 'Type']),
-                'Priority': ''.join([properties['field_value_name'] for properties in item['properties']
-                                     if properties['field_name'] == 'Priority']),
             }
+
+            # Add custom fields directly from API response properties
+            for prop in item['properties']:
+                field_name = prop.get('field_name')
+                if not field_name:
+                    continue
+
+                # Format value based on field type (multi-select as array, etc.)
+                parsed_data_row[field_name] = self.__format_property_value(prop)
+
             parsed_data.append(parsed_data_row)

     def _process_image(self, content: str, extract: bool=False, prompt: str=None):
+        """Extract and process base64 images from HTML img tags.
+
+        IMPORTANT: This method must be called BEFORE strip_tags() because it needs
+        the HTML <img> tags to extract base64-encoded images.
+        """
         #extract image by regex
         img_regex = r'<img\s+src="data:image\/[^;]+;base64,([^"]+)"\s+[^>]*data-filename="([^"]+)"[^>]*>'

@@ -288,6 +975,33 @@ class QtestApiWrapper(BaseToolApiWrapper):
         content = re.sub(img_regex, replace_image, content)
         return content

+    def _clean_html_content(self, content: str, extract_images: bool = False, image_prompt: str = None) -> str:
+        """Clean HTML content with proper order of operations.
+
+        The correct order is:
+        1. Process images first (extracts from <img> tags - needs HTML intact)
+        2. Strip remaining HTML tags
+        3. Unescape HTML entities
+
+        Args:
+            content: Raw HTML content from QTest
+            extract_images: Whether to extract and describe images using LLM
+            image_prompt: Custom prompt for image analysis
+
+        Returns:
+            Cleaned text content with optional image descriptions
+        """
+        import html
+        if not content:
+            return ''
+        # Step 1: Process images FIRST (needs HTML <img> tags intact)
+        content = self._process_image(content, extract_images, image_prompt)
+        # Step 2: Strip remaining HTML tags
+        content = strip_tags(content)
+        # Step 3: Unescape HTML entities
+        content = html.unescape(content)
+        return content
+
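A minimal usage sketch of the order of operations _clean_html_content enforces (here `wrapper` stands for a configured QtestApiWrapper instance; the sample HTML is hypothetical):

    raw = '<p>Login &amp; verify <img src="data:image/png;base64,..." data-filename="home.png"></p>'
    # 1) images are processed while the <img> tags are still intact,
    # 2) strip_tags removes the remaining markup,
    # 3) html.unescape turns '&amp;' back into '&'.
    cleaned = wrapper._clean_html_content(raw, extract_images=False)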
     def __perform_search_by_dql(self, dql: str, extract_images:bool=False, prompt: str=None) -> list:
         search_instance: SearchApi = swagger_client.SearchApi(self._client)
         body = swagger_client.ArtifactSearchParams(object_type='test-cases', fields=['*'],
@@ -323,18 +1037,140 @@ class QtestApiWrapper(BaseToolApiWrapper):
         parsed_data = self.__perform_search_by_dql(dql)
         return parsed_data[0]['QTest Id']

-    def
-
-
+    def __find_qtest_internal_id(self, object_type: str, entity_id: str) -> int:
+        """Generic search for an entity's internal QTest ID using its external ID (e.g., TR-xxx, DF-xxx, RQ-xxx).
+
+        This is the unified method for looking up internal IDs. Use this instead of
+        the entity-specific methods (__find_qtest_requirement_id_by_id, etc.).
+
+        Args:
+            object_type: QTest object type ('test-runs', 'defects', 'requirements', etc.)
+            entity_id: Entity ID in format TR-123, DF-456, etc.
+
+        Returns:
+            int: Internal QTest ID for the entity
+
+        Raises:
+            ValueError: If entity is not found
+        """
+        dql = f"Id = '{entity_id}'"
+        search_instance: SearchApi = swagger_client.SearchApi(self._client)
+        body = swagger_client.ArtifactSearchParams(object_type=object_type, fields=['*'], query=dql)
+
         try:
-            response =
-
+            response = search_instance.search_artifact(self.qtest_project_id, body)
+            if response['total'] == 0:
+                raise ValueError(
+                    f"{object_type.capitalize()} '{entity_id}' not found in project {self.qtest_project_id}. "
+                    f"Please verify the {entity_id} ID exists."
+                )
+            return response['items'][0]['id']
         except ApiException as e:
             stacktrace = format_exc()
-            logger.error(f"Exception when
-            raise
+            logger.error(f"Exception when searching for '{object_type}': '{entity_id}': \n {stacktrace}")
+            raise ToolException(
+                f"Unable to search for {object_type} '{entity_id}' in project {self.qtest_project_id}. "
+                f"Exception: \n{stacktrace}"
+            ) from e
+
+    def __find_qtest_requirement_id_by_id(self, requirement_id: str) -> int:
+        """Search for requirement's internal QTest ID using requirement ID (RQ-xxx format).
+
+        Args:
+            requirement_id: Requirement ID in format RQ-123
+
+        Returns:
+            int: Internal QTest ID for the requirement
+
+        Raises:
+            ValueError: If requirement is not found
+        """
+        return self.__find_qtest_internal_id('requirements', requirement_id)
+
+    def __find_qtest_defect_id_by_id(self, defect_id: str) -> int:
+        """Search for defect's internal QTest ID using defect ID (DF-xxx format).
+
+        Args:
+            defect_id: Defect ID in format DF-123
+
+        Returns:
+            int: Internal QTest ID for the defect
+
+        Raises:
+            ValueError: If defect is not found
+        """
+        return self.__find_qtest_internal_id('defects', defect_id)

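A minimal sketch of what the internal-ID lookup above boils down to: the external PID is wrapped in a DQL equality query and the first hit's internal id is returned (the project id and PID here are hypothetical):

    dql = "Id = 'TR-39'"  # as built by __find_qtest_internal_id
    body = swagger_client.ArtifactSearchParams(object_type='test-runs', fields=['*'], query=dql)
    response = search_instance.search_artifact(12345, body)  # 12345: hypothetical project id
    internal_id = response['items'][0]['id']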
|
337
|
-
def
|
|
1104
|
+
def __search_entity_by_id(self, object_type: str, entity_id: str) -> dict:
|
|
1105
|
+
"""Generic search for any entity by its ID (RQ-xxx, DF-xxx, etc.).
|
|
1106
|
+
|
|
1107
|
+
Uses the unified __parse_entity_item method for consistent parsing.
|
|
1108
|
+
|
|
1109
|
+
Args:
|
|
1110
|
+
object_type: QTest object type ('requirements', 'defects', etc.)
|
|
1111
|
+
entity_id: Entity ID in format prefix-number (RQ-123, DF-456)
|
|
1112
|
+
|
|
1113
|
+
Returns:
|
|
1114
|
+
dict: Entity data with all parsed fields, or None if not found
|
|
1115
|
+
"""
|
|
1116
|
+
dql = f"Id = '{entity_id}'"
|
|
1117
|
+
search_instance: SearchApi = swagger_client.SearchApi(self._client)
|
|
1118
|
+
body = swagger_client.ArtifactSearchParams(object_type=object_type, fields=['*'], query=dql)
|
|
1119
|
+
|
|
1120
|
+
try:
|
|
1121
|
+
response = search_instance.search_artifact(self.qtest_project_id, body)
|
|
1122
|
+
if response['total'] == 0:
|
|
1123
|
+
return None # Not found, but don't raise - caller handles this
|
|
1124
|
+
|
|
1125
|
+
# Use the unified parser
|
|
1126
|
+
return self.__parse_entity_item(object_type, response['items'][0])
|
|
1127
|
+
|
|
1128
|
+
except ApiException as e:
|
|
1129
|
+
logger.warning(f"Could not fetch details for {entity_id}: {e}")
|
|
1130
|
+
return None
|
|
1131
|
+
|
|
1132
|
+
def __get_entity_pid_by_internal_id(self, object_type: str, internal_id: int) -> str:
|
|
1133
|
+
"""Reverse lookup: get entity PID (TC-xxx, TR-xxx, etc.) from internal QTest ID.
|
|
1134
|
+
|
|
1135
|
+
Args:
|
|
1136
|
+
object_type: QTest object type ('test-cases', 'test-runs', 'defects', 'requirements')
|
|
1137
|
+
internal_id: Internal QTest ID (numeric)
|
|
1138
|
+
|
|
1139
|
+
Returns:
|
|
1140
|
+
str: Entity PID in format prefix-number (TC-123, TR-456, etc.) or None if not found
|
|
1141
|
+
"""
|
|
1142
|
+
search_instance = swagger_client.SearchApi(self._client)
|
|
1143
|
+
# Note: 'id' needs quotes for DQL when searching by internal ID
|
|
1144
|
+
body = swagger_client.ArtifactSearchParams(
|
|
1145
|
+
object_type=object_type,
|
|
1146
|
+
fields=['id', 'pid'],
|
|
1147
|
+
query=f"'id' = '{internal_id}'"
|
|
1148
|
+
)
|
|
1149
|
+
|
|
1150
|
+
try:
|
|
1151
|
+
response = search_instance.search_artifact(self.qtest_project_id, body)
|
|
1152
|
+
if response['total'] > 0:
|
|
1153
|
+
return response['items'][0].get('pid')
|
|
1154
|
+
return None
|
|
1155
|
+
except ApiException as e:
|
|
1156
|
+
logger.warning(f"Could not get PID for {object_type} internal ID {internal_id}: {e}")
|
|
1157
|
+
return None
|
|
1158
|
+
|
|
1159
|
+
def __find_qtest_test_run_id_by_id(self, test_run_id: str) -> int:
|
|
1160
|
+
"""Search for test run's internal QTest ID using test run ID (TR-xxx format).
|
|
1161
|
+
|
|
1162
|
+
Args:
|
|
1163
|
+
test_run_id: Test run ID in format TR-123
|
|
1164
|
+
|
|
1165
|
+
Returns:
|
|
1166
|
+
int: Internal QTest ID for the test run
|
|
1167
|
+
|
|
1168
|
+
Raises:
|
|
1169
|
+
ValueError: If test run is not found
|
|
1170
|
+
"""
|
|
1171
|
+
return self.__find_qtest_internal_id('test-runs', test_run_id)
|
|
1172
|
+
|
|
1173
|
+
def __is_jira_requirement_present(self, jira_issue_id: str) -> tuple[bool, dict]:
|
|
338
1174
|
""" Define if particular Jira requirement is present in qtest or not """
|
|
339
1175
|
dql = f"'External Id' = '{jira_issue_id}'"
|
|
340
1176
|
search_instance: SearchApi = swagger_client.SearchApi(self._client)
|
|
@@ -350,31 +1186,502 @@ class QtestApiWrapper(BaseToolApiWrapper):
|
|
|
350
1186
|
logger.error(f"Error: {format_exc()}")
|
|
351
1187
|
raise e
|
|
352
1188
|
|
|
353
|
-
def _get_jira_requirement_id(self, jira_issue_id: str) -> int
|
|
354
|
-
"""
|
|
1189
|
+
def _get_jira_requirement_id(self, jira_issue_id: str) -> int:
|
|
1190
|
+
"""Search for requirement id using the linked jira_issue_id.
|
|
1191
|
+
|
|
1192
|
+
Args:
|
|
1193
|
+
jira_issue_id: External Jira issue ID (e.g., PLAN-128)
|
|
1194
|
+
|
|
1195
|
+
Returns:
|
|
1196
|
+
int: Internal QTest ID for the Jira requirement
|
|
1197
|
+
|
|
1198
|
+
Raises:
|
|
1199
|
+
ValueError: If Jira requirement is not found in QTest
|
|
1200
|
+
"""
|
|
355
1201
|
is_present, response = self.__is_jira_requirement_present(jira_issue_id)
|
|
356
1202
|
if not is_present:
|
|
357
|
-
|
|
1203
|
+
raise ValueError(
|
|
1204
|
+
f"Jira requirement '{jira_issue_id}' not found in QTest project {self.qtest_project_id}. "
|
|
1205
|
+
f"Please ensure the Jira issue is linked to QTest as a requirement."
|
|
1206
|
+
)
|
|
358
1207
|
return response['items'][0]['id']
|
|
359
1208
|
|
|
360
1209
|
|
|
361
1210
|
def link_tests_to_jira_requirement(self, requirement_external_id: str, json_list_of_test_case_ids: str) -> str:
|
|
362
|
-
"""
|
|
1211
|
+
"""Link test cases to external Jira requirement.
|
|
1212
|
+
|
|
1213
|
+
Args:
|
|
1214
|
+
requirement_external_id: Jira issue ID (e.g., PLAN-128)
|
|
1215
|
+
json_list_of_test_case_ids: JSON array string of test case IDs (e.g., '["TC-123", "TC-234"]')
|
|
1216
|
+
|
|
1217
|
+
Returns:
|
|
1218
|
+
Success message with linked test case IDs
|
|
1219
|
+
"""
|
|
363
1220
|
link_object_api_instance = swagger_client.ObjectLinkApi(self._client)
|
|
364
1221
|
source_type = "requirements"
|
|
365
1222
|
linked_type = "test-cases"
|
|
366
|
-
|
|
1223
|
+
test_case_ids = json.loads(json_list_of_test_case_ids)
|
|
1224
|
+
qtest_test_case_ids = [self.__find_qtest_id_by_test_id(tc_id) for tc_id in test_case_ids]
|
|
367
1225
|
requirement_id = self._get_jira_requirement_id(requirement_external_id)
|
|
368
1226
|
|
|
369
1227
|
try:
|
|
370
|
-
response = link_object_api_instance.link_artifacts(
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
1228
|
+
response = link_object_api_instance.link_artifacts(
|
|
1229
|
+
self.qtest_project_id,
|
|
1230
|
+
object_id=requirement_id,
|
|
1231
|
+
type=linked_type,
|
|
1232
|
+
object_type=source_type,
|
|
1233
|
+
body=qtest_test_case_ids
|
|
1234
|
+
)
|
|
1235
|
+
linked_test_cases = [link.pid for link in response[0].objects]
|
|
1236
|
+
return (
|
|
1237
|
+
f"Successfully linked {len(linked_test_cases)} test case(s) to Jira requirement '{requirement_external_id}' "
|
|
1238
|
+
f"in project {self.qtest_project_id}.\n"
|
|
1239
|
+
f"Linked test cases: {', '.join(linked_test_cases)}"
|
|
1240
|
+
)
|
|
1241
|
+
except ApiException as e:
|
|
1242
|
+
stacktrace = format_exc()
|
|
1243
|
+
logger.error(f"Error linking to Jira requirement: {stacktrace}")
|
|
1244
|
+
raise ToolException(
|
|
1245
|
+
f"Unable to link test cases to Jira requirement '{requirement_external_id}' "
|
|
1246
|
+
f"in project {self.qtest_project_id}. Exception: \n{stacktrace}"
|
|
1247
|
+
) from e
|
|
1248
|
+
|
|
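A minimal usage sketch for the linking method above (`wrapper` stands for a configured QtestApiWrapper instance; the IDs are the example values already used in the docstrings). Note that the second argument is a JSON array string, not a Python list:

    msg = wrapper.link_tests_to_jira_requirement(
        requirement_external_id="PLAN-128",
        json_list_of_test_case_ids='["TC-123", "TC-234"]',
    )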
+    def link_tests_to_qtest_requirement(self, requirement_id: str, json_list_of_test_case_ids: str) -> str:
+        """Link test cases to internal QTest requirement.
+
+        Args:
+            requirement_id: QTest requirement ID in format RQ-123
+            json_list_of_test_case_ids: JSON array string of test case IDs (e.g., '["TC-123", "TC-234"]')
+
+        Returns:
+            Success message with linked test case IDs
+
+        Raises:
+            ValueError: If requirement or test cases are not found
+            ToolException: If linking fails
+        """
+        link_object_api_instance = swagger_client.ObjectLinkApi(self._client)
+        source_type = "requirements"
+        linked_type = "test-cases"
+
+        # Parse and convert test case IDs
+        test_case_ids = json.loads(json_list_of_test_case_ids)
+        qtest_test_case_ids = [self.__find_qtest_id_by_test_id(tc_id) for tc_id in test_case_ids]
+
+        # Get internal QTest ID for the requirement
+        qtest_requirement_id = self.__find_qtest_requirement_id_by_id(requirement_id)
+
+        try:
+            response = link_object_api_instance.link_artifacts(
+                self.qtest_project_id,
+                object_id=qtest_requirement_id,
+                type=linked_type,
+                object_type=source_type,
+                body=qtest_test_case_ids
+            )
+            linked_test_cases = [link.pid for link in response[0].objects]
+            return (
+                f"Successfully linked {len(linked_test_cases)} test case(s) to QTest requirement '{requirement_id}' "
+                f"in project {self.qtest_project_id}.\n"
+                f"Linked test cases: {', '.join(linked_test_cases)}"
+            )
+        except ApiException as e:
+            stacktrace = format_exc()
+            logger.error(f"Error linking to QTest requirement: {stacktrace}")
+            raise ToolException(
+                f"Unable to link test cases to QTest requirement '{requirement_id}' "
+                f"in project {self.qtest_project_id}. Exception: \n{stacktrace}"
+            ) from e
+
+    def find_test_cases_by_requirement_id(self, requirement_id: str, include_details: bool = False) -> dict:
+        """Find all test cases linked to a QTest requirement.
+
+        This method uses the ObjectLinkApi.find() to discover test cases that are
+        linked to a specific requirement. This is the correct way to find linked
+        test cases - DQL queries cannot search test cases by linked requirement.
+
+        Args:
+            requirement_id: QTest requirement ID in format RQ-123
+            include_details: If True, fetches full test case details. If False, returns summary with Id, Name, Description.
+
+        Returns:
+            dict with requirement_id, total count, and test_cases list
+
+        Raises:
+            ValueError: If requirement is not found
+            ToolException: If API call fails
+        """
+        # Get internal QTest ID for the requirement
+        qtest_requirement_id = self.__find_qtest_requirement_id_by_id(requirement_id)
+
+        link_object_api_instance = swagger_client.ObjectLinkApi(self._client)
+
+        try:
+            # Use ObjectLinkApi.find() to get linked artifacts
+            # type='requirements' means we're searching from requirements
+            # ids=[qtest_requirement_id] specifies which requirement(s) to check
+            response = link_object_api_instance.find(
+                self.qtest_project_id,
+                type='requirements',
+                ids=[qtest_requirement_id]
+            )
+
+            # Parse the response to extract linked test cases
+            # Response structure: [{id: req_internal_id, pid: 'RQ-15', objects: [{id: tc_internal_id, pid: 'TC-123'}, ...]}]
+            linked_test_cases = []
+            if response and len(response) > 0:
+                for container in response:
+                    # Convert to dict if it's an object
+                    container_data = container.to_dict() if hasattr(container, 'to_dict') else container
+                    objects = container_data.get('objects', []) if isinstance(container_data, dict) else []
+
+                    for obj in objects:
+                        obj_data = obj.to_dict() if hasattr(obj, 'to_dict') else obj
+                        if isinstance(obj_data, dict):
+                            pid = obj_data.get('pid', '')
+                            internal_id = obj_data.get('id')
+                            if pid and pid.startswith('TC-'):
+                                linked_test_cases.append({
+                                    'Id': pid,
+                                    QTEST_ID: internal_id
+                                })
+
+            if not linked_test_cases:
+                return {
+                    'requirement_id': requirement_id,
+                    'total': 0,
+                    'test_cases': [],
+                    'message': f"No test cases are linked to requirement '{requirement_id}'"
+                }
+
+            # Build result based on detail level
+            test_cases_result = []
+
+            if not include_details:
+                # Short view: fetch Name, Description via DQL for each test case
+                for tc in linked_test_cases:
+                    try:
+                        parsed_data = self.__perform_search_by_dql(f"Id = '{tc['Id']}'")
+                        if parsed_data:
+                            tc_data = parsed_data[0]
+                            test_cases_result.append({
+                                'Id': tc['Id'],
+                                QTEST_ID: tc[QTEST_ID],
+                                'Name': tc_data.get('Name'),
+                                'Description': tc_data.get('Description', '')
+                            })
+                    except Exception as e:
+                        logger.warning(f"Could not fetch details for {tc['Id']}: {e}")
+                        test_cases_result.append({
+                            'Id': tc['Id'],
+                            QTEST_ID: tc[QTEST_ID],
+                            'Name': 'Unable to fetch',
+                            'Description': ''
+                        })
+            else:
+                # Full details: fetch complete test case data
+                for tc in linked_test_cases:
+                    try:
+                        parsed_data = self.__perform_search_by_dql(f"Id = '{tc['Id']}'")
+                        if parsed_data:
+                            test_cases_result.append(parsed_data[0])
+                    except Exception as e:
+                        logger.warning(f"Could not fetch details for {tc['Id']}: {e}")
+                        test_cases_result.append({
+                            'Id': tc['Id'],
+                            QTEST_ID: tc[QTEST_ID],
+                            'error': f'Unable to fetch details: {str(e)}'
+                        })
+
+            return {
+                'requirement_id': requirement_id,
+                'total': len(test_cases_result),
+                'test_cases': test_cases_result
+            }
+
+        except ApiException as e:
+            stacktrace = format_exc()
+            logger.error(f"Error finding test cases by requirement: {stacktrace}")
+            raise ToolException(
+                f"Unable to find test cases linked to requirement '{requirement_id}' "
+                f"in project {self.qtest_project_id}. Exception: \n{stacktrace}"
+            ) from e
+
|
+
def find_requirements_by_test_case_id(self, test_case_id: str) -> dict:
|
|
1411
|
+
"""Find all requirements linked to a test case.
|
|
1412
|
+
|
|
1413
|
+
This method uses the ObjectLinkApi.find() to discover requirements that are
|
|
1414
|
+
linked to a specific test case (reverse lookup).
|
|
1415
|
+
|
|
1416
|
+
Args:
|
|
1417
|
+
test_case_id: Test case ID in format TC-123
|
|
1418
|
+
|
|
1419
|
+
Returns:
|
|
1420
|
+
dict with test_case_id, total count, and requirements list
|
|
1421
|
+
|
|
1422
|
+
Raises:
|
|
1423
|
+
ValueError: If test case is not found
|
|
1424
|
+
ToolException: If API call fails
|
|
1425
|
+
"""
|
|
1426
|
+
# Get internal QTest ID for the test case
|
|
1427
|
+
qtest_test_case_id = self.__find_qtest_id_by_test_id(test_case_id)
|
|
1428
|
+
|
|
1429
|
+
link_object_api_instance = swagger_client.ObjectLinkApi(self._client)
|
|
1430
|
+
|
|
1431
|
+
try:
|
|
1432
|
+
# Use ObjectLinkApi.find() to get linked artifacts
|
|
1433
|
+
# type='test-cases' means we're searching from test cases
|
|
1434
|
+
response = link_object_api_instance.find(
|
|
1435
|
+
self.qtest_project_id,
|
|
1436
|
+
type='test-cases',
|
|
1437
|
+
ids=[qtest_test_case_id]
|
|
1438
|
+
)
|
|
1439
|
+
|
|
1440
|
+
# Parse the response to extract linked requirement IDs
|
|
1441
|
+
linked_requirement_ids = []
|
|
1442
|
+
if response and len(response) > 0:
|
|
1443
|
+
for container in response:
|
|
1444
|
+
container_data = container.to_dict() if hasattr(container, 'to_dict') else container
|
|
1445
|
+
objects = container_data.get('objects', []) if isinstance(container_data, dict) else []
|
|
1446
|
+
|
|
1447
|
+
for obj in objects:
|
|
1448
|
+
obj_data = obj.to_dict() if hasattr(obj, 'to_dict') else obj
|
|
1449
|
+
if isinstance(obj_data, dict):
|
|
1450
|
+
pid = obj_data.get('pid', '')
|
|
1451
|
+
# Requirements have RQ- prefix
|
|
1452
|
+
if pid and pid.startswith('RQ-'):
|
|
1453
|
+
linked_requirement_ids.append(pid)
|
|
1454
|
+
|
|
1455
|
+
if not linked_requirement_ids:
|
|
1456
|
+
return {
|
|
1457
|
+
'test_case_id': test_case_id,
|
|
1458
|
+
'total': 0,
|
|
1459
|
+
'requirements': [],
|
|
1460
|
+
'message': f"No requirements are linked to test case '{test_case_id}'"
|
|
1461
|
+
}
|
|
1462
|
+
|
|
1463
|
+
# Fetch actual requirement details via DQL search
|
|
1464
|
+
requirements_result = []
|
|
1465
|
+
for req_id in linked_requirement_ids:
|
|
1466
|
+
req_data = self.__search_entity_by_id('requirements', req_id)
|
|
1467
|
+
if req_data:
|
|
1468
|
+
requirements_result.append(req_data)
|
|
1469
|
+
else:
|
|
1470
|
+
# Fallback if search fails
|
|
1471
|
+
requirements_result.append({
|
|
1472
|
+
'Id': req_id,
|
|
1473
|
+
'QTest Id': None,
|
|
1474
|
+
'Name': 'Unable to fetch',
|
|
1475
|
+
'Description': ''
|
|
1476
|
+
})
|
|
1477
|
+
|
|
1478
|
+
return {
|
|
1479
|
+
'test_case_id': test_case_id,
|
|
1480
|
+
'total': len(requirements_result),
|
|
1481
|
+
'requirements': requirements_result
|
|
1482
|
+
}
|
|
1483
|
+
|
|
1484
|
+
except ApiException as e:
|
|
1485
|
+
stacktrace = format_exc()
|
|
1486
|
+
logger.error(f"Error finding requirements by test case: {stacktrace}")
|
|
1487
|
+
raise ToolException(
|
|
1488
|
+
f"Unable to find requirements linked to test case '{test_case_id}' "
|
|
1489
|
+
f"in project {self.qtest_project_id}. Exception: \n{stacktrace}"
|
|
1490
|
+
) from e
|
|
1491
|
+
|
|
+    def find_test_runs_by_test_case_id(self, test_case_id: str) -> dict:
+        """Find all test runs associated with a test case.
+
+        A test run represents an execution instance of a test case. Each test run
+        tracks execution details, status, and any defects found during that run.
+
+        IMPORTANT: In QTest's data model, defects are linked to test runs, not directly
+        to test cases. To find defects related to a test case:
+        1. Use this tool to find test runs for the test case
+        2. Use find_defects_by_test_run_id for each test run to get related defects
+
+        Each test run in the result includes 'Test Case Id' showing which test case
+        it executes, and 'Latest Test Log' with execution status and log ID.
+
+        Args:
+            test_case_id: Test case ID in format TC-123
+
+        Returns:
+            dict with test_case_id, total count, and test_runs list with full details
+
+        Raises:
+            ValueError: If test case is not found
+            ToolException: If API call fails
+        """
+        # Get internal QTest ID for the test case
+        qtest_test_case_id = self.__find_qtest_id_by_test_id(test_case_id)
+
+        link_object_api_instance = swagger_client.ObjectLinkApi(self._client)
+
+        try:
+            # Use ObjectLinkApi.find() to get linked artifacts
+            response = link_object_api_instance.find(
+                self.qtest_project_id,
+                type='test-cases',
+                ids=[qtest_test_case_id]
+            )
+
+            # Parse the response to extract linked test run IDs
+            linked_test_run_ids = []
+            if response and len(response) > 0:
+                for container in response:
+                    container_data = container.to_dict() if hasattr(container, 'to_dict') else container
+                    objects = container_data.get('objects', []) if isinstance(container_data, dict) else []
+
+                    for obj in objects:
+                        obj_data = obj.to_dict() if hasattr(obj, 'to_dict') else obj
+                        if isinstance(obj_data, dict):
+                            pid = obj_data.get('pid', '')
+                            # Test runs have TR- prefix
+                            if pid and pid.startswith('TR-'):
+                                linked_test_run_ids.append(pid)
+
+            if not linked_test_run_ids:
+                return {
+                    'test_case_id': test_case_id,
+                    'total': 0,
+                    'test_runs': [],
+                    'message': f"No test runs are associated with test case '{test_case_id}'"
+                }
+
+            # Fetch actual test run details via DQL search
+            test_runs_result = []
+            for tr_id in linked_test_run_ids:
+                tr_data = self.__search_entity_by_id('test-runs', tr_id)
+                if tr_data:
+                    test_runs_result.append(tr_data)
+                else:
+                    # Fallback if search fails
+                    test_runs_result.append({
+                        'Id': tr_id,
+                        'QTest Id': None,
+                        'Name': 'Unable to fetch',
+                        'Description': ''
+                    })
+
+            return {
+                'test_case_id': test_case_id,
+                'total': len(test_runs_result),
+                'test_runs': test_runs_result,
+                'hint': 'To find defects, use find_defects_by_test_run_id for each test run.'
+            }
+
+        except ApiException as e:
+            stacktrace = format_exc()
+            logger.error(f"Error finding test runs by test case: {stacktrace}")
+            raise ToolException(
+                f"Unable to find test runs associated with test case '{test_case_id}' "
+                f"in project {self.qtest_project_id}. Exception: \n{stacktrace}"
+            ) from e
+
+    def find_defects_by_test_run_id(self, test_run_id: str) -> dict:
+        """Find all defects associated with a test run.
+
+        In QTest, defects are linked to test runs (not directly to test cases).
+        A test run executes a specific test case, so defects found here are
+        related to that test case through the test run execution context.
+
+        Use this tool after find_test_runs_by_test_case_id to discover defects.
+        The result includes source context (test run and test case IDs) for traceability.
+
+        Args:
+            test_run_id: Test run ID in format TR-123
+
+        Returns:
+            dict with test_run_id, source_test_case_id, total count, and defects list with full details
+
+        Raises:
+            ValueError: If test run is not found
+            ToolException: If API call fails
+        """
+        # First, get test run details to get the source test case context
+        test_run_data = self.__search_entity_by_id('test-runs', test_run_id)
+        source_test_case_id = None
+        if test_run_data:
+            # testCaseId is the internal ID, we need the PID (TC-xxx format)
+            internal_tc_id = test_run_data.get('Test Case Id')
+            if internal_tc_id:
+                source_test_case_id = self.__get_entity_pid_by_internal_id('test-cases', internal_tc_id)
+        else:
+            raise ValueError(f"Test run '{test_run_id}' not found")
+
+        # Get internal QTest ID for the test run from test_run_data (avoids duplicate API call)
+        qtest_test_run_id = test_run_data.get('QTest Id')
+        if not qtest_test_run_id:
+            raise ValueError(f"QTest Id not found in test run data for '{test_run_id}'")
+
+        link_object_api_instance = swagger_client.ObjectLinkApi(self._client)
+
+        try:
+            # Use ObjectLinkApi.find() to get linked artifacts
+            response = link_object_api_instance.find(
+                self.qtest_project_id,
+                type='test-runs',
+                ids=[qtest_test_run_id]
+            )
+
+            # Parse the response to extract linked defect IDs
+            linked_defect_ids = []
+            if response and len(response) > 0:
+                for container in response:
+                    container_data = container.to_dict() if hasattr(container, 'to_dict') else container
+                    objects = container_data.get('objects', []) if isinstance(container_data, dict) else []
+
+                    for obj in objects:
+                        obj_data = obj.to_dict() if hasattr(obj, 'to_dict') else obj
+                        if isinstance(obj_data, dict):
+                            pid = obj_data.get('pid', '')
+                            # Defects have DF- prefix
+                            if pid and pid.startswith('DF-'):
+                                linked_defect_ids.append(pid)
+
+            if not linked_defect_ids:
+                result = {
+                    'test_run_id': test_run_id,
+                    'total': 0,
+                    'defects': [],
+                    'message': f"No defects are associated with test run '{test_run_id}'"
+                }
+                if source_test_case_id:
+                    result['source_test_case_id'] = source_test_case_id
+                return result
+
+            # Fetch actual defect details via DQL search
+            defects_result = []
+            for defect_id in linked_defect_ids:
+                defect_data = self.__search_entity_by_id('defects', defect_id)
+                if defect_data:
+                    defects_result.append(defect_data)
+                else:
+                    # Fallback if search fails
+                    defects_result.append({
+                        'Id': defect_id,
+                        'QTest Id': None,
+                        'Name': 'Unable to fetch',
+                        'Description': ''
+                    })
+
+            result = {
+                'test_run_id': test_run_id,
+                'total': len(defects_result),
+                'defects': defects_result
+            }
+            if source_test_case_id:
+                result['source_test_case_id'] = source_test_case_id
+            return result
+
+        except ApiException as e:
+            stacktrace = format_exc()
+            logger.error(f"Error finding defects by test run: {stacktrace}")
+            raise ToolException(
+                f"Unable to find defects associated with test run '{test_run_id}' "
+                f"in project {self.qtest_project_id}. Exception: \n{stacktrace}"
+            ) from e
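A minimal sketch of the two-step traversal the docstrings above describe for getting from a test case to its defects (`wrapper` is a configured QtestApiWrapper instance; the IDs are hypothetical):

    runs = wrapper.find_test_runs_by_test_case_id("TC-123")
    for run in runs["test_runs"]:
        defects = wrapper.find_defects_by_test_run_id(run["Id"])  # e.g. 'TR-39'
        print(run["Id"], defects["total"])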

     def search_by_dql(self, dql: str, extract_images:bool=False, prompt: str=None):
         """Search for the test cases in qTest using Data Query Language """
@@ -382,8 +1689,179 @@ class QtestApiWrapper(BaseToolApiWrapper):
         return "Found " + str(
             len(parsed_data)) + f" Qtest test cases:\n" + str(parsed_data[:self.no_of_tests_shown_in_dql_search])

+    def search_entities_by_dql(self, object_type: str, dql: str) -> dict:
+        """Generic DQL search for any entity type (test-cases, requirements, defects, test-runs, etc.).
+
+        This is the unified search method that works for all QTest searchable entity types.
+        Each entity type has its own properties structure, but this method parses
+        them consistently using the generic entity parser.
+
+        Args:
+            object_type: Entity type to search (see QTEST_OBJECT_TYPES and QTEST_SEARCHABLE_ONLY_TYPES)
+            dql: QTest Data Query Language query string
+
+        Returns:
+            dict with object_type, total count, and items list with full entity details
+        """
+        # Check if object_type is valid (either has prefix or is searchable-only)
+        all_searchable = {**QTEST_OBJECT_TYPES, **QTEST_SEARCHABLE_ONLY_TYPES}
+        if object_type not in all_searchable:
+            raise ValueError(
+                f"Invalid object_type '{object_type}'. "
+                f"Must be one of: {', '.join(all_searchable.keys())}"
+            )
+
+        entity_info = all_searchable[object_type]
+        search_instance = swagger_client.SearchApi(self._client)
+        body = swagger_client.ArtifactSearchParams(
+            object_type=object_type,
+            fields=['*'],
+            query=dql
+        )
+
+        try:
+            response = search_instance.search_artifact(self.qtest_project_id, body)
+
+            # Parse all items using the generic parser
+            items = []
+            for item in response.get('items', []):
+                parsed = self.__parse_entity_item(object_type, item)
+                items.append(parsed)
+
+            return {
+                'object_type': object_type,
+                'entity_name': entity_info['name'],
+                'total': response.get('total', 0),
+                'returned': len(items),
+                'items': items[:self.no_of_tests_shown_in_dql_search]
+            }
+
+        except ApiException as e:
+            stacktrace = format_exc()
+            logger.error(f"Error searching {object_type} by DQL: {stacktrace}")
+            raise ToolException(
+                f"Unable to search {entity_info['name']}s with DQL '{dql}' "
+                f"in project {self.qtest_project_id}. Exception: \n{stacktrace}"
+            ) from e
+
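A minimal usage sketch for the unified DQL search above (`wrapper` is a configured QtestApiWrapper instance; single quotes inside the DQL, per the syntax rules quoted elsewhere in this diff):

    hits = wrapper.search_entities_by_dql("defects", "Priority = 'High'")
    # Roughly: {'object_type': 'defects', 'entity_name': '...', 'total': N, 'returned': n, 'items': [...]}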
+    def find_entity_by_id(self, entity_id: str) -> dict:
+        """Find any QTest entity by its ID (TC-xxx, RQ-xxx, DF-xxx, TR-xxx).
+
+        This is a universal lookup tool that works for any entity type.
+        The entity type is automatically determined from the ID prefix.
+
+        Args:
+            entity_id: Entity ID with prefix (TC-123, RQ-15, DF-100, TR-39, etc.)
+
+        Returns:
+            dict with full entity details including all properties
+        """
+        # Determine object type from prefix - dynamically built from registry
+        prefix = entity_id.split('-')[0].upper() if '-' in entity_id else ''
+
+        # Build reverse mapping: prefix -> object_type from QTEST_OBJECT_TYPES
+        prefix_to_type = {
+            info['prefix']: obj_type
+            for obj_type, info in QTEST_OBJECT_TYPES.items()
+        }
+
+        if prefix not in prefix_to_type:
+            valid_prefixes = ', '.join(sorted(prefix_to_type.keys()))
+            raise ValueError(
+                f"Invalid entity ID format '{entity_id}'. "
+                f"Expected prefix to be one of: {valid_prefixes}"
+            )
+
+        object_type = prefix_to_type[prefix]
+        result = self.__search_entity_by_id(object_type, entity_id)
+
+        if result is None:
+            entity_name = QTEST_OBJECT_TYPES[object_type]['name']
+            raise ValueError(
+                f"{entity_name} '{entity_id}' not found in project {self.qtest_project_id}"
+            )
+
+        return result
+
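A minimal sketch of the prefix-based dispatch implemented above, assuming (as the surrounding code and tool descriptions imply) that QTEST_OBJECT_TYPES maps 'test-runs' to the prefix 'TR':

    entity_id = "TR-39"
    prefix = entity_id.split('-')[0].upper()                                    # 'TR'
    prefix_to_type = {info['prefix']: t for t, info in QTEST_OBJECT_TYPES.items()}
    object_type = prefix_to_type[prefix]                                        # 'test-runs'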
+    def __parse_entity_item(self, object_type: str, item: dict) -> dict:
+        """Generic parser for any entity type from DQL search response.
+
+        This parses the raw API response item into a clean dictionary,
+        handling the differences between entity types (some have name at top level,
+        some have it in properties as Summary, etc.)
+
+        Args:
+            object_type: QTest object type
+            item: Raw item from search response
+
+        Returns:
+            dict with parsed entity data
+        """
+        import html
+
+        result = {
+            'Id': item.get('pid'),
+            'QTest Id': item.get('id'),
+        }
+
+        # Add top-level fields if present
+        if item.get('name'):
+            result['Name'] = item.get('name')
+        if item.get('description'):
+            result['Description'] = html.unescape(strip_tags(item.get('description', '') or ''))
+        if item.get('web_url'):
+            result['Web URL'] = item.get('web_url')
+
+        # Test-case specific fields
+        if object_type == 'test-cases':
+            if item.get('precondition'):
+                result['Precondition'] = html.unescape(strip_tags(item.get('precondition', '') or ''))
+            if item.get('test_steps'):
+                result['Steps'] = [
+                    {
+                        'Test Step Number': idx + 1,
+                        'Test Step Description': html.unescape(strip_tags(step.get('description', '') or '')),
+                        'Test Step Expected Result': html.unescape(strip_tags(step.get('expected', '') or ''))
+                    }
+                    for idx, step in enumerate(item.get('test_steps', []))
+                ]
+
+        # Test-run specific fields
+        if object_type == 'test-runs':
+            if item.get('testCaseId'):
+                result['Test Case Id'] = item.get('testCaseId')
+            if item.get('automation'):
+                result['Automation'] = item.get('automation')
+            if item.get('latest_test_log'):
+                log = item.get('latest_test_log')
+                result['Latest Test Log'] = {
+                    'Log Id': log.get('id'),
+                    'Status': log.get('status'),
+                    'Execution Start': log.get('exe_start_date'),
+                    'Execution End': log.get('exe_end_date')
+                }
+            if item.get('test_case_version'):
+                result['Test Case Version'] = item.get('test_case_version')
+
+        # Parse all properties - works for all entity types
+        for prop in item.get('properties', []):
+            field_name = prop.get('field_name')
+            if not field_name:
+                continue
+
+            # Format value based on field type (multi-select as array, etc.)
+            field_value = self.__format_property_value(prop)
+
+            # Strip HTML from text fields (strings only, not arrays)
+            if isinstance(field_value, str) and ('<' in field_value or '&' in field_value):
+                field_value = html.unescape(strip_tags(field_value))
+
+            result[field_name] = field_value
+
+        return result
+
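For orientation, the kind of dictionary the parser above yields for a test-run item looks roughly like this (keys come from the code above; the values are hypothetical):

    # {'Id': 'TR-39', 'QTest Id': 123456, 'Name': 'Checkout regression run',
    #  'Test Case Id': 654321,
    #  'Latest Test Log': {'Log Id': 1, 'Status': 'Failed',
    #                      'Execution Start': '2024-01-01T00:00:00.000Z',
    #                      'Execution End': '2024-01-01T00:05:00.000Z'},
    #  'Status': 'Failed'}  # plus any custom properties parsed from 'properties'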
     def create_test_cases(self, test_case_content: str, folder_to_place_test_cases_to: str) -> dict:
-        """ Create the
+        """ Create the test case based on the incoming content. The input should be in json format. """
         test_cases_api_instance: TestCaseApi = self.__instantiate_test_api_instance()
         input_obj = json.loads(test_case_content)
         test_cases = input_obj if isinstance(input_obj, list) else [input_obj]
@@ -458,12 +1936,43 @@ class QtestApiWrapper(BaseToolApiWrapper):
             kwargs["search"] = search
         return module_api.get_sub_modules_of(project_id=self.qtest_project_id, **kwargs)

+    @extend_with_parent_available_tools
     def get_available_tools(self):
         return [
            {
                "name": "search_by_dql",
                "mode": "search_by_dql",
-               "description":
+               "description": """Search test cases in qTest using Data Query Language (DQL).
+
+CRITICAL: USE SINGLE QUOTES ONLY - DQL does not support double quotes!
+- ✓ CORRECT: Description ~ 'Forgot Password'
+- ✗ WRONG: Description ~ "Forgot Password"
+
+LIMITATION - CANNOT SEARCH BY LINKED OBJECTS:
+- ✗ 'Requirement Id' = 'RQ-15' will fail - use 'find_test_cases_by_requirement_id' tool instead
+- ✗ Linked defects or other relationship queries are not supported
+
+SEARCHABLE FIELDS:
+- Direct fields: Id, Name, Description, Status, Type, Priority, Automation, etc.
+- Module: Use 'Module in' syntax
+- Custom fields: Use exact field name from project configuration
+- Date fields: MUST use ISO DateTime format (e.g., '2024-01-01T00:00:00.000Z')
+
+SYNTAX RULES:
+1. ALL string values MUST use single quotes (never double quotes)
+2. Field names with spaces MUST be in single quotes: 'Created Date' > '2024-01-01T00:00:00.000Z'
+3. Use ~ for 'contains', !~ for 'not contains': Description ~ 'login'
+4. Use 'is not empty' for non-empty check: Name is 'not empty'
+5. Operators: =, !=, <, >, <=, >=, in, ~, !~
+
+EXAMPLES:
+- Id = 'TC-123'
+- Description ~ 'Forgot Password'
+- Status = 'New' and Priority = 'High'
+- Module in 'MD-78 Master Test Suite'
+- Name ~ 'login'
+- 'Created Date' > '2024-01-01T00:00:00.000Z'
+""",
                "args_schema": QtestDataQuerySearch,
                "ref": self.search_by_dql,
            },
@@ -496,17 +2005,557 @@ class QtestApiWrapper(BaseToolApiWrapper):
                "ref": self.delete_test_case,
            },
            {
-               "name": "
-               "mode": "
-               "description": "
+               "name": "link_tests_to_jira_requirement",
+               "mode": "link_tests_to_jira_requirement",
+               "description": "Link test cases to external Jira requirement. Provide Jira issue ID (e.g., PLAN-128) and list of test case IDs in format '[\"TC-123\", \"TC-234\"]'",
                "args_schema": QtestLinkTestCaseToJiraRequirement,
                "ref": self.link_tests_to_jira_requirement,
            },
+           {
+               "name": "link_tests_to_qtest_requirement",
+               "mode": "link_tests_to_qtest_requirement",
+               "description": "Link test cases to internal QTest requirement. Provide QTest requirement ID (e.g., RQ-15) and list of test case IDs in format '[\"TC-123\", \"TC-234\"]'",
+               "args_schema": QtestLinkTestCaseToQtestRequirement,
+               "ref": self.link_tests_to_qtest_requirement,
+           },
            {
                "name": "get_modules",
                "mode": "get_modules",
                "description": self.get_modules.__doc__,
                "args_schema": GetModules,
                "ref": self.get_modules,
+           },
+           {
+               "name": "get_all_test_cases_fields_for_project",
+               "mode": "get_all_test_cases_fields_for_project",
+               "description": "Get information about available test case fields and their valid values for the project. Shows which property values are allowed (e.g., Status: 'New', 'In Progress', 'Completed') based on the project configuration. Use force_refresh=true if project configuration has changed.",
+               "args_schema": GetAllTestCasesFieldsForProject,
+               "ref": self.get_all_test_cases_fields_for_project,
+           },
+           {
+               "name": "find_test_cases_by_requirement_id",
+               "mode": "find_test_cases_by_requirement_id",
+               "description": """Find all test cases linked to a QTest requirement.
+
+Use this tool to find test cases associated with a specific requirement.
+DQL search cannot query by linked requirement - use this tool instead.
+
+Parameters:
+- requirement_id: QTest requirement ID in format RQ-123
+- include_details: If true, returns full test case data. If false (default), returns Id, QTest Id, Name, and Description.
+
+Examples:
+- Find test cases for RQ-15: requirement_id='RQ-15'
+- Get full details: requirement_id='RQ-15', include_details=true
+""",
+               "args_schema": FindTestCasesByRequirementId,
+               "ref": self.find_test_cases_by_requirement_id,
+           },
+           {
+               "name": "find_requirements_by_test_case_id",
+               "mode": "find_requirements_by_test_case_id",
+               "description": """Find all requirements linked to a test case (direct link: test-case 'covers' requirements).
+
+Use this tool to discover which requirements a specific test case covers.
+
+Parameters:
+- test_case_id: Test case ID in format TC-123
+
+Returns: List of linked requirements with Id, QTest Id, Name, and Description.
+
+Examples:
+- Find requirements for TC-123: test_case_id='TC-123'
+""",
+               "args_schema": FindRequirementsByTestCaseId,
+               "ref": self.find_requirements_by_test_case_id,
+           },
+           {
+               "name": "find_test_runs_by_test_case_id",
+               "mode": "find_test_runs_by_test_case_id",
+               "description": """Find all test runs associated with a test case.
+
+IMPORTANT: In QTest, defects are NOT directly linked to test cases.
+Defects are linked to TEST RUNS. To find defects related to a test case:
+1. First use this tool to find test runs for the test case
+2. Then use find_defects_by_test_run_id for each test run
+
+Parameters:
+- test_case_id: Test case ID in format TC-123
+
+Returns: List of test runs with Id, QTest Id, Name, and Description.
+Also includes a hint about finding defects via test runs.
+
+Examples:
+- Find test runs for TC-123: test_case_id='TC-123'
+""",
+               "args_schema": FindTestRunsByTestCaseId,
+               "ref": self.find_test_runs_by_test_case_id,
+           },
+           {
+               "name": "find_defects_by_test_run_id",
+               "mode": "find_defects_by_test_run_id",
+               "description": """Find all defects associated with a test run.
+
+In QTest data model, defects are linked to test runs (not directly to test cases).
+A defect found here means it was reported during execution of this specific test run.
+
+To find defects related to a test case:
+1. First use find_test_runs_by_test_case_id to get test runs
+2. Then use this tool for each test run
+
+Parameters:
+- test_run_id: Test run ID in format TR-123
+
+Returns: List of defects with Id, QTest Id, Name, and Description.
+
+Examples:
+- Find defects for TR-39: test_run_id='TR-39'
+""",
+               "args_schema": FindDefectsByTestRunId,
+               "ref": self.find_defects_by_test_run_id,
+           },
+           {
+               "name": "search_entities_by_dql",
+               "mode": "search_entities_by_dql",
+               "description": f"""Search any QTest entity type using Data Query Language (DQL).
+
+This is a unified search tool for all searchable QTest entity types.
+
+SUPPORTED ENTITY TYPES (object_type parameter):
+- 'test-cases' (TC-xxx): Test case definitions with steps
+- 'test-runs' (TR-xxx): Execution instances of test cases
+- 'defects' (DF-xxx): Bugs/issues found during testing
+- 'requirements' (RQ-xxx): Requirements to be tested
+- 'test-suites' (TS-xxx): Collections of test runs
+- 'test-cycles' (CL-xxx): Test execution cycles
+- 'test-logs': Execution logs (date queries ONLY - see notes)
+- 'releases' (RL-xxx): Software releases
+- 'builds' (BL-xxx): Builds within releases
+
+NOTES:
+- Modules (MD-xxx) are NOT searchable via DQL. Use 'get_modules' tool instead.
+- Test-logs: Only date queries work (Execution Start Date, Execution End Date).
+  For specific test log details, use find_test_runs_by_test_case_id -
+  the test run includes 'Latest Test Log' with status and execution times.
+
+{DQL_SYNTAX_DOCS}
+
+EXAMPLES BY ENTITY TYPE:
+- Test cases: object_type='test-cases', dql="Name ~ 'login'"
+- Requirements: object_type='requirements', dql="Status = 'Baselined'"
+- Defects: object_type='defects', dql="Priority = 'High'"
+- Test runs: object_type='test-runs', dql="Status = 'Failed'"
+- Test logs: object_type='test-logs', dql="'Execution Start Date' > '2024-01-01T00:00:00.000Z'" (date queries only)
+- Releases: object_type='releases', dql="Name ~ '2024'"
+""",
+               "args_schema": GenericDqlSearch,
+               "ref": self.search_entities_by_dql,
+           },
+           {
+               "name": "find_entity_by_id",
+               "mode": "find_entity_by_id",
+               "description": """Find any QTest entity by its ID.
+
+This universal lookup tool works for entity types that have ID prefixes.
+The entity type is automatically determined from the ID prefix.
+
+SUPPORTED ID FORMATS:
+- TC-123: Test Case
+- TR-39: Test Run
+- DF-100: Defect
+- RQ-15: Requirement
+- TS-5: Test Suite
+- CL-3: Test Cycle
+- RL-1: Release
+- BL-2: Build
+
+NOT SUPPORTED (no ID prefix):
+- Test Logs: Get details from test run's 'Latest Test Log' field (contains Log Id, Status, Execution Start/End Date)
+- Modules: Use 'get_modules' tool instead
+
+Parameters:
+- entity_id: Entity ID with prefix (e.g., TC-123, RQ-15, DF-100, TR-39)
+
+Returns: Full entity details including all properties.
+
+Examples:
+- Find test case: entity_id='TC-123'
+- Find requirement: entity_id='RQ-15'
+- Find defect: entity_id='DF-100'
+- Find test run: entity_id='TR-39'
+""",
+               "args_schema": FindEntityById,
+               "ref": self.find_entity_by_id,
            }
-        ]
+        ]
+
+    # ==================== INDEXER METHODS ====================
+
|
+
def _index_tool_params(self, **kwargs) -> dict[str, tuple[type, Field]]:
|
|
2195
|
+
"""
|
|
2196
|
+
Returns a list of fields for index_data args schema.
|
|
2197
|
+
Defines three indexing modes: DQL query, module-based, and full project traversal.
|
|
2198
|
+
"""
|
|
2199
|
+
return {
|
|
2200
|
+
"chunking_tool": (Literal['markdown', ''], Field(
|
|
2201
|
+
description="Name of chunking tool for test case content",
|
|
2202
|
+
default='markdown')),
|
|
2203
|
+
"indexing_mode": (Literal['dql', 'module', 'full'], Field(
|
|
2204
|
+
description="Indexing mode: 'dql' - use DQL query (may have API limitations), "
|
|
2205
|
+
"'module' - index specific module/folder (most deterministic), "
|
|
2206
|
+
"'full' - traverse entire project with pagination",
|
|
2207
|
+
default='full')),
|
|
2208
|
+
"dql": (Optional[str], Field(
|
|
2209
|
+
description="DQL query for 'dql' mode. Example: \"Status = 'New' and Priority = 'High'\". "
|
|
2210
|
+
"Can also filter by module: \"Module in 'MD-7 Master Test Suite'\". "
|
|
2211
|
+
"Note: DQL via API may return incomplete results for complex queries.",
|
|
2212
|
+
default=None,
|
|
2213
|
+
json_schema_extra={'visible_when': {'field': 'indexing_mode', 'value': 'dql'}})),
|
|
2214
|
+
"module_name": (Optional[str], Field(
|
|
2215
|
+
description="Module/folder name for 'module' mode. Use the visible name from UI "
|
|
2216
|
+
"e.g., 'MD-7 Master Test Suite'. Most deterministic way to index a specific folder.",
|
|
2217
|
+
default=None,
|
|
2218
|
+
json_schema_extra={'visible_when': {'field': 'indexing_mode', 'value': 'module'}})),
|
|
2219
|
+
"extract_images": (Optional[bool], Field(
|
|
2220
|
+
description="Whether to extract and process images from test steps using LLM",
|
|
2221
|
+
default=False)),
|
|
2222
|
+
"image_prompt": (Optional[str], Field(
|
|
2223
|
+
description="Custom prompt for image analysis (only used if extract_images=True)",
|
|
2224
|
+
default="Analyze this image from a test case step. Describe what the image shows, including any UI elements, text, buttons, or visual indicators. Focus on elements relevant to testing.",
|
|
2225
|
+
json_schema_extra={'visible_when': {'field': 'extract_images', 'value': True}})),
|
|
2226
|
+
}
|
|
2227
|
+
|
|
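A minimal sketch of index_data kwargs for the three indexing modes defined above (the module name and DQL are the example values from the field descriptions):

    full_kwargs   = {"indexing_mode": "full", "chunking_tool": "markdown"}
    module_kwargs = {"indexing_mode": "module", "module_name": "MD-7 Master Test Suite"}
    dql_kwargs    = {"indexing_mode": "dql", "dql": "Status = 'New' and Priority = 'High'"}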
2228
|
+
def _base_loader(self, **kwargs) -> Generator[Document, None, None]:
|
|
2229
|
+
"""
|
|
2230
|
+
Base loader for QTest test cases. Supports three indexing modes:
|
|
2231
|
+
- dql: Use DQL query (may have API limitations for complex queries)
|
|
2232
|
+
- module: Index specific module/folder by name (most deterministic)
|
|
2233
|
+
- full: Full project traversal with pagination
|
|
2234
|
+
"""
|
|
2235
|
+
self._chunking_tool = kwargs.get('chunking_tool', 'markdown')
|
|
2236
|
+
self._extract_images = kwargs.get('extract_images', False)
|
|
2237
|
+
self._image_prompt = kwargs.get('image_prompt', None)
|
|
2238
|
+
|
|
2239
|
+
indexing_mode = kwargs.get('indexing_mode', 'full')
|
|
2240
|
+
dql = kwargs.get('dql')
|
|
2241
|
+
module_name = kwargs.get('module_name')
|
|
2242
|
+
|
|
2243
|
+
logger.info(f"Starting QTest indexing in '{indexing_mode}' mode for project {self.qtest_project_id}")
|
|
2244
|
+
|
|
2245
|
+
if indexing_mode == 'dql':
|
|
2246
|
+
if not dql:
|
|
2247
|
+
raise ToolException("DQL query is required for 'dql' indexing mode")
|
|
2248
|
+
yield from self._load_test_cases_by_dql(dql)
|
|
2249
|
+
elif indexing_mode == 'module':
|
|
2250
|
+
if not module_name:
|
|
2251
|
+
raise ToolException("module_name is required for 'module' indexing mode")
|
|
2252
|
+
# Resolve module name to internal ID
|
|
2253
|
+
module_id = self._resolve_module_name_to_id(module_name)
|
|
2254
|
+
if not module_id:
|
|
2255
|
+
raise ToolException(
|
|
2256
|
+
f"Module '{module_name}' not found in project {self.qtest_project_id}. "
|
|
2257
|
+
f"Use get_modules tool to see available modules."
|
|
2258
|
+
)
|
|
2259
|
+
yield from self._load_test_cases_by_module(module_id)
|
|
2260
|
+
else: # full mode
|
|
2261
|
+
yield from self._load_test_cases_full_project()
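For illustration, the three modes roughly correspond to calls like the following. This is a hedged sketch: `toolkit` is a placeholder for however the QTest toolkit's `index_data` entry point is exposed, which this hunk does not show.

```python
# Hypothetical calls illustrating the three indexing modes handled by _base_loader.
# `toolkit` is a placeholder for the configured QTest toolkit / API wrapper.

# 1. DQL mode: filter test cases with a qTest Data Query Language expression.
toolkit.index_data(indexing_mode="dql", dql="Status = 'New' and Priority = 'High'")

# 2. Module mode: index one folder by its visible name (most deterministic).
toolkit.index_data(indexing_mode="module", module_name="MD-7 Master Test Suite")

# 3. Full mode (default): paginate through every test case in the project.
toolkit.index_data(indexing_mode="full", extract_images=True)
```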
+
+    def _resolve_module_name_to_id(self, module_name: str) -> Optional[int]:
+        """
+        Resolve a module name (e.g., 'MD-7 Master Test Suite') to its internal ID.
+        Uses the same approach as __build_body_for_create_test_case.
+        """
+        modules = self._parse_modules()
+        for module in modules:
+            if module.get('full_module_name') == module_name:
+                return module.get('module_id')
+        return None
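This lookup only works if `_parse_modules` returns entries keyed by `full_module_name` and `module_id`. A sketch of the assumed shape, inferred from the lookups in this method and `_get_module_name` rather than from qTest documentation:

```python
# Assumed shape of the entries returned by self._parse_modules(); keys and
# values here are inferred/made up for illustration.
modules = [
    {"module_id": 4051, "module_name": "Master Test Suite",
     "full_module_name": "MD-7 Master Test Suite"},
    {"module_id": 4052, "module_name": "Regression",
     "full_module_name": "MD-8 Regression"},
]

def resolve(name: str):
    # Same matching rule as _resolve_module_name_to_id: exact full name match.
    return next((m["module_id"] for m in modules if m["full_module_name"] == name), None)

assert resolve("MD-7 Master Test Suite") == 4051
```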
+
+    def _load_test_cases_by_dql(self, dql: str) -> Generator[Document, None, None]:
+        """Load test cases using DQL query."""
+        logger.info(f"Loading test cases by DQL: {dql}")
+        search_instance: SearchApi = swagger_client.SearchApi(self._client)
+        body = swagger_client.ArtifactSearchParams(
+            object_type='test-cases',
+            fields=['*'],
+            query=dql
+        )
+
+        page = 1
+        while True:
+            try:
+                response = search_instance.search_artifact(
+                    self.qtest_project_id,
+                    body,
+                    append_test_steps='true',
+                    include_external_properties='true',
+                    page_size=self.no_of_items_per_page,
+                    page=page
+                )
+
+                items = response.get('items', [])
+                if not items:
+                    break
+
+                for item in items:
+                    yield self._create_test_case_document(item)
+
+                # Check for next page
+                links = response.get('links', [])
+                has_next = any(link.get('rel') == 'next' for link in links)
+                if not has_next:
+                    break
+                page += 1
+
+            except ApiException as e:
+                stacktrace = format_exc()
+                logger.error(f"Error loading test cases by DQL: {stacktrace}")
+                raise ToolException(f"Failed to load test cases by DQL: {stacktrace}") from e
+
+    def _load_test_cases_by_module(self, module_id: int) -> Generator[Document, None, None]:
+        """Load test cases from a specific module/folder."""
+        logger.info(f"Loading test cases from module {module_id}")
+        test_case_api: TestCaseApi = self.__instantiate_test_api_instance()
+
+        page = 1
+        while True:
+            try:
+                response = test_case_api.get_test_cases(
+                    self.qtest_project_id,
+                    parent_id=module_id,
+                    page=page,
+                    size=self.no_of_items_per_page,
+                    expand_steps='true'
+                )
+
+                if not response:
+                    break
+
+                # Convert response objects to dicts if needed
+                items = [item.to_dict() if hasattr(item, 'to_dict') else item for item in response]
+
+                if not items:
+                    break
+
+                for item in items:
+                    yield self._create_test_case_document(item)
+
+                if len(items) < self.no_of_items_per_page:
+                    break
+                page += 1
+
+            except ApiException as e:
+                stacktrace = format_exc()
+                logger.error(f"Error loading test cases from module: {stacktrace}")
+                raise ToolException(f"Failed to load test cases from module {module_id}: {stacktrace}") from e
+
+    def _load_test_cases_full_project(self) -> Generator[Document, None, None]:
+        """Load all test cases from the project using pagination."""
+        logger.info(f"Loading all test cases from project {self.qtest_project_id}")
+        test_case_api: TestCaseApi = self.__instantiate_test_api_instance()
+
+        page = 1
+        while True:
+            try:
+                response = test_case_api.get_test_cases(
+                    self.qtest_project_id,
+                    page=page,
+                    size=self.no_of_items_per_page,
+                    expand_steps='true'
+                )
+
+                if not response:
+                    break
+
+                # Convert response objects to dicts if needed
+                items = [item.to_dict() if hasattr(item, 'to_dict') else item for item in response]
+
+                if not items:
+                    break
+
+                for item in items:
+                    yield self._create_test_case_document(item)
+
+                if len(items) < self.no_of_items_per_page:
+                    break
+                page += 1
+
+            except ApiException as e:
+                stacktrace = format_exc()
+                logger.error(f"Error loading test cases: {stacktrace}")
+                raise ToolException(f"Failed to load test cases from project: {stacktrace}") from e
+
+    def _create_test_case_document(self, item: dict) -> Document:
+        """Create a Document from a test case item with basic metadata for duplicate detection."""
+
+        # Extract basic identifiers
+        test_case_id = item.get('pid', '')
+        qtest_id = item.get('id', '')
+
+        # Get updated timestamp for duplicate detection
+        # Try different timestamp fields
+        updated_on = (
+            item.get('last_modified_date') or
+            item.get('updated_date') or
+            item.get('created_date') or
+            ''
+        )
+
+        # Get module/folder info
+        parent_id = item.get('parent_id')
+        module_name = self._get_module_name(parent_id) if parent_id else ''
+
+        # Build basic metadata for the document
+        metadata = {
+            'id': test_case_id,
+            'qtest_id': qtest_id,
+            'updated_on': updated_on,
+            'name': item.get('name', ''),
+            'parent_id': parent_id,
+            'module_name': module_name,
+            'project_id': self.qtest_project_id,
+            'type': 'test_case',
+            # Store full item for later processing in _extend_data
+            '_raw_item': item,
+        }
+
+        return Document(page_content="", metadata=metadata)
+
+    def _get_module_name(self, module_id: int) -> str:
+        """Get module name by ID from cached modules."""
+        if self._modules_cache is None:
+            self._parse_modules()
+
+        for module in self._modules_cache or []:
+            if module.get('module_id') == module_id:
+                return module.get('full_module_name', module.get('module_name', ''))
+        return ''
+
+    def _extend_data(self, documents: Generator[Document, None, None]) -> Generator[Document, None, None]:
+        """
+        Extend base documents with full content formatted as markdown.
+        This is called after duplicate detection, so we only process documents that need indexing.
+        """
+
+        for document in documents:
+            try:
+                raw_item = document.metadata.pop('_raw_item', None)
+                if not raw_item:
+                    yield document
+                    continue
+
+                # Build markdown content for the test case
+                content = self._format_test_case_as_markdown(raw_item)
+
+                # Store content for chunking
+                document.metadata[IndexerKeywords.CONTENT_IN_BYTES.value] = content.encode('utf-8')
+                document.metadata[IndexerKeywords.CONTENT_FILE_NAME.value] = f"test_case{file_extension_by_chunker(self._chunking_tool)}"
+
+                # Add additional metadata from properties
+                for prop in raw_item.get('properties', []):
+                    field_name = prop.get('field_name')
+                    if field_name and field_name not in document.metadata:
+                        document.metadata[field_name.lower().replace(' ', '_')] = self.__format_property_value(prop)
+
+            except Exception as e:
+                logger.error(f"Failed to extend document {document.metadata.get('id')}: {e}")
+
+            yield document
+
+    def _format_test_case_as_markdown(self, item: dict) -> str:
+        """Format a test case as markdown for better semantic search."""
+
+        lines = []
+
+        # Header
+        test_id = item.get('pid', 'Unknown')
+        name = item.get('name', 'Untitled')
+        lines.append(f"# Test Case: {test_id} - {name}")
+        lines.append("")
+
+        # Module/Folder
+        parent_id = item.get('parent_id')
+        if parent_id:
+            module_name = self._get_module_name(parent_id)
+            if module_name:
+                lines.append("## Module")
+                lines.append(module_name)
+                lines.append("")
+
+        # Description
+        description = item.get('description', '')
+        if description:
+            description = self._clean_html_content(
+                description,
+                self._extract_images,
+                self._image_prompt
+            )
+            lines.append("## Description")
+            lines.append(description)
+            lines.append("")
+
+        # Precondition
+        precondition = item.get('precondition', '')
+        if precondition:
+            precondition = self._clean_html_content(
+                precondition,
+                self._extract_images,
+                self._image_prompt
+            )
+            lines.append("## Precondition")
+            lines.append(precondition)
+            lines.append("")
+
+        # Properties (Status, Type, Priority, etc.)
+        properties = item.get('properties', [])
+        if properties:
+            lines.append("## Properties")
+            for prop in properties:
+                field_name = prop.get('field_name', '')
+                field_value = self.__format_property_value(prop)
+                if field_name and field_value:
+                    if isinstance(field_value, list):
+                        field_value = ', '.join(str(v) for v in field_value)
+                    lines.append(f"- **{field_name}**: {field_value}")
+            lines.append("")
+
+        # Test Steps
+        test_steps = item.get('test_steps', [])
+        if test_steps:
+            lines.append("## Test Steps")
+            lines.append("")
+
+            for idx, step in enumerate(test_steps, 1):
+                step_desc = step.get('description', '')
+                step_expected = step.get('expected', '')
+
+                # Clean HTML content (processes images first, then strips tags)
+                step_desc = self._clean_html_content(
+                    step_desc,
+                    self._extract_images,
+                    self._image_prompt
+                )
+                step_expected = self._clean_html_content(
+                    step_expected,
+                    self._extract_images,
+                    self._image_prompt
+                )
+
+                lines.append(f"### Step {idx}")
+                if step_desc:
+                    lines.append(f"**Action:** {step_desc}")
+                if step_expected:
+                    lines.append(f"**Expected Result:** {step_expected}")
+                lines.append("")
+
+        return '\n'.join(lines)
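Given a small test case, the formatter above produces markdown along these lines (illustrative content, with HTML already cleaned; exact blank-line placement follows the appends in the code):

```python
# Illustrative output of _format_test_case_as_markdown for a minimal test case.
example_markdown = """# Test Case: TC-123 - Login works

## Module
MD-7 Master Test Suite

## Properties
- **Status**: New
- **Priority**: High

## Test Steps

### Step 1
**Action:** Open the login page and enter valid credentials
**Expected Result:** The dashboard is displayed
"""
print(example_markdown)
```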
+
+    def _process_document(self, base_document: Document) -> Generator[Document, None, None]:
+        """
+        Process a base document to extract dependent documents (images).
+        Currently yields nothing as image content is inline in the markdown.
+        Can be extended to yield separate image documents if needed.
+        """
+        # For now, images are processed inline in the markdown content.
+        # If separate image documents are needed in the future, they can be yielded here.
+        yield from ()