alita-sdk 0.3.257__py3-none-any.whl → 0.3.584__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3794 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +11 -0
- alita_sdk/configurations/ado.py +148 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +76 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +81 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +323 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +72 -3
- alita_sdk/configurations/report_portal.py +115 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +167 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +101 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +21 -4
- alita_sdk/runtime/clients/client.py +458 -67
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +352 -0
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +183 -43
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +189 -41
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +493 -105
- alita_sdk/runtime/langchain/utils.py +118 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +28 -0
- alita_sdk/runtime/toolkits/application.py +14 -4
- alita_sdk/runtime/toolkits/artifact.py +25 -9
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +782 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +314 -70
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +24 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +367 -33
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +218 -0
- alita_sdk/runtime/tools/llm.py +1032 -177
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +69 -65
- alita_sdk/runtime/tools/vectorstore_base.py +163 -90
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +48 -0
- alita_sdk/tools/__init__.py +135 -37
- alita_sdk/tools/ado/__init__.py +2 -2
- alita_sdk/tools/ado/repos/__init__.py +16 -19
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -20
- alita_sdk/tools/ado/test_plan/__init__.py +27 -8
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -28
- alita_sdk/tools/ado/wiki/__init__.py +28 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -40
- alita_sdk/tools/ado/work_item/__init__.py +28 -12
- alita_sdk/tools/ado/work_item/ado_wrapper.py +95 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +13 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +15 -11
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +14 -8
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +454 -110
- alita_sdk/tools/bitbucket/__init__.py +28 -19
- alita_sdk/tools/bitbucket/api_wrapper.py +285 -27
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +41 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +18 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +2 -1
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +12 -7
- alita_sdk/tools/cloud/azure/__init__.py +12 -7
- alita_sdk/tools/cloud/gcp/__init__.py +12 -7
- alita_sdk/tools/cloud/k8s/__init__.py +12 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +21 -13
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +22 -14
- alita_sdk/tools/confluence/api_wrapper.py +197 -58
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +546 -64
- alita_sdk/tools/figma/__init__.py +60 -11
- alita_sdk/tools/figma/api_wrapper.py +1400 -167
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +18 -17
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +81 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +19 -13
- alita_sdk/tools/gitlab/api_wrapper.py +256 -80
- alita_sdk/tools/gitlab_org/__init__.py +14 -10
- alita_sdk/tools/google/bigquery/__init__.py +14 -13
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +21 -11
- alita_sdk/tools/jira/__init__.py +22 -11
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +38 -14
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1357 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +40 -45
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/qtest/__init__.py +22 -14
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +13 -10
- alita_sdk/tools/report_portal/__init__.py +23 -16
- alita_sdk/tools/salesforce/__init__.py +22 -16
- alita_sdk/tools/servicenow/__init__.py +21 -16
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +17 -14
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +13 -8
- alita_sdk/tools/sql/__init__.py +22 -19
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +21 -13
- alita_sdk/tools/testrail/__init__.py +13 -11
- alita_sdk/tools/testrail/api_wrapper.py +214 -46
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +241 -55
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +18 -14
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +12 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +16 -9
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +30 -15
- alita_sdk/tools/zephyr_essential/__init__.py +16 -10
- alita_sdk/tools/zephyr_essential/api_wrapper.py +297 -54
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +13 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +39 -31
- alita_sdk/tools/zephyr_squad/__init__.py +12 -7
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/METADATA +184 -37
- alita_sdk-0.3.584.dist-info/RECORD +452 -0
- alita_sdk-0.3.584.dist-info/entry_points.txt +2 -0
- alita_sdk/tools/bitbucket/tools.py +0 -304
- alita_sdk-0.3.257.dist-info/RECORD +0 -343
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/top_level.txt +0 -0
alita_sdk/tools/openapi/api_wrapper.py (new file)

@@ -0,0 +1,1357 @@

```python
import json
import logging
import re
from urllib.parse import urlencode
from typing import Annotated, Any, Callable, Optional
import copy

import yaml
from langchain_core.tools import ToolException
from pydantic import BaseModel, BeforeValidator, ConfigDict, Field, PrivateAttr, create_model
from requests_openapi import Client, Operation

from ..elitea_base import BaseToolApiWrapper
from ..utils import clean_string


def _coerce_empty_string_to_none(v: Any) -> Any:
    """Convert empty strings to None for optional fields.

    This handles UI/pipeline inputs where empty fields are sent as '' instead of null.
    """
    if v == '':
        return None
    return v


def _coerce_headers_value(v: Any) -> Optional[dict]:
    """Convert headers value to dict, handling empty strings and JSON strings.

    This handles UI/pipeline inputs where:
    - Empty fields are sent as '' instead of null
    - Dict values may be sent as JSON strings like '{}'
    """
    if v is None or v == '':
        return None
    if isinstance(v, dict):
        return v
    if isinstance(v, str):
        try:
            parsed = json.loads(v)
            if isinstance(parsed, dict):
                return parsed
        except json.JSONDecodeError:
            # Intentionally ignore JSON decode errors - fall back to returning
            # the original value which will be validated later in _execute
            pass
    # Return as-is, will be validated later in _execute
    return v


logger = logging.getLogger(__name__)


# Base class for dynamically created parameter models
# Supports populate_by_name so both alias (original param name) and field name (sanitized) work
class _BaseParamsModel(BaseModel):
    model_config = ConfigDict(populate_by_name=True)


def _sanitize_param_name(name: str) -> str:
    """Sanitize OpenAPI parameter names for use as Python/Pydantic identifiers.

    Pydantic's create_model requires valid Python identifiers as field names.
    This function handles:
    - Dots in names (e.g., 'searchCriteria.minTime' -> 'searchCriteria_minTime')
    - Dollar sign prefix (e.g., '$top' -> 'dollar_top')
    - Other special characters

    Returns the sanitized name suitable for use as a Pydantic field name.
    """
    if not name:
        return name

    sanitized = name
    # Replace dots with underscores
    sanitized = sanitized.replace('.', '_')
    # Handle $ prefix (common in OData APIs like Azure DevOps)
    if sanitized.startswith('$'):
        sanitized = 'dollar_' + sanitized[1:]
    # Replace any remaining invalid characters with underscores
    # Python identifiers: [a-zA-Z_][a-zA-Z0-9_]*
    sanitized = re.sub(r'[^a-zA-Z0-9_]', '_', sanitized)
    # Ensure it doesn't start with a digit
    if sanitized and sanitized[0].isdigit():
        sanitized = '_' + sanitized

    return sanitized


def _raise_openapi_tool_exception(
    *,
    code: str,
    message: str,
    operation_id: Optional[str] = None,
    url: Optional[str] = None,
    retryable: Optional[bool] = None,
    missing_inputs: Optional[list[str]] = None,
    http_status: Optional[int] = None,
    http_body_preview: Optional[str] = None,
    details: Optional[dict[str, Any]] = None,
) -> None:
    payload: dict[str, Any] = {
        "tool": "openapi",
        "code": code,
        "message": message,
    }
    if operation_id:
        payload["operation_id"] = operation_id
    if url:
        payload["url"] = url
    if retryable is not None:
        payload["retryable"] = bool(retryable)
    if missing_inputs:
        payload["missing_inputs"] = list(missing_inputs)
    if http_status is not None:
        payload["http_status"] = int(http_status)
    if http_body_preview:
        payload["http_body_preview"] = str(http_body_preview)
    if details:
        payload["details"] = details

    try:
        details_json = json.dumps(payload, ensure_ascii=False, indent=2)
    except Exception:
        details_json = str(payload)

    raise ToolException(f"{message}\n\nToolError:\n{details_json}")


def _truncate(text: Any, max_len: int) -> str:
    if text is None:
        return ""
    s = str(text)
    if len(s) <= max_len:
        return s
    return s[:max_len] + "…"


def _is_retryable_http_status(status_code: Optional[int]) -> bool:
    if status_code is None:
        return False
    return int(status_code) in (408, 425, 429, 500, 502, 503, 504)
```
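The coercion and sanitization rules above are easiest to see with concrete inputs. A minimal, illustrative sketch (not part of the diff; it assumes alita-sdk 0.3.584 is installed so the module is importable, and it calls module-private helpers):

```python
# Illustrative check of the coercion/sanitization helpers shown above.
from alita_sdk.tools.openapi.api_wrapper import (
    _coerce_headers_value,
    _sanitize_param_name,
)

# UI/pipeline inputs often arrive as strings; headers are coerced to a dict when possible.
assert _coerce_headers_value("") is None
assert _coerce_headers_value('{"X-Trace-Id": "123"}') == {"X-Trace-Id": "123"}
assert _coerce_headers_value("not json") == "not json"  # left as-is for _execute to validate

# OpenAPI parameter names become valid Pydantic field names.
assert _sanitize_param_name("$top") == "dollar_top"
assert _sanitize_param_name("searchCriteria.minTime") == "searchCriteria_minTime"
assert _sanitize_param_name("api-version") == "api_version"
```

The new module continues below.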
def _resolve_server_variables(url: str, variables: Optional[dict]) -> tuple[str, list[str]]:
|
|
146
|
+
"""
|
|
147
|
+
Substitute server variables in URL with their default values.
|
|
148
|
+
|
|
149
|
+
Per OpenAPI 3.x spec, server URLs can contain variables like:
|
|
150
|
+
https://dev.azure.com/{organization}/{project}
|
|
151
|
+
|
|
152
|
+
The variables object provides default values:
|
|
153
|
+
{
|
|
154
|
+
"organization": {"default": "MyOrg"},
|
|
155
|
+
"project": {"default": "MyProject"}
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
Args:
|
|
159
|
+
url: Server URL potentially containing {variable} placeholders
|
|
160
|
+
variables: Dict of variable definitions with 'default' values
|
|
161
|
+
|
|
162
|
+
Returns:
|
|
163
|
+
Tuple of (resolved_url, list of variable names that could not be resolved)
|
|
164
|
+
"""
|
|
165
|
+
if not url:
|
|
166
|
+
return url, []
|
|
167
|
+
|
|
168
|
+
result = url
|
|
169
|
+
missing_defaults: list[str] = []
|
|
170
|
+
|
|
171
|
+
if variables and isinstance(variables, dict):
|
|
172
|
+
for var_name, var_def in variables.items():
|
|
173
|
+
placeholder = '{' + str(var_name) + '}'
|
|
174
|
+
if placeholder not in result:
|
|
175
|
+
continue # Variable not used in URL
|
|
176
|
+
|
|
177
|
+
if not isinstance(var_def, dict):
|
|
178
|
+
missing_defaults.append(var_name)
|
|
179
|
+
continue
|
|
180
|
+
|
|
181
|
+
default_value = var_def.get('default')
|
|
182
|
+
if default_value is not None:
|
|
183
|
+
result = result.replace(placeholder, str(default_value))
|
|
184
|
+
else:
|
|
185
|
+
# Variable defined but no default provided
|
|
186
|
+
missing_defaults.append(var_name)
|
|
187
|
+
|
|
188
|
+
# Check for any remaining {variable} placeholders that weren't in the variables dict
|
|
189
|
+
# This catches cases where the URL has placeholders but no variables definition
|
|
190
|
+
remaining_placeholders = re.findall(r'\{([^}]+)\}', result)
|
|
191
|
+
for placeholder_name in remaining_placeholders:
|
|
192
|
+
if placeholder_name not in missing_defaults:
|
|
193
|
+
missing_defaults.append(placeholder_name)
|
|
194
|
+
|
|
195
|
+
return result, missing_defaults
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def _get_base_url_from_spec(spec: dict) -> str:
|
|
199
|
+
"""
|
|
200
|
+
Extract base URL from OpenAPI spec's servers array.
|
|
201
|
+
|
|
202
|
+
Handles server variables by substituting their default values.
|
|
203
|
+
For example:
|
|
204
|
+
url: "https://dev.azure.com/{organization}/{project}"
|
|
205
|
+
variables:
|
|
206
|
+
organization: {default: "MyOrg"}
|
|
207
|
+
project: {default: "MyProject"}
|
|
208
|
+
|
|
209
|
+
Returns: "https://dev.azure.com/MyOrg/MyProject"
|
|
210
|
+
|
|
211
|
+
Note: Unresolved variables are logged but not raised here - this function
|
|
212
|
+
is used for display/debugging purposes. The main validation happens in
|
|
213
|
+
_resolve_server_variables_in_spec() during initialization.
|
|
214
|
+
"""
|
|
215
|
+
servers = spec.get("servers") if isinstance(spec, dict) else None
|
|
216
|
+
if isinstance(servers, list) and servers:
|
|
217
|
+
first = servers[0]
|
|
218
|
+
if isinstance(first, dict) and isinstance(first.get("url"), str):
|
|
219
|
+
url = first["url"].strip()
|
|
220
|
+
variables = first.get("variables")
|
|
221
|
+
resolved_url, _ = _resolve_server_variables(url, variables)
|
|
222
|
+
return resolved_url
|
|
223
|
+
return ""
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def _is_absolute_url(url: str) -> bool:
|
|
227
|
+
return isinstance(url, str) and (url.startswith("http://") or url.startswith("https://"))
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
def _resolve_server_variables_in_spec(spec: dict) -> tuple[dict, Optional[dict]]:
|
|
231
|
+
"""
|
|
232
|
+
Resolve server variables in the OpenAPI spec by substituting their default values.
|
|
233
|
+
|
|
234
|
+
This modifies the spec's servers[].url to replace {variable} placeholders with
|
|
235
|
+
the default values from servers[].variables. This is necessary because the
|
|
236
|
+
requests_openapi library doesn't handle server variables - it uses the raw URL.
|
|
237
|
+
|
|
238
|
+
Example transformation:
|
|
239
|
+
url: "https://dev.azure.com/{organization}/{project}"
|
|
240
|
+
variables:
|
|
241
|
+
organization: {default: "MyOrg"}
|
|
242
|
+
project: {default: "MyProject"}
|
|
243
|
+
|
|
244
|
+
Becomes:
|
|
245
|
+
url: "https://dev.azure.com/MyOrg/MyProject"
|
|
246
|
+
|
|
247
|
+
Args:
|
|
248
|
+
spec: OpenAPI specification dict
|
|
249
|
+
|
|
250
|
+
Returns:
|
|
251
|
+
Tuple of (spec, unresolved_info) where:
|
|
252
|
+
- spec: The same spec dict with server URLs resolved (modified in place)
|
|
253
|
+
- unresolved_info: None if all variables resolved, or dict with error details:
|
|
254
|
+
{
|
|
255
|
+
"url": original URL with placeholders,
|
|
256
|
+
"missing_vars": list of variable names without defaults,
|
|
257
|
+
"server_index": index of problematic server
|
|
258
|
+
}
|
|
259
|
+
"""
|
|
260
|
+
if not isinstance(spec, dict):
|
|
261
|
+
return spec, None
|
|
262
|
+
|
|
263
|
+
servers = spec.get("servers")
|
|
264
|
+
if not isinstance(servers, list):
|
|
265
|
+
return spec, None
|
|
266
|
+
|
|
267
|
+
for i, server in enumerate(servers):
|
|
268
|
+
if not isinstance(server, dict):
|
|
269
|
+
continue
|
|
270
|
+
url = server.get("url")
|
|
271
|
+
if not isinstance(url, str):
|
|
272
|
+
continue
|
|
273
|
+
variables = server.get("variables")
|
|
274
|
+
|
|
275
|
+
resolved_url, missing_vars = _resolve_server_variables(url, variables)
|
|
276
|
+
|
|
277
|
+
if missing_vars:
|
|
278
|
+
# Don't raise here - return the info so it can be raised at execution time
|
|
279
|
+
# This allows toolkit creation to succeed and tools to be listed
|
|
280
|
+
logger.warning(
|
|
281
|
+
f"Server URL '{url}' has variables without defaults: {missing_vars}. "
|
|
282
|
+
f"Tool execution will fail until defaults are provided in the spec."
|
|
283
|
+
)
|
|
284
|
+
return spec, {
|
|
285
|
+
"url": url,
|
|
286
|
+
"missing_vars": missing_vars,
|
|
287
|
+
"server_index": i,
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
if resolved_url != url:
|
|
291
|
+
server["url"] = resolved_url
|
|
292
|
+
logger.debug(f"Resolved server URL: '{url}' -> '{resolved_url}'")
|
|
293
|
+
|
|
294
|
+
return spec, None
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def _apply_base_url_override(spec: dict, base_url_override: str) -> dict:
|
|
298
|
+
"""Normalize server URL when OpenAPI spec uses relative servers.
|
|
299
|
+
|
|
300
|
+
Some public specs (including Petstore) use relative server URLs like "/api/v3".
|
|
301
|
+
To execute requests against a real host, we can provide a base URL override like
|
|
302
|
+
"https://petstore3.swagger.io" and convert the first server URL to an absolute URL.
|
|
303
|
+
"""
|
|
304
|
+
if not isinstance(spec, dict):
|
|
305
|
+
return spec
|
|
306
|
+
if not isinstance(base_url_override, str) or not base_url_override.strip():
|
|
307
|
+
return spec
|
|
308
|
+
base_url_override = base_url_override.strip().rstrip("/")
|
|
309
|
+
|
|
310
|
+
servers = spec.get("servers")
|
|
311
|
+
if not isinstance(servers, list) or not servers:
|
|
312
|
+
spec["servers"] = [{"url": base_url_override}]
|
|
313
|
+
return spec
|
|
314
|
+
|
|
315
|
+
first = servers[0]
|
|
316
|
+
if not isinstance(first, dict):
|
|
317
|
+
return spec
|
|
318
|
+
server_url = first.get("url")
|
|
319
|
+
if not isinstance(server_url, str):
|
|
320
|
+
return spec
|
|
321
|
+
server_url = server_url.strip()
|
|
322
|
+
if not server_url:
|
|
323
|
+
first["url"] = base_url_override
|
|
324
|
+
return spec
|
|
325
|
+
if _is_absolute_url(server_url):
|
|
326
|
+
return spec
|
|
327
|
+
|
|
328
|
+
# Relative server URL ("/api/v3" or "api/v3") -> join with base host.
|
|
329
|
+
if not server_url.startswith("/"):
|
|
330
|
+
server_url = "/" + server_url
|
|
331
|
+
first["url"] = base_url_override + server_url
|
|
332
|
+
return spec
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
def _join_base_and_path(base_url: str, path: str) -> str:
|
|
336
|
+
base = (base_url or "").rstrip("/")
|
|
337
|
+
p = (path or "")
|
|
338
|
+
if not p.startswith("/"):
|
|
339
|
+
p = "/" + p
|
|
340
|
+
if not base:
|
|
341
|
+
return p
|
|
342
|
+
return base + p
|
|
343
|
+
|
|
344
|
+
|
|
345
|
+
# Maximum length for generated operationIds (tool names)
|
|
346
|
+
_MAX_OPERATION_ID_LENGTH = 64
|
|
347
|
+
|
|
348
|
+
# Map HTTP methods to semantic action names for better readability
|
|
349
|
+
_METHOD_TO_ACTION = {
|
|
350
|
+
'get': 'get',
|
|
351
|
+
'post': 'create',
|
|
352
|
+
'put': 'update',
|
|
353
|
+
'patch': 'update',
|
|
354
|
+
'delete': 'delete',
|
|
355
|
+
'head': 'head',
|
|
356
|
+
'options': 'options',
|
|
357
|
+
'trace': 'trace',
|
|
358
|
+
}
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def _generate_operation_id(method: str, path: str) -> str:
|
|
362
|
+
"""
|
|
363
|
+
Generate an operationId from HTTP method and path when not provided in spec.
|
|
364
|
+
|
|
365
|
+
Follows a pattern that produces readable, unique identifiers:
|
|
366
|
+
- Format: {action}_{path_segments}
|
|
367
|
+
- HTTP methods are mapped to semantic actions (POST→create, PUT/PATCH→update)
|
|
368
|
+
- Path parameters ({id}) become "by_{param}"
|
|
369
|
+
- Segments are joined with underscores
|
|
370
|
+
- Result is snake_case
|
|
371
|
+
- Truncated to _MAX_OPERATION_ID_LENGTH characters
|
|
372
|
+
|
|
373
|
+
Examples:
|
|
374
|
+
GET /users -> get_users
|
|
375
|
+
GET /users/{id} -> get_users_by_id
|
|
376
|
+
POST /users -> create_users
|
|
377
|
+
PUT /users/{id} -> update_users_by_id
|
|
378
|
+
PATCH /users/{id} -> update_users_by_id
|
|
379
|
+
DELETE /api/v1/items/{itemId} -> delete_api_v1_items_by_itemId
|
|
380
|
+
|
|
381
|
+
Args:
|
|
382
|
+
method: HTTP method (GET, POST, etc.)
|
|
383
|
+
path: URL path (e.g., /users/{id})
|
|
384
|
+
|
|
385
|
+
Returns:
|
|
386
|
+
Generated operationId string
|
|
387
|
+
"""
|
|
388
|
+
# Map HTTP method to semantic action
|
|
389
|
+
action = _METHOD_TO_ACTION.get(method.lower(), method.lower())
|
|
390
|
+
|
|
391
|
+
# Split path and process segments
|
|
392
|
+
segments = [s for s in path.split('/') if s]
|
|
393
|
+
processed_segments = []
|
|
394
|
+
|
|
395
|
+
for segment in segments:
|
|
396
|
+
# Check if it's a path parameter like {id} or {userId}
|
|
397
|
+
if segment.startswith('{') and segment.endswith('}'):
|
|
398
|
+
param_name = segment[1:-1] # Remove braces
|
|
399
|
+
processed_segments.append(f'by_{param_name}')
|
|
400
|
+
else:
|
|
401
|
+
# Regular segment - keep as is (already suitable for identifier)
|
|
402
|
+
# Replace any non-alphanumeric chars with underscore
|
|
403
|
+
clean_segment = re.sub(r'[^a-zA-Z0-9]', '_', segment)
|
|
404
|
+
if clean_segment:
|
|
405
|
+
processed_segments.append(clean_segment)
|
|
406
|
+
|
|
407
|
+
# Join: action_segment1_segment2_...
|
|
408
|
+
if processed_segments:
|
|
409
|
+
operation_id = f"{action}_{'_'.join(processed_segments)}"
|
|
410
|
+
else:
|
|
411
|
+
# Edge case: root path "/"
|
|
412
|
+
operation_id = f"{action}_root"
|
|
413
|
+
|
|
414
|
+
# Ensure valid Python identifier (no leading digits)
|
|
415
|
+
if operation_id[0].isdigit():
|
|
416
|
+
operation_id = '_' + operation_id
|
|
417
|
+
|
|
418
|
+
# Truncate if too long
|
|
419
|
+
if len(operation_id) > _MAX_OPERATION_ID_LENGTH:
|
|
420
|
+
# Start with a hard cut at the max length
|
|
421
|
+
truncated = operation_id[:_MAX_OPERATION_ID_LENGTH]
|
|
422
|
+
# Prefer truncating at a word boundary (underscore) if possible
|
|
423
|
+
last_underscore = truncated.rfind('_')
|
|
424
|
+
if last_underscore > 0:
|
|
425
|
+
truncated = truncated[:last_underscore]
|
|
426
|
+
# Ensure we don't end with an underscore after truncation
|
|
427
|
+
truncated = truncated.rstrip('_')
|
|
428
|
+
operation_id = truncated
|
|
429
|
+
|
|
430
|
+
return operation_id
|
|
431
|
+
|
|
432
|
+
|
|
433
|
+
def _ensure_operation_ids(spec: dict) -> dict:
|
|
434
|
+
"""
|
|
435
|
+
Ensure all operations in the spec have operationIds.
|
|
436
|
+
|
|
437
|
+
For operations missing operationId, generates one from method+path.
|
|
438
|
+
Handles deduplication by appending _2, _3, etc. for collisions.
|
|
439
|
+
|
|
440
|
+
Args:
|
|
441
|
+
spec: Parsed OpenAPI specification dict
|
|
442
|
+
|
|
443
|
+
Returns:
|
|
444
|
+
The same spec dict with operationIds injected where missing
|
|
445
|
+
"""
|
|
446
|
+
paths = spec.get('paths')
|
|
447
|
+
if not isinstance(paths, dict):
|
|
448
|
+
return spec
|
|
449
|
+
|
|
450
|
+
# Track all operationIds (existing + generated) to handle collisions
|
|
451
|
+
used_operation_ids: set[str] = set()
|
|
452
|
+
|
|
453
|
+
# First pass: collect existing operationIds
|
|
454
|
+
for path, path_item in paths.items():
|
|
455
|
+
if not isinstance(path_item, dict):
|
|
456
|
+
continue
|
|
457
|
+
for method, operation in path_item.items():
|
|
458
|
+
if not isinstance(operation, dict):
|
|
459
|
+
continue
|
|
460
|
+
# Skip non-operation keys like 'parameters', 'summary', etc.
|
|
461
|
+
if method.lower() not in ('get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace'):
|
|
462
|
+
continue
|
|
463
|
+
existing_id = operation.get('operationId')
|
|
464
|
+
if existing_id:
|
|
465
|
+
used_operation_ids.add(str(existing_id))
|
|
466
|
+
|
|
467
|
+
# Second pass: generate missing operationIds
|
|
468
|
+
for path, path_item in paths.items():
|
|
469
|
+
if not isinstance(path_item, dict):
|
|
470
|
+
continue
|
|
471
|
+
for method, operation in path_item.items():
|
|
472
|
+
if not isinstance(operation, dict):
|
|
473
|
+
continue
|
|
474
|
+
# Skip non-operation keys
|
|
475
|
+
if method.lower() not in ('get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace'):
|
|
476
|
+
continue
|
|
477
|
+
|
|
478
|
+
if operation.get('operationId'):
|
|
479
|
+
continue # Already has operationId
|
|
480
|
+
|
|
481
|
+
# Generate operationId
|
|
482
|
+
base_id = _generate_operation_id(method, path)
|
|
483
|
+
|
|
484
|
+
# Handle collisions by appending suffix
|
|
485
|
+
final_id = base_id
|
|
486
|
+
counter = 2
|
|
487
|
+
while final_id in used_operation_ids:
|
|
488
|
+
suffix = f'_{counter}'
|
|
489
|
+
# Ensure we don't exceed max length with suffix
|
|
490
|
+
max_base_len = _MAX_OPERATION_ID_LENGTH - len(suffix)
|
|
491
|
+
truncated_base = base_id[:max_base_len]
|
|
492
|
+
final_id = f'{truncated_base}{suffix}'
|
|
493
|
+
counter += 1
|
|
494
|
+
|
|
495
|
+
operation['operationId'] = final_id
|
|
496
|
+
used_operation_ids.add(final_id)
|
|
497
|
+
logger.debug(f"Generated operationId '{final_id}' for {method.upper()} {path}")
|
|
498
|
+
|
|
499
|
+
return spec
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
def _parse_openapi_spec(spec: str | dict) -> dict:
|
|
503
|
+
if isinstance(spec, dict):
|
|
504
|
+
return spec
|
|
505
|
+
if not isinstance(spec, str) or not spec.strip():
|
|
506
|
+
_raise_openapi_tool_exception(code="missing_spec", message="OpenAPI spec is required")
|
|
507
|
+
|
|
508
|
+
try:
|
|
509
|
+
parsed = json.loads(spec)
|
|
510
|
+
except json.JSONDecodeError:
|
|
511
|
+
try:
|
|
512
|
+
parsed = yaml.safe_load(spec)
|
|
513
|
+
except yaml.YAMLError as e:
|
|
514
|
+
_raise_openapi_tool_exception(
|
|
515
|
+
code="invalid_spec",
|
|
516
|
+
message=f"Failed to parse OpenAPI spec as JSON or YAML: {e}",
|
|
517
|
+
details={"error": str(e)},
|
|
518
|
+
)
|
|
519
|
+
|
|
520
|
+
if not isinstance(parsed, dict):
|
|
521
|
+
_raise_openapi_tool_exception(code="invalid_spec", message="OpenAPI spec must parse to an object")
|
|
522
|
+
return parsed
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def _guess_python_type(openapi_schema: dict | None) -> type:
|
|
526
|
+
schema_type = (openapi_schema or {}).get("type")
|
|
527
|
+
if schema_type == "integer":
|
|
528
|
+
return int
|
|
529
|
+
if schema_type == "number":
|
|
530
|
+
return float
|
|
531
|
+
if schema_type == "boolean":
|
|
532
|
+
return bool
|
|
533
|
+
# arrays/objects are left as string for now (simple start)
|
|
534
|
+
return str
|
|
535
|
+
|
|
536
|
+
|
|
537
|
+
def _schema_type_hint(openapi_schema: dict | None) -> str:
|
|
538
|
+
if not isinstance(openapi_schema, dict):
|
|
539
|
+
return ""
|
|
540
|
+
type_ = openapi_schema.get("type")
|
|
541
|
+
fmt = openapi_schema.get("format")
|
|
542
|
+
if not type_:
|
|
543
|
+
return ""
|
|
544
|
+
if fmt:
|
|
545
|
+
return f"{type_} ({fmt})"
|
|
546
|
+
return str(type_)
|
|
547
|
+
|
|
548
|
+
|
|
549
|
+
def _extract_request_body_example(spec: Optional[dict], op_raw: dict) -> Optional[str]:
|
|
550
|
+
request_body = op_raw.get("requestBody") or {}
|
|
551
|
+
content = request_body.get("content") or {}
|
|
552
|
+
for media_type in ("application/json", "application/*+json"):
|
|
553
|
+
mt = content.get(media_type)
|
|
554
|
+
if not isinstance(mt, dict):
|
|
555
|
+
continue
|
|
556
|
+
|
|
557
|
+
if "example" in mt:
|
|
558
|
+
try:
|
|
559
|
+
return json.dumps(mt["example"], indent=2)
|
|
560
|
+
except Exception:
|
|
561
|
+
return str(mt["example"])
|
|
562
|
+
|
|
563
|
+
examples = mt.get("examples")
|
|
564
|
+
if isinstance(examples, dict) and examples:
|
|
565
|
+
first = next(iter(examples.values()))
|
|
566
|
+
if isinstance(first, dict) and "value" in first:
|
|
567
|
+
try:
|
|
568
|
+
return json.dumps(first["value"], indent=2)
|
|
569
|
+
except Exception:
|
|
570
|
+
return str(first["value"])
|
|
571
|
+
|
|
572
|
+
schema = mt.get("schema")
|
|
573
|
+
if isinstance(schema, dict) and "example" in schema:
|
|
574
|
+
try:
|
|
575
|
+
return json.dumps(schema["example"], indent=2)
|
|
576
|
+
except Exception:
|
|
577
|
+
return str(schema["example"])
|
|
578
|
+
|
|
579
|
+
# No explicit example found; fall back to schema-based template.
|
|
580
|
+
if isinstance(schema, dict):
|
|
581
|
+
template_obj = _schema_to_template_json(
|
|
582
|
+
spec=spec,
|
|
583
|
+
schema=schema,
|
|
584
|
+
max_depth=3,
|
|
585
|
+
max_properties=20,
|
|
586
|
+
)
|
|
587
|
+
if template_obj is not None:
|
|
588
|
+
try:
|
|
589
|
+
return json.dumps(template_obj, indent=2)
|
|
590
|
+
except Exception:
|
|
591
|
+
return str(template_obj)
|
|
592
|
+
return None
|
|
593
|
+
|
|
594
|
+
|
|
595
|
+
def _schema_to_template_json(
|
|
596
|
+
spec: Any,
|
|
597
|
+
schema: dict,
|
|
598
|
+
max_depth: int,
|
|
599
|
+
max_properties: int,
|
|
600
|
+
) -> Any:
|
|
601
|
+
"""Build a schema-shaped JSON template from an OpenAPI/JSONSchema fragment.
|
|
602
|
+
|
|
603
|
+
This is a best-effort helper intended for LLM prompting. It avoids infinite recursion
|
|
604
|
+
(via depth and $ref cycle checks) and prefers enum/default/example when available.
|
|
605
|
+
"""
|
|
606
|
+
visited_refs: set[str] = set()
|
|
607
|
+
return _schema_node_to_value(
|
|
608
|
+
spec=spec if isinstance(spec, dict) else None,
|
|
609
|
+
node=schema,
|
|
610
|
+
depth=0,
|
|
611
|
+
max_depth=max_depth,
|
|
612
|
+
max_properties=max_properties,
|
|
613
|
+
visited_refs=visited_refs,
|
|
614
|
+
)
|
|
615
|
+
|
|
616
|
+
|
|
617
|
+
def _schema_node_to_value(
|
|
618
|
+
spec: Optional[dict],
|
|
619
|
+
node: Any,
|
|
620
|
+
depth: int,
|
|
621
|
+
max_depth: int,
|
|
622
|
+
max_properties: int,
|
|
623
|
+
visited_refs: set[str],
|
|
624
|
+
) -> Any:
|
|
625
|
+
if depth > max_depth:
|
|
626
|
+
return "<...>"
|
|
627
|
+
|
|
628
|
+
if not isinstance(node, dict):
|
|
629
|
+
return "<value>"
|
|
630
|
+
|
|
631
|
+
# Prefer explicit example/default/enum at this node.
|
|
632
|
+
if "example" in node:
|
|
633
|
+
return node.get("example")
|
|
634
|
+
if "default" in node:
|
|
635
|
+
return node.get("default")
|
|
636
|
+
if isinstance(node.get("enum"), list) and node.get("enum"):
|
|
637
|
+
return node.get("enum")[0]
|
|
638
|
+
|
|
639
|
+
ref = node.get("$ref")
|
|
640
|
+
if isinstance(ref, str):
|
|
641
|
+
if ref in visited_refs:
|
|
642
|
+
return "<ref-cycle>"
|
|
643
|
+
visited_refs.add(ref)
|
|
644
|
+
resolved = _resolve_ref(spec, ref)
|
|
645
|
+
if resolved is None:
|
|
646
|
+
return "<ref>"
|
|
647
|
+
return _schema_node_to_value(
|
|
648
|
+
spec=spec,
|
|
649
|
+
node=resolved,
|
|
650
|
+
depth=depth + 1,
|
|
651
|
+
max_depth=max_depth,
|
|
652
|
+
max_properties=max_properties,
|
|
653
|
+
visited_refs=visited_refs,
|
|
654
|
+
)
|
|
655
|
+
|
|
656
|
+
# Combinators
|
|
657
|
+
for key in ("oneOf", "anyOf"):
|
|
658
|
+
if isinstance(node.get(key), list) and node.get(key):
|
|
659
|
+
return _schema_node_to_value(
|
|
660
|
+
spec=spec,
|
|
661
|
+
node=node.get(key)[0],
|
|
662
|
+
depth=depth + 1,
|
|
663
|
+
max_depth=max_depth,
|
|
664
|
+
max_properties=max_properties,
|
|
665
|
+
visited_refs=visited_refs,
|
|
666
|
+
)
|
|
667
|
+
|
|
668
|
+
if isinstance(node.get("allOf"), list) and node.get("allOf"):
|
|
669
|
+
# Best-effort merge for objects.
|
|
670
|
+
merged: dict = {"type": "object", "properties": {}, "required": []}
|
|
671
|
+
for part in node.get("allOf"):
|
|
672
|
+
part_resolved = _schema_node_to_value(
|
|
673
|
+
spec=spec,
|
|
674
|
+
node=part,
|
|
675
|
+
depth=depth + 1,
|
|
676
|
+
max_depth=max_depth,
|
|
677
|
+
max_properties=max_properties,
|
|
678
|
+
visited_refs=visited_refs,
|
|
679
|
+
)
|
|
680
|
+
# If a part produced an object template, merge keys.
|
|
681
|
+
if isinstance(part_resolved, dict):
|
|
682
|
+
for k, v in part_resolved.items():
|
|
683
|
+
merged.setdefault(k, v)
|
|
684
|
+
return merged
|
|
685
|
+
|
|
686
|
+
type_ = node.get("type")
|
|
687
|
+
fmt = node.get("format")
|
|
688
|
+
|
|
689
|
+
if type_ == "object" or (type_ is None and ("properties" in node or "additionalProperties" in node)):
|
|
690
|
+
props = node.get("properties") if isinstance(node.get("properties"), dict) else {}
|
|
691
|
+
required = node.get("required") if isinstance(node.get("required"), list) else []
|
|
692
|
+
|
|
693
|
+
out: dict[str, Any] = {}
|
|
694
|
+
# Prefer required fields, then a small subset of optional fields for guidance.
|
|
695
|
+
keys: list[str] = []
|
|
696
|
+
for k in required:
|
|
697
|
+
if isinstance(k, str) and k in props:
|
|
698
|
+
keys.append(k)
|
|
699
|
+
if not keys:
|
|
700
|
+
keys = list(props.keys())[: min(3, len(props))]
|
|
701
|
+
else:
|
|
702
|
+
optional = [k for k in props.keys() if k not in keys]
|
|
703
|
+
keys.extend(optional[: max(0, min(3, len(optional)))])
|
|
704
|
+
|
|
705
|
+
keys = keys[:max_properties]
|
|
706
|
+
for k in keys:
|
|
707
|
+
out[k] = _schema_node_to_value(
|
|
708
|
+
spec=spec,
|
|
709
|
+
node=props.get(k),
|
|
710
|
+
depth=depth + 1,
|
|
711
|
+
max_depth=max_depth,
|
|
712
|
+
max_properties=max_properties,
|
|
713
|
+
visited_refs=visited_refs,
|
|
714
|
+
)
|
|
715
|
+
return out
|
|
716
|
+
|
|
717
|
+
if type_ == "array":
|
|
718
|
+
items = node.get("items")
|
|
719
|
+
return [
|
|
720
|
+
_schema_node_to_value(
|
|
721
|
+
spec=spec,
|
|
722
|
+
node=items,
|
|
723
|
+
depth=depth + 1,
|
|
724
|
+
max_depth=max_depth,
|
|
725
|
+
max_properties=max_properties,
|
|
726
|
+
visited_refs=visited_refs,
|
|
727
|
+
)
|
|
728
|
+
]
|
|
729
|
+
|
|
730
|
+
if type_ == "integer":
|
|
731
|
+
return 0
|
|
732
|
+
if type_ == "number":
|
|
733
|
+
return 0.0
|
|
734
|
+
if type_ == "boolean":
|
|
735
|
+
return False
|
|
736
|
+
if type_ == "string":
|
|
737
|
+
if fmt == "date-time":
|
|
738
|
+
return "2025-01-01T00:00:00Z"
|
|
739
|
+
if fmt == "date":
|
|
740
|
+
return "2025-01-01"
|
|
741
|
+
if fmt == "uuid":
|
|
742
|
+
return "00000000-0000-0000-0000-000000000000"
|
|
743
|
+
return "<string>"
|
|
744
|
+
|
|
745
|
+
# Unknown: return a placeholder
|
|
746
|
+
return "<value>"
|
|
747
|
+
|
|
748
|
+
|
|
749
|
+
def _resolve_ref(spec: Optional[dict], ref: str) -> Optional[dict]:
|
|
750
|
+
if not spec or not isinstance(ref, str):
|
|
751
|
+
return None
|
|
752
|
+
if not ref.startswith("#/"):
|
|
753
|
+
return None
|
|
754
|
+
# Only local refs supported for now.
|
|
755
|
+
parts = ref.lstrip("#/").split("/")
|
|
756
|
+
cur: Any = spec
|
|
757
|
+
for part in parts:
|
|
758
|
+
if not isinstance(cur, dict):
|
|
759
|
+
return None
|
|
760
|
+
cur = cur.get(part)
|
|
761
|
+
if isinstance(cur, dict):
|
|
762
|
+
return cur
|
|
763
|
+
return None
|
|
764
|
+
|
|
765
|
+
|
|
766
|
+
def _normalize_output(value: Any) -> str:
|
|
767
|
+
if value is None:
|
|
768
|
+
return ""
|
|
769
|
+
if isinstance(value, bytes):
|
|
770
|
+
try:
|
|
771
|
+
return value.decode("utf-8")
|
|
772
|
+
except Exception:
|
|
773
|
+
return value.decode("utf-8", errors="replace")
|
|
774
|
+
return str(value)
|
|
775
|
+
|
|
776
|
+
|
|
777
|
+
class OpenApiApiWrapper(BaseToolApiWrapper):
|
|
778
|
+
"""Builds callable tool functions for OpenAPI operations and executes them."""
|
|
779
|
+
|
|
780
|
+
spec: dict = Field(description="Parsed OpenAPI spec")
|
|
781
|
+
base_headers: dict[str, str] = Field(default_factory=dict)
|
|
782
|
+
|
|
783
|
+
_client: Client = PrivateAttr()
|
|
784
|
+
_op_meta: dict[str, dict] = PrivateAttr(default_factory=dict)
|
|
785
|
+
_tool_defs: list[dict[str, Any]] = PrivateAttr(default_factory=list)
|
|
786
|
+
_tool_ref_by_name: dict[str, Callable[..., str]] = PrivateAttr(default_factory=dict)
|
|
787
|
+
# Mapping: operation_id -> {sanitized_field_name: original_param_name}
|
|
788
|
+
# Needed because LangChain passes kwargs using Pydantic field names (sanitized),
|
|
789
|
+
# but the API expects original parameter names
|
|
790
|
+
_param_name_mapping: dict[str, dict[str, str]] = PrivateAttr(default_factory=dict)
|
|
791
|
+
# Stores unresolved server variable info for deferred error reporting
|
|
792
|
+
# If set, any tool execution will fail with a helpful error message
|
|
793
|
+
_unresolved_server_vars: Optional[dict] = PrivateAttr(default=None)
|
|
794
|
+
|
|
795
|
+
def model_post_init(self, __context: Any) -> None:
|
|
796
|
+
# Resolve server variables in spec URLs before loading
|
|
797
|
+
# This handles specs like Azure DevOps that use {organization}/{project} placeholders
|
|
798
|
+
# If variables can't be resolved, we store the info and defer the error to execution time
|
|
799
|
+
_, self._unresolved_server_vars = _resolve_server_variables_in_spec(self.spec)
|
|
800
|
+
|
|
801
|
+
# Build meta from raw spec (method/path/examples)
|
|
802
|
+
op_meta: dict[str, dict] = {}
|
|
803
|
+
paths = self.spec.get("paths") or {}
|
|
804
|
+
if isinstance(paths, dict):
|
|
805
|
+
for path, path_item in paths.items():
|
|
806
|
+
if not isinstance(path_item, dict):
|
|
807
|
+
continue
|
|
808
|
+
for method, op_raw in path_item.items():
|
|
809
|
+
if not isinstance(op_raw, dict):
|
|
810
|
+
continue
|
|
811
|
+
operation_id = op_raw.get("operationId")
|
|
812
|
+
if not operation_id:
|
|
813
|
+
continue
|
|
814
|
+
op_meta[str(operation_id)] = {
|
|
815
|
+
"method": str(method).upper(),
|
|
816
|
+
"path": str(path),
|
|
817
|
+
"raw": op_raw,
|
|
818
|
+
}
|
|
819
|
+
|
|
820
|
+
client = Client()
|
|
821
|
+
client.load_spec(self.spec)
|
|
822
|
+
if self.base_headers:
|
|
823
|
+
client.requestor.headers.update({str(k): str(v) for k, v in self.base_headers.items()})
|
|
824
|
+
|
|
825
|
+
self._client = client
|
|
826
|
+
self._op_meta = op_meta
|
|
827
|
+
|
|
828
|
+
# Build tool definitions once.
|
|
829
|
+
self._tool_defs = self._build_tool_defs()
|
|
830
|
+
self._tool_ref_by_name = {t["name"]: t["ref"] for t in self._tool_defs if "ref" in t}
|
|
831
|
+
|
|
832
|
+
def _build_tool_defs(self) -> list[dict[str, Any]]:
|
|
833
|
+
tool_defs: list[dict[str, Any]] = []
|
|
834
|
+
for operation_id, op in getattr(self._client, "operations", {}).items():
|
|
835
|
+
if not isinstance(op, Operation):
|
|
836
|
+
continue
|
|
837
|
+
op_id = str(operation_id)
|
|
838
|
+
meta = self._op_meta.get(op_id, {})
|
|
839
|
+
op_raw = meta.get("raw") if isinstance(meta.get("raw"), dict) else {}
|
|
840
|
+
|
|
841
|
+
method = meta.get("method")
|
|
842
|
+
path = meta.get("path")
|
|
843
|
+
|
|
844
|
+
title_line = ""
|
|
845
|
+
if method and path:
|
|
846
|
+
title_line = f"{method} {path}"
|
|
847
|
+
|
|
848
|
+
summary = op.spec.summary or ""
|
|
849
|
+
description = op.spec.description or ""
|
|
850
|
+
tool_desc_parts = [p for p in [title_line, summary, description] if p]
|
|
851
|
+
|
|
852
|
+
has_request_body = bool(op.spec.requestBody)
|
|
853
|
+
usage_lines: list[str] = ["How to call:"]
|
|
854
|
+
usage_lines.append("- Provide path/query parameters as named arguments.")
|
|
855
|
+
if has_request_body:
|
|
856
|
+
usage_lines.append("- For JSON request bodies, pass `body_json` as a JSON string.")
|
|
857
|
+
usage_lines.append(
|
|
858
|
+
"- Use `headers` only for per-call extra headers; base/toolkit headers (including auth) are already applied."
|
|
859
|
+
)
|
|
860
|
+
tool_desc_parts.append("\n".join(usage_lines))
|
|
861
|
+
|
|
862
|
+
args_schema = self._create_args_schema(op_id, op, op_raw)
|
|
863
|
+
ref = self._make_operation_callable(op_id)
|
|
864
|
+
|
|
865
|
+
tool_defs.append(
|
|
866
|
+
{
|
|
867
|
+
"name": op_id,
|
|
868
|
+
"description": "\n".join(tool_desc_parts).strip(),
|
|
869
|
+
"args_schema": args_schema,
|
|
870
|
+
"ref": ref,
|
|
871
|
+
}
|
|
872
|
+
)
|
|
873
|
+
return tool_defs
|
|
874
|
+
|
|
875
|
+
def _make_operation_callable(self, operation_id: str) -> Callable[..., str]:
|
|
876
|
+
def _call_operation(*args: Any, **kwargs: Any) -> str:
|
|
877
|
+
return self._execute(operation_id, *args, **kwargs)
|
|
878
|
+
|
|
879
|
+
return _call_operation
|
|
880
|
+
|
|
881
|
+
def _create_args_schema(self, operation_id: str, op: Operation, op_raw: dict) -> type[BaseModel]:
|
|
882
|
+
fields: dict[str, tuple[Any, Any]] = {}
|
|
883
|
+
# Track sanitized -> original name mapping for this operation
|
|
884
|
+
name_mapping: dict[str, str] = {}
|
|
885
|
+
|
|
886
|
+
# Parameters
|
|
887
|
+
raw_params = op_raw.get("parameters") or []
|
|
888
|
+
raw_param_map: dict[tuple[str, str], dict] = {}
|
|
889
|
+
if isinstance(raw_params, list):
|
|
890
|
+
for p in raw_params:
|
|
891
|
+
if isinstance(p, dict) and p.get("name") and p.get("in"):
|
|
892
|
+
raw_param_map[(str(p.get("name")), str(p.get("in")))] = p
|
|
893
|
+
|
|
894
|
+
for param in op.spec.parameters or []:
|
|
895
|
+
param_name = str(param.name)
|
|
896
|
+
# Sanitize parameter name for Pydantic field (handles dots, $ prefix, etc.)
|
|
897
|
+
sanitized_name = _sanitize_param_name(param_name)
|
|
898
|
+
# Track if we need alias (original name differs from sanitized)
|
|
899
|
+
needs_alias = sanitized_name != param_name
|
|
900
|
+
if needs_alias:
|
|
901
|
+
# Store mapping for restoring original names in _execute
|
|
902
|
+
name_mapping[sanitized_name] = param_name
|
|
903
|
+
logger.debug(
|
|
904
|
+
f"Using alias for parameter '{param_name}' (field name: '{sanitized_name}') for operation '{operation_id}'")
|
|
905
|
+
|
|
906
|
+
param_in_obj = getattr(param, "param_in", None)
|
|
907
|
+
# requests_openapi uses an enum-like value for `param_in`.
|
|
908
|
+
# For prompt quality and stable matching against raw spec, normalize to e.g. "query".
|
|
909
|
+
if hasattr(param_in_obj, "value"):
|
|
910
|
+
param_in = str(getattr(param_in_obj, "value"))
|
|
911
|
+
else:
|
|
912
|
+
param_in = str(param_in_obj)
|
|
913
|
+
raw_param = raw_param_map.get((param_name, param_in), {})
|
|
914
|
+
|
|
915
|
+
required = bool(raw_param.get("required", False))
|
|
916
|
+
schema = raw_param.get("schema") if isinstance(raw_param.get("schema"), dict) else None
|
|
917
|
+
py_type = _guess_python_type(schema)
|
|
918
|
+
|
|
919
|
+
example = raw_param.get("example")
|
|
920
|
+
if example is None and isinstance(schema, dict):
|
|
921
|
+
example = schema.get("example")
|
|
922
|
+
|
|
923
|
+
default = getattr(param.param_schema, "default", None)
|
|
924
|
+
# Build description
|
|
925
|
+
desc = (param.description or "").strip()
|
|
926
|
+
desc = f"({param_in}) {desc}".strip()
|
|
927
|
+
type_hint = _schema_type_hint(schema)
|
|
928
|
+
if type_hint:
|
|
929
|
+
desc = f"{desc}\nType: {type_hint}".strip()
|
|
930
|
+
if required:
|
|
931
|
+
desc = f"{desc}\nRequired: true".strip()
|
|
932
|
+
if example is not None:
|
|
933
|
+
desc = f"{desc}\nExample: {example}".strip()
|
|
934
|
+
if default is not None:
|
|
935
|
+
desc = f"{desc}\nDefault: {default}".strip()
|
|
936
|
+
|
|
937
|
+
# Build Field kwargs - use alias if name was sanitized so schema shows original name
|
|
938
|
+
field_kwargs = {"description": desc}
|
|
939
|
+
if needs_alias:
|
|
940
|
+
# Use alias so JSON schema shows original param name (e.g., "$top", "searchCriteria.status")
|
|
941
|
+
# and Pydantic accepts input using original name
|
|
942
|
+
field_kwargs["alias"] = param_name
|
|
943
|
+
|
|
944
|
+
# Required fields have no default. Use sanitized name for field.
|
|
945
|
+
if required:
|
|
946
|
+
fields[sanitized_name] = (py_type, Field(**field_kwargs))
|
|
947
|
+
else:
|
|
948
|
+
field_kwargs["default"] = default
|
|
949
|
+
fields[sanitized_name] = (Optional[py_type], Field(**field_kwargs))
|
|
950
|
+
|
|
951
|
+
# Additional headers not modeled in spec
|
|
952
|
+
# Use Annotated with BeforeValidator to coerce empty strings and JSON strings to dict
|
|
953
|
+
fields["headers"] = (
|
|
954
|
+
Annotated[Optional[dict], BeforeValidator(_coerce_headers_value)],
|
|
955
|
+
Field(
|
|
956
|
+
default_factory=dict,
|
|
957
|
+
description=(
|
|
958
|
+
"Additional HTTP headers to include in this request. "
|
|
959
|
+
"These are merged with the toolkit/base headers (including auth headers). "
|
|
960
|
+
"Only add headers if the API requires them. "
|
|
961
|
+
"Provide a JSON object/dict. Example: {\"X-Trace-Id\": \"123\"}"
|
|
962
|
+
),
|
|
963
|
+
),
|
|
964
|
+
)
|
|
965
|
+
|
|
966
|
+
# Request body
|
|
967
|
+
request_body = op_raw.get("requestBody") if isinstance(op_raw.get("requestBody"), dict) else None
|
|
968
|
+
body_required = bool((request_body or {}).get("required", False))
|
|
969
|
+
body_example = _extract_request_body_example(self.spec, op_raw)
|
|
970
|
+
body_desc = (
|
|
971
|
+
"Request body (JSON) as a string. The tool will parse it with json.loads and send as the request JSON body."
|
|
972
|
+
)
|
|
973
|
+
if body_example:
|
|
974
|
+
body_desc = f"{body_desc}\nExample JSON:\n{body_example}"
|
|
975
|
+
if op.spec.requestBody:
|
|
976
|
+
if body_required:
|
|
977
|
+
fields["body_json"] = (str, Field(description=body_desc))
|
|
978
|
+
else:
|
|
979
|
+
# Use BeforeValidator to coerce empty strings to None for optional body_json
|
|
980
|
+
fields["body_json"] = (
|
|
981
|
+
Annotated[Optional[str], BeforeValidator(_coerce_empty_string_to_none)],
|
|
982
|
+
Field(default=None, description=body_desc),
|
|
983
|
+
)
|
|
984
|
+
|
|
985
|
+
model_name = f"OpenApi_{clean_string(operation_id, max_length=40) or 'Operation'}_Params"
|
|
986
|
+
|
|
987
|
+
# Store the mapping for this operation (needed to restore original names in _execute)
|
|
988
|
+
if name_mapping:
|
|
989
|
+
self._param_name_mapping[operation_id] = name_mapping
|
|
990
|
+
|
|
991
|
+
return create_model(
|
|
992
|
+
model_name,
|
|
993
|
+
__base__=_BaseParamsModel,
|
|
994
|
+
# Use BeforeValidator to coerce empty strings to None for optional regexp
|
|
995
|
+
regexp=(
|
|
996
|
+
Annotated[Optional[str], BeforeValidator(_coerce_empty_string_to_none)],
|
|
997
|
+
Field(
|
|
998
|
+
description="Regular expression to remove from the final output (optional)",
|
|
999
|
+
default=None,
|
|
1000
|
+
),
|
|
1001
|
+
),
|
|
1002
|
+
**fields,
|
|
1003
|
+
)
|
|
1004
|
+
|
|
1005
|
+
def get_available_tools(self, selected_tools: Optional[list[str]] = None) -> list[dict[str, Any]]:
|
|
1006
|
+
if not selected_tools:
|
|
1007
|
+
return list(self._tool_defs)
|
|
1008
|
+
selected_set = {t for t in selected_tools if isinstance(t, str) and t}
|
|
1009
|
+
return [t for t in self._tool_defs if t.get("name") in selected_set]
|
|
1010
|
+
|
|
1011
|
+
def run(self, mode: str, *args: Any, **kwargs: Any) -> str:
|
|
1012
|
+
try:
|
|
1013
|
+
ref = self._tool_ref_by_name[mode]
|
|
1014
|
+
except KeyError:
|
|
1015
|
+
_raise_openapi_tool_exception(
|
|
1016
|
+
code="unknown_operation",
|
|
1017
|
+
message=f"Unknown operation: {mode}",
|
|
1018
|
+
details={"known_operations": sorted(list(self._tool_ref_by_name.keys()))[:200]},
|
|
1019
|
+
)
|
|
1020
|
+
return ref(*args, **kwargs)
+
+    def _get_required_inputs_from_raw_spec(self, operation_id: str) -> dict[str, Any]:
+        meta = self._op_meta.get(str(operation_id), {})
+        op_raw = meta.get("raw") if isinstance(meta, dict) and isinstance(meta.get("raw"), dict) else {}
+
+        required_path: list[str] = []
+        required_query: list[str] = []
+        raw_params = op_raw.get("parameters")
+        if isinstance(raw_params, list):
+            for p in raw_params:
+                if not isinstance(p, dict):
+                    continue
+                name = p.get("name")
+                where = p.get("in")
+                required = bool(p.get("required", False))
+                if not required or not isinstance(name, str) or not isinstance(where, str):
+                    continue
+                if where == "path":
+                    required_path.append(name)
+                elif where == "query":
+                    required_query.append(name)
+
+        req_body = False
+        rb = op_raw.get("requestBody")
+        if isinstance(rb, dict):
+            req_body = bool(rb.get("required", False))
+
+        return {
+            "required_path": required_path,
+            "required_query": required_query,
+            "required_body": req_body,
+        }
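Editorial note: the helper above only inspects the raw operation object, so its output is easy to reason about. A self-contained sketch of the same scan on a hypothetical Petstore-style operation:

# Editorial sketch (not part of the diff): what the raw-spec scan produces for a typical operation.
op_raw = {
    "parameters": [
        {"name": "petId", "in": "path", "required": True},
        {"name": "status", "in": "query", "required": True},
        {"name": "limit", "in": "query"},  # optional -> ignored
    ],
    "requestBody": {"required": True},
}

required_path = [p["name"] for p in op_raw["parameters"] if p.get("in") == "path" and p.get("required")]
required_query = [p["name"] for p in op_raw["parameters"] if p.get("in") == "query" and p.get("required")]
required_body = bool(op_raw.get("requestBody", {}).get("required", False))

print({"required_path": required_path, "required_query": required_query, "required_body": required_body})
# -> {'required_path': ['petId'], 'required_query': ['status'], 'required_body': True}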
+
+    def get_operation_request_url(self, operation_id: str, params: dict[str, Any]) -> str:
+        """Best-effort resolved URL for debugging/prompt-quality inspection.
+
+        This does not execute the request.
+        """
+        meta = self._op_meta.get(str(operation_id), {})
+        path = meta.get("path") if isinstance(meta, dict) else None
+        if not isinstance(path, str):
+            return ""
+        base_url = _get_base_url_from_spec(self.spec)
+        url = _join_base_and_path(base_url, path)
+
+        # Substitute {pathParams}
+        for k, v in (params or {}).items():
+            placeholder = "{" + str(k) + "}"
+            if placeholder in url:
+                url = url.replace(placeholder, str(v))
+
+        # Add query params if present.
+        query: dict[str, Any] = {}
+        try:
+            op = self._client.operations[str(operation_id)]
+            if isinstance(op, Operation):
+                for p in op.spec.parameters or []:
+                    p_in_obj = getattr(p, "param_in", None)
+                    p_in = str(getattr(p_in_obj, "value", p_in_obj))
+                    if p_in != "query":
+                        continue
+                    name = str(p.name)
+                    if name in (params or {}) and (params or {}).get(name) is not None:
+                        query[name] = (params or {})[name]
+        except Exception:
+            query = {}
+
+        if query:
+            url = url + "?" + urlencode(query, doseq=True)
+        return url
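Editorial note: URL reconstruction here is purely textual: path placeholders are replaced in-place and declared query parameters are appended with urlencode. A sketch with hypothetical inputs:

# Editorial sketch (not part of the diff): the substitution/encoding steps on hypothetical inputs;
# the real method pulls the path template and query parameter names from the spec.
from urllib.parse import urlencode

url = "https://petstore3.swagger.io/api/v3" + "/pet/{petId}"
params = {"petId": 42, "verbose": True}

for k, v in params.items():
    placeholder = "{" + str(k) + "}"
    if placeholder in url:
        url = url.replace(placeholder, str(v))

query = {"verbose": params["verbose"]}  # only parameters declared as `in: query`
print(url + "?" + urlencode(query, doseq=True))
# -> https://petstore3.swagger.io/api/v3/pet/42?verbose=True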
+
+    def _execute(self, operation_id: str, *args: Any, **kwargs: Any) -> str:
+        # Check for unresolved server variables - fail early with helpful error
+        if self._unresolved_server_vars:
+            self._raise_unresolved_server_vars_error(operation_id)
+
+        # Extract special fields (already coerced by BeforeValidator at validation time)
+        regexp = kwargs.pop("regexp", None)
+        extra_headers = kwargs.pop("headers", None)
+
+        # Restore original parameter names from sanitized field names
+        # LangChain passes kwargs using Pydantic field names (sanitized like 'dollar_top'),
+        # but the API expects original parameter names (like '$top')
+        name_mapping = self._param_name_mapping.get(operation_id, {})
+        if name_mapping:
+            restored_kwargs: dict[str, Any] = {}
+            for key, value in kwargs.items():
+                original_name = name_mapping.get(key, key)
+                restored_kwargs[original_name] = value
+            kwargs = restored_kwargs
+
+        # Validate headers type (should be dict or None after BeforeValidator coercion)
+        if extra_headers is not None and not isinstance(extra_headers, dict):
+            _raise_openapi_tool_exception(
+                code="invalid_headers",
+                message="'headers' must be a dict or valid JSON object string",
+                operation_id=str(operation_id),
+                details={"provided_type": str(type(extra_headers)), "provided_value": str(extra_headers)[:100]},
+            )
+
+        # Handle body_json (already coerced to None for empty strings by BeforeValidator)
+        body_json = kwargs.pop("body_json", None)
+        if body_json is not None:
+            if isinstance(body_json, str):
+                try:
+                    kwargs["json"] = json.loads(body_json)
+                except Exception as e:
+                    _raise_openapi_tool_exception(
+                        code="invalid_json_body",
+                        message=f"Invalid JSON body: {e}",
+                        operation_id=str(operation_id),
+                        details={"hint": "Ensure body_json is valid JSON (double quotes, no trailing commas)."},
+                    )
+            else:
+                kwargs["json"] = body_json
+
+        # Backward compatible: accept `json` as a string too.
+        if "json" in kwargs and isinstance(kwargs.get("json"), str):
+            try:
+                kwargs["json"] = json.loads(kwargs["json"])
+            except Exception as e:
+                _raise_openapi_tool_exception(
+                    code="invalid_json_body",
+                    message=f"Invalid JSON body: {e}",
+                    operation_id=str(operation_id),
+                    details={"hint": "If you pass `json` as a string, it must be valid JSON."},
+                )
+
+        try:
+            op = self._client.operations[operation_id]
+        except Exception:
+            _raise_openapi_tool_exception(
+                code="operation_not_found",
+                message=f"Operation '{operation_id}' not found in OpenAPI spec",
+                operation_id=str(operation_id),
+            )
+        if not isinstance(op, Operation):
+            _raise_openapi_tool_exception(
+                code="invalid_operation",
+                message=f"Operation '{operation_id}' is not a valid OpenAPI operation",
+                operation_id=str(operation_id),
+            )
+
+        # Best-effort URL reconstruction for error context.
+        debug_url = ""
+        try:
+            debug_url = self.get_operation_request_url(operation_id, dict(kwargs))
+        except Exception:
+            debug_url = ""
+
+        # Preflight required input checks (helps LLM recover without needing spec knowledge).
+        missing: list[str] = []
+        required_info = self._get_required_inputs_from_raw_spec(str(operation_id))
+        for name in required_info.get("required_path", []) or []:
+            if name not in kwargs or kwargs.get(name) is None:
+                missing.append(name)
+        for name in required_info.get("required_query", []) or []:
+            if name not in kwargs or kwargs.get(name) is None:
+                missing.append(name)
+        if bool(required_info.get("required_body")) and kwargs.get("json") is None:
+            missing.append("body_json")
+
+        # Also check for unresolved {param} placeholders in the path.
+        meta = self._op_meta.get(str(operation_id), {})
+        path = meta.get("path") if isinstance(meta, dict) else None
+        if isinstance(path, str):
+            for placeholder in re.findall(r"\{([^}]+)\}", path):
+                if placeholder and (placeholder not in kwargs or kwargs.get(placeholder) is None):
+                    missing.append(str(placeholder))
+
+        if missing:
+            _raise_openapi_tool_exception(
+                code="missing_required_inputs",
+                message=f"Missing required inputs for operation '{operation_id}': {', '.join(sorted(set(missing)))}",
+                operation_id=str(operation_id),
+                url=debug_url or None,
+                retryable=True,
+                missing_inputs=sorted(set(missing)),
+                details={"hint": "Provide the missing fields and retry the same operation."},
+            )
+
+        # Preflight base URL check: requests_openapi needs an absolute server URL to execute HTTP.
+        base_url = _get_base_url_from_spec(self.spec)
+        if not base_url or not _is_absolute_url(base_url):
+            servers = self.spec.get("servers") if isinstance(self.spec, dict) else None
+            server_url = None
+            if isinstance(servers, list) and servers and isinstance(servers[0], dict):
+                server_url = servers[0].get("url")
+
+            _raise_openapi_tool_exception(
+                code="missing_base_url",
+                message=(
+                    "Cannot execute HTTP request because the OpenAPI spec does not contain an absolute server URL. "
+                    "Provide `base_url_override`/`base_url` in the toolkit settings (e.g. 'https://host') "
+                    "or update `servers[0].url` to an absolute URL (https://...)."
+                ),
+                operation_id=str(operation_id),
+                url=debug_url or None,
+                retryable=False,
+                details={
+                    "servers_0_url": server_url,
+                    "computed_base_url": base_url,
+                    "hint": "If servers[0].url is relative like '/api/v3', set base_url_override to the host (e.g. 'https://petstore3.swagger.io').",
+                },
+            )
+
+        # Apply per-call extra headers (best-effort) without permanently mutating global headers.
+        old_headers = dict(getattr(self._client.requestor, "headers", {}) or {})
+        try:
+            if extra_headers:
+                self._client.requestor.headers.update({str(k): str(v) for k, v in extra_headers.items()})
+            response = op(*args, **kwargs)
+        except Exception as e:
+            _raise_openapi_tool_exception(
+                code="request_failed",
+                message=f"OpenAPI request failed for operation '{operation_id}': {e}",
+                operation_id=str(operation_id),
+                url=debug_url or None,
+                retryable=True,
+                details={"exception": repr(e)},
+            )
+        finally:
+            try:
+                self._client.requestor.headers.clear()
+                self._client.requestor.headers.update(old_headers)
+            except Exception:
+                pass
+
+        # If this looks like a requests.Response, raise on HTTP errors with actionable context.
+        status_code = getattr(response, "status_code", None)
+        if isinstance(status_code, int) and status_code >= 400:
+            body_preview = ""
+            for attr in ("text", "content", "data"):
+                if hasattr(response, attr):
+                    body_preview = _normalize_output(getattr(response, attr))
+                    break
+            body_preview = _truncate(body_preview, 2000)
+            retryable = _is_retryable_http_status(status_code)
+
+            hint = ""
+            if status_code in (401, 403):
+                hint = "Authentication/authorization failed. Verify toolkit authentication settings / base headers."
+            elif status_code == 404:
+                hint = "Resource not found. Check path parameters and identifiers."
+            elif status_code == 400:
+                hint = "Bad request. Check required parameters and request body schema."
+            elif status_code == 415:
+                hint = "Unsupported media type. The API may require Content-Type headers."
+            elif status_code == 429:
+                hint = "Rate limited. Retry after a short delay."
+
+            _raise_openapi_tool_exception(
+                code="http_error",
+                message=f"OpenAPI request failed with HTTP {status_code} for operation '{operation_id}'",
+                operation_id=str(operation_id),
+                url=debug_url or None,
+                retryable=retryable,
+                http_status=status_code,
+                http_body_preview=body_preview,
+                details={"hint": hint} if hint else None,
+            )
+
+        output = None
+        for attr in ("content", "data", "text"):
+            if hasattr(response, attr):
+                output = getattr(response, attr)
+                break
+        if output is None:
+            output = response
+
+        output_str = _normalize_output(output)
+
+        if regexp:
+            try:
+                output_str = re.sub(rf"{regexp}", "", output_str)
+            except Exception as e:
+                logger.debug(f"Failed to apply regexp filter: {e}")
+
+        return output_str
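Editorial note: the sanitized-name round trip mentioned in the comments above ('dollar_top' back to '$top') is a plain dict lookup applied to the incoming kwargs. A sketch with a hypothetical mapping:

# Editorial sketch (not part of the diff): restoring original parameter names from the sanitized
# Pydantic field names; the mapping contents here are hypothetical.
name_mapping = {"dollar_top": "$top", "dollar_filter": "$filter"}
kwargs = {"dollar_top": 5, "dollar_filter": "name", "petId": 1}

restored = {name_mapping.get(k, k): v for k, v in kwargs.items()}
print(restored)  # -> {'$top': 5, '$filter': 'name', 'petId': 1}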
+
+    def _raise_unresolved_server_vars_error(self, operation_id: str) -> None:
+        """Raise a detailed error for unresolved server variables with YAML/JSON examples."""
+        info = self._unresolved_server_vars
+        if not info:
+            return
+
+        url = info.get("url", "")
+        missing_vars = info.get("missing_vars", [])
+        server_index = info.get("server_index", 0)
+
+        # Build example snippets showing how to fix the spec
+        yaml_vars = '\n'.join(f'        {v}:\n          default: "your_{v}_value"' for v in missing_vars)
+        json_vars = ', '.join(f'"{v}": {{"default": "your_{v}_value"}}' for v in missing_vars)
+        var_list = ', '.join(f'"{v}"' for v in missing_vars)
+
+        _raise_openapi_tool_exception(
+            code="unresolved_server_variables",
+            message=(
+                f"Server URL contains variables without default values: {var_list}.\n\n"
+                f"The OpenAPI spec defines server URL:\n"
+                f"  {url}\n\n"
+                f"These variables must have default values. Update your OpenAPI spec as follows:\n\n"
+                f"YAML format:\n"
+                f"  servers:\n"
+                f"    - url: \"{url}\"\n"
+                f"      variables:\n"
+                f"{yaml_vars}\n\n"
+                f"JSON format:\n"
+                f"  {{\n"
+                f"    \"servers\": [{{\n"
+                f"      \"url\": \"{url}\",\n"
+                f"      \"variables\": {{ {json_vars} }}\n"
+                f"    }}]\n"
+                f"  }}"
+            ),
+            operation_id=str(operation_id),
+            details={
+                "server_url": url,
+                "missing_variables": missing_vars,
+                "server_index": server_index,
+            },
+        )
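Editorial note: the fix this error message asks for is a servers entry whose variables all carry defaults, which is what lets the template URL resolve to a concrete host. A sketch with hypothetical values, using standard OpenAPI server-variable substitution:

# Editorial sketch (not part of the diff): a servers entry with variable defaults and how it resolves.
server = {
    "url": "https://{tenant}.example.com/{basePath}",
    "variables": {
        "tenant": {"default": "acme"},
        "basePath": {"default": "api/v2"},
    },
}

resolved = server["url"]
for name, var in server["variables"].items():
    resolved = resolved.replace("{" + name + "}", var["default"])
print(resolved)
# -> https://acme.example.com/api/v2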
+
+
+def build_wrapper(
+    openapi_spec: str | dict,
+    base_headers: Optional[dict[str, str]] = None,
+    base_url_override: Optional[str] = None,
+) -> OpenApiApiWrapper:
+    parsed = _parse_openapi_spec(openapi_spec)
+    # Avoid mutating caller-owned spec dict.
+    spec = copy.deepcopy(parsed)
+    if base_url_override:
+        spec = _apply_base_url_override(spec, base_url_override)
+    # Ensure all operations have operationIds (generate from method+path if missing)
+    spec = _ensure_operation_ids(spec)
+    return OpenApiApiWrapper(spec=spec, base_headers=base_headers or {})
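Editorial note: a sketch of how build_wrapper() is expected to be wired up, assuming this module is importable. The inline spec, token, and override host are hypothetical; real callers typically pass a full OpenAPI document (YAML/JSON string or dict) plus toolkit-level headers.

# Editorial sketch (not part of the diff): building a wrapper from a minimal, hypothetical spec.
minimal_spec = {
    "openapi": "3.0.0",
    "info": {"title": "Demo", "version": "1.0.0"},
    "servers": [{"url": "/api/v3"}],  # relative -> needs base_url_override
    "paths": {
        "/pet/{petId}": {
            "get": {"operationId": "getPetById", "responses": {"200": {"description": "ok"}}}
        }
    },
}

wrapper = build_wrapper(
    openapi_spec=minimal_spec,
    base_headers={"Authorization": "Bearer <token>"},
    base_url_override="https://petstore3.swagger.io",  # makes servers[0].url absolute
)
print([t["name"] for t in wrapper.get_available_tools()])  # discovered tool names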