alita-sdk 0.3.365__py3-none-any.whl → 0.3.462__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (118)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent_executor.py +144 -0
  4. alita_sdk/cli/agent_loader.py +197 -0
  5. alita_sdk/cli/agent_ui.py +166 -0
  6. alita_sdk/cli/agents.py +1069 -0
  7. alita_sdk/cli/callbacks.py +576 -0
  8. alita_sdk/cli/cli.py +159 -0
  9. alita_sdk/cli/config.py +153 -0
  10. alita_sdk/cli/formatting.py +182 -0
  11. alita_sdk/cli/mcp_loader.py +315 -0
  12. alita_sdk/cli/toolkit.py +330 -0
  13. alita_sdk/cli/toolkit_loader.py +55 -0
  14. alita_sdk/cli/tools/__init__.py +9 -0
  15. alita_sdk/cli/tools/filesystem.py +905 -0
  16. alita_sdk/configurations/bitbucket.py +95 -0
  17. alita_sdk/configurations/confluence.py +96 -1
  18. alita_sdk/configurations/gitlab.py +79 -0
  19. alita_sdk/configurations/jira.py +103 -0
  20. alita_sdk/configurations/testrail.py +88 -0
  21. alita_sdk/configurations/xray.py +93 -0
  22. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  23. alita_sdk/configurations/zephyr_essential.py +75 -0
  24. alita_sdk/runtime/clients/artifact.py +1 -1
  25. alita_sdk/runtime/clients/client.py +47 -10
  26. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  27. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  28. alita_sdk/runtime/clients/sandbox_client.py +373 -0
  29. alita_sdk/runtime/langchain/assistant.py +70 -41
  30. alita_sdk/runtime/langchain/constants.py +6 -1
  31. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  32. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  33. alita_sdk/runtime/langchain/document_loaders/constants.py +73 -100
  34. alita_sdk/runtime/langchain/langraph_agent.py +164 -38
  35. alita_sdk/runtime/langchain/utils.py +43 -7
  36. alita_sdk/runtime/models/mcp_models.py +61 -0
  37. alita_sdk/runtime/toolkits/__init__.py +24 -0
  38. alita_sdk/runtime/toolkits/application.py +8 -1
  39. alita_sdk/runtime/toolkits/artifact.py +5 -6
  40. alita_sdk/runtime/toolkits/mcp.py +895 -0
  41. alita_sdk/runtime/toolkits/tools.py +140 -50
  42. alita_sdk/runtime/tools/__init__.py +7 -2
  43. alita_sdk/runtime/tools/application.py +7 -0
  44. alita_sdk/runtime/tools/function.py +94 -5
  45. alita_sdk/runtime/tools/graph.py +10 -4
  46. alita_sdk/runtime/tools/image_generation.py +104 -8
  47. alita_sdk/runtime/tools/llm.py +204 -114
  48. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  49. alita_sdk/runtime/tools/mcp_remote_tool.py +166 -0
  50. alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
  51. alita_sdk/runtime/tools/sandbox.py +180 -79
  52. alita_sdk/runtime/tools/vectorstore.py +22 -21
  53. alita_sdk/runtime/tools/vectorstore_base.py +79 -26
  54. alita_sdk/runtime/utils/mcp_oauth.py +164 -0
  55. alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
  56. alita_sdk/runtime/utils/streamlit.py +34 -3
  57. alita_sdk/runtime/utils/toolkit_utils.py +14 -4
  58. alita_sdk/runtime/utils/utils.py +1 -0
  59. alita_sdk/tools/__init__.py +48 -31
  60. alita_sdk/tools/ado/repos/__init__.py +1 -0
  61. alita_sdk/tools/ado/test_plan/__init__.py +1 -1
  62. alita_sdk/tools/ado/wiki/__init__.py +1 -5
  63. alita_sdk/tools/ado/work_item/__init__.py +1 -5
  64. alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
  65. alita_sdk/tools/base_indexer_toolkit.py +194 -112
  66. alita_sdk/tools/bitbucket/__init__.py +1 -0
  67. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  68. alita_sdk/tools/code/sonar/__init__.py +1 -1
  69. alita_sdk/tools/code_indexer_toolkit.py +15 -5
  70. alita_sdk/tools/confluence/__init__.py +2 -2
  71. alita_sdk/tools/confluence/api_wrapper.py +110 -63
  72. alita_sdk/tools/confluence/loader.py +10 -0
  73. alita_sdk/tools/elitea_base.py +22 -22
  74. alita_sdk/tools/github/__init__.py +2 -2
  75. alita_sdk/tools/gitlab/__init__.py +2 -1
  76. alita_sdk/tools/gitlab/api_wrapper.py +11 -7
  77. alita_sdk/tools/gitlab_org/__init__.py +1 -2
  78. alita_sdk/tools/google_places/__init__.py +2 -1
  79. alita_sdk/tools/jira/__init__.py +1 -0
  80. alita_sdk/tools/jira/api_wrapper.py +1 -1
  81. alita_sdk/tools/memory/__init__.py +1 -1
  82. alita_sdk/tools/non_code_indexer_toolkit.py +2 -2
  83. alita_sdk/tools/openapi/__init__.py +10 -1
  84. alita_sdk/tools/pandas/__init__.py +1 -1
  85. alita_sdk/tools/postman/__init__.py +2 -1
  86. alita_sdk/tools/postman/api_wrapper.py +18 -8
  87. alita_sdk/tools/postman/postman_analysis.py +8 -1
  88. alita_sdk/tools/pptx/__init__.py +2 -2
  89. alita_sdk/tools/qtest/__init__.py +3 -3
  90. alita_sdk/tools/qtest/api_wrapper.py +1708 -76
  91. alita_sdk/tools/rally/__init__.py +1 -2
  92. alita_sdk/tools/report_portal/__init__.py +1 -0
  93. alita_sdk/tools/salesforce/__init__.py +1 -0
  94. alita_sdk/tools/servicenow/__init__.py +2 -3
  95. alita_sdk/tools/sharepoint/__init__.py +1 -0
  96. alita_sdk/tools/sharepoint/api_wrapper.py +125 -34
  97. alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
  98. alita_sdk/tools/sharepoint/utils.py +8 -2
  99. alita_sdk/tools/slack/__init__.py +1 -0
  100. alita_sdk/tools/sql/__init__.py +2 -1
  101. alita_sdk/tools/sql/api_wrapper.py +71 -23
  102. alita_sdk/tools/testio/__init__.py +1 -0
  103. alita_sdk/tools/testrail/__init__.py +1 -3
  104. alita_sdk/tools/utils/__init__.py +17 -0
  105. alita_sdk/tools/utils/content_parser.py +35 -24
  106. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +67 -21
  107. alita_sdk/tools/xray/__init__.py +2 -1
  108. alita_sdk/tools/zephyr/__init__.py +2 -1
  109. alita_sdk/tools/zephyr_enterprise/__init__.py +1 -0
  110. alita_sdk/tools/zephyr_essential/__init__.py +1 -0
  111. alita_sdk/tools/zephyr_scale/__init__.py +1 -0
  112. alita_sdk/tools/zephyr_squad/__init__.py +1 -0
  113. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/METADATA +8 -2
  114. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/RECORD +118 -93
  115. alita_sdk-0.3.462.dist-info/entry_points.txt +2 -0
  116. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/WHEEL +0 -0
  117. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/licenses/LICENSE +0 -0
  118. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/top_level.txt +0 -0

alita_sdk/runtime/clients/mcp_manager.py (new file)
@@ -0,0 +1,262 @@
+"""
+MCP Manager - Unified interface for both static and dynamic MCP tool discovery.
+Provides a single API that can work with both registry-based and live discovery.
+"""
+
+import asyncio
+import logging
+from typing import Dict, List, Optional, Any, Union
+from enum import Enum
+
+from ..models.mcp_models import McpConnectionConfig, McpToolMetadata
+from .mcp_discovery import McpDiscoveryService, get_discovery_service
+
+logger = logging.getLogger(__name__)
+
+
+class DiscoveryMode(Enum):
+    """MCP discovery modes."""
+    STATIC = "static"    # Use alita.get_mcp_toolkits() registry
+    DYNAMIC = "dynamic"  # Live discovery from MCP servers
+    HYBRID = "hybrid"    # Try dynamic first, fallback to static
+
+
+class McpManager:
+    """
+    Unified manager for MCP tool discovery supporting multiple modes.
+    """
+
+    def __init__(
+        self,
+        default_mode: DiscoveryMode = DiscoveryMode.DYNAMIC,
+        discovery_service: Optional[McpDiscoveryService] = None
+    ):
+        self.default_mode = default_mode
+        self.discovery_service = discovery_service or get_discovery_service()
+        self._static_fallback_enabled = True
+
+    async def discover_server_tools(
+        self,
+        server_name: str,
+        connection_config: Optional[McpConnectionConfig] = None,
+        alita_client=None,
+        mode: Optional[DiscoveryMode] = None,
+        **kwargs
+    ) -> List[McpToolMetadata]:
+        """
+        Discover tools from an MCP server using the specified mode.
+
+        Args:
+            server_name: Name of the MCP server
+            connection_config: Connection configuration (required for dynamic mode)
+            alita_client: Alita client (required for static mode)
+            mode: Discovery mode to use (defaults to manager's default)
+            **kwargs: Additional options
+
+        Returns:
+            List of discovered tool metadata
+        """
+        discovery_mode = mode or self.default_mode
+
+        if discovery_mode == DiscoveryMode.DYNAMIC:
+            return await self._discover_dynamic(server_name, connection_config)
+
+        elif discovery_mode == DiscoveryMode.STATIC:
+            return await self._discover_static(server_name, alita_client)
+
+        elif discovery_mode == DiscoveryMode.HYBRID:
+            return await self._discover_hybrid(server_name, connection_config, alita_client)
+
+        else:
+            raise ValueError(f"Unknown discovery mode: {discovery_mode}")
+
+    async def _discover_dynamic(
+        self,
+        server_name: str,
+        connection_config: Optional[McpConnectionConfig]
+    ) -> List[McpToolMetadata]:
+        """Discover tools using dynamic MCP protocol."""
+        if not connection_config:
+            raise ValueError("Connection configuration required for dynamic discovery")
+
+        try:
+            # Ensure discovery service is started
+            await self.discovery_service.start()
+
+            # Register and discover
+            await self.discovery_service.register_server(server_name, connection_config)
+            tools = await self.discovery_service.get_server_tools(server_name)
+
+            logger.info(f"Dynamic discovery found {len(tools)} tools from {server_name}")
+            return tools
+
+        except Exception as e:
+            logger.error(f"Dynamic discovery failed for {server_name}: {e}")
+            raise
+
+    async def _discover_static(
+        self,
+        server_name: str,
+        alita_client
+    ) -> List[McpToolMetadata]:
+        """Discover tools using static registry."""
+        if not alita_client or not hasattr(alita_client, 'get_mcp_toolkits'):
+            raise ValueError("Alita client with get_mcp_toolkits() required for static discovery")
+
+        try:
+            # Use existing registry approach
+            all_toolkits = alita_client.get_mcp_toolkits()
+            server_toolkit = next((tk for tk in all_toolkits if tk.get('name') == server_name), None)
+
+            if not server_toolkit:
+                logger.warning(f"Static registry: Server {server_name} not found")
+                return []
+
+            # Convert to metadata format
+            tools = []
+            for tool_info in server_toolkit.get('tools', []):
+                metadata = McpToolMetadata(
+                    name=tool_info.get('name', ''),
+                    description=tool_info.get('description', ''),
+                    server=server_name,
+                    input_schema=tool_info.get('inputSchema', {}),
+                    enabled=True
+                )
+                tools.append(metadata)
+
+            logger.info(f"Static discovery found {len(tools)} tools from {server_name}")
+            return tools
+
+        except Exception as e:
+            logger.error(f"Static discovery failed for {server_name}: {e}")
+            raise
+
+    async def _discover_hybrid(
+        self,
+        server_name: str,
+        connection_config: Optional[McpConnectionConfig],
+        alita_client
+    ) -> List[McpToolMetadata]:
+        """Discover tools using hybrid approach (dynamic first, static fallback)."""
+
+        # Try dynamic discovery first
+        if connection_config:
+            try:
+                return await self._discover_dynamic(server_name, connection_config)
+            except Exception as e:
+                logger.warning(f"Dynamic discovery failed for {server_name}, trying static: {e}")
+
+        # Fallback to static discovery
+        if self._static_fallback_enabled and alita_client:
+            try:
+                return await self._discover_static(server_name, alita_client)
+            except Exception as e:
+                logger.error(f"Static fallback also failed for {server_name}: {e}")
+
+        logger.error(f"All discovery methods failed for {server_name}")
+        return []
+
+    async def get_server_health(
+        self,
+        server_name: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Get health information for servers."""
+        try:
+            if server_name:
+                # Get specific server health from discovery service
+                all_health = self.discovery_service.get_server_health()
+                return all_health.get(server_name, {"status": "unknown"})
+            else:
+                # Get all server health
+                return self.discovery_service.get_server_health()
+        except Exception as e:
+            logger.error(f"Failed to get server health: {e}")
+            return {"status": "error", "error": str(e)}
+
+    async def refresh_server(self, server_name: str):
+        """Force refresh a specific server's tools."""
+        try:
+            await self.discovery_service.refresh_server(server_name)
+        except Exception as e:
+            logger.error(f"Failed to refresh server {server_name}: {e}")
+
+    async def start(self):
+        """Start the MCP manager."""
+        await self.discovery_service.start()
+
+    async def stop(self):
+        """Stop the MCP manager."""
+        await self.discovery_service.stop()
+
+    def set_static_fallback(self, enabled: bool):
+        """Enable or disable static fallback in hybrid mode."""
+        self._static_fallback_enabled = enabled
+
+
+# Global manager instance
+_mcp_manager: Optional[McpManager] = None
+
+
+def get_mcp_manager(mode: DiscoveryMode = DiscoveryMode.HYBRID) -> McpManager:
+    """Get the global MCP manager instance."""
+    global _mcp_manager
+    if _mcp_manager is None:
+        _mcp_manager = McpManager(default_mode=mode)
+    return _mcp_manager
+
+
+async def discover_mcp_tools(
+    server_name: str,
+    connection_config: Optional[McpConnectionConfig] = None,
+    alita_client=None,
+    mode: Optional[DiscoveryMode] = None
+) -> List[McpToolMetadata]:
+    """
+    Convenience function for discovering MCP tools.
+
+    Args:
+        server_name: Name of the MCP server
+        connection_config: Connection config (for dynamic discovery)
+        alita_client: Alita client (for static discovery)
+        mode: Discovery mode (defaults to HYBRID)
+
+    Returns:
+        List of discovered tool metadata
+    """
+    manager = get_mcp_manager()
+    return await manager.discover_server_tools(
+        server_name=server_name,
+        connection_config=connection_config,
+        alita_client=alita_client,
+        mode=mode or DiscoveryMode.HYBRID
+    )
+
+
+async def init_mcp_manager(mode: DiscoveryMode = DiscoveryMode.HYBRID):
+    """Initialize the global MCP manager."""
+    manager = get_mcp_manager(mode)
+    await manager.start()
+
+
+async def shutdown_mcp_manager():
+    """Shutdown the global MCP manager."""
+    global _mcp_manager
+    if _mcp_manager:
+        await _mcp_manager.stop()
+        _mcp_manager = None
+
+
+# Configuration helpers
+def create_discovery_config(
+    mode: str = "hybrid",
+    discovery_interval: int = 300,
+    enable_static_fallback: bool = True,
+    **kwargs
+) -> Dict[str, Any]:
+    """Create a discovery configuration dictionary."""
+    return {
+        "discovery_mode": mode,
+        "discovery_interval": discovery_interval,
+        "enable_static_fallback": enable_static_fallback,
+        **kwargs
+    }
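
A minimal usage sketch of the new manager, assuming the import path matches the wheel layout listed above and that McpToolMetadata (defined in the new mcp_models.py, which is not shown in this excerpt) exposes its constructor keywords such as name and description as attributes. The stub registry and the server name are hypothetical stand-ins for a real Alita client; STATIC mode only consults the registry, so no live MCP connection is made:

    import asyncio

    from alita_sdk.runtime.clients.mcp_manager import DiscoveryMode, discover_mcp_tools


    class RegistryStub:
        # Stand-in for an Alita client: STATIC discovery only requires get_mcp_toolkits().
        def get_mcp_toolkits(self):
            return [{
                "name": "demo-server",
                "tools": [{"name": "echo", "description": "Echo a message", "inputSchema": {}}],
            }]


    async def main():
        tools = await discover_mcp_tools(
            server_name="demo-server",
            alita_client=RegistryStub(),
            mode=DiscoveryMode.STATIC,  # registry lookup only; DYNAMIC/HYBRID expect a McpConnectionConfig
        )
        for tool in tools:
            print(tool.name, tool.description)


    asyncio.run(main())

In HYBRID mode the same call would first attempt a live connection described by a McpConnectionConfig and fall back to this registry path, as implemented in _discover_hybrid above.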

alita_sdk/runtime/clients/sandbox_client.py (new file)
@@ -0,0 +1,373 @@
+import logging
+from typing import Dict, Optional
+from urllib.parse import quote
+
+import requests
+from typing import Any
+from json import dumps
+import chardet
+
+logger = logging.getLogger(__name__)
+
+
+class ApiDetailsRequestError(Exception):
+    ...
+
+
+class SandboxArtifact:
+    def __init__(self, client: Any, bucket_name: str):
+        self.client = client
+        self.bucket_name = bucket_name
+        if not self.client.bucket_exists(bucket_name):
+            self.client.create_bucket(bucket_name)
+
+    def create(self, artifact_name: str, artifact_data: Any, bucket_name: str = None):
+        try:
+            if not bucket_name:
+                bucket_name = self.bucket_name
+            return dumps(self.client.create_artifact(bucket_name, artifact_name, artifact_data))
+        except Exception as e:
+            logger.error(f'Error: {e}')
+            return f'Error: {e}'
+
+    def get(self,
+            artifact_name: str,
+            bucket_name: str = None,
+            is_capture_image: bool = False,
+            page_number: int = None,
+            sheet_name: str = None,
+            excel_by_sheets: bool = False,
+            llm=None):
+        if not bucket_name:
+            bucket_name = self.bucket_name
+        data = self.client.download_artifact(bucket_name, artifact_name)
+        if len(data) == 0:
+            # empty file might be created
+            return ''
+        if isinstance(data, dict) and data['error']:
+            return f'{data['error']}. {data['content'] if data['content'] else ''}'
+        detected = chardet.detect(data)
+        return data
+        # TODO: add proper handling for binary files (images, pdf, etc.) for sandbox
+        # if detected['encoding'] is not None:
+        #     try:
+        #         return data.decode(detected['encoding'])
+        #     except Exception:
+        #         logger.error('Error while default encoding')
+        #         return parse_file_content(file_name=artifact_name,
+        #                                   file_content=data,
+        #                                   is_capture_image=is_capture_image,
+        #                                   page_number=page_number,
+        #                                   sheet_name=sheet_name,
+        #                                   excel_by_sheets=excel_by_sheets,
+        #                                   llm=llm)
+        # else:
+        #     return parse_file_content(file_name=artifact_name,
+        #                               file_content=data,
+        #                               is_capture_image=is_capture_image,
+        #                               page_number=page_number,
+        #                               sheet_name=sheet_name,
+        #                               excel_by_sheets=excel_by_sheets,
+        #                               llm=llm)
+
+    def delete(self, artifact_name: str, bucket_name=None):
+        if not bucket_name:
+            bucket_name = self.bucket_name
+        self.client.delete_artifact(bucket_name, artifact_name)
+
+    def list(self, bucket_name: str = None, return_as_string=True) -> str | dict:
+        if not bucket_name:
+            bucket_name = self.bucket_name
+        artifacts = self.client.list_artifacts(bucket_name)
+        return str(artifacts) if return_as_string else artifacts
+
+    def append(self, artifact_name: str, additional_data: Any, bucket_name: str = None):
+        if not bucket_name:
+            bucket_name = self.bucket_name
+        data = self.get(artifact_name, bucket_name)
+        if data == 'Could not detect encoding':
+            return data
+        data += f'{additional_data}' if len(data) > 0 else additional_data
+        self.client.create_artifact(bucket_name, artifact_name, data)
+        return 'Data appended successfully'
+
+    def overwrite(self, artifact_name: str, new_data: Any, bucket_name: str = None):
+        if not bucket_name:
+            bucket_name = self.bucket_name
+        return self.create(artifact_name, new_data, bucket_name)
+
+    def get_content_bytes(self,
+                          artifact_name: str,
+                          bucket_name: str = None):
+        if not bucket_name:
+            bucket_name = self.bucket_name
+        return self.client.download_artifact(bucket_name, artifact_name)
+
+
+class SandboxClient:
+    def __init__(self,
+                 base_url: str,
+                 project_id: int,
+                 auth_token: str,
+                 api_extra_headers: Optional[dict] = None,
+                 configurations: Optional[list] = None,
+                 **kwargs):
+
+        self.base_url = base_url.rstrip('/')
+        self.api_path = '/api/v1'
+        self.llm_path = '/llm/v1'
+        self.project_id = project_id
+        self.auth_token = auth_token
+        self.headers = {
+            'Authorization': f'Bearer {auth_token}',
+            'X-SECRET': kwargs.get('XSECRET', 'secret')
+        }
+        if api_extra_headers is not None:
+            self.headers.update(api_extra_headers)
+        self.predict_url = f'{self.base_url}{self.api_path}/prompt_lib/predict/prompt_lib/{self.project_id}'
+        self.prompt_versions = f'{self.base_url}{self.api_path}/prompt_lib/version/prompt_lib/{self.project_id}'
+        self.prompts = f'{self.base_url}{self.api_path}/prompt_lib/prompt/prompt_lib/{self.project_id}'
+        self.datasources = f'{self.base_url}{self.api_path}/datasources/datasource/prompt_lib/{self.project_id}'
+        self.datasources_predict = f'{self.base_url}{self.api_path}/datasources/predict/prompt_lib/{self.project_id}'
+        self.datasources_search = f'{self.base_url}{self.api_path}/datasources/search/prompt_lib/{self.project_id}'
+        self.app = f'{self.base_url}{self.api_path}/applications/application/prompt_lib/{self.project_id}'
+        self.mcp_tools_list = f'{self.base_url}{self.api_path}/mcp_sse/tools_list/{self.project_id}'
+        self.mcp_tools_call = f'{self.base_url}{self.api_path}/mcp_sse/tools_call/{self.project_id}'
+        self.application_versions = f'{self.base_url}{self.api_path}/applications/version/prompt_lib/{self.project_id}'
+        self.list_apps_url = f'{self.base_url}{self.api_path}/applications/applications/prompt_lib/{self.project_id}'
+        self.integration_details = f'{self.base_url}{self.api_path}/integrations/integration/{self.project_id}'
+        self.secrets_url = f'{self.base_url}{self.api_path}/secrets/secret/{self.project_id}'
+        self.artifacts_url = f'{self.base_url}{self.api_path}/artifacts/artifacts/default/{self.project_id}'
+        self.artifact_url = f'{self.base_url}{self.api_path}/artifacts/artifact/default/{self.project_id}'
+        self.bucket_url = f'{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}'
+        self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
+        self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
+        self.image_generation_url = f'{self.base_url}{self.llm_path}/images/generations'
+        self.auth_user_url = f'{self.base_url}{self.api_path}/auth/user'
+        self.configurations: list = configurations or []
+        self.model_timeout = kwargs.get('model_timeout', 120)
+        self.model_image_generation = kwargs.get('model_image_generation')
+
+    def get_mcp_toolkits(self):
+        if user_id := self._get_real_user_id():
+            url = f'{self.mcp_tools_list}/{user_id}'
+            data = requests.get(url, headers=self.headers, verify=False).json()
+            return data
+        else:
+            return []
+
+    def mcp_tool_call(self, params: dict[str, Any]):
+        if user_id := self._get_real_user_id():
+            url = f'{self.mcp_tools_call}/{user_id}'
+            #
+            # This loop iterates over each key-value pair in the arguments dictionary,
+            # and if a value is a Pydantic object, it replaces it with its dictionary representation using .dict().
+            for arg_name, arg_value in params.get('params', {}).get('arguments', {}).items():
+                if isinstance(arg_value, list):
+                    params['params']['arguments'][arg_name] = [
+                        item.dict() if hasattr(item, 'dict') and callable(item.dict) else item
+                        for item in arg_value
+                    ]
+                elif hasattr(arg_value, 'dict') and callable(arg_value.dict):
+                    params['params']['arguments'][arg_name] = arg_value.dict()
+            #
+            response = requests.post(url, headers=self.headers, json=params, verify=False)
+            try:
+                return response.json()
+            except (ValueError, TypeError):
+                return response.text
+        else:
+            return f'Error: Could not determine user ID for MCP tool call'
+
+    def get_app_details(self, application_id: int):
+        url = f'{self.app}/{application_id}'
+        data = requests.get(url, headers=self.headers, verify=False).json()
+        return data
+
+    def get_list_of_apps(self):
+        apps = []
+        limit = 10
+        offset = 0
+        total_count = None
+
+        while total_count is None or offset < total_count:
+            params = {'offset': offset, 'limit': limit}
+            resp = requests.get(self.list_apps_url, headers=self.headers, params=params, verify=False)
+
+            if resp.ok:
+                data = resp.json()
+                total_count = data.get('total')
+                apps.extend([{'name': app['name'], 'id': app['id']} for app in data.get('rows', [])])
+                offset += limit
+            else:
+                break
+
+        return apps
+
+    def fetch_available_configurations(self) -> list:
+        resp = requests.get(self.configurations_url, headers=self.headers, verify=False)
+        if resp.ok:
+            return resp.json()
+        return []
+
+    def all_models_and_integrations(self):
+        resp = requests.get(self.ai_section_url, headers=self.headers, verify=False)
+        if resp.ok:
+            return resp.json()
+        return []
+
+    def generate_image(self,
+                       prompt: str,
+                       n: int = 1,
+                       size: str = 'auto',
+                       quality: str = 'auto',
+                       response_format: str = 'b64_json',
+                       style: Optional[str] = None) -> dict:
+
+        if not self.model_image_generation:
+            raise ValueError('Image generation model is not configured for this client')
+
+        image_generation_data = {
+            'prompt': prompt,
+            'model': self.model_image_generation,
+            'n': n,
+            'response_format': response_format,
+        }
+
+        # Only add optional parameters if they have meaningful values
+        if size and size.lower() != 'auto':
+            image_generation_data['size'] = size
+
+        if quality and quality.lower() != 'auto':
+            image_generation_data['quality'] = quality
+
+        if style:
+            image_generation_data['style'] = style
+
+        # Standard headers for image generation
+        image_headers = self.headers.copy()
+        image_headers.update({
+            'Content-Type': 'application/json',
+        })
+
+        logger.info(f'Generating image with model: {self.model_image_generation}, prompt: {prompt[:50]}...')
+
+        try:
+            response = requests.post(
+                self.image_generation_url,
+                headers=image_headers,
+                json=image_generation_data,
+                verify=False,
+                timeout=self.model_timeout
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except requests.exceptions.HTTPError as e:
+            logger.error(f'Image generation failed: {e.response.status_code} - {e.response.text}')
+            raise
+        except requests.exceptions.RequestException as e:
+            logger.error(f'Image generation request failed: {e}')
+            raise
+
+    def get_app_version_details(self, application_id: int, application_version_id: int) -> dict:
+        url = f'{self.application_versions}/{application_id}/{application_version_id}'
+        if self.configurations:
+            configs = self.configurations
+        else:
+            configs = self.fetch_available_configurations()
+
+        resp = requests.patch(url, headers=self.headers, verify=False, json={'configurations': configs})
+        if resp.ok:
+            return resp.json()
+        logger.error(f'Failed to fetch application version details: {resp.status_code} - {resp.text}.'
+                     f' Application ID: {application_id}, Version ID: {application_version_id}')
+        raise ApiDetailsRequestError(
+            f'Failed to fetch application version details for {application_id}/{application_version_id}.')
+
+    def get_integration_details(self, integration_id: str, format_for_model: bool = False):
+        url = f'{self.integration_details}/{integration_id}'
+        data = requests.get(url, headers=self.headers, verify=False).json()
+        return data
+
+    def unsecret(self, secret_name: str):
+        url = f'{self.secrets_url}/{secret_name}'
+        data = requests.get(url, headers=self.headers, verify=False).json()
+        logger.info(f'Unsecret response: {data}')
+        return data.get('value', None)
+
+    def artifact(self, bucket_name):
+        return SandboxArtifact(self, bucket_name)
+
+    def _process_requst(self, data: requests.Response) -> Dict[str, str]:
+        if data.status_code == 403:
+            return {'error': 'You are not authorized to access this resource'}
+        elif data.status_code == 404:
+            return {'error': 'Resource not found'}
+        elif data.status_code != 200:
+            return {
+                'error': 'An error occurred while fetching the resource',
+                'content': data.text
+            }
+        else:
+            return data.json()
+
+    def bucket_exists(self, bucket_name):
+        try:
+            resp = self._process_requst(
+                requests.get(f'{self.bucket_url}', headers=self.headers, verify=False)
+            )
+            for each in resp.get('rows', []):
+                if each['name'] == bucket_name:
+                    return True
+            return False
+        except:
+            return False
+
+    def create_bucket(self, bucket_name, expiration_measure='months', expiration_value=1):
+        post_data = {
+            'name': bucket_name,
+            'expiration_measure': expiration_measure,
+            'expiration_value': expiration_value
+        }
+        resp = requests.post(f'{self.bucket_url}', headers=self.headers, json=post_data, verify=False)
+        return self._process_requst(resp)
+
+    def list_artifacts(self, bucket_name: str):
+        # Ensure bucket name is lowercase as required by the API
+        url = f'{self.artifacts_url}/{bucket_name.lower()}'
+        data = requests.get(url, headers=self.headers, verify=False)
+        return self._process_requst(data)
+
+    def create_artifact(self, bucket_name, artifact_name, artifact_data):
+        url = f'{self.artifacts_url}/{bucket_name.lower()}'
+        data = requests.post(url, headers=self.headers, files={
+            'file': (artifact_name, artifact_data)
+        }, verify=False)
+        return self._process_requst(data)
+
+    def download_artifact(self, bucket_name, artifact_name):
+        url = f'{self.artifact_url}/{bucket_name.lower()}/{artifact_name}'
+        data = requests.get(url, headers=self.headers, verify=False)
+        if data.status_code == 403:
+            return {'error': 'You are not authorized to access this resource'}
+        elif data.status_code == 404:
+            return {'error': 'Resource not found'}
+        elif data.status_code != 200:
+            return {
+                'error': 'An error occurred while fetching the resource',
+                'content': data.content
+            }
+        return data.content
+
+    def delete_artifact(self, bucket_name, artifact_name):
+        url = f'{self.artifact_url}/{bucket_name}'
+        data = requests.delete(url, headers=self.headers, verify=False, params={'filename': quote(artifact_name)})
+        return self._process_requst(data)
+
+    def get_user_data(self) -> Dict[str, Any]:
+        resp = requests.get(self.auth_user_url, headers=self.headers, verify=False)
+        if resp.ok:
+            return resp.json()
+        logger.error(f'Failed to fetch user data: {resp.status_code} - {resp.text}')
+        raise ApiDetailsRequestError(f'Failed to fetch user data with status code {resp.status_code}.')
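
A minimal sketch of the new sandbox client's artifact workflow, assuming the import path follows the wheel layout listed above. The base URL, project ID, and token below are placeholders, and every call issues a real HTTP request (with verify=False, matching the client code), so this is only meaningful against a trusted deployment:

    from alita_sdk.runtime.clients.sandbox_client import SandboxClient

    client = SandboxClient(
        base_url="https://elitea.example.com",  # placeholder deployment URL
        project_id=1,                           # placeholder project
        auth_token="YOUR_TOKEN",                # placeholder bearer token
        model_timeout=60,
    )

    # SandboxArtifact creates the bucket on first use if it does not already exist.
    bucket = client.artifact("demo-bucket")
    print(bucket.create("notes.txt", "hello from the sandbox"))  # upload
    print(bucket.list())                                         # list bucket contents
    print(bucket.get("notes.txt"))  # currently returned as the raw payload (see the TODO above)
    bucket.delete("notes.txt")

Note that list_artifacts, create_artifact, and download_artifact lowercase the bucket name before calling the API, while delete_artifact passes it through unchanged, so bucket names that are already lowercase avoid any mismatch.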