alita-sdk 0.3.532__py3-none-any.whl → 0.3.602__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (137)
  1. alita_sdk/cli/agent_executor.py +2 -1
  2. alita_sdk/cli/agent_loader.py +34 -4
  3. alita_sdk/cli/agents.py +433 -203
  4. alita_sdk/community/__init__.py +8 -4
  5. alita_sdk/configurations/__init__.py +1 -0
  6. alita_sdk/configurations/openapi.py +323 -0
  7. alita_sdk/runtime/clients/client.py +165 -7
  8. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  9. alita_sdk/runtime/langchain/assistant.py +61 -11
  10. alita_sdk/runtime/langchain/constants.py +419 -171
  11. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -2
  12. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  13. alita_sdk/runtime/langchain/langraph_agent.py +108 -23
  14. alita_sdk/runtime/langchain/utils.py +76 -14
  15. alita_sdk/runtime/skills/__init__.py +91 -0
  16. alita_sdk/runtime/skills/callbacks.py +498 -0
  17. alita_sdk/runtime/skills/discovery.py +540 -0
  18. alita_sdk/runtime/skills/executor.py +610 -0
  19. alita_sdk/runtime/skills/input_builder.py +371 -0
  20. alita_sdk/runtime/skills/models.py +330 -0
  21. alita_sdk/runtime/skills/registry.py +355 -0
  22. alita_sdk/runtime/skills/skill_runner.py +330 -0
  23. alita_sdk/runtime/toolkits/__init__.py +5 -0
  24. alita_sdk/runtime/toolkits/artifact.py +2 -1
  25. alita_sdk/runtime/toolkits/mcp.py +6 -3
  26. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  27. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  28. alita_sdk/runtime/toolkits/tools.py +139 -10
  29. alita_sdk/runtime/toolkits/vectorstore.py +1 -1
  30. alita_sdk/runtime/tools/__init__.py +3 -1
  31. alita_sdk/runtime/tools/artifact.py +15 -0
  32. alita_sdk/runtime/tools/data_analysis.py +183 -0
  33. alita_sdk/runtime/tools/llm.py +260 -73
  34. alita_sdk/runtime/tools/loop.py +3 -1
  35. alita_sdk/runtime/tools/loop_output.py +3 -1
  36. alita_sdk/runtime/tools/mcp_server_tool.py +6 -3
  37. alita_sdk/runtime/tools/router.py +2 -4
  38. alita_sdk/runtime/tools/sandbox.py +9 -6
  39. alita_sdk/runtime/tools/skill_router.py +776 -0
  40. alita_sdk/runtime/tools/tool.py +3 -1
  41. alita_sdk/runtime/tools/vectorstore.py +7 -2
  42. alita_sdk/runtime/tools/vectorstore_base.py +7 -2
  43. alita_sdk/runtime/utils/constants.py +5 -1
  44. alita_sdk/runtime/utils/mcp_client.py +1 -1
  45. alita_sdk/runtime/utils/mcp_sse_client.py +1 -1
  46. alita_sdk/runtime/utils/toolkit_utils.py +2 -0
  47. alita_sdk/tools/__init__.py +44 -2
  48. alita_sdk/tools/ado/repos/__init__.py +26 -8
  49. alita_sdk/tools/ado/repos/repos_wrapper.py +78 -52
  50. alita_sdk/tools/ado/test_plan/__init__.py +3 -2
  51. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  52. alita_sdk/tools/ado/utils.py +1 -18
  53. alita_sdk/tools/ado/wiki/__init__.py +2 -1
  54. alita_sdk/tools/ado/wiki/ado_wrapper.py +23 -1
  55. alita_sdk/tools/ado/work_item/__init__.py +3 -2
  56. alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
  57. alita_sdk/tools/advanced_jira_mining/__init__.py +2 -1
  58. alita_sdk/tools/aws/delta_lake/__init__.py +2 -1
  59. alita_sdk/tools/azure_ai/search/__init__.py +2 -1
  60. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  61. alita_sdk/tools/base_indexer_toolkit.py +51 -30
  62. alita_sdk/tools/bitbucket/__init__.py +2 -1
  63. alita_sdk/tools/bitbucket/api_wrapper.py +1 -1
  64. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +3 -3
  65. alita_sdk/tools/browser/__init__.py +1 -1
  66. alita_sdk/tools/carrier/__init__.py +1 -1
  67. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  68. alita_sdk/tools/cloud/aws/__init__.py +2 -1
  69. alita_sdk/tools/cloud/azure/__init__.py +2 -1
  70. alita_sdk/tools/cloud/gcp/__init__.py +2 -1
  71. alita_sdk/tools/cloud/k8s/__init__.py +2 -1
  72. alita_sdk/tools/code/linter/__init__.py +2 -1
  73. alita_sdk/tools/code/sonar/__init__.py +2 -1
  74. alita_sdk/tools/code_indexer_toolkit.py +19 -2
  75. alita_sdk/tools/confluence/__init__.py +7 -6
  76. alita_sdk/tools/confluence/api_wrapper.py +7 -8
  77. alita_sdk/tools/confluence/loader.py +4 -2
  78. alita_sdk/tools/custom_open_api/__init__.py +2 -1
  79. alita_sdk/tools/elastic/__init__.py +2 -1
  80. alita_sdk/tools/elitea_base.py +28 -9
  81. alita_sdk/tools/figma/__init__.py +52 -6
  82. alita_sdk/tools/figma/api_wrapper.py +1158 -123
  83. alita_sdk/tools/figma/figma_client.py +73 -0
  84. alita_sdk/tools/figma/toon_tools.py +2748 -0
  85. alita_sdk/tools/github/__init__.py +2 -1
  86. alita_sdk/tools/github/github_client.py +56 -92
  87. alita_sdk/tools/github/schemas.py +4 -4
  88. alita_sdk/tools/gitlab/__init__.py +2 -1
  89. alita_sdk/tools/gitlab/api_wrapper.py +118 -38
  90. alita_sdk/tools/gitlab_org/__init__.py +2 -1
  91. alita_sdk/tools/gitlab_org/api_wrapper.py +60 -62
  92. alita_sdk/tools/google/bigquery/__init__.py +2 -1
  93. alita_sdk/tools/google_places/__init__.py +2 -1
  94. alita_sdk/tools/jira/__init__.py +2 -1
  95. alita_sdk/tools/keycloak/__init__.py +2 -1
  96. alita_sdk/tools/localgit/__init__.py +2 -1
  97. alita_sdk/tools/memory/__init__.py +1 -1
  98. alita_sdk/tools/ocr/__init__.py +2 -1
  99. alita_sdk/tools/openapi/__init__.py +490 -118
  100. alita_sdk/tools/openapi/api_wrapper.py +1368 -0
  101. alita_sdk/tools/openapi/tool.py +20 -0
  102. alita_sdk/tools/pandas/__init__.py +11 -5
  103. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  104. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  105. alita_sdk/tools/postman/__init__.py +2 -1
  106. alita_sdk/tools/pptx/__init__.py +2 -1
  107. alita_sdk/tools/qtest/__init__.py +21 -2
  108. alita_sdk/tools/qtest/api_wrapper.py +430 -13
  109. alita_sdk/tools/rally/__init__.py +2 -1
  110. alita_sdk/tools/rally/api_wrapper.py +1 -1
  111. alita_sdk/tools/report_portal/__init__.py +2 -1
  112. alita_sdk/tools/salesforce/__init__.py +2 -1
  113. alita_sdk/tools/servicenow/__init__.py +11 -10
  114. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  115. alita_sdk/tools/sharepoint/__init__.py +2 -1
  116. alita_sdk/tools/sharepoint/api_wrapper.py +2 -2
  117. alita_sdk/tools/slack/__init__.py +3 -2
  118. alita_sdk/tools/slack/api_wrapper.py +2 -2
  119. alita_sdk/tools/sql/__init__.py +3 -2
  120. alita_sdk/tools/testio/__init__.py +2 -1
  121. alita_sdk/tools/testrail/__init__.py +2 -1
  122. alita_sdk/tools/utils/content_parser.py +77 -3
  123. alita_sdk/tools/utils/text_operations.py +163 -71
  124. alita_sdk/tools/xray/__init__.py +3 -2
  125. alita_sdk/tools/yagmail/__init__.py +2 -1
  126. alita_sdk/tools/zephyr/__init__.py +2 -1
  127. alita_sdk/tools/zephyr_enterprise/__init__.py +2 -1
  128. alita_sdk/tools/zephyr_essential/__init__.py +2 -1
  129. alita_sdk/tools/zephyr_scale/__init__.py +3 -2
  130. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  131. alita_sdk/tools/zephyr_squad/__init__.py +2 -1
  132. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/METADATA +7 -6
  133. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/RECORD +137 -119
  134. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/WHEEL +0 -0
  135. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/entry_points.txt +0 -0
  136. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/licenses/LICENSE +0 -0
  137. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/top_level.txt +0 -0
@@ -86,7 +86,9 @@ Answer must be JSON only extractable by JSON.LOADS."""
         else:
             input_[-1].content += self.unstructured_output
         completion = self.client.invoke(input_, config=config)
-        result = _extract_json(completion.content.strip())
+        from ..langchain.utils import extract_text_from_completion
+        content_text = extract_text_from_completion(completion)
+        result = _extract_json(content_text.strip())
         logger.info(f"ToolNode tool params: {result}")
         try:
             # handler for application added as a tool
@@ -12,9 +12,11 @@ from alita_sdk.tools.vector_adapters.VectorStoreAdapter import VectorStoreAdapte
 from logging import getLogger
 
 from ..utils.logging import dispatch_custom_event
+from ..langchain.utils import extract_text_from_completion
 
 logger = getLogger(__name__)
 
+
 class IndexDocumentsModel(BaseModel):
     documents: Any = Field(description="Generator of documents to index")
 
@@ -684,8 +686,10 @@ class VectorStoreWrapper(BaseToolApiWrapper):
                 ]
             )
         ])
+        # Extract text content safely (handles both string and list content from thinking models)
+        search_query = extract_text_from_completion(result)
         search_results = self.search_documents(
-            result.content, doctype, filter, cut_off, search_top,
+            search_query, doctype, filter, cut_off, search_top,
             full_text_search=full_text_search,
             reranking_config=reranking_config,
             extended_search=extended_search
@@ -714,7 +718,8 @@ class VectorStoreWrapper(BaseToolApiWrapper):
                 ]
             )
         ])
-        return result.content
+        # Extract text content safely (handles both string and list content from thinking models)
+        return extract_text_from_completion(result)
 
     def _log_data(self, message: str, tool_name: str = "index_data"):
         """Log data and dispatch custom event for indexing progress"""
@@ -13,9 +13,11 @@ from pydantic import BaseModel, model_validator, Field
 from alita_sdk.tools.elitea_base import BaseToolApiWrapper
 from alita_sdk.tools.vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
 from ...runtime.utils.utils import IndexerKeywords
+from ...runtime.langchain.utils import extract_text_from_completion
 
 logger = getLogger(__name__)
 
+
 class IndexDocumentsModel(BaseModel):
     documents: Any = Field(description="Generator of documents to index")
 
@@ -625,8 +627,10 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
                 ]
             )
         ])
+        # Extract text content safely (handles both string and list content from thinking models)
+        search_query = extract_text_from_completion(result)
         search_results = self.search_documents(
-            result.content, doctype, filter, cut_off, search_top,
+            search_query, doctype, filter, cut_off, search_top,
             full_text_search=full_text_search,
             reranking_config=reranking_config,
             extended_search=extended_search
@@ -655,7 +659,8 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
                 ]
             )
         ])
-        return result.content
+        # Extract text content safely (handles both string and list content from thinking models)
+        return extract_text_from_completion(result)
 
     def get_available_tools(self):
         return [
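
Note: the body of extract_text_from_completion lives in alita_sdk/runtime/langchain/utils.py (+76 -14 in this release) and is not shown in this diff. A minimal sketch of its likely contract, inferred only from the call sites and the "string and list content from thinking models" comments above (the block-filtering details are assumptions):

    def extract_text_from_completion(completion) -> str:
        # Thinking-capable models may return message content as a list of blocks
        # rather than a plain string; keep only the text-typed parts.
        content = getattr(completion, "content", completion)
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            parts = []
            for block in content:
                if isinstance(block, str):
                    parts.append(block)
                elif isinstance(block, dict) and block.get("type") == "text":
                    parts.append(block.get("text", ""))
            return "".join(parts)
        return str(content)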
@@ -436,4 +436,8 @@ STYLES = r"""
     filter: hue-rotate(180deg) brightness(1.2);
 }
 </style>
-"""
+"""
+
+TOOLKIT_NAME_META = "toolkit_name"
+TOOL_NAME_META = "tool_name"
+TOOLKIT_TYPE_META = "toolkit_type"
@@ -192,7 +192,7 @@ class McpClient:
                 "sampling": {}
             },
             "clientInfo": {
-                "name": "Alita MCP Client",
+                "name": "ELITEA MCP Client",
                 "version": "1.0.0"
             }
         }
@@ -374,7 +374,7 @@ class McpSseClient:
                 "sampling": {}
             },
             "clientInfo": {
-                "name": "Alita MCP Client",
+                "name": "ELITEA MCP Client",
                 "version": "1.0.0"
             }
         }
@@ -59,10 +59,12 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
 
     # Create a tool configuration dict with required fields
     # Note: MCP toolkit always requires toolkit_name, other toolkits respect use_prefix flag
+    # Note: 'name' is always set for provider-based toolkits (used by provider_worker.utils.tools)
     tool_config = {
         'id': toolkit_config.get('id', random.randint(1, 1000000)),
         'type': toolkit_config.get('type', toolkit_type),
         'settings': settings,
+        'name': toolkit_name,  # Always pass name for provider toolkits
         'toolkit_name': toolkit_name if (use_prefix or toolkit_type == 'mcp') else None
     }
 
@@ -49,6 +49,9 @@ def _safe_import_tool(tool_name, module_path, get_tools_name=None, toolkit_class
     if hasattr(module, 'get_toolkit'):
         imported['get_toolkit'] = getattr(module, 'get_toolkit')
 
+    if hasattr(module, 'get_toolkit_available_tools'):
+        imported['get_toolkit_available_tools'] = getattr(module, 'get_toolkit_available_tools')
+
     if toolkit_class_name and hasattr(module, toolkit_class_name):
         imported['toolkit_class'] = getattr(module, toolkit_class_name)
         AVAILABLE_TOOLKITS[toolkit_class_name] = getattr(module, toolkit_class_name)
@@ -64,7 +67,7 @@ def _safe_import_tool(tool_name, module_path, get_tools_name=None, toolkit_class
 
 # Safe imports for all tools
 _safe_import_tool('github', 'github', 'get_tools', 'AlitaGitHubToolkit')
-_safe_import_tool('openapi', 'openapi', 'get_tools')
+_safe_import_tool('openapi', 'openapi', 'get_tools', 'AlitaOpenAPIToolkit')
 _safe_import_tool('jira', 'jira', 'get_tools', 'JiraToolkit')
 _safe_import_tool('confluence', 'confluence', 'get_tools', 'ConfluenceToolkit')
 _safe_import_tool('service_now', 'servicenow', 'get_tools', 'ServiceNowToolkit')
@@ -99,7 +102,7 @@ _safe_import_tool('k8s', 'cloud.k8s', None, 'KubernetesToolkit')
 _safe_import_tool('elastic', 'elastic', None, 'ElasticToolkit')
 _safe_import_tool('keycloak', 'keycloak', None, 'KeycloakToolkit')
 _safe_import_tool('localgit', 'localgit', None, 'AlitaLocalGitToolkit')
-_safe_import_tool('pandas', 'pandas', 'get_tools', 'PandasToolkit')
+# pandas toolkit removed - use Data Analysis internal tool instead
 _safe_import_tool('azure_search', 'azure_ai.search', 'get_tools', 'AzureSearchToolkit')
 _safe_import_tool('figma', 'figma', 'get_tools', 'FigmaToolkit')
 _safe_import_tool('salesforce', 'salesforce', 'get_tools', 'SalesforceToolkit')
@@ -183,7 +186,9 @@ def get_tools(tools_list, alita, llm, store: Optional[BaseStore] = None, *args,
             toolkit = tkitclass.get_toolkit(**get_toolkit_params)
             toolkit_tools.extend(toolkit.get_tools())
         except Exception as e:
+            import traceback
             logger.error(f"Error in getting custom toolkit: {e}")
+            logger.error(f"Traceback:\n{traceback.format_exc()}")
     else:
         if tool_type in FAILED_IMPORTS:
             logger.warning(f"Tool '{tool_type}' is not available: {FAILED_IMPORTS[tool_type]}")
@@ -240,6 +245,42 @@ def get_available_toolkit_models():
     """Return dict with available toolkit classes."""
     return deepcopy(AVAILABLE_TOOLS)
 
+
+def get_toolkit_available_tools(toolkit_type: str, settings: dict) -> dict:
+    """Return dynamic available tools + per-tool JSON schemas for a toolkit instance.
+
+    This is the single SDK entrypoint used by backend services (e.g. indexer_worker)
+    when the UI needs spec/instance-dependent tool enumeration. Toolkits that don't
+    support dynamic enumeration should return an empty payload.
+
+    Args:
+        toolkit_type: toolkit type string (e.g. 'openapi')
+        settings: persisted toolkit settings
+
+    Returns:
+        {
+            "tools": [{"name": str, "description": str}],
+            "args_schemas": {"tool_name": <json schema dict>}
+        }
+    """
+    toolkit_type = (toolkit_type or '').strip().lower()
+    if not isinstance(settings, dict):
+        settings = {}
+
+    tool_module = AVAILABLE_TOOLS.get(toolkit_type) or {}
+    enumerator = tool_module.get('get_toolkit_available_tools')
+    if not callable(enumerator):
+        return {"tools": [], "args_schemas": {}}
+
+    try:
+        result = enumerator(settings)
+        if not isinstance(result, dict):
+            return {"tools": [], "args_schemas": {}, "error": "Invalid response from toolkit enumerator"}
+        return result
+    except Exception as e:  # pylint: disable=W0718
+        logger.exception("Failed to compute available tools for toolkit_type=%s", toolkit_type)
+        return {"tools": [], "args_schemas": {}, "error": str(e)}
+
 def diagnose_imports():
     """Print diagnostic information about tool imports."""
     available_count = len(AVAILABLE_TOOLS)
@@ -276,6 +317,7 @@ def diagnose_imports():
 __all__ = [
     'get_tools',
     'get_toolkits',
+    'get_toolkit_available_tools',
     'get_available_tools',
     'get_failed_imports',
     'get_available_toolkits',
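
A usage sketch for the new entrypoint, following the docstring above; the settings payload is hypothetical since the OpenAPI toolkit's settings schema is defined elsewhere in this release:

    from alita_sdk.tools import get_toolkit_available_tools

    # 'settings' would be the persisted toolkit settings; the key shown is illustrative.
    payload = get_toolkit_available_tools("openapi", {"spec": "..."})
    for tool in payload.get("tools", []):
        print(tool["name"], "-", tool["description"])
    print(list(payload.get("args_schemas", {})))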
@@ -11,6 +11,7 @@ from ....configurations.pgvector import PgVectorConfiguration
 from ...base.tool import BaseAction
 from .repos_wrapper import ReposApiWrapper
 from ...utils import clean_string, get_max_toolkit_length, check_connection_response
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "ado_repos"
 
@@ -52,13 +53,30 @@ class AzureDevOpsReposToolkit(BaseToolkit):
         embedding_model=(Optional[str], Field(default=None, description="Embedding configuration.", json_schema_extra={'configuration_model': 'embedding'})),
 
         selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
-        __config__={'json_schema_extra': {'metadata':
-            {
-                "label": "ADO repos",
-                "icon_url": "ado-repos-icon.svg",
-                "categories": ["code repositories"],
-                "extra_categories": ["code", "repository", "version control"],
-            }}}
+        __config__={
+            'json_schema_extra': {
+                'metadata': {
+                    "label": "ADO repos",
+                    "icon_url": "ado-repos-icon.svg",
+                    "categories": ["code repositories"],
+                    "extra_categories": ["code", "repository", "version control"],
+                    "sections": {
+                        "auth": {
+                            "required": True,
+                            "subsections": [
+                                {
+                                    "name": "Token",
+                                    "fields": ["token"]
+                                }
+                            ]
+                        }
+                    },
+                    "configuration_group": {
+                        "name": "ado",
+                    }
+                }
+            }
+        }
     )
 
     @check_connection_response
@@ -107,7 +125,7 @@ class AzureDevOpsReposToolkit(BaseToolkit):
                     name=tool["name"],
                     description=description,
                     args_schema=tool["args_schema"],
-                    metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                    metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
                 )
             )
         return cls(tools=tools)
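
Given the constants added to runtime/utils/constants.py above ("toolkit_name", "tool_name", "toolkit_type") and name = "ado_repos", a tool bound for a toolkit instance hypothetically named my_repos now carries:

    # with toolkit_name set:
    {"toolkit_name": "my_repos", "toolkit_type": "ado_repos", "tool_name": "update_file"}
    # without toolkit_name:
    {"tool_name": "update_file"}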
@@ -23,7 +23,8 @@ from langchain_core.tools import ToolException
 from msrest.authentication import BasicAuthentication
 from pydantic import Field, PrivateAttr, create_model, model_validator, SecretStr
 
-from ..utils import extract_old_new_pairs, generate_diff, get_content_from_generator
+from ...elitea_base import BaseCodeToolApiWrapper
+from ..utils import generate_diff, get_content_from_generator
 from ...code_indexer_toolkit import CodeIndexerToolkit
 from ...utils.available_tools_decorator import extend_with_parent_available_tools
 
@@ -129,9 +130,30 @@ class ArgsSchema(Enum):
     )
     UpdateFile = create_model(
         "UpdateFile",
-        branch_name=(str, Field(description="The name of the branch, e.g. `my_branch`.")),
-        file_path=(str, Field(description="Path of a file to be updated.")),
-        update_query=(str, Field(description="Update query used to adjust target file.")),
+        branch_name=(
+            str,
+            Field(description="The name of the branch, e.g. `my_branch`.")
+        ),
+        file_path=(
+            str,
+            Field(description="Path of a file to be updated."),
+        ),
+        update_query=(
+            str,
+            Field(
+                description=(
+                    "The exact OLD text to be replaced and the NEW "
+                    "text to insert, using one or more block pairs like:"
+                    "OLD <<<<Hello Earth!>>>> OLD NEW <<<<Hello Mars!>>>> NEW"
+                    "Each OLD block must contain the exact text that will be replaced "
+                    "via string replacement (exact full match including non-written characters). Each corresponding NEW "
+                    "block contains the replacement text. For multi-line changes, it is "
+                    "preferred to provide several smaller OLD/NEW pairs rather than one "
+                    "large block, so that each OLD block closely matches a contiguous "
+                    "snippet from the file."
+                )
+            ),
+        ),
     )
     DeleteFile = create_model(
         "DeleteFile",
@@ -252,6 +274,9 @@ class ReposApiWrapper(CodeIndexerToolkit):
     token: Optional[SecretStr]
     _client: Optional[GitClient] = PrivateAttr()
 
+    # Reuse common file helpers from BaseCodeToolApiWrapper
+    edit_file = BaseCodeToolApiWrapper.edit_file
+
     class Config:
         arbitrary_types_allowed = True
 
@@ -835,22 +860,50 @@ class ReposApiWrapper(CodeIndexerToolkit):
             logger.error(msg)
             return ToolException(msg)
 
-    def update_file(self, branch_name: str, file_path: str, update_query: str) -> str:
+    def _write_file(self, file_path: str, content: str, branch: str = None, commit_message: str = None) -> str:
+        """Write content to a file in Azure DevOps by creating an edit commit.
+
+        This implementation follows the previous `update_file` behavior: it always
+        performs an 'edit' change (does not create the file), gets the latest
+        commit id for the branch and pushes a new commit containing the change.
         """
-        Updates a file with new content in Azure DevOps.
+        try:
+            # Get the latest commit ID of the target branch
+            branch_obj = self._client.get_branch(
+                repository_id=self.repository_id,
+                project=self.project,
+                name=branch,
+            )
+            if branch_obj is None or not hasattr(branch_obj, 'commit') or not hasattr(branch_obj.commit, 'commit_id'):
+                raise ToolException(f"Branch `{branch}` does not exist or has no commits.")
+
+            latest_commit_id = branch_obj.commit.commit_id
+
+            # Build edit change and push
+            change = GitChange("edit", file_path, content).to_dict()
+
+            ref_update = GitRefUpdate(name=f"refs/heads/{branch}", old_object_id=latest_commit_id)
+            new_commit = GitCommit(comment=commit_message or ("Update " + file_path), changes=[change])
+            push = GitPush(commits=[new_commit], ref_updates=[ref_update])
+
+            self._client.create_push(push=push, repository_id=self.repository_id, project=self.project)
+            return f"Updated file {file_path}"
+        except ToolException:
+            # Re-raise known tool exceptions
+            raise
+        except Exception as e:
+            logger.error(f"Unable to write file {file_path}: {e}")
+            raise ToolException(f"Unable to write file {file_path}: {str(e)}")
+
+    def update_file(self, branch_name: str, file_path: str, update_query: str) -> str:
+        """Updates a file with new content in Azure DevOps using OLD/NEW markers.
+
         Parameters:
             branch_name (str): The name of the branch where update the file.
             file_path (str): Path to the file for update.
-            update_query(str): Contains the file contents requried to be updated.
-                The old file contents is wrapped in OLD <<<< and >>>> OLD
-                The new file contents is wrapped in NEW <<<< and >>>> NEW
-                For example:
-                OLD <<<<
-                Hello Earth!
-                >>>> OLD
-                NEW <<<<
-                Hello Mars!
-                >>>> NEW
+            update_query(str): Contains the file contents required to be updated,
+                wrapped in Git-style OLD/NEW blocks.
+
         Returns:
             A success or failure message
         """
@@ -863,43 +916,16 @@ class ReposApiWrapper(CodeIndexerToolkit):
                 "Please create a new branch and try again."
             )
         try:
-            file_content = self._read_file(file_path, branch_name)
-
-            if isinstance(file_content, ToolException):
-                return file_content
-
-            updated_file_content = file_content
-            for old, new in extract_old_new_pairs(update_query):
-                if not old.strip():
-                    continue
-                updated_file_content = updated_file_content.replace(old, new)
-
-            if file_content == updated_file_content:
-                return (
-                    "File content was not updated because old content was not found or empty. "
-                    "It may be helpful to use the read_file action to get "
-                    "the current file contents."
-                )
-
-            # Get the latest commit ID of the active branch to use as oldObjectId
-            branch = self._client.get_branch(
-                repository_id=self.repository_id,
-                project=self.project,
-                name=self.active_branch,
-            )
-            latest_commit_id = branch.commit.commit_id
-
-            change = GitChange("edit", file_path, updated_file_content).to_dict()
-
-            ref_update = GitRefUpdate(
-                name=f"refs/heads/{self.active_branch}", old_object_id=latest_commit_id
-            )
-            new_commit = GitCommit(comment=f"Update {file_path}", changes=[change])
-            push = GitPush(commits=[new_commit], ref_updates=[ref_update])
-            self._client.create_push(
-                push=push, repository_id=self.repository_id, project=self.project
+            # Let edit_file handle parsing and content updates; this will call _read_file and _write_file.
+            # For ADO, branch_name is used as branch; commit message is derived from file_path.
+            return self.edit_file(
+                file_path=file_path,
+                file_query=update_query,
+                branch=self.active_branch,
+                commit_message=f"Update {file_path}",
             )
-            return "Updated file " + file_path
+        except ToolException as e:
+            return str(e)
         except Exception as e:
             msg = f"Unable to update file due to error:\n{str(e)}"
             logger.error(msg)
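
For reference, an update_query that exercises the reworked update_file, assuming edit_file accepts the same OLD/NEW grammar that the removed extract_old_new_pairs parsed (the wrapper variable and file name are illustrative):

    update_query = (
        "OLD <<<<\n"
        "Hello Earth!\n"
        ">>>> OLD\n"
        "NEW <<<<\n"
        "Hello Mars!\n"
        ">>>> NEW"
    )
    # wrapper.update_file("my_branch", "hello.txt", update_query) replaces the exact
    # text "Hello Earth!" with "Hello Mars!" and pushes an edit commit via _write_file.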
@@ -11,6 +11,7 @@ from ....configurations.pgvector import PgVectorConfiguration
 from .test_plan_wrapper import TestPlanApiWrapper
 from ...base.tool import BaseAction
 from ...utils import clean_string, get_max_toolkit_length, check_connection_response
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 
 name = "azure_devops_plans"
@@ -40,7 +41,7 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
     m = create_model(
         name_alias,
         ado_configuration=(AdoConfiguration, Field(description="Ado configuration", json_schema_extra={'configuration_types': ['ado']})),
-        limit=(Optional[int], Field(description="ADO plans limit used for limitation of the list with results", default=5)),
+        limit=(Optional[int], Field(description="ADO plans limit used for limitation of the list with results", default=5, gt=0)),
         # indexer settings
         pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
             description="PgVector Configuration", json_schema_extra={'configuration_types': ['pgvector']})),
@@ -122,7 +123,7 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
@@ -180,7 +180,29 @@ class TestPlanApiWrapper(NonCodeIndexerToolkit):
             connection = Connection(base_url=values['organization_url'], creds=credentials)
             cls._client = connection.clients.get_test_plan_client()
         except Exception as e:
-            raise ImportError(f"Failed to connect to Azure DevOps: {e}")
+            error_msg = str(e).lower()
+            if "expired" in error_msg or "token" in error_msg and ("invalid" in error_msg or "unauthorized" in error_msg):
+                raise ValueError(
+                    "Azure DevOps connection failed: Your access token has expired or is invalid. "
+                    "Please refresh your token in the toolkit configuration."
+                )
+            elif "401" in error_msg or "unauthorized" in error_msg:
+                raise ValueError(
+                    "Azure DevOps connection failed: Authentication failed. "
+                    "Please check your credentials in the toolkit configuration."
+                )
+            elif "404" in error_msg or "not found" in error_msg:
+                raise ValueError(
+                    "Azure DevOps connection failed: Organization or project not found. "
+                    "Please verify your organization URL and project name."
+                )
+            elif "timeout" in error_msg or "timed out" in error_msg:
+                raise ValueError(
+                    "Azure DevOps connection failed: Connection timed out. "
+                    "Please check your network connection and try again."
+                )
+            else:
+                raise ValueError(f"Azure DevOps connection failed: {e}")
         return super().validate_toolkit(values)
 
     def create_test_plan(self, test_plan_create_params: str):
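
Note on the first branch above: Python's `and` binds tighter than `or`, so the condition parses as `"expired" in error_msg or ("token" in error_msg and ("invalid" in error_msg or "unauthorized" in error_msg))`; any message containing "expired" matches regardless of the token checks.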
@@ -1,24 +1,6 @@
-import re
 import difflib
 
 
-def extract_old_new_pairs(file_query: str):
-    """
-    Extracts old and new content pairs from a file query.
-    Parameters:
-        file_query (str): The file query containing old and new content.
-    Returns:
-        list of tuples: A list where each tuple contains (old_content, new_content).
-    """
-    old_pattern = re.compile(r"OLD <<<<\s*(.*?)\s*>>>> OLD", re.DOTALL)
-    new_pattern = re.compile(r"NEW <<<<\s*(.*?)\s*>>>> NEW", re.DOTALL)
-
-    old_contents = old_pattern.findall(file_query)
-    new_contents = new_pattern.findall(file_query)
-
-    return list(zip(old_contents, new_contents))
-
-
 def generate_diff(base_text, target_text, file_path):
     base_lines = base_text.splitlines(keepends=True)
     target_lines = target_text.splitlines(keepends=True)
@@ -28,6 +10,7 @@ def generate_diff(base_text, target_text, file_path):
 
     return "".join(diff)
 
+
 def get_content_from_generator(content_generator):
     def safe_decode(chunk):
         try:
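
The middle of generate_diff falls between these two hunks and is not shown; given the `import difflib` and the visible first and last lines, it presumably wraps difflib.unified_diff. A sketch consistent with that context (the fromfile/tofile labeling is an assumption):

    import difflib

    def generate_diff(base_text, target_text, file_path):
        base_lines = base_text.splitlines(keepends=True)
        target_lines = target_text.splitlines(keepends=True)
        # Assumed middle: label both sides of the diff header with the file path.
        diff = difflib.unified_diff(base_lines, target_lines, fromfile=file_path, tofile=file_path)
        return "".join(diff)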
@@ -10,6 +10,7 @@ from ....configurations.ado import AdoConfiguration
 from ....configurations.pgvector import PgVectorConfiguration
 from ...base.tool import BaseAction
 from ...utils import clean_string, get_max_toolkit_length, check_connection_response
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "azure_devops_wiki"
 name_alias = 'ado_wiki'
@@ -116,7 +117,7 @@ class AzureDevOpsWikiToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
@@ -105,7 +105,29 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
             cls._core_client = connection.clients.get_core_client()
 
         except Exception as e:
-            return ImportError(f"Failed to connect to Azure DevOps: {e}")
+            error_msg = str(e).lower()
+            if "expired" in error_msg or "token" in error_msg and ("invalid" in error_msg or "unauthorized" in error_msg):
+                raise ValueError(
+                    "Azure DevOps connection failed: Your access token has expired or is invalid. "
+                    "Please refresh your token in the toolkit configuration."
+                )
+            elif "401" in error_msg or "unauthorized" in error_msg:
+                raise ValueError(
+                    "Azure DevOps connection failed: Authentication failed. "
+                    "Please check your credentials in the toolkit configuration."
+                )
+            elif "404" in error_msg or "not found" in error_msg:
+                raise ValueError(
+                    "Azure DevOps connection failed: Organization or project not found. "
+                    "Please verify your organization URL and project name."
+                )
+            elif "timeout" in error_msg or "timed out" in error_msg:
+                raise ValueError(
+                    "Azure DevOps connection failed: Connection timed out. "
+                    "Please check your network connection and try again."
+                )
+            else:
+                raise ValueError(f"Azure DevOps connection failed: {e}")
 
         return super().validate_toolkit(values)
 
@@ -10,6 +10,7 @@ from ....configurations.ado import AdoConfiguration
 from ....configurations.pgvector import PgVectorConfiguration
 from ...base.tool import BaseAction
 from ...utils import clean_string, get_max_toolkit_length, check_connection_response
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "ado_boards"
 
@@ -37,7 +38,7 @@ class AzureDevOpsWorkItemsToolkit(BaseToolkit):
     m = create_model(
         name,
         ado_configuration=(AdoConfiguration, Field(description="Ado Work Item configuration", json_schema_extra={'configuration_types': ['ado']})),
-        limit=(Optional[int], Field(description="ADO plans limit used for limitation of the list with results", default=5)),
+        limit=(Optional[int], Field(description="Default ADO boards result limit (can be overridden by agent instructions)", default=5, gt=0)),
         selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
         # indexer settings
         pgvector_configuration=(Optional[PgVectorConfiguration], Field(default = None,
@@ -117,7 +118,7 @@ class AzureDevOpsWorkItemsToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 