alita-sdk 0.3.206__py3-none-any.whl → 0.3.208__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in the public registry they were published to. It is provided for informational purposes only.
- alita_sdk/runtime/clients/client.py +369 -6
- alita_sdk/runtime/langchain/langraph_agent.py +6 -1
- alita_sdk/runtime/langchain/store_manager.py +4 -4
- alita_sdk/runtime/toolkits/tools.py +11 -20
- alita_sdk/runtime/utils/streamlit.py +472 -192
- alita_sdk/runtime/utils/toolkit_runtime.py +147 -0
- alita_sdk/runtime/utils/toolkit_utils.py +157 -0
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +82 -11
- alita_sdk/tools/ado/wiki/ado_wrapper.py +62 -2
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +2 -1
- alita_sdk/tools/memory/__init__.py +54 -10
- alita_sdk/tools/sharepoint/api_wrapper.py +13 -4
- {alita_sdk-0.3.206.dist-info → alita_sdk-0.3.208.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.206.dist-info → alita_sdk-0.3.208.dist-info}/RECORD +17 -24
- alita_sdk/community/analysis/__init__.py +0 -0
- alita_sdk/community/analysis/ado_analyse/__init__.py +0 -103
- alita_sdk/community/analysis/ado_analyse/api_wrapper.py +0 -261
- alita_sdk/community/analysis/github_analyse/__init__.py +0 -98
- alita_sdk/community/analysis/github_analyse/api_wrapper.py +0 -166
- alita_sdk/community/analysis/gitlab_analyse/__init__.py +0 -110
- alita_sdk/community/analysis/gitlab_analyse/api_wrapper.py +0 -172
- alita_sdk/community/analysis/jira_analyse/__init__.py +0 -141
- alita_sdk/community/analysis/jira_analyse/api_wrapper.py +0 -252
- {alita_sdk-0.3.206.dist-info → alita_sdk-0.3.208.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.206.dist-info → alita_sdk-0.3.208.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.206.dist-info → alita_sdk-0.3.208.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/utils/toolkit_runtime.py (new file)
@@ -0,0 +1,147 @@
+"""
+Toolkit runtime utilities for event dispatching and execution context.
+This module provides tools with the ability to dispatch custom events during execution.
+"""
+
+import sys
+import logging
+from typing import Dict, Any, Optional
+
+logger = logging.getLogger(__name__)
+
+
+def dispatch_custom_event(event_type: str, data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
+    """
+    Dispatch a custom event from within a toolkit tool execution.
+
+    This function can be called by toolkit tools to send events back to the runtime
+    for monitoring, logging, or other purposes.
+
+    Args:
+        event_type: Type of the event (e.g., "progress", "warning", "info")
+        data: Event data dictionary
+
+    Returns:
+        Event dictionary if successful, None if no executor context available
+
+    Example:
+        ```python
+        from alita_sdk.runtime.utils.toolkit_runtime import dispatch_custom_event
+
+        def my_tool_function(param1, param2):
+            # Dispatch a progress event
+            dispatch_custom_event("progress", {
+                "message": "Processing started",
+                "step": 1,
+                "total_steps": 3
+            })
+
+            # Do some work
+            result = process_data(param1, param2)
+
+            # Dispatch completion event
+            dispatch_custom_event("completion", {
+                "message": "Processing completed",
+                "result_size": len(result)
+            })
+
+            return result
+        ```
+    """
+    try:
+        # Try to get the current executor context
+        if hasattr(sys.modules[__name__], 'toolkit_dispatch_context'):
+            context = sys.modules[__name__].toolkit_dispatch_context
+            return context.dispatch_custom_event(event_type, data)
+        else:
+            # No executor context available - this is normal when not in test mode
+            logger.debug(f"No toolkit executor context available for event: {event_type}")
+            return None
+    except Exception as e:
+        logger.warning(f"Error dispatching custom event {event_type}: {e}")
+        return None
+
+
+def get_executor_context():
+    """
+    Get the current toolkit executor context if available.
+
+    Returns:
+        ToolkitExecutor context or None if not in execution context
+    """
+    try:
+        if hasattr(sys.modules[__name__], 'toolkit_dispatch_context'):
+            return sys.modules[__name__].toolkit_dispatch_context.executor
+        return None
+    except Exception:
+        return None
+
+
+def is_in_test_mode() -> bool:
+    """
+    Check if the toolkit is currently running in test mode.
+
+    Returns:
+        True if running in test mode with executor context, False otherwise
+    """
+    return get_executor_context() is not None
+
+
+class ToolkitRuntimeContext:
+    """
+    Context manager for toolkit runtime execution.
+
+    This can be used by tools that need to perform setup/cleanup operations
+    when running in test mode vs normal execution.
+    """
+
+    def __init__(self, tool_name: str):
+        self.tool_name = tool_name
+        self.executor = get_executor_context()
+
+    def __enter__(self):
+        if self.executor:
+            dispatch_custom_event("tool_start", {
+                "tool_name": self.tool_name,
+                "message": f"Starting execution of {self.tool_name}"
+            })
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.executor:
+            if exc_type is None:
+                dispatch_custom_event("tool_end", {
+                    "tool_name": self.tool_name,
+                    "message": f"Completed execution of {self.tool_name}",
+                    "success": True
+                })
+            else:
+                dispatch_custom_event("tool_error", {
+                    "tool_name": self.tool_name,
+                    "message": f"Error in {self.tool_name}: {exc_val}",
+                    "success": False,
+                    "error_type": exc_type.__name__ if exc_type else "Unknown"
+                })
+        return False  # Don't suppress exceptions
+
+    def dispatch_progress(self, message: str, step: int = None, total_steps: int = None, **kwargs):
+        """Convenience method for dispatching progress events."""
+        data = {"message": message, "tool_name": self.tool_name}
+        if step is not None:
+            data["step"] = step
+        if total_steps is not None:
+            data["total_steps"] = total_steps
+        data.update(kwargs)
+        dispatch_custom_event("progress", data)
+
+    def dispatch_info(self, message: str, **kwargs):
+        """Convenience method for dispatching info events."""
+        data = {"message": message, "tool_name": self.tool_name}
+        data.update(kwargs)
+        dispatch_custom_event("info", data)
+
+    def dispatch_warning(self, message: str, **kwargs):
+        """Convenience method for dispatching warning events."""
+        data = {"message": message, "tool_name": self.tool_name}
+        data.update(kwargs)
+        dispatch_custom_event("warning", data)
alita_sdk/runtime/utils/toolkit_utils.py (new file)
@@ -0,0 +1,157 @@
+"""
+Toolkit utilities for instantiating and managing toolkits.
+This module provides toolkit management functions that are not tied to any specific interface.
+"""
+
+import logging
+import random
+from typing import Dict, Any, Optional, List
+
+logger = logging.getLogger(__name__)
+
+
+def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
+                                    llm_client: Any,
+                                    alita_client: Optional[Any] = None) -> List[Any]:
+    """
+    Instantiate a toolkit with LLM client support.
+
+    This is a variant of instantiate_toolkit that includes LLM client support
+    for toolkits that require LLM capabilities.
+
+    Args:
+        toolkit_config: Configuration dictionary for the toolkit
+        llm_client: LLM client instance for tools that need LLM capabilities
+        alita_client: Optional additional client instance
+
+    Returns:
+        List of instantiated tools from the toolkit
+
+    Raises:
+        ValueError: If required configuration or client is missing
+        Exception: If toolkit instantiation fails
+    """
+    try:
+        from ..toolkits.tools import get_tools
+
+        toolkit_name = toolkit_config.get('toolkit_name')
+        if not toolkit_name:
+            raise ValueError("toolkit_name is required in configuration")
+
+        if not llm_client:
+            raise ValueError("LLM client is required but not provided")
+
+        settings = toolkit_config.get('settings', {})
+
+        # Log the configuration being used
+        logger.info(f"Instantiating toolkit {toolkit_name} with LLM client")
+        logger.debug(f"Toolkit {toolkit_name} configuration: {toolkit_config}")
+
+        # Create a tool configuration dict with required fields
+        tool_config = {
+            'id': toolkit_config.get('id', random.randint(1, 1000000)),
+            'type': toolkit_config.get('type', toolkit_name.lower()),
+            'settings': settings,
+            'toolkit_name': toolkit_name
+        }
+
+        # Get tools using the toolkit configuration with clients
+        # Parameter order: get_tools(tools_list, alita_client, llm, memory_store)
+        tools = get_tools([tool_config], alita_client, llm_client)
+
+        if not tools:
+            logger.warning(f"No tools returned for toolkit {toolkit_name}")
+            return []
+
+        logger.info(f"Successfully instantiated toolkit {toolkit_name} with {len(tools)} tools")
+        return tools
+
+    except Exception as e:
+        logger.error(f"Error instantiating toolkit {toolkit_name} with client: {str(e)}")
+        raise
+
+
+def get_toolkit_tools(toolkit_instance: Any) -> List[Any]:
+    """
+    Extract tools from an instantiated toolkit instance.
+
+    This function provides a standardized way to get tools from various
+    toolkit implementations that might have different interfaces.
+
+    Args:
+        toolkit_instance: An instantiated toolkit object
+
+    Returns:
+        List of tools from the toolkit
+
+    Raises:
+        ValueError: If no tools can be extracted from the toolkit
+    """
+    try:
+        # Try different methods to get tools from the toolkit
+        if hasattr(toolkit_instance, 'get_tools'):
+            tools = toolkit_instance.get_tools()
+        elif hasattr(toolkit_instance, 'tools'):
+            tools = toolkit_instance.tools
+        elif hasattr(toolkit_instance, '_tools'):
+            tools = toolkit_instance._tools
+        else:
+            raise ValueError("Could not find tools in the toolkit instance")
+
+        if not tools:
+            logger.warning("Toolkit instance returned empty tools list")
+            return []
+
+        logger.info(f"Extracted {len(tools)} tools from toolkit instance")
+        return tools
+
+    except Exception as e:
+        logger.error(f"Error extracting tools from toolkit: {str(e)}")
+        raise
+
+
+def find_tool_by_name(tools: List[Any], tool_name: str) -> Optional[Any]:
+    """
+    Find a specific tool by name from a list of tools.
+
+    Args:
+        tools: List of tool instances
+        tool_name: Name of the tool to find
+
+    Returns:
+        The tool instance if found, None otherwise
+    """
+    for tool in tools:
+        # Check various attributes that might contain the tool name
+        if hasattr(tool, 'name') and tool.name == tool_name:
+            return tool
+        elif hasattr(tool, 'func') and hasattr(tool.func, '__name__') and tool.func.__name__ == tool_name:
+            return tool
+        elif hasattr(tool, '__name__') and tool.__name__ == tool_name:
+            return tool

+    return None
+
+
+def get_tool_names(tools: List[Any]) -> List[str]:
+    """
+    Extract tool names from a list of tools.
+
+    Args:
+        tools: List of tool instances
+
+    Returns:
+        List of tool names
+    """
+    tool_names = []
+    for tool in tools:
+        if hasattr(tool, 'name'):
+            tool_names.append(tool.name)
+        elif hasattr(tool, 'func') and hasattr(tool.func, '__name__'):
+            tool_names.append(tool.func.__name__)
+        elif hasattr(tool, '__name__'):
+            tool_names.append(tool.__name__)
+        else:
+            tool_names.append(str(tool))
+
+    return tool_names
alita_sdk/tools/ado/test_plan/test_plan_wrapper.py
@@ -1,7 +1,10 @@
 import json
 import logging
-from typing import Optional
+from typing import Any, Dict, Generator, List, Optional
 
+from langchain_core.documents import Document
+
+from alita_sdk.tools.elitea_base import BaseIndexParams
 from azure.devops.connection import Connection
 from azure.devops.v7_0.test_plan.models import TestPlanCreateParams, TestSuiteCreateParams, \
     SuiteTestCaseCreateUpdateParameters
@@ -13,7 +16,11 @@ from pydantic.fields import FieldInfo as Field
 import xml.etree.ElementTree as ET
 
 from ..work_item import AzureDevOpsApiWrapper
-from ...elitea_base import BaseToolApiWrapper
+from ...elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
+try:
+    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
+except ImportError:
+    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
 
 logger = logging.getLogger(__name__)
 
@@ -96,7 +103,6 @@ TestCaseAddModel = create_model(
     suite_id=(int, Field(description="ID of the test suite to which test cases are to be added"))
 )
 
-
 test_steps_description = """Json or XML array string with test steps.
 Json example: [{"stepNumber": 1, "action": "Some action", "expectedResult": "Some expectation"},...]
 XML example:
@@ -158,7 +164,19 @@ TestCasesGetModel = create_model(
     suite_id=(int, Field(description="ID of the test suite for which test cases are requested"))
 )
 
-class TestPlanApiWrapper(BaseToolApiWrapper):
+# Schema for indexing ADO test plan test cases into the vector store
+indexData = create_model(
+    "indexData",
+    __base__=BaseIndexParams,
+    plan_id=(int, Field(description="ID of the test plan for which test cases are requested")),
+    suite_ids=(list[int], Field(description="List of test suite IDs for which test cases are requested (can be empty)")),
+    progress_step=(Optional[int], Field(default=None, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
+    clean_index=(Optional[bool], Field(default=False,
+                                       description="Optional flag to enforce clean existing index before indexing new data")),
+)
+
+class TestPlanApiWrapper(BaseVectorStoreToolApiWrapper):
     __test__ = False
     organization_url: str
     project: str
@@ -166,6 +184,13 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
     limit: Optional[int] = 5
     _client: Optional[TestPlanClient] = PrivateAttr()
 
+    llm: Any = None
+    connection_string: Optional[SecretStr] = None
+    collection_name: Optional[str] = None
+    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
+    embedding_model_params: Optional[Dict[str, Any]] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
+    vectorstore_type: Optional[str] = "PGVector"
+
     class Config:
         arbitrary_types_allowed = True
 
@@ -250,8 +275,10 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
         try:
             if isinstance(suite_test_case_create_update_parameters, str):
                 suite_test_case_create_update_parameters = json.loads(suite_test_case_create_update_parameters)
-            suite_test_case_create_update_params_obj = [SuiteTestCaseCreateUpdateParameters(**param) for param in suite_test_case_create_update_parameters]
-            test_cases = self._client.add_test_cases_to_suite(suite_test_case_create_update_params_obj, self.project, plan_id, suite_id)
+            suite_test_case_create_update_params_obj = [SuiteTestCaseCreateUpdateParameters(**param) for param in
+                                                        suite_test_case_create_update_parameters]
+            test_cases = self._client.add_test_cases_to_suite(suite_test_case_create_update_params_obj, self.project,
+                                                              plan_id, suite_id)
             return [test_case.as_dict() for test_case in test_cases]
         except Exception as e:
             logger.error(f"Error adding test case: {e}")
@@ -268,10 +295,11 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
                              test_steps=test_case['test_steps'],
                              test_steps_format=test_case['test_steps_format']) for test_case in test_cases]
 
-
-    def create_test_case(self, plan_id: int, suite_id: int, title: str, description: str, test_steps: str, test_steps_format: str = 'json'):
+    def create_test_case(self, plan_id: int, suite_id: int, title: str, description: str, test_steps: str,
+                         test_steps_format: str = 'json'):
         """Creates a new test case in specified suite in Azure DevOps."""
-        work_item_wrapper = AzureDevOpsApiWrapper(organization_url=self.organization_url, token=self.token.get_secret_value(), project=self.project)
+        work_item_wrapper = AzureDevOpsApiWrapper(organization_url=self.organization_url,
+                                                  token=self.token.get_secret_value(), project=self.project)
         if test_steps_format == 'json':
             steps_xml = self.get_test_steps_xml(json.loads(test_steps))
         elif test_steps_format == 'xml':
@@ -279,8 +307,9 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
         else:
             return ToolException("Unknown test steps format: " + test_steps_format)
         work_item_json = self.build_ado_test_case(title, description, steps_xml)
-        created_work_item_id = work_item_wrapper.create_work_item(work_item_json=json.dumps(work_item_json), wi_type="Test Case")['id']
-        return self.add_test_case([{"work_item": {"id": created_work_item_id}}], plan_id, suite_id)
+        created_work_item_id = \
+            work_item_wrapper.create_work_item(work_item_json=json.dumps(work_item_json), wi_type="Test Case")['id']
+        return self.add_test_case([{"work_item": {"id": created_work_item_id}}], plan_id, suite_id)
 
     def build_ado_test_case(self, title, description, steps_xml):
         """
@@ -355,6 +384,42 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
             logger.error(f"Error getting test cases: {e}")
             return ToolException(f"Error getting test cases: {e}")
 
+    def index_data(self,
+                   plan_id: str,
+                   suite_ids: list[str] = [],
+                   collection_suffix: str = '',
+                   progress_step: int = None,
+                   clean_index: bool = False
+                   ):
+        """Load ADO TestCases into the vector store."""
+        docs = self._base_loader(plan_id, suite_ids)
+        embedding = get_embeddings(self.embedding_model, self.embedding_model_params)
+        vs = self._init_vector_store(collection_suffix, embeddings=embedding)
+        return vs.index_documents(docs, progress_step=progress_step, clean_index=clean_index)
+
+    def _base_loader(self, plan_id: str, suite_ids: Optional[list[str]] = []) -> Generator[Document, None, None]:
+        cases = []
+        for sid in suite_ids:
+            cases.extend(self.get_test_cases(plan_id, sid))
+        #
+        for case in cases:
+            field_dicts = case.get('work_item', {}).get('work_item_fields', [])
+            data = {k: v for d in field_dicts for k, v in d.items()}
+            yield Document(
+                page_content=data.get('Microsoft.VSTS.TCM.Steps', ''),
+                metadata={
+                    'id': case.get('work_item', {}).get('id', ''),
+                    'title': case.get('work_item', {}).get('name', ''),
+                    'plan_id': case.get('test_plan', {}).get('id', ''),
+                    'suite_id': case.get('test_suite', {}).get('id', ''),
+                    'description': data.get('System.Description', ''),
+                    'updated_on': data.get('System.Rev', ''),
+                })
+
+    def _process_document(self, document: Document) -> Generator[Document, None, None]:
+        yield document
+
+    @extend_with_vector_tools
     def get_available_tools(self):
         """Return a list of available tools."""
         return [
@@ -423,5 +488,11 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
                 "description": self.get_test_cases.__doc__,
                 "args_schema": TestCasesGetModel,
                 "ref": self.get_test_cases,
+            },
+            {
+                "name": "index_data",
+                "ref": self.index_data,
+                "description": self.index_data.__doc__,
+                "args_schema": indexData,
             }
         ]
alita_sdk/tools/ado/wiki/ado_wrapper.py
@@ -1,6 +1,10 @@
 import logging
-from typing import Optional
+from typing import Any, Dict, Generator, List, Optional
 
+from alita_sdk.tools.elitea_base import BaseIndexParams
+from langchain_core.documents import Document
+
+from ...elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
 from azure.devops.connection import Connection
 from azure.devops.exceptions import AzureDevOpsServiceError
 from azure.devops.v7_0.core import CoreClient
@@ -12,6 +16,10 @@ from msrest.authentication import BasicAuthentication
 from pydantic import create_model, PrivateAttr, SecretStr
 from pydantic import model_validator
 from pydantic.fields import Field
+try:
+    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
+except ImportError:
+    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
 
 from ...elitea_base import BaseToolApiWrapper
 
@@ -52,14 +60,32 @@ RenamePageInput = create_model(
     version_type=(Optional[str], Field(description="Version type (branch, tag, or commit). Determines how Id is interpreted", default="branch"))
 )
 
+# Schema for indexing ADO Wiki pages into vector store
+indexData = create_model(
+    "indexData",
+    __base__=BaseIndexParams,
+    wiki_identifier=(str, Field(description="Wiki identifier to index, e.g., 'ABCProject.wiki'")),
+    progress_step=(Optional[int], Field(default=None, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
+    clean_index=(Optional[bool], Field(default=False,
+                                       description="Optional flag to enforce clean existing index before indexing new data")),
+)
+
 
-class AzureDevOpsApiWrapper(BaseToolApiWrapper):
+class AzureDevOpsApiWrapper(BaseVectorStoreToolApiWrapper):
     organization_url: str
     project: str
     token: SecretStr
     _client: Optional[WikiClient] = PrivateAttr()  # Private attribute for the wiki client
     _core_client: Optional[CoreClient] = PrivateAttr()  # Private attribute for the CoreClient client
 
+    llm: Any = None
+    connection_string: Optional[SecretStr] = None
+    collection_name: Optional[str] = None
+    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
+    embedding_model_params: Optional[Dict[str, Any]] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
+    vectorstore_type: Optional[str] = "PGVector"
+
     class Config:
         arbitrary_types_allowed = True  # Allow arbitrary types (e.g., WorkItemTrackingClient)
 
@@ -216,6 +242,34 @@ class AzureDevOpsApiWrapper(BaseToolApiWrapper):
             logger.error(f"Unable to modify wiki page: {str(e)}")
             return ToolException(f"Unable to modify wiki page: {str(e)}")
 
+    def index_data(
+            self,
+            wiki_identifier: str,
+            collection_suffix: str = '',
+            progress_step: int = None,
+            clean_index: bool = False
+    ):
+        """Load ADO Wiki pages into the vector store."""
+        docs = self._base_loader(wiki_identifier)
+        embedding = get_embeddings(self.embedding_model, self.embedding_model_params)
+        vs = self._init_vector_store(collection_suffix, embeddings=embedding)
+        return vs.index_documents(docs, progress_step=progress_step, clean_index=clean_index)
+
+    def _base_loader(self, wiki_identifier: str) -> Generator[Document, None, None]:
+        pages = self._client.get_pages_batch(pages_batch_request={}, project=self.project, wiki_identifier=wiki_identifier)
+        #
+        for page in pages:
+            content = self._client.get_page_by_id(project=self.project, wiki_identifier=wiki_identifier, id=page.id, include_content=True).page.content
+            yield Document(page_content=content, metadata={
+                'id': page.id,
+                'path': page.path,
+                'updated_on': ''
+            })
+
+    def _process_document(self, document: Document) -> Generator[Document, None, None]:
+        yield document
+
+    @extend_with_vector_tools
     def get_available_tools(self):
         """Return a list of available tools."""
         return [
@@ -260,5 +314,11 @@ class AzureDevOpsApiWrapper(BaseToolApiWrapper):
                 "description": self.rename_wiki_page.__doc__,
                 "args_schema": RenamePageInput,
                 "ref": self.rename_wiki_page,
+            },
+            {
+                "name": "index_data",
+                "ref": self.index_data,
+                "description": self.index_data.__doc__,
+                "args_schema": indexData,
             }
         ]
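
The wiki wrapper follows the same pattern: `_base_loader` pages through `get_pages_batch`, fetches each page body with `get_page_by_id`, and `index_data` pushes the resulting documents into the vector store. Again a sketch with placeholder values only:

```python
from alita_sdk.tools.ado.wiki.ado_wrapper import AzureDevOpsApiWrapper

wiki = AzureDevOpsApiWrapper(
    organization_url="https://dev.azure.com/my-org",   # placeholder
    project="MyProject",                               # placeholder
    token="<personal-access-token>",                   # placeholder
    connection_string="postgresql+psycopg://user:pass@host/db",  # placeholder
    collection_name="ado_wiki",                        # placeholder
)

# wiki_identifier matches the indexData schema, e.g. '<Project>.wiki'.
wiki.index_data(wiki_identifier="MyProject.wiki")
```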
alita_sdk/tools/chunkers/sematic/markdown_chunker.py
@@ -1,12 +1,13 @@
 from typing import Generator
 from langchain.schema import Document
+from langchain_core.documents import Document
 from langchain_text_splitters import MarkdownHeaderTextSplitter
 from langchain.text_splitter import TokenTextSplitter
 from ..utils import tiktoken_length
 from copy import deepcopy as copy
 
 
-def markdown_chunker(file_content_generator: Generator[Document, None, None], config: dict, *args, **kwargs) -> Generator[
+def markdown_chunker(file_content_generator: Generator[Document, None, None], config: dict, *args, **kwargs) -> Generator[Document, None, None]:
     strip_header = config.get("strip_header", False)
     return_each_line = config.get("return_each_line", False)
     headers_to_split_on = config.get("headers_to_split_on", [])
alita_sdk/tools/memory/__init__.py
@@ -1,7 +1,7 @@
 from typing import Optional, List
 
 from langchain_core.tools import BaseToolkit, BaseTool
-
+
 try:
     from langmem import create_manage_memory_tool, create_search_memory_tool
 except ImportError:
@@ -15,13 +15,37 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
 
 name = "memory"
 
-def get_tools(
-
-
-
-
-
-
+def get_tools(tools_list: list, alita_client, llm, memory_store=None):
+    """
+    Get memory tools for the provided tool configurations.
+
+    Args:
+        tools_list: List of tool configurations
+        alita_client: Alita client instance
+        llm: LLM client instance
+        memory_store: Optional memory store instance
+
+    Returns:
+        List of memory tools
+    """
+    all_tools = []
+
+    for tool in tools_list:
+        if tool.get('type') == 'memory' or tool.get('toolkit_name') == 'memory':
+            try:
+                toolkit_instance = MemoryToolkit().get_toolkit(
+                    namespace=tool['settings'].get('namespace', str(tool['id'])),
+                    username=tool['settings'].get('username', ''),
+                    store=tool['settings'].get('store', memory_store),
+                    toolkit_name=tool.get('toolkit_name', '')
+                )
+                all_tools.extend(toolkit_instance.get_tools())
+            except Exception as e:
+                print(f"DEBUG: Error in memory toolkit get_tools: {e}")
+                print(f"DEBUG: Tool config: {tool}")
+                raise
+
+    return all_tools
 
 class MemoryToolkit(BaseToolkit):
     tools: List[BaseTool] = []
@@ -30,7 +54,7 @@ class MemoryToolkit(BaseToolkit):
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         return create_model(
-
+            'MemoryConfig',
             namespace=(str, Field(description="Memory namespace", json_schema_extra={'toolkit_name': True})),
             username=(Optional[str], Field(description="Username", default='Tester', json_schema_extra={'hidden': True})),
             connection_string=(Optional[SecretStr], Field(description="Connection string for vectorstore",
@@ -48,7 +72,27 @@ class MemoryToolkit(BaseToolkit):
         )
 
     @classmethod
-    def get_toolkit(cls, namespace: str, store
+    def get_toolkit(cls, namespace: str, store=None, **kwargs):
+        """
+        Get toolkit with memory tools.
+
+        Args:
+            namespace: Memory namespace
+            store: PostgresStore instance (imported dynamically)
+            **kwargs: Additional arguments
+        """
+        try:
+            from langgraph.store.postgres import PostgresStore
+        except ImportError:
+            raise ImportError(
+                "PostgreSQL dependencies (psycopg) are required for MemoryToolkit. "
+                "Install with: pip install psycopg[binary]"
+            )
+
+        # Validate store type
+        if store is not None and not isinstance(store, PostgresStore):
+            raise TypeError(f"Expected PostgresStore, got {type(store)}")
+
         return cls(tools=[
             create_manage_memory_tool(namespace=namespace, store=store),
             create_search_memory_tool(namespace=namespace, store=store)