alita-sdk 0.3.397__py3-none-any.whl → 0.3.411__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of alita-sdk has been flagged as potentially problematic.
- alita_sdk/runtime/clients/client.py +3 -2
- alita_sdk/runtime/langchain/assistant.py +12 -5
- alita_sdk/runtime/langchain/langraph_agent.py +25 -3
- alita_sdk/runtime/toolkits/application.py +8 -1
- alita_sdk/runtime/toolkits/tools.py +72 -62
- alita_sdk/runtime/tools/application.py +7 -0
- alita_sdk/runtime/tools/llm.py +6 -6
- alita_sdk/tools/__init__.py +41 -35
- alita_sdk/tools/code_indexer_toolkit.py +13 -3
- alita_sdk/tools/jira/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/api_wrapper.py +55 -11
- alita_sdk/tools/sharepoint/authorization_helper.py +131 -1
- {alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/RECORD +17 -17
- {alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/clients/client.py CHANGED

@@ -568,7 +568,7 @@ class AlitaClient:
     def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
                       tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                       memory=None, runtime='langchain', variables: Optional[list] = None,
-                      store: Optional[BaseStore] = None):
+                      store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False):
         """
         Create a predict-type agent with minimal configuration.
@@ -581,6 +581,7 @@ class AlitaClient:
             runtime: Runtime type (default: 'langchain')
             variables: Optional list of variables for the agent
             store: Optional store for memory
+            debug_mode: Enable debug mode for cases when assistant can be initialized without tools
 
         Returns:
             Runnable agent ready for execution
@@ -600,7 +601,7 @@ class AlitaClient:
             'variables': variables
         }
         return LangChainAssistant(self, agent_data, llm,
-                                  chat_history, "predict", memory=memory, store=store).runnable()
+                                  chat_history, "predict", memory=memory, store=store, debug_mode=debug_mode).runnable()
 
     def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
                           runtime_config: dict = None, llm_model: str = None,
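Taken together, the client.py changes thread a new debug_mode flag from predict_agent through the Assistant into get_tools. A minimal usage sketch (illustrative, not from the diff; it assumes an already-configured AlitaClient `client` and ChatOpenAI `llm`):

agent = client.predict_agent(
    llm,
    instructions="You are a helpful assistant.",
    tools=[],
    debug_mode=True,  # new in 0.3.411; with False (the default) toolkit init errors still raise
)
print(agent.invoke({"input": "hello"}))  # 'input' key per the runnable's contract further below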
alita_sdk/runtime/langchain/assistant.py CHANGED

@@ -17,6 +17,7 @@ from .constants import REACT_ADDON, REACT_VARS, XML_ADDON
 from .chat_message_template import Jinja2TemplatedChatMessagesTemplate
 from ..tools.echo import EchoTool
 from langchain_core.tools import BaseTool, ToolException
+from jinja2 import Environment, DebugUndefined
 
 logger = logging.getLogger(__name__)
 
@@ -29,7 +30,8 @@ class Assistant:
                  app_type: str = "openai",
                  tools: Optional[list] = [],
                  memory: Optional[Any] = None,
-                 store: Optional[BaseStore] = None):
+                 store: Optional[BaseStore] = None,
+                 debug_mode: Optional[bool] = False):
 
         self.app_type = app_type
         self.memory = memory
@@ -87,7 +89,7 @@ class Assistant:
             for internal_tool_name in meta.get("internal_tools"):
                 version_tools.append({"type": "internal_tool", "name": internal_tool_name})
 
-        self.tools = get_tools(version_tools, alita_client=alita, llm=self.client, memory_store=self.store)
+        self.tools = get_tools(version_tools, alita_client=alita, llm=self.client, memory_store=self.store, debug_mode=debug_mode)
         if tools:
             self.tools += tools
         # Handle prompt setup
@@ -288,6 +290,7 @@ class Assistant:
                     'value': 'messages'
                 }
             },
+            'step_limit': self.max_iterations,
             'input': ['messages'],
             'output': ['messages'],
             'transition': 'END'
@@ -313,7 +316,8 @@ class Assistant:
             store=self.store,
             debug=False,
             for_subgraph=False,
-            alita_client=self.alita_client
+            alita_client=self.alita_client,
+            steps_limit=self.max_iterations
         )
 
         return agent
@@ -328,7 +332,8 @@ class Assistant:
         agent = create_graph(
             client=self.client, tools=self.tools,
             yaml_schema=self.prompt, memory=memory,
-            alita_client=self.alita_client
+            alita_client=self.alita_client,
+            steps_limit=self.max_iterations
         )
         #
         return agent
@@ -348,5 +353,7 @@ class Assistant:
                 continue
             # take only the content of the system message from the openai prompt
             if isinstance(message, SystemMessage):
-                return message.content
+                environment = Environment(undefined=DebugUndefined)
+                template = environment.from_string(message.content)
+                return template.render(self.prompt.partial_variables)
         return None
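The system-message change means partial prompt variables are now rendered through Jinja2 with DebugUndefined, which substitutes the variables it knows and leaves unknown placeholders untouched instead of raising. A standalone sketch of that behavior (illustrative, not from the diff):

from jinja2 import Environment, DebugUndefined

env = Environment(undefined=DebugUndefined)
template = env.from_string("Hello {{ name }}, today is {{ day }}")
print(template.render({"name": "Alita"}))
# prints: Hello Alita, today is {{ day }}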
alita_sdk/runtime/langchain/langraph_agent.py CHANGED

@@ -596,7 +596,7 @@ def create_graph(
             else:
                 # Use all available tools
                 available_tools = [tool for tool in tools if isinstance(tool, BaseTool)]
-
+
             lg_builder.add_node(node_id, LLMNode(
                 client=client,
                 input_mapping=node.get('input_mapping', {'messages': {'type': 'variable', 'value': 'messages'}}),
@@ -607,7 +607,9 @@ def create_graph(
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
                 available_tools=available_tools,
-                tool_names=tool_names))
+                tool_names=tool_names,
+                steps_limit=kwargs.get('steps_limit', 25)
+            ))
         elif node_type == 'router':
             # Add a RouterNode as an independent node
             lg_builder.add_node(node_id, RouterNode(
@@ -793,8 +795,28 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             current_message = input['input']
         else:
             current_message = input.get('input')[-1]
+
         # TODO: add handler after we add 2+ inputs (filterByType, etc.)
-
+        if isinstance(current_message, HumanMessage):
+            current_content = current_message.content
+            if isinstance(current_content, list):
+                text_contents = [
+                    item['text'] if isinstance(item, dict) and item.get('type') == 'text'
+                    else item if isinstance(item, str)
+                    else None
+                    for item in current_content
+                ]
+                text_contents = [text for text in text_contents if text is not None]
+                input['input'] = ". ".join(text_contents)
+            elif isinstance(current_content, str):
+                # on regenerate case
+                input['input'] = current_content
+            else:
+                input['input'] = str(current_content)
+        elif isinstance(current_message, str):
+            input['input'] = current_message
+        else:
+            input['input'] = str(current_message)
         if input.get('messages'):
             # Ensure existing messages are LangChain objects
             input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
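The invoke() change above flattens multimodal HumanMessage content into a plain string for the 'input' key. A standalone sketch of that flattening (illustrative; the image URL is a made-up example):

from langchain_core.messages import HumanMessage

msg = HumanMessage(content=[
    {"type": "text", "text": "Describe this image"},
    {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    "and keep it short",
])
texts = [
    item["text"] if isinstance(item, dict) and item.get("type") == "text"
    else item if isinstance(item, str) else None
    for item in msg.content
]
print(". ".join(t for t in texts if t is not None))
# Describe this image. and keep it short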
alita_sdk/runtime/toolkits/application.py CHANGED

@@ -39,7 +39,14 @@ class ApplicationToolkit(BaseToolkit):
             description=app_details.get("description"),
             application=app,
             args_schema=applicationToolSchema,
-            return_type='str')])
+            return_type='str',
+            client=client,
+            args_runnable={
+                "application_id": application_id,
+                "application_version_id": application_version_id,
+                "store": store,
+                "llm": client.get_llm(version_details['llm_settings']['model_name'], model_settings),
+            })])
 
     def get_tools(self):
         return self.tools
alita_sdk/runtime/toolkits/tools.py CHANGED

@@ -1,4 +1,5 @@
 import logging
+from typing import Optional
 
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
@@ -34,74 +35,83 @@ def get_toolkits():
     return core_toolkits + community_toolkits() + alita_toolkits()
 
 
-def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None) -> list:
+def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False) -> list:
     prompts = []
     tools = []
 
     for tool in tools_list:
-        … (30 removed lines truncated in this view)
-        # TODO: update configuration of internal tools
-        elif tool['type'] == 'internal_tool':
-            if tool['name'] == 'pyodide':
-                tools += SandboxToolkit.get_toolkit(
-                    stateful=False,
-                    allow_net=True,
-                    alita_client=alita_client,
+        try:
+            if tool['type'] == 'datasource':
+                tools.extend(DatasourcesToolkit.get_toolkit(
+                    alita_client,
+                    datasource_ids=[int(tool['settings']['datasource_id'])],
+                    selected_tools=tool['settings']['selected_tools'],
+                    toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
+                ).get_tools())
+            elif tool['type'] == 'application' and tool.get('agent_type', '') != 'pipeline' :
+                tools.extend(ApplicationToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    selected_tools=[]
+                ).get_tools())
+            elif tool['type'] == 'application' and tool.get('agent_type', '') == 'pipeline':
+                # static get_toolkit returns a list of CompiledStateGraph stubs
+                tools.extend(SubgraphToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    app_api_key=alita_client.auth_token,
+                    selected_tools=[],
+                    llm=llm
+                ))
+            elif tool['type'] == 'memory':
+                tools += MemoryToolkit.get_toolkit(
+                    namespace=tool['settings'].get('namespace', str(tool['id'])),
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    store=memory_store,
                 ).get_tools()
-        … (4 removed lines truncated in this view)
+            # TODO: update configuration of internal tools
+            elif tool['type'] == 'internal_tool':
+                if tool['name'] == 'pyodide':
+                    tools += SandboxToolkit.get_toolkit(
+                        stateful=False,
+                        allow_net=True,
+                        alita_client=alita_client,
                     ).get_tools()
-        … (20 removed lines truncated in this view)
+                elif tool['name'] == 'image_generation':
+                    if alita_client and alita_client.model_image_generation:
+                        tools += ImageGenerationToolkit.get_toolkit(
+                            client=alita_client,
+                        ).get_tools()
+                    else:
+                        logger.warning("Image generation internal tool requested "
+                                       "but no image generation model configured")
+            elif tool['type'] == 'artifact':
+                tools.extend(ArtifactToolkit.get_toolkit(
+                    client=alita_client,
+                    bucket=tool['settings']['bucket'],
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    selected_tools=tool['settings'].get('selected_tools', []),
+                    llm=llm,
+                    # indexer settings
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    embedding_model=tool['settings'].get('embedding_model'),
+                    collection_name=f"{tool.get('toolkit_name')}",
+                    collection_schema = str(tool['id'])
+                ).get_tools())
+            elif tool['type'] == 'vectorstore':
+                tools.extend(VectorStoreToolkit.get_toolkit(
+                    llm=llm,
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    **tool['settings']).get_tools())
+        except Exception as e:
+            logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
+            if debug_mode:
+                logger.info("Skipping tool initialization error due to debug mode.")
+                continue
+            else:
+                raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
 
     if len(prompts) > 0:
         tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
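The new try/except wrapper gives get_tools a clear error contract: with debug_mode on, a failing toolkit is logged and skipped; with it off, initialization aborts with a ToolException. A reduced sketch (illustrative; `build_toolkit` is a hypothetical stand-in for the per-type branches above):

from langchain_core.tools import ToolException

def init_toolkits(tool_configs, build_toolkit, debug_mode=False):
    tools = []
    for cfg in tool_configs:
        try:
            tools.extend(build_toolkit(cfg))  # hypothetical per-type dispatch
        except Exception as e:
            if debug_mode:
                continue  # keep whatever toolkits did load
            raise ToolException(f"Error initializing toolkit for tool '{cfg.get('name', 'unknown')}': {e}")
    return tools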
alita_sdk/runtime/tools/application.py CHANGED

@@ -50,6 +50,8 @@ class Application(BaseTool):
     application: Any
     args_schema: Type[BaseModel] = applicationToolSchema
     return_type: str = "str"
+    client: Any
+    args_runnable: dict = {}
 
     @field_validator('name', mode='before')
     @classmethod
@@ -66,6 +68,11 @@ class Application(BaseTool):
         return self._run(*config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
+        if self.client and self.args_runnable:
+            # Recreate new LanggraphAgentRunnable in order to reflect the current input_mapping (it can be dynamic for pipelines).
+            # Actually, for pipelines agent toolkits LanggraphAgentRunnable is created (for LLMNode) before pipeline's schema parsing.
+            application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
+            self.application = self.client.application(**self.args_runnable, application_variables=application_variables)
         response = self.application.invoke(formulate_query(kwargs))
         if self.return_type == "str":
             return response["output"]
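A small illustration of the new _run behavior (not part of the diff): every keyword the tool receives is repackaged as an application variable before the runnable is recreated. The input names here are hypothetical:

kwargs = {"query": "summarize Q3", "filterByType": "pdf"}
application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
# {'query': {'name': 'query', 'value': 'summarize Q3'},
#  'filterByType': {'name': 'filterByType', 'value': 'pdf'}}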
alita_sdk/runtime/tools/llm.py CHANGED

@@ -30,6 +30,7 @@ class LLMNode(BaseTool):
     structured_output: Optional[bool] = Field(default=False, description='Whether to use structured output')
     available_tools: Optional[List[BaseTool]] = Field(default=None, description='Available tools for binding')
     tool_names: Optional[List[str]] = Field(default=None, description='Specific tool names to filter')
+    steps_limit: Optional[int] = Field(default=25, description='Maximum steps for tool execution')
 
     def get_filtered_tools(self) -> List[BaseTool]:
         """
@@ -184,17 +185,16 @@ class LLMNode(BaseTool):
     def __perform_tool_calling(self, completion, messages, llm_client, config):
         # Handle iterative tool-calling and execution
         new_messages = messages + [completion]
-        max_iterations = 15
         iteration = 0
 
         # Continue executing tools until no more tool calls or max iterations reached
         current_completion = completion
         while (hasattr(current_completion, 'tool_calls') and
                current_completion.tool_calls and
-               iteration < max_iterations):
+               iteration < self.steps_limit):
 
             iteration += 1
-            logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
+            logger.info(f"Tool execution iteration {iteration}/{self.steps_limit}")
 
             # Execute each tool call in the current completion
             tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls,
@@ -285,10 +285,10 @@ class LLMNode(BaseTool):
                 break
 
         # Log completion status
-        if iteration >= max_iterations:
-            logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
+        if iteration >= self.steps_limit:
+            logger.warning(f"Reached maximum iterations ({self.steps_limit}) for tool execution")
             # Add a warning message to the chat
-            warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
+            warning_msg = f"Maximum tool execution iterations ({self.steps_limit}) reached. Stopping tool execution."
             new_messages.append(AIMessage(content=warning_msg))
         else:
             logger.info(f"Tool execution completed after {iteration} iterations")
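Net effect of the llm.py changes: the tool-calling loop's hard-coded cap of 15 iterations becomes the configurable steps_limit field (default 25), which assistant.py now wires to max_iterations. A minimal sketch of the bounded loop (illustrative, not the SDK's actual implementation; `execute_round` is a hypothetical callable that runs pending tool calls and returns the next completion):

def run_tool_loop(completion, execute_round, steps_limit=25):
    iteration = 0
    while getattr(completion, "tool_calls", None) and iteration < steps_limit:
        iteration += 1
        completion = execute_round(completion)
    if iteration >= steps_limit:
        print(f"Maximum tool execution iterations ({steps_limit}) reached. Stopping tool execution.")
    return completion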
alita_sdk/tools/__init__.py CHANGED

@@ -90,68 +90,74 @@ available_count = len(AVAILABLE_TOOLS)
 total_attempted = len(AVAILABLE_TOOLS) + len(FAILED_IMPORTS)
 logger.info(f"Tool imports completed: {available_count}/{total_attempted} successful")
 
+
 def get_tools(tools_list, alita, llm, store: Optional[BaseStore] = None, *args, **kwargs):
     tools = []
+
     for tool in tools_list:
-        … (4 removed lines truncated in this view)
-        if not tool.get('settings'):
+        settings = tool.get('settings')
+
+        # Skip tools without settings early
+        if not settings:
             logger.warning(f"Tool '{tool.get('type', '')}' has no settings, skipping...")
             continue
-        … (3 removed lines truncated in this view)
+
+        # Validate tool names once
+        selected_tools = settings.get('selected_tools', [])
+        invalid_tools = [name for name in selected_tools if isinstance(name, str) and name.startswith('_')]
+        if invalid_tools:
+            raise ValueError(f"Tool names {invalid_tools} from toolkit '{tool.get('type', '')}' cannot start with '_'")
+
+        # Cache tool type and add common settings
         tool_type = tool['type']
+        settings['alita'] = alita
+        settings['llm'] = llm
+        settings['store'] = store
 
-        # … (removed line truncated in this view)
+        # Set pgvector collection schema if present
+        if settings.get('pgvector_configuration'):
+            settings['pgvector_configuration']['collection_schema'] = str(tool['id'])
+
+        # Handle ADO special cases
         if tool_type in ['ado_boards', 'ado_wiki', 'ado_plans']:
             tools.extend(AVAILABLE_TOOLS['ado']['get_tools'](tool_type, tool))
+            continue
 
-        # … (2 removed lines truncated in this view)
+        # Handle ADO repos aliases
+        if tool_type in ['ado_repos', 'azure_devops_repos'] and 'ado_repos' in AVAILABLE_TOOLS:
             try:
-                … (removed line truncated in this view)
-                if tool['settings'].get('pgvector_configuration'):
-                    # Set collection schema to toolkit_id and put it to pgvector configuration
-                    # to propagate it to the all toolkits level from single place
-                    tool['settings']['pgvector_configuration']['collection_schema'] = str(tool['id'])
-                tools.extend(get_tools_func(tool))
-                … (removed line truncated in this view)
+                tools.extend(AVAILABLE_TOOLS['ado_repos']['get_tools'](tool))
             except Exception as e:
-                logger.error(f"Error getting … (truncated in this view)
+                logger.error(f"Error getting ADO repos tools: {e}")
+            continue
 
-        # Handle … (2 removed lines truncated in this view)
+        # Handle standard tools
+        if tool_type in AVAILABLE_TOOLS and 'get_tools' in AVAILABLE_TOOLS[tool_type]:
             try:
-                tools.extend(get_tools_func(tool))
+                tools.extend(AVAILABLE_TOOLS[tool_type]['get_tools'](tool))
             except Exception as e:
-                logger.error(f"Error getting … (truncated in this view)
+                logger.error(f"Error getting tools for {tool_type}: {e}")
+                raise ToolException(f"Error getting tools for {tool_type}: {e}")
+            continue
 
         # Handle custom modules
+        if settings.get("module"):
             try:
-                settings = tool.get("settings", {})
                 mod = import_module(settings.pop("module"))
                 tkitclass = getattr(mod, settings.pop("class"))
-                get_toolkit_params = tool["settings"].copy()
+                get_toolkit_params = settings.copy()
                 get_toolkit_params["name"] = tool.get("name")
-                # … (truncated in this view)
                 toolkit = tkitclass.get_toolkit(**get_toolkit_params)
                 tools.extend(toolkit.get_tools())
             except Exception as e:
                 logger.error(f"Error in getting custom toolkit: {e}")
+            continue
 
+        # Tool not available
+        if tool_type in FAILED_IMPORTS:
+            logger.warning(f"Tool '{tool_type}' is not available: {FAILED_IMPORTS[tool_type]}")
         else:
-            if tool_type in FAILED_IMPORTS:
-                logger.warning(f"Tool '{tool_type}' is not available: {FAILED_IMPORTS[tool_type]}")
-            else:
-                logger.warning(f"Unknown tool type: {tool_type}")
+            logger.warning(f"Unknown tool type: {tool_type}")
 
     return tools
 
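One behavioral addition worth calling out in the refactored loop above: selected tool names are now validated once, up front. A self-contained sketch of the check (illustrative; the names and toolkit label are hypothetical):

selected_tools = ["create_file", "_private_helper"]
invalid_tools = [name for name in selected_tools if isinstance(name, str) and name.startswith("_")]
if invalid_tools:
    raise ValueError(f"Tool names {invalid_tools} from toolkit 'artifact' cannot start with '_'")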
alita_sdk/tools/code_indexer_toolkit.py CHANGED

@@ -1,5 +1,6 @@
 import ast
 import fnmatch
+import json
 import logging
 from typing import Optional, List, Generator
 
@@ -21,7 +22,7 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
         return self.vector_adapter.get_code_indexed_data(self, index_name)
 
     def key_fn(self, document: Document):
-        return document.metadata.get(… (truncated in this view)
+        return document.metadata.get("filename")
 
     def compare_fn(self, document: Document, idx_data):
         return (document.metadata.get('commit_hash') and
@@ -46,7 +47,7 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
         )
 
     def _extend_data(self, documents: Generator[Document, None, None]):
-        yield from … (truncated in this view)
+        yield from documents
 
     def _index_tool_params(self):
         """Return the parameters for indexing data."""
@@ -117,6 +118,15 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
                 if not file_content:
                     # empty file, skip
                     continue
+                #
+                # ensure file content is a string
+                if isinstance(file_content, bytes):
+                    file_content = file_content.decode("utf-8", errors="ignore")
+                elif isinstance(file_content, dict) and file.endswith('.json'):
+                    file_content = json.dumps(file_content)
+                elif not isinstance(file_content, str):
+                    file_content = str(file_content)
+                #
                 # hash the file content to ensure uniqueness
                 import hashlib
                 file_hash = hashlib.sha256(file_content.encode("utf-8")).hexdigest()
@@ -127,7 +137,7 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
             self._log_tool_event(message=f"{idx} out of {total_files} files have been read", tool_name="loader")
             self._log_tool_event(message=f"{len(_files)} have been read", tool_name="loader")
 
-        return file_content_generator()
+        return parse_code_files_for_db(file_content_generator())
 
     def __handle_get_files(self, path: str, branch: str):
         """
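The indexing changes above normalize file content before hashing, so bytes, JSON dicts, and other objects all become hashable strings. A standalone sketch of the coercion rules (illustrative; `coerce` is a hypothetical helper, the real logic is inline in the loader):

import json

def coerce(file_content, file="data.json"):
    # bytes -> utf-8 text, JSON dicts -> serialized text, anything else -> str()
    if isinstance(file_content, bytes):
        return file_content.decode("utf-8", errors="ignore")
    if isinstance(file_content, dict) and file.endswith(".json"):
        return json.dumps(file_content)
    if not isinstance(file_content, str):
        return str(file_content)
    return file_content

assert coerce(b"hello") == "hello"
assert coerce({"a": 1}) == '{"a": 1}'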
alita_sdk/tools/jira/api_wrapper.py CHANGED

@@ -563,7 +563,7 @@ class JiraApiWrapper(NonCodeIndexerToolkit):
         Use the appropriate issue link type (e.g., "Test", "Relates", "Blocks").
         If we use "Test" linktype, the test is inward issue, the story/other issue is outward issue.."""
 
-        comment = "… (truncated in this view)
+        comment = f"Issue {inward_issue_key} was linked to {outward_issue_key}."
         comment_body = {"content": [{"content": [{"text": comment,"type": "text"}],"type": "paragraph"}],"type": "doc","version": 1} if self.api_version == "3" else comment
         link_data = {
             "type": {"name": f"{linktype}"},
alita_sdk/tools/sharepoint/api_wrapper.py CHANGED

@@ -127,8 +127,23 @@ class SharepointApiWrapper(NonCodeIndexerToolkit):
                 result.append(temp_props)
             return result if result else ToolException("Can not get files or folder is empty. Please, double check folder name and read permissions.")
         except Exception as e:
-            … (2 removed lines truncated in this view)
+            # attempt to get via graph api
+            try:
+                # attempt to get files via graph api
+                from .authorization_helper import SharepointAuthorizationHelper
+                auth_helper = SharepointAuthorizationHelper(
+                    client_id=self.client_id,
+                    client_secret=self.client_secret.get_secret_value(),
+                    tenant="",  # optional for graph api
+                    scope="",  # optional for graph api
+                    token_json="",  # optional for graph api
+                )
+                files = auth_helper.get_files_list(self.site_url, folder_name, limit_files)
+                return files
+            except Exception as graph_e:
+                logging.error(f"Failed to load files from sharepoint via base api: {e}")
+                logging.error(f"Failed to load files from sharepoint via graph api: {graph_e}")
+                return ToolException(f"Can not get files. Please, double check folder name and read permissions: {e} and {graph_e}")
 
     def read_file(self, path,
                   is_capture_image: bool = False,
@@ -141,11 +156,28 @@ class SharepointApiWrapper(NonCodeIndexerToolkit):
             self._client.load(file).execute_query()
 
             file_content = file.read()
+            file_name = file.name
             self._client.execute_query()
         except Exception as e:
-            … (3 removed lines truncated in this view)
+            # attempt to get via graph api
+            try:
+                # attempt to get files via graph api
+                from .authorization_helper import SharepointAuthorizationHelper
+                auth_helper = SharepointAuthorizationHelper(
+                    client_id=self.client_id,
+                    client_secret=self.client_secret.get_secret_value(),
+                    tenant="",  # optional for graph api
+                    scope="",  # optional for graph api
+                    token_json="",  # optional for graph api
+                )
+                file_content = auth_helper.get_file_content(self.site_url, path)
+                file_name = path.split('/')[-1]
+            except Exception as graph_e:
+                logging.error(f"Failed to load file from SharePoint via base api: {e}. Path: {path}. Please, double check file name and path.")
+                logging.error(f"Failed to load file from SharePoint via graph api: {graph_e}. Path: {path}. Please, double check file name and path.")
+                return ToolException(f"File not found. Please, check file name and path: {e} and {graph_e}")
+        #
+        return parse_file_content(file_name=file_name,
                                   file_content=file_content,
                                   is_capture_image=is_capture_image,
                                   page_number=page_number,
@@ -219,12 +251,24 @@ class SharepointApiWrapper(NonCodeIndexerToolkit):
             yield document
 
     def _load_file_content_in_bytes(self, path):
-        … (6 removed lines truncated in this view)
+        try:
+            file = self._client.web.get_file_by_server_relative_path(path)
+            self._client.load(file).execute_query()
+            file_content = file.read()
+            self._client.execute_query()
+            #
+            return file_content
+        except Exception as e:
+            # attempt to get via graph api
+            from .authorization_helper import SharepointAuthorizationHelper
+            auth_helper = SharepointAuthorizationHelper(
+                client_id=self.client_id,
+                client_secret=self.client_secret.get_secret_value(),
+                tenant="",  # optional for graph api
+                scope="",  # optional for graph api
+                token_json="",  # optional for graph api
+            )
+            return auth_helper.get_file_content(self.site_url, path)
 
     def get_available_tools(self):
         return super().get_available_tools() + [
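All three SharePoint methods above now share the same shape: try the office365 REST client first, then retry the identical operation through Microsoft Graph before surfacing both errors. A generic sketch of that pattern (illustrative, not the SDK's code):

def with_graph_fallback(primary, fallback):
    try:
        return primary()
    except Exception as base_err:
        try:
            return fallback()
        except Exception as graph_err:
            raise RuntimeError(f"Both attempts failed: {base_err} and {graph_err}")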
alita_sdk/tools/sharepoint/authorization_helper.py CHANGED

@@ -1,7 +1,10 @@
 from datetime import datetime, timezone
+from urllib.parse import unquote, urlparse, quote
 
 import jwt
 import requests
+from botocore.response import get_response
+
 
 class SharepointAuthorizationHelper:
 
@@ -54,4 +57,131 @@ class SharepointAuthorizationHelper:
         except jwt.ExpiredSignatureError:
             return False
         except jwt.InvalidTokenError:
-            return False
+            return False
+
+    def _validate_response(self, response, required_field, error_prefix=None):
+        if response.status_code != 200:
+            raise RuntimeError(f"{error_prefix or 'Request'} failed: {response.status_code} {response.text}")
+        json_data = response.json()
+        if required_field not in json_data:
+            raise KeyError(f"'{required_field}' missing in response")
+        return json_data[required_field]
+
+    def generate_token_and_site_id(self, site_url: str) -> tuple[str, str]:
+        try:
+            parsed = urlparse(site_url)
+            domain = parsed.hostname
+            site_path = parsed.path.strip('/')
+            if not domain or not site_path:
+                raise ValueError(f"site_url missing domain or site path: {site_url}")
+            app_name = domain.split('.')[0]
+            openid_config_url = f"https://login.microsoftonline.com/{app_name}.onmicrosoft.com/v2.0/.well-known/openid-configuration"
+            response = requests.get(openid_config_url)
+            token_url = self._validate_response(response, required_field="token_endpoint", error_prefix="OpenID config")
+            token_data = {
+                "grant_type": "client_credentials",
+                "client_id": self.client_id,
+                "client_secret": self.client_secret,
+                "scope": "https://graph.microsoft.com/.default"
+            }
+            token_response = requests.post(token_url, data=token_data)
+            access_token = self._validate_response(token_response, required_field="access_token", error_prefix="Token request")
+            graph_site_url = f"https://graph.microsoft.com/v1.0/sites/{domain}:/{site_path}"
+            headers = {"Authorization": f"Bearer {access_token}"}
+            site_response = requests.get(graph_site_url, headers=headers)
+            site_id = self._validate_response(site_response, required_field="id", error_prefix="Site info")
+            return access_token, site_id
+        except Exception as e:
+            raise RuntimeError(f"Error while obtaining access_token and site_id: {e}")
+
+    def get_files_list(self, site_url: str, folder_name: str = None, limit_files: int = 100):
+        if not site_url or not site_url.startswith("https://"):
+            raise ValueError(f"Invalid site_url format: {site_url}")
+        if limit_files is not None and (not isinstance(limit_files, int) or limit_files <= 0):
+            raise ValueError(f"limit_files must be a positive integer, got: {limit_files}")
+        try:
+            access_token, site_id = self.generate_token_and_site_id(site_url)
+            headers = {"Authorization": f"Bearer {access_token}"}
+            drives_url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives"
+            drives_response = requests.get(drives_url, headers=headers)
+            drives = self._validate_response(drives_response, required_field="value", error_prefix="Drives request")
+            result = []
+            def _recurse_drive(drive_id, drive_path, parent_folder, limit_files):
+                # Escape folder_name for URL safety if present
+                if parent_folder:
+                    safe_folder_name = quote(parent_folder.strip('/'), safe="/")
+                    url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives/{drive_id}/root:/{safe_folder_name}:/children?$top={limit_files}"
+                else:
+                    url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives/{drive_id}/root/children?$top={limit_files}"
+                response = requests.get(url, headers=headers)
+                if response.status_code != 200:
+                    return []
+                files_json = response.json()
+                if "value" not in files_json:
+                    return []
+                files = []
+                for file in files_json["value"]:
+                    file_name = file.get('name', '')
+                    # Build full path reflecting nested folders
+                    if parent_folder:
+                        full_path = '/' + '/'.join([drive_path.strip('/'), parent_folder.strip('/'), file_name.strip('/')])
+                    else:
+                        full_path = '/' + '/'.join([drive_path.strip('/'), file_name.strip('/')])
+                    temp_props = {
+                        'Name': file_name,
+                        'Path': full_path,
+                        'Created': file.get('createdDateTime'),
+                        'Modified': file.get('lastModifiedDateTime'),
+                        'Link': file.get('webUrl'),
+                        'id': file.get('id')
+                    }
+                    if not all([temp_props['Name'], temp_props['Path'], temp_props['id']]):
+                        continue  # skip files with missing required fields
+                    if 'folder' in file:
+                        # Recursively extract files from this folder
+                        inner_folder = parent_folder + '/' + file_name if parent_folder else file_name
+                        inner_files = _recurse_drive(drive_id, drive_path, inner_folder, limit_files)
+                        files.extend(inner_files)
+                    else:
+                        files.append(temp_props)
+                    if limit_files is not None and len(result) + len(files) >= limit_files:
+                        return files[:limit_files - len(result)]
+                return files
+            for drive in drives:
+                drive_id = drive.get("id")
+                drive_path = unquote(urlparse(drive.get("webUrl")).path) if drive.get("webUrl") else ""
+                if not drive_id:
+                    continue  # skip drives without id
+                files = _recurse_drive(drive_id, drive_path, folder_name, limit_files)
+                result.extend(files)
+                if limit_files is not None and len(result) >= limit_files:
+                    return result[:limit_files]
+            return result
+        except Exception as e:
+            raise RuntimeError(f"Error in get_files_list: {e}")
+
+    def get_file_content(self, site_url: str, path: str):
+        try:
+            access_token, site_id = self.generate_token_and_site_id(site_url)
+            headers = {"Authorization": f"Bearer {access_token}"}
+            drives_url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives"
+            drives_response = requests.get(drives_url, headers=headers)
+            drives = self._validate_response(drives_response, required_field="value", error_prefix="Drives request")
+            path = path.strip('/')
+            #
+            for drive in drives:
+                drive_path = unquote(urlparse(drive.get("webUrl")).path).strip('/')
+                if not drive_path or not path.startswith(drive_path):
+                    continue
+                drive_id = drive.get("id")
+                if not drive_id:
+                    continue
+                path = path.replace(drive_path, '').strip('/')
+                safe_path = quote(path, safe="")
+                url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/root:/{safe_path}:/content"
+                response = requests.get(url, headers=headers)
+                if response.status_code == 200:
+                    return response.content
+            raise RuntimeError(f"File '{path}' not found in any private or shared documents.")
+        except Exception as e:
+            raise RuntimeError(f"Error in get_file_content: {e}")
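How the new helper is meant to be called from the api_wrapper (an illustrative sketch; the credential values and site URL are placeholders, and tenant/scope/token_json are passed empty because the Graph-based paths above never read them):

helper = SharepointAuthorizationHelper(
    client_id="<app-client-id>",
    client_secret="<app-client-secret>",
    tenant="", scope="", token_json="",
)
site = "https://contoso.sharepoint.com/sites/Docs"
files = helper.get_files_list(site, folder_name="Shared Documents", limit_files=10)
content = helper.get_file_content(site, files[0]["Path"])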
{alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.397
+Version: 0.3.411
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
{alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/RECORD CHANGED

@@ -36,16 +36,16 @@ alita_sdk/configurations/zephyr_essential.py,sha256=tUIrh-PRNvdrLBj6rJXqlF-h6oaM
 alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
 alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
 alita_sdk/runtime/clients/artifact.py,sha256=b7hVuGRROt6qUcT11uAZqzJqslzmlgW-Y6oGsiwNmjI,4029
-alita_sdk/runtime/clients/client.py,sha256=… (old hash truncated in this view)
+alita_sdk/runtime/clients/client.py,sha256=ElJdZHYLpuXLQadoHMcuhiHzs8HVUiiv5rZE7UU-iNg,45896
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/clients/sandbox_client.py,sha256=OhEasE0MxBBDw4o76xkxVCpNpr3xJ8spQsrsVxMrjUA,16192
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/assistant.py,sha256=… (old hash truncated in this view)
+alita_sdk/runtime/langchain/assistant.py,sha256=qKoEjbGuUnX-OZDHmSaK3plb1jON9unzEwAjxBT9DY8,16044
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
-alita_sdk/runtime/langchain/langraph_agent.py,sha256=… (old hash truncated in this view)
+alita_sdk/runtime/langchain/langraph_agent.py,sha256=SoA7il7_Q9OyJbCDVubVMVNkL1NI0OzIU7FR33R7onI,50185
 alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
 alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
 alita_sdk/runtime/langchain/store_manager.py,sha256=i8Fl11IXJhrBXq1F1ukEVln57B1IBe-tqSUvfUmBV4A,2218
@@ -96,17 +96,17 @@ alita_sdk/runtime/langchain/tools/bdd_parser/parser.py,sha256=1H1Nd_OH5Wx8A5YV1z
 alita_sdk/runtime/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/llms/preloaded.py,sha256=3AaUbZK3d8fvxAQMjR3ftOoYa0SnkCOL1EvdvDCXIHE,11321
 alita_sdk/runtime/toolkits/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/toolkits/application.py,sha256=… (old hash truncated in this view)
+alita_sdk/runtime/toolkits/application.py,sha256=HHAKgwKOckxc7EQG-AV7rz4POOzQJKFRr7AGEjmLudE,2688
 alita_sdk/runtime/toolkits/artifact.py,sha256=YChNCX4QhVpaQG7Jk4TS-Wl0Aruc4slQ2K21zh9nNO0,3176
 alita_sdk/runtime/toolkits/configurations.py,sha256=kIDAlnryPQfbZyFxV-9SzN2-Vefzx06TX1BBdIIpN90,141
 alita_sdk/runtime/toolkits/datasource.py,sha256=qk78OdPoReYPCWwahfkKLbKc4pfsu-061oXRryFLP6I,2498
 alita_sdk/runtime/toolkits/prompt.py,sha256=WIpTkkVYWqIqOWR_LlSWz3ug8uO9tm5jJ7aZYdiGRn0,1192
 alita_sdk/runtime/toolkits/subgraph.py,sha256=wwUK8JjPXkGzyVZ3tAukmvST6eGbqx_U11rpnmbrvtg,2105
-alita_sdk/runtime/toolkits/tools.py,sha256=… (old hash truncated in this view)
+alita_sdk/runtime/toolkits/tools.py,sha256=XBbbEZOTbzt1cogu7b9mg-nluZgD5EEUuaZp5QvE9b8,10122
 alita_sdk/runtime/toolkits/vectorstore.py,sha256=BGppQADa1ZiLO17fC0uCACTTEvPHlodEDYEzUcBRbAA,2901
 alita_sdk/runtime/tools/__init__.py,sha256=Fx7iHqkzA90-KfjdcUUzMUI_7kDarjuTsSpSzOW2pN0,568
 alita_sdk/runtime/tools/agent.py,sha256=m98QxOHwnCRTT9j18Olbb5UPS8-ZGeQaGiUyZJSyFck,3162
-alita_sdk/runtime/tools/application.py,sha256=… (old hash truncated in this view)
+alita_sdk/runtime/tools/application.py,sha256=RCGe-mRfj8372gTFkEX2xBvcYhw7IKdU1t50lXaBPOY,3701
 alita_sdk/runtime/tools/artifact.py,sha256=u3szFwZqguHrPZ3tZJ7S_TiZl7cxlT3oHYd6zbdpRDE,13842
 alita_sdk/runtime/tools/datasource.py,sha256=pvbaSfI-ThQQnjHG-QhYNSTYRnZB0rYtZFpjCfpzxYI,2443
 alita_sdk/runtime/tools/echo.py,sha256=spw9eCweXzixJqHnZofHE1yWiSUa04L4VKycf3KCEaM,486
@@ -114,7 +114,7 @@ alita_sdk/runtime/tools/function.py,sha256=jk_JrtuYByR9Df5EFOGFheB9HktNPJcOwf4js
 alita_sdk/runtime/tools/graph.py,sha256=7jImBBSEdP5Mjnn2keOiyUwdGDFhEXLUrgUiugO3mgA,3503
 alita_sdk/runtime/tools/image_generation.py,sha256=Kls9D_ke_SK7xmVr7I9SlQcAEBJc86gf66haN0qIj9k,7469
 alita_sdk/runtime/tools/indexer_tool.py,sha256=whSLPevB4WD6dhh2JDXEivDmTvbjiMV1MrPl9cz5eLA,4375
-alita_sdk/runtime/tools/llm.py,sha256=… (old hash truncated in this view)
+alita_sdk/runtime/tools/llm.py,sha256=OEhf4_YlZIihIpkuRKbbWJ_Lfk-V_rJHpy2NRm5xuCg,15533
 alita_sdk/runtime/tools/loop.py,sha256=uds0WhZvwMxDVFI6MZHrcmMle637cQfBNg682iLxoJA,8335
 alita_sdk/runtime/tools/loop_output.py,sha256=U4hO9PCQgWlXwOq6jdmCGbegtAxGAPXObSxZQ3z38uk,8069
 alita_sdk/runtime/tools/mcp_server_tool.py,sha256=MhLxZJ44LYrB_0GrojmkyqKoDRaqIHkEQAsg718ipog,4277
@@ -135,9 +135,9 @@ alita_sdk/runtime/utils/streamlit.py,sha256=GQ69CsjfRMcGXcCrslL0Uoj24Cl07Jeji0rZ
 alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7rUxp2MRR4tmYR8,5136
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=PJK8A-JVIzY1IowOjGG8DIqsIiEFe65qDKvFcjJCKWA,1041
-alita_sdk/tools/__init__.py,sha256=… (old hash truncated in this view)
+alita_sdk/tools/__init__.py,sha256=wrcSP0AN6HukZHPXpObCKI58cY0lVpHyzbpq609CMhE,10726
 alita_sdk/tools/base_indexer_toolkit.py,sha256=7UTcrmvGvmIBF3WGKrsEp7zJL-XB1JIgaRkbE1ZSS9A,26439
-alita_sdk/tools/code_indexer_toolkit.py,sha256=… (old hash truncated in this view)
+alita_sdk/tools/code_indexer_toolkit.py,sha256=2VkOC8JfBDc25_jp-NWyMYqpaYRETIzTJFLrIYrfBpE,7814
 alita_sdk/tools/elitea_base.py,sha256=34fmVdYgd2YXifU5LFNjMQysr4OOIZ6AOZjq4GxLgSw,34417
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=6Lrqor1VeSLbPLDHAfg_7UAUqKFy1r_n6bdsc4-ak98,1315
 alita_sdk/tools/ado/__init__.py,sha256=NnNYpNFW0_N_v1td_iekYOoQRRB7PIunbpT2f9ZFJM4,1201
@@ -264,7 +264,7 @@ alita_sdk/tools/google/bigquery/tool.py,sha256=Esf9Hsp8I0e7-5EdkFqQ-bid0cfrg-bfS
 alita_sdk/tools/google_places/__init__.py,sha256=QtmBCI0bHDK79u4hsCSWFcUihu-h4EmPSh9Yll7zz3w,3590
 alita_sdk/tools/google_places/api_wrapper.py,sha256=7nZly6nk4f4Tm7s2MVdnnwlb-1_WHRrDhyjDiqoyPjA,4674
 alita_sdk/tools/jira/__init__.py,sha256=G-9qnOYKFWM_adG0QFexh5-2pj_WaxIxxZanB3ARFqI,6339
-alita_sdk/tools/jira/api_wrapper.py,sha256=… (old hash truncated in this view)
+alita_sdk/tools/jira/api_wrapper.py,sha256=xmbZNYL1YkSsVXKuKEVQs1j0Fh7weGj4MdW5CnkXK-o,82611
 alita_sdk/tools/keycloak/__init__.py,sha256=0WB9yXMUUAHQRni1ghDEmd7GYa7aJPsTVlZgMCM9cQ0,3050
 alita_sdk/tools/keycloak/api_wrapper.py,sha256=cOGr0f3S3-c6tRDBWI8wMnetjoNSxiV5rvC_0VHb8uw,3100
 alita_sdk/tools/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -317,8 +317,8 @@ alita_sdk/tools/servicenow/__init__.py,sha256=ziEt2juPrGFyB98ZXbGf25v6gZo4UJTHsz
 alita_sdk/tools/servicenow/api_wrapper.py,sha256=WpH-bBLGFdhehs4g-K-WAkNuaD1CSrwsDpdgB3RG53s,6120
 alita_sdk/tools/servicenow/servicenow_client.py,sha256=Rdqfu-ll-qbnclMzChLZBsfXRDzgoX_FdeI2WLApWxc,3269
 alita_sdk/tools/sharepoint/__init__.py,sha256=5z2iSmm-0kbHKf70wN6OOgS4Px7tOzwkIpHXz0Vrbj4,4045
-alita_sdk/tools/sharepoint/api_wrapper.py,sha256=… (old hash truncated in this view)
-alita_sdk/tools/sharepoint/authorization_helper.py,sha256=… (old hash truncated in this view)
+alita_sdk/tools/sharepoint/api_wrapper.py,sha256=onWKNO-pC3MEvv54JyRj26RaDu2PEg7vwnQcqN3UIiQ,14675
+alita_sdk/tools/sharepoint/authorization_helper.py,sha256=QvxWFBjYZfhI1h_KkSrDbRh8D5BlFX8xWDLmlIoO4mo,9569
 alita_sdk/tools/sharepoint/utils.py,sha256=fZ1YzAu5CTjKSZeslowpOPH974902S8vCp1Wu7L44LM,446
 alita_sdk/tools/slack/__init__.py,sha256=YiPAoRc6y6uVpfHl0K1Qi-flcyPlWFIMVcVbhicGWXY,3990
 alita_sdk/tools/slack/api_wrapper.py,sha256=5VrV7iSGno8ZcDzEHdGPNhInhtODGPPvAzoZ9W9iQWE,14009
@@ -353,8 +353,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.397.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.397.dist-info/METADATA,sha256=… (old hash truncated in this view)
-alita_sdk-0.3.397.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.397.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.397.dist-info/RECORD,,
+alita_sdk-0.3.411.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.411.dist-info/METADATA,sha256=OAQs8F9qR3gpOkt1_JRbaA2m-pOt-JX4v-mOPfxATeE,19071
+alita_sdk-0.3.411.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.411.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.411.dist-info/RECORD,,

{alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/WHEEL: File without changes
{alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/licenses/LICENSE: File without changes
{alita_sdk-0.3.397.dist-info → alita_sdk-0.3.411.dist-info}/top_level.txt: File without changes