alita-sdk 0.3.376__py3-none-any.whl → 0.3.423__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of alita-sdk might be problematic.
- alita_sdk/configurations/bitbucket.py +95 -0
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/client.py +3 -2
- alita_sdk/runtime/clients/sandbox_client.py +8 -0
- alita_sdk/runtime/langchain/assistant.py +41 -38
- alita_sdk/runtime/langchain/constants.py +4 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +28 -12
- alita_sdk/runtime/langchain/langraph_agent.py +88 -27
- alita_sdk/runtime/langchain/utils.py +24 -4
- alita_sdk/runtime/toolkits/application.py +8 -1
- alita_sdk/runtime/toolkits/tools.py +80 -49
- alita_sdk/runtime/tools/__init__.py +7 -2
- alita_sdk/runtime/tools/application.py +7 -0
- alita_sdk/runtime/tools/function.py +20 -28
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +104 -8
- alita_sdk/runtime/tools/llm.py +146 -114
- alita_sdk/runtime/tools/sandbox.py +166 -63
- alita_sdk/runtime/tools/vectorstore.py +3 -2
- alita_sdk/runtime/tools/vectorstore_base.py +4 -3
- alita_sdk/runtime/utils/utils.py +1 -0
- alita_sdk/tools/__init__.py +43 -31
- alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
- alita_sdk/tools/base_indexer_toolkit.py +75 -66
- alita_sdk/tools/code_indexer_toolkit.py +13 -3
- alita_sdk/tools/confluence/api_wrapper.py +29 -7
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/elitea_base.py +7 -7
- alita_sdk/tools/gitlab/api_wrapper.py +8 -9
- alita_sdk/tools/jira/api_wrapper.py +1 -1
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/qtest/api_wrapper.py +298 -51
- alita_sdk/tools/sharepoint/api_wrapper.py +104 -33
- alita_sdk/tools/sharepoint/authorization_helper.py +175 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/utils/content_parser.py +27 -16
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +19 -6
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/RECORD +50 -50
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/top_level.txt +0 -0
Selected hunks follow, grouped by file. Note: the diff view truncates or elides some removed lines; these appear below as cut-off text or bare `-` markers.

alita_sdk/runtime/langchain/document_loaders/constants.py

```diff
@@ -43,7 +43,7 @@ DEFAULT_ALLOWED_WITH_LLM = {
     LoaderProperties.PROMPT.value: "",
 }
 
-# Image file loaders mapping
+# Image file loaders mapping - directly supported by LLM with image_url
 image_loaders_map = {
     '.png': {
         'class': AlitaImageLoader,
```
```diff
@@ -73,6 +73,17 @@ image_loaders_map = {
         'kwargs': {},
         'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
     },
+    '.webp': {
+        'class': AlitaImageLoader,
+        'mime_type': 'image/webp',
+        'is_multimodal_processing': True,
+        'kwargs': {},
+        'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
+    }
+}
+
+# Image file loaders mapping - require conversion before sending to LLM
+image_loaders_map_converted = {
     '.bmp': {
         'class': AlitaImageLoader,
         'mime_type': 'image/bmp',
```
```diff
@@ -102,7 +113,7 @@ document_loaders_map = {
     },
     '.yml': {
         'class': AlitaTextLoader,
-        'mime_type': 'application/
+        'mime_type': 'application/yaml',
         'is_multimodal_processing': False,
         'kwargs': {
             'autodetect_encoding': True
```
```diff
@@ -111,7 +122,7 @@ document_loaders_map = {
     },
     '.yaml': {
         'class': AlitaTextLoader,
-        'mime_type': 'application/
+        'mime_type': 'application/yaml',
         'is_multimodal_processing': False,
         'kwargs': {
             'autodetect_encoding': True
```
```diff
@@ -244,17 +255,17 @@ document_loaders_map = {
         'extract_images': False,
         }
     },
-    '.py': {
-        'class': AlitaPythonLoader,
-        'mime_type': 'text/x-python',
-        'is_multimodal_processing': False,
-        'kwargs': {},
-        'allowed_to_override': DEFAULT_ALLOWED_BASE
-    }
+    # '.py': {
+    #     'class': AlitaPythonLoader,
+    #     'mime_type': 'text/x-python',
+    #     'is_multimodal_processing': False,
+    #     'kwargs': {},
+    #     'allowed_to_override': DEFAULT_ALLOWED_BASE
+    # }
 }
 
 code_extensions = [
-
+    '.py', # Python
     '.js', # JavaScript
     '.ts', # TypeScript
     '.java', # Java
```
```diff
@@ -292,7 +303,12 @@ default_loader_config = {
 code_loaders_map = {ext: default_loader_config for ext in code_extensions}
 
 # Combined mapping for backward compatibility
-loaders_map = {
+loaders_map = {
+    **image_loaders_map,
+    **image_loaders_map_converted,
+    **document_loaders_map,
+    **code_loaders_map
+}
 
 loaders_allowed_to_override = {
     extension: config.get('allowed_to_override')
```
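Note on the hunk above: splitting image loaders into `image_loaders_map` (formats an LLM accepts directly via `image_url`) and `image_loaders_map_converted` (formats that need conversion first) stays invisible to callers of the combined `loaders_map`, because dict unpacking merges all four maps and later maps win on duplicate keys. A minimal standalone sketch of that merge semantics (placeholder configs, not the real loader entries):

```python
# Standalone illustration of the dict-unpacking merge used for `loaders_map`.
# The loader configs here are placeholders, not the real AlitaImageLoader entries.
image_loaders_map = {'.png': {'class': 'ImageLoader', 'mime_type': 'image/png'}}
image_loaders_map_converted = {'.bmp': {'class': 'ImageLoader', 'mime_type': 'image/bmp'}}
document_loaders_map = {'.yaml': {'class': 'TextLoader', 'mime_type': 'application/yaml'}}
code_loaders_map = {'.py': {'class': 'CodeLoader', 'mime_type': 'text/x-python'}}

# Later maps win on key collisions, so order matters if an extension
# appears in more than one map.
loaders_map = {
    **image_loaders_map,
    **image_loaders_map_converted,
    **document_loaders_map,
    **code_loaders_map,
}

assert loaders_map['.bmp']['mime_type'] == 'image/bmp'
assert set(loaders_map) == {'.png', '.bmp', '.yaml', '.py'}
```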
alita_sdk/runtime/langchain/langraph_agent.py

```diff
@@ -19,8 +19,9 @@ from langgraph.managed.base import is_managed_value
 from langgraph.prebuilt import InjectedStore
 from langgraph.store.base import BaseStore
 
+from .constants import PRINTER_NODE_RS, PRINTER
 from .mixedAgentRenderes import convert_message_to_json
-from .utils import create_state, propagate_the_input_mapping
+from .utils import create_state, propagate_the_input_mapping, safe_format
 from ..tools.function import FunctionTool
 from ..tools.indexer_tool import IndexerNode
 from ..tools.llm import LLMNode
```
```diff
@@ -232,6 +233,24 @@ class StateDefaultNode(Runnable):
             result[key] = temp_value
         return result
 
+class PrinterNode(Runnable):
+    name = "PrinterNode"
+
+    def __init__(self, input_mapping: Optional[dict[str, dict]]):
+        self.input_mapping = input_mapping
+
+    def invoke(self, state: BaseStore, config: Optional[RunnableConfig] = None) -> dict:
+        logger.info(f"Printer Node - Current state variables: {state}")
+        result = {}
+        logger.debug(f"Initial text pattern: {self.input_mapping}")
+        mapping = propagate_the_input_mapping(self.input_mapping, [], state)
+        if not mapping.get(PRINTER):
+            raise ToolException(f"PrinterNode requires '{PRINTER}' field in input mapping")
+        formatted_output = mapping[PRINTER]
+        logger.debug(f"Formatted output: {formatted_output}")
+        result[PRINTER_NODE_RS] = formatted_output
+        return result
+
 
 class StateModifierNode(Runnable):
     name = "StateModifierNode"
```
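`PrinterNode` resolves its `input_mapping` against the current state and writes the rendered text into the `PRINTER_NODE_RS` state key, raising if the mapping has no `printer` entry. A rough sketch of that resolution step in isolation (the real node delegates to `propagate_the_input_mapping`; the constants' values and the simplified resolver below are stand-ins):

```python
# Simplified stand-ins for the real constants and mapping resolver.
PRINTER = 'printer'
PRINTER_NODE_RS = 'printer_output'  # assumed value; the real one lives in constants.py

def resolve_mapping(input_mapping: dict, state: dict) -> dict:
    """Toy version of propagate_the_input_mapping: supports 'fixed' and 'fstring'."""
    out = {}
    for key, spec in input_mapping.items():
        if spec['type'] == 'fixed':
            out[key] = spec['value']
        elif spec['type'] == 'fstring':
            out[key] = spec['value'].format(**state)
    return out

state = {'ticket_id': 'PROJ-42', 'status': 'done'}
mapping = {'printer': {'type': 'fstring', 'value': 'Ticket {ticket_id} is {status}.'}}

# What PrinterNode.invoke would return for this state.
result = {PRINTER_NODE_RS: resolve_mapping(mapping, state)['printer']}
assert result[PRINTER_NODE_RS] == 'Ticket PROJ-42 is done.'
```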
```diff
@@ -348,8 +367,8 @@ class StateModifierNode(Runnable):
         return result
 
 
-
-
+def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
+                          state_class=None, output_variables=None):
     # prepare output channels
     if interrupt_after is None:
         interrupt_after = []
```
```diff
@@ -466,11 +485,12 @@ def create_graph(
         elif node_type == 'agent':
             input_params = node.get('input', ['messages'])
             input_mapping = node.get('input_mapping',
-
+                                     {'messages': {'type': 'variable', 'value': 'messages'}})
+            output_vars = node.get('output', [])
             lg_builder.add_node(node_id, FunctionTool(
                 client=client, tool=tool,
                 name=node_id, return_type='str',
-                output_variables=
+                output_variables=output_vars + ['messages'] if 'messages' not in output_vars else output_vars,
                 input_variables=input_params,
                 input_mapping= input_mapping
             ))
```
```diff
@@ -481,7 +501,8 @@ def create_graph(
             # wrap with mappings
             pipeline_name = node.get('tool', None)
             if not pipeline_name:
-                raise ValueError(
+                raise ValueError(
+                    "Subgraph must have a 'tool' node: add required tool to the subgraph node")
             node_fn = SubgraphRunnable(
                 inner=tool.graph,
                 name=pipeline_name,
```
```diff
@@ -499,15 +520,6 @@ def create_graph(
                 structured_output=node.get('structured_output', False),
                 task=node.get('task')
             ))
-        # TODO: decide on struct output for agent nodes
-        # elif node_type == 'agent':
-        #     lg_builder.add_node(node_id, AgentNode(
-        #         client=client, tool=tool,
-        #         name=node['id'], return_type='dict',
-        #         output_variables=node.get('output', []),
-        #         input_variables=node.get('input', ['messages']),
-        #         task=node.get('task')
-        #     ))
         elif node_type == 'loop':
             lg_builder.add_node(node_id, LoopNode(
                 client=client, tool=tool,
```
```diff
@@ -520,7 +532,8 @@ def create_graph(
             loop_toolkit_name = node.get('loop_toolkit_name')
             loop_tool_name = node.get('loop_tool')
             if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
-                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                    loop_tool_name)
                 for t in tools:
                     if t.name == loop_tool_name:
                         logger.debug(f"Loop tool discovered: {t}")
```
```diff
@@ -555,12 +568,13 @@ def create_graph(
                     break
         elif node_type == 'code':
            from ..tools.sandbox import create_sandbox_tool
-            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True
-
+            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
+                                               alita_client=kwargs.get('alita_client', None))
+            code_data = node.get('code', {'type': 'fixed', 'value': "return 'Code block is empty'"})
             lg_builder.add_node(node_id, FunctionTool(
                 tool=sandbox_tool, name=node['id'], return_type='dict',
                 output_variables=node.get('output', []),
-                input_mapping={'code':
+                input_mapping={'code': code_data},
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
                 alita_client=kwargs.get('alita_client', None)
```
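The `code` node now threads the `alita_client` into the sandbox tool and falls back to a fixed placeholder program when no code is supplied. A hypothetical `code` node entry as it might appear in a pipeline definition (the `{'type': 'fixed', 'value': ...}` shape matches the `code_data` default above; the surrounding schema fields are assumptions based on this diff):

```python
# Hypothetical 'code' node entry; the 'code' field uses the same
# {'type': 'fixed', 'value': ...} shape as the code_data default above.
code_node = {
    'id': 'summarize_counts',
    'type': 'code',
    'code': {'type': 'fixed', 'value': "return len(messages)"},
    'input': ['messages'],
    'output': ['count'],
    'transition': 'report_progress',
}
```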
```diff
@@ -593,7 +607,7 @@ def create_graph(
             else:
                 # Use all available tools
                 available_tools = [tool for tool in tools if isinstance(tool, BaseTool)]
-
+
             lg_builder.add_node(node_id, LLMNode(
                 client=client,
                 input_mapping=node.get('input_mapping', {'messages': {'type': 'variable', 'value': 'messages'}}),
```
```diff
@@ -604,7 +618,9 @@ def create_graph(
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
                 available_tools=available_tools,
-                tool_names=tool_names
+                tool_names=tool_names,
+                steps_limit=kwargs.get('steps_limit', 25)
+            ))
         elif node_type == 'router':
             # Add a RouterNode as an independent node
             lg_builder.add_node(node_id, RouterNode(
```
```diff
@@ -631,6 +647,22 @@ def create_graph(
                 input_variables=node.get('input', ['messages']),
                 output_variables=node.get('output', [])
             ))
+        elif node_type == 'printer':
+            lg_builder.add_node(node_id, PrinterNode(
+                input_mapping=node.get('input_mapping', {'printer': {'type': 'fixed', 'value': ''}}),
+            ))
+
+            # add interrupts after printer node if specified
+            interrupt_after.append(clean_string(node_id))
+
+            # reset printer output variable to avoid carrying over
+            reset_node_id = f"{node_id}_reset"
+            lg_builder.add_node(reset_node_id, PrinterNode(
+                input_mapping={'printer': {'type': 'fixed', 'value': ''}}
+            ))
+            lg_builder.add_edge(node_id, reset_node_id)
+            lg_builder.add_conditional_edges(reset_node_id, TransitionalEdge(clean_string(node['transition'])))
+            continue
         if node.get('transition'):
             next_step = clean_string(node['transition'])
             logger.info(f'Adding transition: {next_step}')
```
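In a pipeline definition this surfaces as a new `printer` node type: the builder registers the node, appends it to `interrupt_after` (so execution pauses once the text is emitted), and wires a companion `<node_id>_reset` node that blanks `PRINTER_NODE_RS` before following the node's `transition`. A hypothetical node entry as it might appear in a pipeline schema (field names taken from this diff; the exact surrounding schema is an assumption):

```python
# Hypothetical 'printer' node entry in a pipeline definition. The 'input_mapping'
# shape ('fstring' with state variables) mirrors what PrinterNode resolves.
printer_node = {
    'id': 'report_progress',
    'type': 'printer',
    'input_mapping': {
        'printer': {'type': 'fstring', 'value': 'Processed {done} of {total} items'},
    },
    # after the auto-generated 'report_progress_reset' node runs, control continues here
    'transition': 'next_step',
}
```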
```diff
@@ -777,20 +809,46 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             # Convert chat history dict messages to LangChain message objects
             chat_history = input.pop('chat_history')
             input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
-
+
+        # handler for LLM node: if no input (Chat perspective), then take last human message
+        if not input.get('input'):
+            if input.get('messages'):
+                input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
+                                       None)]
+
         # Append current input to existing messages instead of overwriting
         if input.get('input'):
             if isinstance(input['input'], str):
                 current_message = input['input']
             else:
                 current_message = input.get('input')[-1]
+
             # TODO: add handler after we add 2+ inputs (filterByType, etc.)
-
+            if isinstance(current_message, HumanMessage):
+                current_content = current_message.content
+                if isinstance(current_content, list):
+                    text_contents = [
+                        item['text'] if isinstance(item, dict) and item.get('type') == 'text'
+                        else item if isinstance(item, str)
+                        else None
+                        for item in current_content
+                    ]
+                    text_contents = [text for text in text_contents if text is not None]
+                    input['input'] = ". ".join(text_contents)
+                elif isinstance(current_content, str):
+                    # on regenerate case
+                    input['input'] = current_content
+                else:
+                    input['input'] = str(current_content)
+            elif isinstance(current_message, str):
+                input['input'] = current_message
+            else:
+                input['input'] = str(current_message)
             if input.get('messages'):
                 # Ensure existing messages are LangChain objects
                 input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
                 # Append to existing messages
-                input['messages'].append(current_message)
+                # input['messages'].append(current_message)
             else:
                 # No existing messages, create new list
                 input['messages'] = [current_message]
```
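The new input handler also copes with multimodal `HumanMessage` content, where `content` is a list mixing text parts and non-text parts (image blocks, for example): only the text parts are kept and joined with `'. '`. The core of that flattening, runnable on its own with plain data:

```python
# Flattening of a multimodal message content list, as in the diff above.
current_content = [
    {'type': 'text', 'text': 'Describe this image'},
    {'type': 'image_url', 'image_url': {'url': 'https://example.com/cat.png'}},
    'and compare it to the previous one',
]

text_contents = [
    item['text'] if isinstance(item, dict) and item.get('type') == 'text'
    else item if isinstance(item, str)
    else None
    for item in current_content
]
text_contents = [text for text in text_contents if text is not None]

# Non-text parts (the image block) are dropped; text parts are joined.
assert ". ".join(text_contents) == 'Describe this image. and compare it to the previous one'
```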
```diff
@@ -801,7 +859,12 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         else:
             result = super().invoke(input, config=config, *args, **kwargs)
         try:
-
+            if not result.get(PRINTER_NODE_RS):
+                output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
+                              result['messages'][-1].content)
+            else:
+                # used for printer node output - it will be reset by next `reset` node
+                output = result.get(PRINTER_NODE_RS)
         except:
             output = list(result.values())[-1]
         config_state = self.get_state(config)
```
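Output selection now prefers the last non-human message rather than blindly taking `messages[-1]` (which, after the input-handling change above, may be the echoed human turn), unless a printer node has populated `PRINTER_NODE_RS`. The selection rule in isolation, with stand-in message classes instead of the LangChain ones:

```python
# Stand-ins for the LangChain message classes, enough to show the selection rule.
class HumanMessage:
    def __init__(self, content): self.content = content

class AIMessage:
    def __init__(self, content): self.content = content

result = {'messages': [HumanMessage('hi'), AIMessage('hello'), HumanMessage('bye')]}

# Pick the last non-human message; fall back to the very last message.
output = next((msg.content for msg in reversed(result['messages'])
               if not isinstance(msg, HumanMessage)),
              result['messages'][-1].content)
assert output == 'hello'
```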
```diff
@@ -809,8 +872,6 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if is_execution_finished:
             thread_id = None
 
-
-
         result_with_state = {
             "output": output,
             "thread_id": thread_id,
```
alita_sdk/runtime/langchain/utils.py

```diff
@@ -5,8 +5,9 @@ import re
 from pydantic import create_model, Field
 from typing import Tuple, TypedDict, Any, Optional, Annotated
 from langchain_core.messages import AnyMessage
-from
-
+from langgraph.graph import add_messages
+
+from ...runtime.langchain.constants import ELITEA_RS, PRINTER_NODE_RS
 
 logger = logging.getLogger(__name__)
 
```
```diff
@@ -130,13 +131,15 @@ def parse_type(type_str):
 
 
 def create_state(data: Optional[dict] = None):
-    state_dict = {'input': str, 'router_output': str
+    state_dict = {'input': str, 'router_output': str,
+                  ELITEA_RS: str, PRINTER_NODE_RS: str}  # Always include router_output
     types_dict = {}
     if not data:
         data = {'messages': 'list[str]'}
     for key, value in data.items():
         # support of old & new UI
         value = value['type'] if isinstance(value, dict) else value
+        value = 'str' if value == 'string' else value  # normalize string type (old state support)
         if key == 'messages':
             state_dict[key] = Annotated[list[AnyMessage], add_messages]
         elif value in ['str', 'int', 'float', 'bool', 'list', 'dict', 'number', 'dict']:
```
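`create_state` now also normalizes the legacy `'string'` type name to `'str'`, so state schemas produced by the old UI keep working. A minimal illustration of just the normalization rule (the full function additionally builds the annotated state class, omitted here):

```python
# Normalization of legacy type names, as added in create_state above.
def normalize(value):
    value = value['type'] if isinstance(value, dict) else value  # old & new UI shapes
    return 'str' if value == 'string' else value                 # legacy 'string' -> 'str'

assert normalize('string') == 'str'            # old UI
assert normalize({'type': 'string'}) == 'str'  # new UI dict shape
assert normalize('int') == 'int'               # everything else passes through
```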
```diff
@@ -177,13 +180,30 @@ def propagate_the_input_mapping(input_mapping: dict[str, dict], input_variables:
         var_dict = create_params(input_variables, source)
 
         if value['type'] == 'fstring':
-
+            try:
+                input_data[key] = value['value'].format(**var_dict)
+            except KeyError as e:
+                logger.error(f"KeyError in fstring formatting for key '{key}'. Attempt to find proper data in state.\n{e}")
+                try:
+                    # search for variables in state if not found in var_dict
+                    input_data[key] = safe_format(value['value'], state)
+                except KeyError as no_var_exception:
+                    logger.error(f"KeyError in fstring formatting for key '{key}' with state data.\n{no_var_exception}")
+                    # leave value as is if still not found (could be a constant string marked as fstring by mistake)
+                    input_data[key] = value['value']
         elif value['type'] == 'fixed':
             input_data[key] = value['value']
         else:
             input_data[key] = source.get(value['value'], "")
     return input_data
 
+def safe_format(template, mapping):
+    """Format a template string using a mapping, leaving placeholders unchanged if keys are missing."""
+
+    def replacer(match):
+        key = match.group(1)
+        return str(mapping.get(key, f'{{{key}}}'))
+    return re.sub(r'\{(\w+)\}', replacer, template)
 
 def create_pydantic_model(model_name: str, variables: dict[str, dict]):
     fields = {}
```
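`safe_format` is the fallback used when plain `str.format` raises `KeyError`: it substitutes only the `{word}` placeholders whose keys exist in the mapping and leaves the rest intact, so a partially resolvable template degrades gracefully instead of raising. The function from the diff with an example input:

```python
import re

def safe_format(template, mapping):
    """Format a template string using a mapping, leaving placeholders unchanged if keys are missing."""

    def replacer(match):
        key = match.group(1)
        return str(mapping.get(key, f'{{{key}}}'))
    return re.sub(r'\{(\w+)\}', replacer, template)

state = {'user': 'alice'}
# {user} is resolved from state; {task} is missing and survives untouched.
assert safe_format('Hello {user}, your task: {task}', state) == 'Hello alice, your task: {task}'
```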
alita_sdk/runtime/toolkits/application.py

```diff
@@ -39,7 +39,14 @@ class ApplicationToolkit(BaseToolkit):
             description=app_details.get("description"),
             application=app,
             args_schema=applicationToolSchema,
-            return_type='str'
+            return_type='str',
+            client=client,
+            args_runnable={
+                "application_id": application_id,
+                "application_version_id": application_version_id,
+                "store": store,
+                "llm": client.get_llm(version_details['llm_settings']['model_name'], model_settings),
+            })])
 
     def get_tools(self):
         return self.tools
```
alita_sdk/runtime/toolkits/tools.py

```diff
@@ -1,4 +1,5 @@
 import logging
+from typing import Optional
 
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
```
```diff
@@ -12,6 +13,8 @@ from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
 from ..tools.mcp_server_tool import McpServerTool
+from ..tools.sandbox import SandboxToolkit
+from ..tools.image_generation import ImageGenerationToolkit
 # Import community tools
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
```
|
|
|
24
27
|
core_toolkits = [
|
|
25
28
|
ArtifactToolkit.toolkit_config_schema(),
|
|
26
29
|
MemoryToolkit.toolkit_config_schema(),
|
|
27
|
-
VectorStoreToolkit.toolkit_config_schema()
|
|
30
|
+
VectorStoreToolkit.toolkit_config_schema(),
|
|
31
|
+
SandboxToolkit.toolkit_config_schema(),
|
|
32
|
+
ImageGenerationToolkit.toolkit_config_schema()
|
|
28
33
|
]
|
|
29
34
|
|
|
30
35
|
return core_toolkits + community_toolkits() + alita_toolkits()
|
|
31
36
|
|
|
32
37
|
|
|
33
|
-
def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None) -> list:
|
|
38
|
+
def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False) -> list:
|
|
34
39
|
prompts = []
|
|
35
40
|
tools = []
|
|
36
41
|
|
|
37
42
|
for tool in tools_list:
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
43
|
+
try:
|
|
44
|
+
if tool['type'] == 'datasource':
|
|
45
|
+
tools.extend(DatasourcesToolkit.get_toolkit(
|
|
46
|
+
alita_client,
|
|
47
|
+
datasource_ids=[int(tool['settings']['datasource_id'])],
|
|
48
|
+
selected_tools=tool['settings']['selected_tools'],
|
|
49
|
+
toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
|
|
50
|
+
).get_tools())
|
|
51
|
+
elif tool['type'] == 'application':
|
|
52
|
+
tools.extend(ApplicationToolkit.get_toolkit(
|
|
53
|
+
alita_client,
|
|
54
|
+
application_id=int(tool['settings']['application_id']),
|
|
55
|
+
application_version_id=int(tool['settings']['application_version_id']),
|
|
56
|
+
selected_tools=[]
|
|
57
|
+
).get_tools())
|
|
58
|
+
# backward compatibility for pipeline application type as subgraph node
|
|
59
|
+
if tool.get('agent_type', '') == 'pipeline':
|
|
60
|
+
# static get_toolkit returns a list of CompiledStateGraph stubs
|
|
61
|
+
tools.extend(SubgraphToolkit.get_toolkit(
|
|
62
|
+
alita_client,
|
|
63
|
+
application_id=int(tool['settings']['application_id']),
|
|
64
|
+
application_version_id=int(tool['settings']['application_version_id']),
|
|
65
|
+
app_api_key=alita_client.auth_token,
|
|
66
|
+
selected_tools=[],
|
|
67
|
+
llm=llm
|
|
68
|
+
))
|
|
69
|
+
elif tool['type'] == 'memory':
|
|
70
|
+
tools += MemoryToolkit.get_toolkit(
|
|
71
|
+
namespace=tool['settings'].get('namespace', str(tool['id'])),
|
|
72
|
+
pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
|
|
73
|
+
store=memory_store,
|
|
74
|
+
).get_tools()
|
|
75
|
+
# TODO: update configuration of internal tools
|
|
76
|
+
elif tool['type'] == 'internal_tool':
|
|
77
|
+
if tool['name'] == 'pyodide':
|
|
78
|
+
tools += SandboxToolkit.get_toolkit(
|
|
79
|
+
stateful=False,
|
|
80
|
+
allow_net=True,
|
|
81
|
+
alita_client=alita_client,
|
|
82
|
+
).get_tools()
|
|
83
|
+
elif tool['name'] == 'image_generation':
|
|
84
|
+
if alita_client and alita_client.model_image_generation:
|
|
85
|
+
tools += ImageGenerationToolkit.get_toolkit(
|
|
86
|
+
client=alita_client,
|
|
87
|
+
).get_tools()
|
|
88
|
+
else:
|
|
89
|
+
logger.warning("Image generation internal tool requested "
|
|
90
|
+
"but no image generation model configured")
|
|
91
|
+
elif tool['type'] == 'artifact':
|
|
92
|
+
tools.extend(ArtifactToolkit.get_toolkit(
|
|
93
|
+
client=alita_client,
|
|
94
|
+
bucket=tool['settings']['bucket'],
|
|
95
|
+
toolkit_name=tool.get('toolkit_name', ''),
|
|
96
|
+
selected_tools=tool['settings'].get('selected_tools', []),
|
|
97
|
+
llm=llm,
|
|
98
|
+
# indexer settings
|
|
99
|
+
pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
|
|
100
|
+
embedding_model=tool['settings'].get('embedding_model'),
|
|
101
|
+
collection_name=f"{tool.get('toolkit_name')}",
|
|
102
|
+
collection_schema = str(tool['id'])
|
|
103
|
+
).get_tools())
|
|
104
|
+
elif tool['type'] == 'vectorstore':
|
|
105
|
+
tools.extend(VectorStoreToolkit.get_toolkit(
|
|
106
|
+
llm=llm,
|
|
107
|
+
toolkit_name=tool.get('toolkit_name', ''),
|
|
108
|
+
**tool['settings']).get_tools())
|
|
109
|
+
except Exception as e:
|
|
110
|
+
logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
|
|
111
|
+
if debug_mode:
|
|
112
|
+
logger.info("Skipping tool initialization error due to debug mode.")
|
|
113
|
+
continue
|
|
114
|
+
else:
|
|
115
|
+
raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
|
|
85
116
|
|
|
86
117
|
if len(prompts) > 0:
|
|
87
118
|
tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
|
|
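With the new `debug_mode` flag, a toolkit that fails to initialize is logged and skipped instead of aborting the whole agent build; without the flag the error is re-raised as a `ToolException`. A standalone sketch of that fail-soft pattern (the `ToolException` stand-in and the `build_tools` helper are illustrative, not the SDK's actual function):

```python
import logging
logger = logging.getLogger(__name__)

class ToolException(Exception):
    pass  # stand-in for langchain_core.tools.ToolException

def build_tools(tools_list, debug_mode=False):
    tools = []
    for tool in tools_list:
        try:
            if tool['type'] == 'broken':
                raise ValueError('bad settings')  # simulate a failing toolkit
            tools.append(f"tool:{tool['name']}")
        except Exception as e:
            logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
            if debug_mode:
                continue  # debug mode: log and keep building the rest
            raise ToolException(str(e))
    return tools

# In debug mode the broken toolkit is skipped and the good one still loads.
assert build_tools([{'type': 'broken', 'name': 'x'}, {'type': 'ok', 'name': 'y'}],
                   debug_mode=True) == ['tool:y']
```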
alita_sdk/runtime/tools/__init__.py

```diff
@@ -5,7 +5,11 @@ This module provides various tools that can be used within LangGraph agents.
 
 from .sandbox import PyodideSandboxTool, StatefulPyodideSandboxTool, create_sandbox_tool
 from .echo import EchoTool
-from .image_generation import
+from .image_generation import (
+    ImageGenerationTool,
+    create_image_generation_tool,
+    ImageGenerationToolkit
+)
 
 __all__ = [
     "PyodideSandboxTool",
```
```diff
@@ -13,5 +17,6 @@ __all__ = [
     "create_sandbox_tool",
     "EchoTool",
     "ImageGenerationTool",
+    "ImageGenerationToolkit",
     "create_image_generation_tool"
-]
+]
```
alita_sdk/runtime/tools/application.py

```diff
@@ -50,6 +50,8 @@ class Application(BaseTool):
     application: Any
     args_schema: Type[BaseModel] = applicationToolSchema
     return_type: str = "str"
+    client: Any
+    args_runnable: dict = {}
 
     @field_validator('name', mode='before')
     @classmethod
```
```diff
@@ -66,6 +68,11 @@ class Application(BaseTool):
         return self._run(*config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
+        if self.client and self.args_runnable:
+            # Recreate new LanggraphAgentRunnable in order to reflect the current input_mapping (it can be dynamic for pipelines).
+            # Actually, for pipelines agent toolkits LanggraphAgentRunnable is created (for LLMNode) before pipeline's schema parsing.
+            application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
+            self.application = self.client.application(**self.args_runnable, application_variables=application_variables)
         response = self.application.invoke(formulate_query(kwargs))
         if self.return_type == "str":
             return response["output"]
```