alita-sdk 0.3.375__py3-none-any.whl → 0.3.417__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
Files changed (47)
  1. alita_sdk/configurations/bitbucket.py +95 -0
  2. alita_sdk/configurations/confluence.py +96 -1
  3. alita_sdk/configurations/gitlab.py +79 -0
  4. alita_sdk/configurations/jira.py +103 -0
  5. alita_sdk/configurations/testrail.py +88 -0
  6. alita_sdk/configurations/xray.py +93 -0
  7. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  8. alita_sdk/configurations/zephyr_essential.py +75 -0
  9. alita_sdk/runtime/clients/client.py +3 -2
  10. alita_sdk/runtime/langchain/assistant.py +56 -40
  11. alita_sdk/runtime/langchain/constants.py +2 -0
  12. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  13. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  14. alita_sdk/runtime/langchain/document_loaders/constants.py +28 -12
  15. alita_sdk/runtime/langchain/langraph_agent.py +52 -27
  16. alita_sdk/runtime/langchain/utils.py +15 -4
  17. alita_sdk/runtime/toolkits/application.py +8 -1
  18. alita_sdk/runtime/toolkits/tools.py +79 -49
  19. alita_sdk/runtime/tools/__init__.py +7 -2
  20. alita_sdk/runtime/tools/application.py +7 -0
  21. alita_sdk/runtime/tools/function.py +28 -23
  22. alita_sdk/runtime/tools/graph.py +10 -4
  23. alita_sdk/runtime/tools/image_generation.py +104 -8
  24. alita_sdk/runtime/tools/llm.py +142 -114
  25. alita_sdk/runtime/tools/sandbox.py +166 -63
  26. alita_sdk/runtime/tools/vectorstore.py +2 -1
  27. alita_sdk/runtime/tools/vectorstore_base.py +2 -1
  28. alita_sdk/runtime/utils/utils.py +1 -0
  29. alita_sdk/tools/__init__.py +43 -31
  30. alita_sdk/tools/base_indexer_toolkit.py +54 -60
  31. alita_sdk/tools/code_indexer_toolkit.py +13 -3
  32. alita_sdk/tools/confluence/api_wrapper.py +29 -7
  33. alita_sdk/tools/confluence/loader.py +10 -0
  34. alita_sdk/tools/elitea_base.py +1 -1
  35. alita_sdk/tools/gitlab/api_wrapper.py +8 -9
  36. alita_sdk/tools/jira/api_wrapper.py +1 -1
  37. alita_sdk/tools/qtest/api_wrapper.py +7 -10
  38. alita_sdk/tools/sharepoint/api_wrapper.py +81 -28
  39. alita_sdk/tools/sharepoint/authorization_helper.py +131 -1
  40. alita_sdk/tools/sharepoint/utils.py +8 -2
  41. alita_sdk/tools/utils/content_parser.py +27 -16
  42. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +18 -5
  43. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/METADATA +1 -1
  44. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/RECORD +47 -47
  45. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/WHEEL +0 -0
  46. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/licenses/LICENSE +0 -0
  47. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/top_level.txt +0 -0
@@ -43,7 +43,7 @@ DEFAULT_ALLOWED_WITH_LLM = {
     LoaderProperties.PROMPT.value: "",
 }
 
-# Image file loaders mapping
+# Image file loaders mapping - directly supported by LLM with image_url
 image_loaders_map = {
     '.png': {
         'class': AlitaImageLoader,
@@ -73,6 +73,17 @@ image_loaders_map = {
         'kwargs': {},
         'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
     },
+    '.webp': {
+        'class': AlitaImageLoader,
+        'mime_type': 'image/webp',
+        'is_multimodal_processing': True,
+        'kwargs': {},
+        'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
+    }
+}
+
+# Image file loaders mapping - require conversion before sending to LLM
+image_loaders_map_converted = {
     '.bmp': {
         'class': AlitaImageLoader,
         'mime_type': 'image/bmp',
@@ -102,7 +113,7 @@ document_loaders_map = {
     },
     '.yml': {
         'class': AlitaTextLoader,
-        'mime_type': 'application/x-yaml',
+        'mime_type': 'application/yaml',
        'is_multimodal_processing': False,
         'kwargs': {
             'autodetect_encoding': True
@@ -111,7 +122,7 @@ document_loaders_map = {
     },
     '.yaml': {
         'class': AlitaTextLoader,
-        'mime_type': 'application/x-yaml',
+        'mime_type': 'application/yaml',
         'is_multimodal_processing': False,
         'kwargs': {
             'autodetect_encoding': True
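Note: application/yaml is the YAML media type registered by RFC 9512; the application/x-yaml value it replaces was never IANA-registered, so both the .yml and .yaml entries move to the standard type.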
@@ -244,17 +255,17 @@ document_loaders_map = {
             'extract_images': False,
         }
     },
-    '.py': {
-        'class': AlitaPythonLoader,
-        'mime_type': 'text/x-python',
-        'is_multimodal_processing': False,
-        'kwargs': {},
-        'allowed_to_override': DEFAULT_ALLOWED_BASE
-    }
+    # '.py': {
+    #     'class': AlitaPythonLoader,
+    #     'mime_type': 'text/x-python',
+    #     'is_multimodal_processing': False,
+    #     'kwargs': {},
+    #     'allowed_to_override': DEFAULT_ALLOWED_BASE
+    # }
 }
 
 code_extensions = [
-    # '.py', # Python
+    '.py', # Python
     '.js', # JavaScript
     '.ts', # TypeScript
     '.java', # Java
@@ -292,7 +303,12 @@ default_loader_config = {
 code_loaders_map = {ext: default_loader_config for ext in code_extensions}
 
 # Combined mapping for backward compatibility
-loaders_map = {**image_loaders_map, **document_loaders_map, **code_loaders_map}
+loaders_map = {
+    **image_loaders_map,
+    **image_loaders_map_converted,
+    **document_loaders_map,
+    **code_loaders_map
+}
 
 loaders_allowed_to_override = {
     extension: config.get('allowed_to_override')
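With the merge, lookup by extension is unchanged for callers even though image loaders are now split into a direct map and a convert-first map (and .py handling moved from document_loaders_map into code_extensions). A minimal sketch of how a consumer might resolve a loader from the merged map; resolve_loader is illustrative, not an SDK function:

    import os

    def resolve_loader(file_path: str, loaders_map: dict):
        # Look up the loader config by lowercased extension, matching the map keys.
        ext = os.path.splitext(file_path)[1].lower()
        config = loaders_map.get(ext)
        if config is None:
            return None  # unsupported extension
        # '.webp' now resolves via image_loaders_map (sent to the LLM as image_url);
        # '.bmp' resolves via image_loaders_map_converted (converted before sending).
        return config['class'](file_path, **config['kwargs'])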
@@ -348,8 +348,8 @@ class StateModifierNode(Runnable):
         return result
 
 
-
-def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None, state_class=None, output_variables=None):
+def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
+                          state_class=None, output_variables=None):
     # prepare output channels
     if interrupt_after is None:
         interrupt_after = []
@@ -466,11 +466,12 @@ def create_graph(
         elif node_type == 'agent':
             input_params = node.get('input', ['messages'])
             input_mapping = node.get('input_mapping',
-                                {'messages': {'type': 'variable', 'value': 'messages'}})
+                                     {'messages': {'type': 'variable', 'value': 'messages'}})
+            output_vars = node.get('output', [])
             lg_builder.add_node(node_id, FunctionTool(
                 client=client, tool=tool,
                 name=node_id, return_type='str',
-                output_variables=node.get('output', []),
+                output_variables=output_vars + ['messages'] if 'messages' not in output_vars else output_vars,
                 input_variables=input_params,
                 input_mapping= input_mapping
             ))
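A note on the new output_variables expression: Python's conditional expression has lower precedence than +, so it parses as the parenthesized form below, guaranteeing 'messages' appears exactly once.

    # Equivalent, parenthesized for clarity:
    output_variables = (output_vars + ['messages']) if 'messages' not in output_vars else output_vars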
@@ -481,7 +482,8 @@ def create_graph(
             # wrap with mappings
             pipeline_name = node.get('tool', None)
             if not pipeline_name:
-                raise ValueError("Subgraph must have a 'tool' node: add required tool to the subgraph node")
+                raise ValueError(
+                    "Subgraph must have a 'tool' node: add required tool to the subgraph node")
             node_fn = SubgraphRunnable(
                 inner=tool.graph,
                 name=pipeline_name,
@@ -499,15 +501,6 @@ def create_graph(
                 structured_output=node.get('structured_output', False),
                 task=node.get('task')
             ))
-        # TODO: decide on struct output for agent nodes
-        # elif node_type == 'agent':
-        #     lg_builder.add_node(node_id, AgentNode(
-        #         client=client, tool=tool,
-        #         name=node['id'], return_type='dict',
-        #         output_variables=node.get('output', []),
-        #         input_variables=node.get('input', ['messages']),
-        #         task=node.get('task')
-        #     ))
         elif node_type == 'loop':
             lg_builder.add_node(node_id, LoopNode(
                 client=client, tool=tool,
@@ -520,7 +513,8 @@ def create_graph(
             loop_toolkit_name = node.get('loop_toolkit_name')
             loop_tool_name = node.get('loop_tool')
             if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
-                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(loop_tool_name)
+                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                    loop_tool_name)
                 for t in tools:
                     if t.name == loop_tool_name:
                         logger.debug(f"Loop tool discovered: {t}")
@@ -555,12 +549,13 @@ def create_graph(
                     break
         elif node_type == 'code':
             from ..tools.sandbox import create_sandbox_tool
-            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True)
-            code = node.get('code', "return 'Code block is empty'")
+            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
+                                               alita_client=kwargs.get('alita_client', None))
+            code_data = node.get('code', {'type': 'fixed', 'value': "return 'Code block is empty'"})
             lg_builder.add_node(node_id, FunctionTool(
                 tool=sandbox_tool, name=node['id'], return_type='dict',
                 output_variables=node.get('output', []),
-                input_mapping={'code': {'type': 'fixed', 'value': code}},
+                input_mapping={'code': code_data},
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
                 alita_client=kwargs.get('alita_client', None)
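The code node now passes the full mapping spec through instead of forcing a fixed string, so the code can come from graph state as well as a literal. Hypothetical node configs (the shape follows the input_mapping types used elsewhere in this file; values are illustrative):

    # Illustrative 'code' node definitions under the new schema:
    fixed_code_node = {'id': 'run_fixed', 'type': 'code',
                       'code': {'type': 'fixed', 'value': "return 1 + 1"}}
    # Assumed: 'variable' mappings read the value from graph state at run time.
    dynamic_code_node = {'id': 'run_generated', 'type': 'code',
                         'code': {'type': 'variable', 'value': 'generated_code'}}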
@@ -593,7 +588,7 @@ def create_graph(
         else:
             # Use all available tools
             available_tools = [tool for tool in tools if isinstance(tool, BaseTool)]
-
+
         lg_builder.add_node(node_id, LLMNode(
             client=client,
             input_mapping=node.get('input_mapping', {'messages': {'type': 'variable', 'value': 'messages'}}),
@@ -604,7 +599,9 @@ def create_graph(
             input_variables=node.get('input', ['messages']),
             structured_output=node.get('structured_output', False),
             available_tools=available_tools,
-            tool_names=tool_names))
+            tool_names=tool_names,
+            steps_limit=kwargs.get('steps_limit', 25)
+        ))
         elif node_type == 'router':
             # Add a RouterNode as an independent node
             lg_builder.add_node(node_id, RouterNode(
@@ -777,17 +774,46 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             # Convert chat history dict messages to LangChain message objects
             chat_history = input.pop('chat_history')
             input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
-
+
+        # handler for LLM node: if no input (Chat perspective), then take last human message
+        if not input.get('input'):
+            if input.get('messages'):
+                input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
+                                       None)]
+
         # Append current input to existing messages instead of overwriting
         if input.get('input'):
-            current_message = input.get('input')[-1]
+            if isinstance(input['input'], str):
+                current_message = input['input']
+            else:
+                current_message = input.get('input')[-1]
+
             # TODO: add handler after we add 2+ inputs (filterByType, etc.)
-            input['input'] = current_message if isinstance(current_message, str) else str(current_message)
+            if isinstance(current_message, HumanMessage):
+                current_content = current_message.content
+                if isinstance(current_content, list):
+                    text_contents = [
+                        item['text'] if isinstance(item, dict) and item.get('type') == 'text'
+                        else item if isinstance(item, str)
+                        else None
+                        for item in current_content
+                    ]
+                    text_contents = [text for text in text_contents if text is not None]
+                    input['input'] = ". ".join(text_contents)
+                elif isinstance(current_content, str):
+                    # on regenerate case
+                    input['input'] = current_content
+                else:
+                    input['input'] = str(current_content)
+            elif isinstance(current_message, str):
+                input['input'] = current_message
+            else:
+                input['input'] = str(current_message)
             if input.get('messages'):
                 # Ensure existing messages are LangChain objects
                 input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
                 # Append to existing messages
-                input['messages'].append(current_message)
+                # input['messages'].append(current_message)
             else:
                 # No existing messages, create new list
                 input['messages'] = [current_message]
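The new branch flattens multimodal HumanMessage content into a plain string for the input channel, joining text parts with ". " and dropping non-text parts. A runnable sketch of the list case (the message content is illustrative):

    # Illustrative: flattening a multimodal content list the way the new code does.
    current_content = [
        {'type': 'text', 'text': 'Describe this image'},
        {'type': 'image_url', 'image_url': {'url': 'data:image/png;base64,...'}},  # dropped
        'and compare it with the previous one',
    ]
    text_contents = [
        item['text'] if isinstance(item, dict) and item.get('type') == 'text'
        else item if isinstance(item, str)
        else None
        for item in current_content
    ]
    text_contents = [text for text in text_contents if text is not None]
    print(". ".join(text_contents))
    # -> "Describe this image. and compare it with the previous one"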
@@ -798,7 +824,8 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         else:
             result = super().invoke(input, config=config, *args, **kwargs)
         try:
-            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)), result['messages'][-1].content)
+            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
+                          result['messages'][-1].content)
         except:
             output = list(result.values())[-1]
         config_state = self.get_state(config)
@@ -806,8 +833,6 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if is_execution_finished:
             thread_id = None
 
-
-
         result_with_state = {
             "output": output,
             "thread_id": thread_id,
@@ -5,8 +5,9 @@ import re
 from pydantic import create_model, Field
 from typing import Tuple, TypedDict, Any, Optional, Annotated
 from langchain_core.messages import AnyMessage
-from langchain_core.prompts import PromptTemplate
-from langgraph.graph import MessagesState, add_messages
+from langgraph.graph import add_messages
+
+from ...runtime.langchain.constants import ELITEA_RS
 
 logger = logging.getLogger(__name__)
 
@@ -130,7 +131,7 @@ def parse_type(type_str):
 
 
 def create_state(data: Optional[dict] = None):
-    state_dict = {'input': str, 'router_output': str} # Always include router_output
+    state_dict = {'input': str, 'router_output': str, ELITEA_RS: str} # Always include router_output
     types_dict = {}
     if not data:
         data = {'messages': 'list[str]'}
@@ -177,7 +178,17 @@ def propagate_the_input_mapping(input_mapping: dict[str, dict], input_variables:
         var_dict = create_params(input_variables, source)
 
         if value['type'] == 'fstring':
-            input_data[key] = value['value'].format(**var_dict)
+            try:
+                input_data[key] = value['value'].format(**var_dict)
+            except KeyError as e:
+                logger.error(f"KeyError in fstring formatting for key '{key}'. Attempt to find proper data in state.\n{e}")
+                try:
+                    # search for variables in state if not found in var_dict
+                    input_data[key] = value['value'].format(**state)
+                except KeyError as no_var_exception:
+                    logger.error(f"KeyError in fstring formatting for key '{key}' with state data.\n{no_var_exception}")
+                    # leave value as is if still not found (could be a constant string marked as fstring by mistake)
+                    input_data[key] = value['value']
         elif value['type'] == 'fixed':
             input_data[key] = value['value']
         else:
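The fstring mapping now resolves in three steps: the mapped input variables, then the full graph state, then the raw template. A runnable sketch of that fallback chain (the names are illustrative):

    template = "Summarize {topic}"
    var_dict = {"user": "alice"}        # mapped input variables: no 'topic'
    state = {"topic": "release notes"}  # but the graph state has it

    try:
        value = template.format(**var_dict)
    except KeyError:
        try:
            value = template.format(**state)  # second chance: look in state
        except KeyError:
            value = template                  # give up: keep the literal template
    print(value)  # -> "Summarize release notes"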
@@ -39,7 +39,14 @@ class ApplicationToolkit(BaseToolkit):
             description=app_details.get("description"),
             application=app,
             args_schema=applicationToolSchema,
-            return_type='str')])
+            return_type='str',
+            client=client,
+            args_runnable={
+                "application_id": application_id,
+                "application_version_id": application_version_id,
+                "store": store,
+                "llm": client.get_llm(version_details['llm_settings']['model_name'], model_settings),
+            })])
 
     def get_tools(self):
         return self.tools
@@ -1,4 +1,5 @@
 import logging
+from typing import Optional
 
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
@@ -12,6 +13,8 @@ from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
 from ..tools.mcp_server_tool import McpServerTool
+from ..tools.sandbox import SandboxToolkit
+from ..tools.image_generation import ImageGenerationToolkit
 # Import community tools
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
@@ -24,64 +27,91 @@ def get_toolkits():
     core_toolkits = [
         ArtifactToolkit.toolkit_config_schema(),
         MemoryToolkit.toolkit_config_schema(),
-        VectorStoreToolkit.toolkit_config_schema()
+        VectorStoreToolkit.toolkit_config_schema(),
+        SandboxToolkit.toolkit_config_schema(),
+        ImageGenerationToolkit.toolkit_config_schema()
     ]
 
     return core_toolkits + community_toolkits() + alita_toolkits()
 
 
-def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None) -> list:
+def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False) -> list:
     prompts = []
     tools = []
 
     for tool in tools_list:
-        if tool['type'] == 'datasource':
-            tools.extend(DatasourcesToolkit.get_toolkit(
-                alita_client,
-                datasource_ids=[int(tool['settings']['datasource_id'])],
-                selected_tools=tool['settings']['selected_tools'],
-                toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
-            ).get_tools())
-        elif tool['type'] == 'application' and tool.get('agent_type', '') != 'pipeline' :
-            tools.extend(ApplicationToolkit.get_toolkit(
-                alita_client,
-                application_id=int(tool['settings']['application_id']),
-                application_version_id=int(tool['settings']['application_version_id']),
-                selected_tools=[]
-            ).get_tools())
-        elif tool['type'] == 'application' and tool.get('agent_type', '') == 'pipeline':
-            # static get_toolkit returns a list of CompiledStateGraph stubs
-            tools.extend(SubgraphToolkit.get_toolkit(
-                alita_client,
-                application_id=int(tool['settings']['application_id']),
-                application_version_id=int(tool['settings']['application_version_id']),
-                app_api_key=alita_client.auth_token,
-                selected_tools=[],
-                llm=llm
-            ))
-        elif tool['type'] == 'memory':
-            tools += MemoryToolkit.get_toolkit(
-                namespace=tool['settings'].get('namespace', str(tool['id'])),
-                pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
-                store=memory_store,
-            ).get_tools()
-        elif tool['type'] == 'artifact':
-            tools.extend(ArtifactToolkit.get_toolkit(
-                client=alita_client,
-                bucket=tool['settings']['bucket'],
-                toolkit_name=tool.get('toolkit_name', ''),
-                selected_tools=tool['settings'].get('selected_tools', []),
-                llm=llm,
-                # indexer settings
-                pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
-                embedding_model=tool['settings'].get('embedding_model'),
-                collection_name=f"{tool.get('toolkit_name')}",
-            ).get_tools())
-        elif tool['type'] == 'vectorstore':
-            tools.extend(VectorStoreToolkit.get_toolkit(
-                llm=llm,
-                toolkit_name=tool.get('toolkit_name', ''),
-                **tool['settings']).get_tools())
+        try:
+            if tool['type'] == 'datasource':
+                tools.extend(DatasourcesToolkit.get_toolkit(
+                    alita_client,
+                    datasource_ids=[int(tool['settings']['datasource_id'])],
+                    selected_tools=tool['settings']['selected_tools'],
+                    toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
+                ).get_tools())
+            elif tool['type'] == 'application' and tool.get('agent_type', '') != 'pipeline' :
+                tools.extend(ApplicationToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    selected_tools=[]
+                ).get_tools())
+            elif tool['type'] == 'application' and tool.get('agent_type', '') == 'pipeline':
+                # static get_toolkit returns a list of CompiledStateGraph stubs
+                tools.extend(SubgraphToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    app_api_key=alita_client.auth_token,
+                    selected_tools=[],
+                    llm=llm
+                ))
+            elif tool['type'] == 'memory':
+                tools += MemoryToolkit.get_toolkit(
+                    namespace=tool['settings'].get('namespace', str(tool['id'])),
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    store=memory_store,
+                ).get_tools()
+            # TODO: update configuration of internal tools
+            elif tool['type'] == 'internal_tool':
+                if tool['name'] == 'pyodide':
+                    tools += SandboxToolkit.get_toolkit(
+                        stateful=False,
+                        allow_net=True,
+                        alita_client=alita_client,
+                    ).get_tools()
+                elif tool['name'] == 'image_generation':
+                    if alita_client and alita_client.model_image_generation:
+                        tools += ImageGenerationToolkit.get_toolkit(
+                            client=alita_client,
+                        ).get_tools()
+                    else:
+                        logger.warning("Image generation internal tool requested "
+                                       "but no image generation model configured")
+            elif tool['type'] == 'artifact':
+                tools.extend(ArtifactToolkit.get_toolkit(
+                    client=alita_client,
+                    bucket=tool['settings']['bucket'],
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    selected_tools=tool['settings'].get('selected_tools', []),
+                    llm=llm,
+                    # indexer settings
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    embedding_model=tool['settings'].get('embedding_model'),
+                    collection_name=f"{tool.get('toolkit_name')}",
+                    collection_schema = str(tool['id'])
+                ).get_tools())
+            elif tool['type'] == 'vectorstore':
+                tools.extend(VectorStoreToolkit.get_toolkit(
+                    llm=llm,
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    **tool['settings']).get_tools())
+        except Exception as e:
+            logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
+            if debug_mode:
+                logger.info("Skipping tool initialization error due to debug mode.")
+                continue
+            else:
+                raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
 
     if len(prompts) > 0:
         tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
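Wrapping the dispatch loop in try/except means one misconfigured toolkit no longer aborts the whole get_tools call when debug_mode is set; otherwise the error is re-raised as a ToolException. A hedged usage sketch (the client, llm, and settings values are assumed, not taken from the diff):

    # Illustrative: the broken artifact entry is logged and skipped, the rest still load.
    tools = get_tools(
        tools_list=[
            {'type': 'artifact', 'name': 'broken', 'settings': {}},  # missing 'bucket'
            {'type': 'vectorstore', 'name': 'docs', 'settings': {}},  # settings omitted
        ],
        alita_client=alita_client,  # assumed: an authenticated Alita client
        llm=llm,                    # assumed: a configured chat model
        debug_mode=True,            # skip failures instead of raising ToolException
    )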
@@ -5,7 +5,11 @@ This module provides various tools that can be used within LangGraph agents.
 
 from .sandbox import PyodideSandboxTool, StatefulPyodideSandboxTool, create_sandbox_tool
 from .echo import EchoTool
-from .image_generation import ImageGenerationTool, create_image_generation_tool
+from .image_generation import (
+    ImageGenerationTool,
+    create_image_generation_tool,
+    ImageGenerationToolkit
+)
 
 __all__ = [
     "PyodideSandboxTool",
@@ -13,5 +17,6 @@ __all__ = [
     "create_sandbox_tool",
     "EchoTool",
     "ImageGenerationTool",
+    "ImageGenerationToolkit",
     "create_image_generation_tool"
-]
+]
@@ -50,6 +50,8 @@ class Application(BaseTool):
     application: Any
     args_schema: Type[BaseModel] = applicationToolSchema
     return_type: str = "str"
+    client: Any
+    args_runnable: dict = {}
 
     @field_validator('name', mode='before')
     @classmethod
@@ -66,6 +68,11 @@ class Application(BaseTool):
         return self._run(*config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
+        if self.client and self.args_runnable:
+            # Recreate new LanggraphAgentRunnable in order to reflect the current input_mapping (it can be dynamic for pipelines).
+            # Actually, for pipelines agent toolkits LanggraphAgentRunnable is created (for LLMNode) before pipeline's schema parsing.
+            application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
+            self.application = self.client.application(**self.args_runnable, application_variables=application_variables)
         response = self.application.invoke(formulate_query(kwargs))
         if self.return_type == "str":
             return response["output"]
@@ -7,7 +7,7 @@ from langchain_core.callbacks import dispatch_custom_event
 from langchain_core.messages import ToolCall
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
-from typing import Any, Optional, Union, Annotated
+from typing import Any, Optional, Union
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from pydantic import ValidationError
 
@@ -31,24 +31,10 @@ class FunctionTool(BaseTool):
         """Prepare input for PyodideSandboxTool by injecting state into the code block."""
         # add state into the code block here since it might be changed during the execution of the code
         state_copy = deepcopy(state)
-        # pickle state
-        import pickle
 
         del state_copy['messages'] # remove messages to avoid issues with pickling without langchain-core
-        serialized_state = pickle.dumps(state_copy)
         # inject state into the code block as alita_state variable
-        pyodide_predata = f"""import pickle\nalita_state = pickle.loads({serialized_state})\n"""
-        # add classes related to sandbox client
-        # read the content of alita_sdk/runtime/cliens/sandbox_client.py
-        try:
-            with open('alita_sdk/runtime/clients/sandbox_client.py', 'r') as f:
-                sandbox_client_code = f.read()
-            pyodide_predata += f"\n{sandbox_client_code}\n"
-            pyodide_predata += (f"alita_client = SandboxClient(base_url='{self.alita_client.base_url}',"
-                                f"project_id={self.alita_client.project_id},"
-                                f"auth_token='{self.alita_client.auth_token}')")
-        except FileNotFoundError:
-            logger.error("sandbox_client.py not found. Ensure 'alita_sdk/runtime/clients/sandbox_client.py' exists.")
+        pyodide_predata = f"#state dict\nalita_state = {state_copy}\n"
         return pyodide_predata
 
     def _handle_pyodide_output(self, tool_result: Any) -> dict:
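The pickle round-trip is gone: the state dict's repr is embedded directly as Python source in the sandbox preamble. A minimal sketch of what the sandbox now receives; note this only round-trips values whose repr() is a valid Python literal (str, int, float, bool, None, and containers of these):

    # Illustrative: the generated preamble for a simple state (after 'messages' is removed).
    state_copy = {'input': 'hi', 'count': 2}
    pyodide_predata = f"#state dict\nalita_state = {state_copy}\n"
    print(pyodide_predata)
    # #state dict
    # alita_state = {'input': 'hi', 'count': 2}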
@@ -57,6 +43,10 @@ class FunctionTool(BaseTool):
 
         if self.output_variables:
             for var in self.output_variables:
+                if var == "messages":
+                    tool_result_converted.update(
+                        {"messages": [{"role": "assistant", "content": dumps(tool_result)}]})
+                    continue
                 if isinstance(tool_result, dict) and var in tool_result:
                     tool_result_converted[var] = tool_result[var]
                 else:
@@ -71,8 +61,14 @@ class FunctionTool(BaseTool):
         # execute code tool and update state variables
         try:
             result_value = tool_result.get('result', {})
-            tool_result_converted.update(result_value if isinstance(result_value, dict)
-                                         else json.loads(result_value))
+            if isinstance(result_value, dict):
+                tool_result_converted.update(result_value)
+            elif isinstance(result_value, list):
+                # Handle list case - could wrap in a key or handle differently based on requirements
+                tool_result_converted.update({"result": result_value})
+            else:
+                # Handle JSON string case
+                tool_result_converted.update(json.loads(result_value))
         except json.JSONDecodeError:
             logger.error(f"JSONDecodeError: {tool_result}")
 
@@ -98,7 +94,9 @@ class FunctionTool(BaseTool):
         # special handler for PyodideSandboxTool
         if self._is_pyodide_tool():
             code = func_args['code']
-            func_args['code'] = f"{self._prepare_pyodide_input(state)}\n{code}"
+            func_args['code'] = (f"{self._prepare_pyodide_input(state)}\n{code}"
+                                 # handle new lines in the code properly
+                                 .replace('\\n','\\\\n'))
         try:
             tool_result = self.tool.invoke(func_args, config, **kwargs)
             dispatch_custom_event(
@@ -118,14 +116,21 @@ class FunctionTool(BaseTool):
             if not self.output_variables:
                 return {"messages": [{"role": "assistant", "content": dumps(tool_result)}]}
             else:
-                if self.output_variables[0] == "messages":
-                    return {
+                if "messages" in self.output_variables:
+                    messages_dict = {
                         "messages": [{
                             "role": "assistant",
-                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException) else str(
-                                tool_result)
+                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException)
+                            else str(tool_result)
                         }]
                     }
+                    for var in self.output_variables:
+                        if var != "messages":
+                            if isinstance(tool_result, dict) and var in tool_result:
+                                messages_dict[var] = tool_result[var]
+                            else:
+                                messages_dict[var] = tool_result
+                    return messages_dict
                 else:
                     return { self.output_variables[0]: tool_result }
         except ValidationError:
@@ -47,8 +47,8 @@ def formulate_query(kwargs):
 
 
 class GraphTool(BaseTool):
-    name: str
-    description: str
+    name: str = 'GraphTool'
+    description: str = 'Graph tool for tools'
     graph: CompiledStateGraph
     args_schema: Type[BaseModel] = graphToolSchema
     return_type: str = "str"
@@ -65,10 +65,16 @@ class GraphTool(BaseTool):
         all_kwargs = {**kwargs, **extras, **schema_values}
         if config is None:
             config = {}
-        return self._run(*config, **all_kwargs)
+        # Pass the config to the _run empty or the one passed from the parent executor.
+        return self._run(config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
-        response = self.graph.invoke(formulate_query(kwargs))
+        config = None
+        # From invoke method we are passing only 1 arg so it is safe to do this condition and config assignment.
+        # Default to None is safe because it will be checked also on the langchain side.
+        if args:
+            config = args[0]
+        response = self.graph.invoke(formulate_query(kwargs), config=config)
         if self.return_type == "str":
             return response["output"]
         else:
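The practical effect is that the parent executor's RunnableConfig (callbacks, thread_id, and the like) now reaches the compiled subgraph; previously *config unpacked the dict's keys as stray positional arguments and the config itself was lost. A hedged usage sketch (the tool instance and config contents are illustrative):

    # Illustrative: the config passed to the tool now flows into the inner graph,
    # so checkpointing and callbacks work across the subgraph boundary.
    config = {"configurable": {"thread_id": "session-42"}}
    result = graph_tool.invoke({"task": "summarize the report"}, config=config)
    # internally: self.graph.invoke(formulate_query(kwargs), config=config)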