alita-sdk 0.3.390__py3-none-any.whl → 0.3.417__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. alita_sdk/configurations/bitbucket.py +95 -0
  2. alita_sdk/configurations/confluence.py +96 -1
  3. alita_sdk/configurations/gitlab.py +79 -0
  4. alita_sdk/configurations/jira.py +103 -0
  5. alita_sdk/configurations/testrail.py +88 -0
  6. alita_sdk/configurations/xray.py +93 -0
  7. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  8. alita_sdk/configurations/zephyr_essential.py +75 -0
  9. alita_sdk/runtime/clients/client.py +3 -2
  10. alita_sdk/runtime/langchain/assistant.py +29 -5
  11. alita_sdk/runtime/langchain/constants.py +2 -0
  12. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  13. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  14. alita_sdk/runtime/langchain/document_loaders/constants.py +8 -8
  15. alita_sdk/runtime/langchain/langraph_agent.py +46 -24
  16. alita_sdk/runtime/langchain/utils.py +11 -4
  17. alita_sdk/runtime/toolkits/application.py +8 -1
  18. alita_sdk/runtime/toolkits/tools.py +72 -62
  19. alita_sdk/runtime/tools/application.py +7 -0
  20. alita_sdk/runtime/tools/function.py +11 -4
  21. alita_sdk/runtime/tools/llm.py +142 -116
  22. alita_sdk/runtime/tools/sandbox.py +15 -31
  23. alita_sdk/tools/__init__.py +41 -31
  24. alita_sdk/tools/base_indexer_toolkit.py +27 -2
  25. alita_sdk/tools/code_indexer_toolkit.py +13 -3
  26. alita_sdk/tools/confluence/loader.py +10 -0
  27. alita_sdk/tools/gitlab/api_wrapper.py +8 -9
  28. alita_sdk/tools/jira/api_wrapper.py +1 -1
  29. alita_sdk/tools/qtest/api_wrapper.py +7 -10
  30. alita_sdk/tools/sharepoint/api_wrapper.py +81 -28
  31. alita_sdk/tools/sharepoint/authorization_helper.py +131 -1
  32. alita_sdk/tools/sharepoint/utils.py +8 -2
  33. alita_sdk/tools/utils/content_parser.py +27 -16
  34. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +10 -2
  35. {alita_sdk-0.3.390.dist-info → alita_sdk-0.3.417.dist-info}/METADATA +1 -1
  36. {alita_sdk-0.3.390.dist-info → alita_sdk-0.3.417.dist-info}/RECORD +39 -39
  37. {alita_sdk-0.3.390.dist-info → alita_sdk-0.3.417.dist-info}/WHEEL +0 -0
  38. {alita_sdk-0.3.390.dist-info → alita_sdk-0.3.417.dist-info}/licenses/LICENSE +0 -0
  39. {alita_sdk-0.3.390.dist-info → alita_sdk-0.3.417.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/langchain/langraph_agent.py

@@ -348,8 +348,8 @@ class StateModifierNode(Runnable):
         return result
 
 
-
-def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None, state_class=None, output_variables=None):
+def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
+                          state_class=None, output_variables=None):
     # prepare output channels
     if interrupt_after is None:
         interrupt_after = []
@@ -466,11 +466,12 @@ def create_graph(
         elif node_type == 'agent':
             input_params = node.get('input', ['messages'])
             input_mapping = node.get('input_mapping',
-                                     {'messages': {'type': 'variable', 'value': 'messages'}})
+                                     {'messages': {'type': 'variable', 'value': 'messages'}})
+            output_vars = node.get('output', [])
             lg_builder.add_node(node_id, FunctionTool(
                 client=client, tool=tool,
                 name=node_id, return_type='str',
-                output_variables=node.get('output', []),
+                output_variables=output_vars + ['messages'] if 'messages' not in output_vars else output_vars,
                 input_variables=input_params,
                 input_mapping= input_mapping
             ))
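The `output_variables` change guarantees that 'agent' nodes always write back to the shared `messages` channel. Because Python's conditional expression binds looser than `+`, the concatenation happens only on the True branch. A minimal sketch of the behavior:

```python
# Minimal sketch of the new output_variables logic for 'agent' nodes.
# `output_vars` comes from the node config; 'messages' is appended only
# when the author did not already request it.
def with_messages(output_vars: list[str]) -> list[str]:
    # `a + b if cond else c` parses as `(a + b) if cond else c`,
    # so the list concatenation is evaluated only on the True branch.
    return output_vars + ['messages'] if 'messages' not in output_vars else output_vars

assert with_messages([]) == ['messages']
assert with_messages(['result']) == ['result', 'messages']
assert with_messages(['messages']) == ['messages']
```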
@@ -481,7 +482,8 @@ def create_graph(
             # wrap with mappings
             pipeline_name = node.get('tool', None)
             if not pipeline_name:
-                raise ValueError("Subgraph must have a 'tool' node: add required tool to the subgraph node")
+                raise ValueError(
+                    "Subgraph must have a 'tool' node: add required tool to the subgraph node")
             node_fn = SubgraphRunnable(
                 inner=tool.graph,
                 name=pipeline_name,
@@ -499,15 +501,6 @@ def create_graph(
                 structured_output=node.get('structured_output', False),
                 task=node.get('task')
             ))
-        # TODO: decide on struct output for agent nodes
-        # elif node_type == 'agent':
-        #     lg_builder.add_node(node_id, AgentNode(
-        #         client=client, tool=tool,
-        #         name=node['id'], return_type='dict',
-        #         output_variables=node.get('output', []),
-        #         input_variables=node.get('input', ['messages']),
-        #         task=node.get('task')
-        #     ))
         elif node_type == 'loop':
             lg_builder.add_node(node_id, LoopNode(
                 client=client, tool=tool,
@@ -520,7 +513,8 @@ def create_graph(
             loop_toolkit_name = node.get('loop_toolkit_name')
             loop_tool_name = node.get('loop_tool')
             if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
-                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(loop_tool_name)
+                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                    loop_tool_name)
                 for t in tools:
                     if t.name == loop_tool_name:
                         logger.debug(f"Loop tool discovered: {t}")
@@ -555,7 +549,8 @@ def create_graph(
                         break
         elif node_type == 'code':
             from ..tools.sandbox import create_sandbox_tool
-            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True, alita_client=kwargs.get('alita_client', None))
+            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
+                                               alita_client=kwargs.get('alita_client', None))
             code_data = node.get('code', {'type': 'fixed', 'value': "return 'Code block is empty'"})
             lg_builder.add_node(node_id, FunctionTool(
                 tool=sandbox_tool, name=node['id'], return_type='dict',
@@ -593,7 +588,7 @@ def create_graph(
             else:
                 # Use all available tools
                 available_tools = [tool for tool in tools if isinstance(tool, BaseTool)]
-
+
             lg_builder.add_node(node_id, LLMNode(
                 client=client,
                 input_mapping=node.get('input_mapping', {'messages': {'type': 'variable', 'value': 'messages'}}),
@@ -604,7 +599,9 @@ def create_graph(
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
                 available_tools=available_tools,
-                tool_names=tool_names))
+                tool_names=tool_names,
+                steps_limit=kwargs.get('steps_limit', 25)
+            ))
         elif node_type == 'router':
             # Add a RouterNode as an independent node
             lg_builder.add_node(node_id, RouterNode(
@@ -777,20 +774,46 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             # Convert chat history dict messages to LangChain message objects
             chat_history = input.pop('chat_history')
             input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
-
+
+        # handler for LLM node: if no input (Chat perspective), then take last human message
+        if not input.get('input'):
+            if input.get('messages'):
+                input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
+                                       None)]
+
         # Append current input to existing messages instead of overwriting
         if input.get('input'):
            if isinstance(input['input'], str):
                current_message = input['input']
            else:
                current_message = input.get('input')[-1]
+
            # TODO: add handler after we add 2+ inputs (filterByType, etc.)
-            input['input'] = current_message if isinstance(current_message, str) else str(current_message)
+            if isinstance(current_message, HumanMessage):
+                current_content = current_message.content
+                if isinstance(current_content, list):
+                    text_contents = [
+                        item['text'] if isinstance(item, dict) and item.get('type') == 'text'
+                        else item if isinstance(item, str)
+                        else None
+                        for item in current_content
+                    ]
+                    text_contents = [text for text in text_contents if text is not None]
+                    input['input'] = ". ".join(text_contents)
+                elif isinstance(current_content, str):
+                    # on regenerate case
+                    input['input'] = current_content
+                else:
+                    input['input'] = str(current_content)
+            elif isinstance(current_message, str):
+                input['input'] = current_message
+            else:
+                input['input'] = str(current_message)
            if input.get('messages'):
                # Ensure existing messages are LangChain objects
                input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
                # Append to existing messages
-                input['messages'].append(current_message)
+                # input['messages'].append(current_message)
            else:
                # No existing messages, create new list
                input['messages'] = [current_message]
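The net effect is that multimodal chat input is flattened into a plain string for the 'input' channel: text parts of a list-valued `HumanMessage.content` are joined with ". ", non-text parts are dropped. A self-contained sketch of that normalization (the helper name is illustrative):

```python
# Sketch of the new input normalization, assuming LangChain's HumanMessage.
from langchain_core.messages import HumanMessage

def normalize_input(current_message) -> str:
    if isinstance(current_message, HumanMessage):
        content = current_message.content
        if isinstance(content, list):
            # keep text parts, drop images and other modalities
            texts = [
                item['text'] if isinstance(item, dict) and item.get('type') == 'text'
                else item if isinstance(item, str) else None
                for item in content
            ]
            return ". ".join(t for t in texts if t is not None)
        return content if isinstance(content, str) else str(content)
    return current_message if isinstance(current_message, str) else str(current_message)

msg = HumanMessage(content=[{'type': 'text', 'text': 'Summarize the doc'},
                            {'type': 'image_url', 'image_url': {'url': 'data:...'}}])
assert normalize_input(msg) == 'Summarize the doc'
```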
@@ -801,7 +824,8 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         else:
             result = super().invoke(input, config=config, *args, **kwargs)
         try:
-            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)), result['messages'][-1].content)
+            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
+                          result['messages'][-1].content)
         except:
             output = list(result.values())[-1]
         config_state = self.get_state(config)
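The output is still taken from the newest non-human message, only re-wrapped across two lines here. A sketch of the selection rule:

```python
# Walk the message history backwards and return the first message that is
# not a HumanMessage; fall back to the last message's content otherwise.
from langchain_core.messages import AIMessage, HumanMessage

messages = [HumanMessage(content='hi'), AIMessage(content='hello'), HumanMessage(content='bye')]
output = next((m.content for m in reversed(messages) if not isinstance(m, HumanMessage)),
              messages[-1].content)
assert output == 'hello'
```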
@@ -809,8 +833,6 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if is_execution_finished:
             thread_id = None
 
-
-
         result_with_state = {
             "output": output,
             "thread_id": thread_id,
alita_sdk/runtime/langchain/utils.py

@@ -5,8 +5,9 @@ import re
 from pydantic import create_model, Field
 from typing import Tuple, TypedDict, Any, Optional, Annotated
 from langchain_core.messages import AnyMessage
-from langchain_core.prompts import PromptTemplate
-from langgraph.graph import MessagesState, add_messages
+from langgraph.graph import add_messages
+
+from ...runtime.langchain.constants import ELITEA_RS
 
 logger = logging.getLogger(__name__)
 
@@ -130,7 +131,7 @@ def parse_type(type_str):
 
 
 def create_state(data: Optional[dict] = None):
-    state_dict = {'input': str, 'router_output': str}  # Always include router_output
+    state_dict = {'input': str, 'router_output': str, ELITEA_RS: str}  # Always include router_output
     types_dict = {}
     if not data:
         data = {'messages': 'list[str]'}
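`create_state` now reserves an extra string channel keyed by `ELITEA_RS`, alongside 'input' and 'router_output'. The constant is defined in `alita_sdk/runtime/langchain/constants.py` (also changed in this release, +2 lines) and its value is not visible in this diff, so the sketch below uses a placeholder:

```python
# Sketch of the resulting state schema under an assumed placeholder value.
from typing import TypedDict

ELITEA_RS = 'elitea_rs'  # placeholder, not the real constant value

# create_state always seeds these channels before merging user-declared ones:
state_dict = {'input': str, 'router_output': str, ELITEA_RS: str}
State = TypedDict('State', state_dict)
```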
@@ -181,7 +182,13 @@ def propagate_the_input_mapping(input_mapping: dict[str, dict], input_variables:
                 input_data[key] = value['value'].format(**var_dict)
             except KeyError as e:
                 logger.error(f"KeyError in fstring formatting for key '{key}'. Attempt to find proper data in state.\n{e}")
-                input_data[key] = value['value'].format(**state)
+                try:
+                    # search for variables in state if not found in var_dict
+                    input_data[key] = value['value'].format(**state)
+                except KeyError as no_var_exception:
+                    logger.error(f"KeyError in fstring formatting for key '{key}' with state data.\n{no_var_exception}")
+                    # leave value as is if still not found (could be a constant string marked as fstring by mistake)
+                    input_data[key] = value['value']
         elif value['type'] == 'fixed':
             input_data[key] = value['value']
         else:
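Resolution of 'fstring' mappings is now a three-stage fallback: the node's own variables first, then the full graph state, and finally the raw template unchanged. A sketch (the function name is illustrative):

```python
# Three-stage fallback for 'fstring' input mappings.
def resolve_fstring(template: str, var_dict: dict, state: dict) -> str:
    try:
        return template.format(**var_dict)        # 1. node variables
    except KeyError:
        try:
            return template.format(**state)       # 2. graph state
        except KeyError:
            return template                       # 3. pass through unchanged

assert resolve_fstring('{name}', {'name': 'alita'}, {}) == 'alita'
assert resolve_fstring('{input}', {}, {'input': 'hi'}) == 'hi'
assert resolve_fstring('constant {oops}', {}, {}) == 'constant {oops}'
```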
alita_sdk/runtime/toolkits/application.py

@@ -39,7 +39,14 @@ class ApplicationToolkit(BaseToolkit):
             description=app_details.get("description"),
             application=app,
             args_schema=applicationToolSchema,
-            return_type='str')])
+            return_type='str',
+            client=client,
+            args_runnable={
+                "application_id": application_id,
+                "application_version_id": application_version_id,
+                "store": store,
+                "llm": client.get_llm(version_details['llm_settings']['model_name'], model_settings),
+            })])
 
     def get_tools(self):
         return self.tools
alita_sdk/runtime/toolkits/tools.py

@@ -1,4 +1,5 @@
 import logging
+from typing import Optional
 
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
@@ -34,74 +35,83 @@ def get_toolkits():
     return core_toolkits + community_toolkits() + alita_toolkits()
 
 
-def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None) -> list:
+def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False) -> list:
     prompts = []
     tools = []
 
     for tool in tools_list:
-        if tool['type'] == 'datasource':
-            tools.extend(DatasourcesToolkit.get_toolkit(
-                alita_client,
-                datasource_ids=[int(tool['settings']['datasource_id'])],
-                selected_tools=tool['settings']['selected_tools'],
-                toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
-            ).get_tools())
-        elif tool['type'] == 'application' and tool.get('agent_type', '') != 'pipeline' :
-            tools.extend(ApplicationToolkit.get_toolkit(
-                alita_client,
-                application_id=int(tool['settings']['application_id']),
-                application_version_id=int(tool['settings']['application_version_id']),
-                selected_tools=[]
-            ).get_tools())
-        elif tool['type'] == 'application' and tool.get('agent_type', '') == 'pipeline':
-            # static get_toolkit returns a list of CompiledStateGraph stubs
-            tools.extend(SubgraphToolkit.get_toolkit(
-                alita_client,
-                application_id=int(tool['settings']['application_id']),
-                application_version_id=int(tool['settings']['application_version_id']),
-                app_api_key=alita_client.auth_token,
-                selected_tools=[],
-                llm=llm
-            ))
-        elif tool['type'] == 'memory':
-            tools += MemoryToolkit.get_toolkit(
-                namespace=tool['settings'].get('namespace', str(tool['id'])),
-                pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
-                store=memory_store,
-            ).get_tools()
-        # TODO: update configuration of internal tools
-        elif tool['type'] == 'internal_tool':
-            if tool['name'] == 'pyodide':
-                tools += SandboxToolkit.get_toolkit(
-                    stateful=False,
-                    allow_net=True,
-                    alita_client=alita_client,
+        try:
+            if tool['type'] == 'datasource':
+                tools.extend(DatasourcesToolkit.get_toolkit(
+                    alita_client,
+                    datasource_ids=[int(tool['settings']['datasource_id'])],
+                    selected_tools=tool['settings']['selected_tools'],
+                    toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
+                ).get_tools())
+            elif tool['type'] == 'application' and tool.get('agent_type', '') != 'pipeline' :
+                tools.extend(ApplicationToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    selected_tools=[]
+                ).get_tools())
+            elif tool['type'] == 'application' and tool.get('agent_type', '') == 'pipeline':
+                # static get_toolkit returns a list of CompiledStateGraph stubs
+                tools.extend(SubgraphToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    app_api_key=alita_client.auth_token,
+                    selected_tools=[],
+                    llm=llm
+                ))
+            elif tool['type'] == 'memory':
+                tools += MemoryToolkit.get_toolkit(
+                    namespace=tool['settings'].get('namespace', str(tool['id'])),
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    store=memory_store,
                 ).get_tools()
-            elif tool['name'] == 'image_generation':
-                if alita_client and alita_client.model_image_generation:
-                    tools += ImageGenerationToolkit.get_toolkit(
-                        client=alita_client,
+            # TODO: update configuration of internal tools
+            elif tool['type'] == 'internal_tool':
+                if tool['name'] == 'pyodide':
+                    tools += SandboxToolkit.get_toolkit(
+                        stateful=False,
+                        allow_net=True,
+                        alita_client=alita_client,
                     ).get_tools()
-                else:
-                    logger.warning("Image generation internal tool requested "
-                                   "but no image generation model configured")
-        elif tool['type'] == 'artifact':
-            tools.extend(ArtifactToolkit.get_toolkit(
-                client=alita_client,
-                bucket=tool['settings']['bucket'],
-                toolkit_name=tool.get('toolkit_name', ''),
-                selected_tools=tool['settings'].get('selected_tools', []),
-                llm=llm,
-                # indexer settings
-                pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
-                embedding_model=tool['settings'].get('embedding_model'),
-                collection_name=f"{tool.get('toolkit_name')}",
-            ).get_tools())
-        elif tool['type'] == 'vectorstore':
-            tools.extend(VectorStoreToolkit.get_toolkit(
-                llm=llm,
-                toolkit_name=tool.get('toolkit_name', ''),
-                **tool['settings']).get_tools())
+                elif tool['name'] == 'image_generation':
+                    if alita_client and alita_client.model_image_generation:
+                        tools += ImageGenerationToolkit.get_toolkit(
+                            client=alita_client,
+                        ).get_tools()
+                    else:
+                        logger.warning("Image generation internal tool requested "
+                                       "but no image generation model configured")
+            elif tool['type'] == 'artifact':
+                tools.extend(ArtifactToolkit.get_toolkit(
+                    client=alita_client,
+                    bucket=tool['settings']['bucket'],
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    selected_tools=tool['settings'].get('selected_tools', []),
+                    llm=llm,
+                    # indexer settings
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    embedding_model=tool['settings'].get('embedding_model'),
+                    collection_name=f"{tool.get('toolkit_name')}",
+                    collection_schema = str(tool['id'])
+                ).get_tools())
+            elif tool['type'] == 'vectorstore':
+                tools.extend(VectorStoreToolkit.get_toolkit(
+                    llm=llm,
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    **tool['settings']).get_tools())
+        except Exception as e:
+            logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
+            if debug_mode:
+                logger.info("Skipping tool initialization error due to debug mode.")
+                continue
+            else:
+                raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
 
     if len(prompts) > 0:
         tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
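The whole type dispatch is now wrapped in try/except, so a single misconfigured toolkit no longer aborts agent construction: with `debug_mode=True` the bad tool is logged and skipped, with the default `debug_mode=False` a `ToolException` is raised as before. A condensed sketch of the policy (`build_all` and `build_one` are illustrative names):

```python
# Sketch of the new failure policy in get_tools.
from langchain_core.tools import ToolException

def build_all(tool_configs, build_one, debug_mode=False):
    tools = []
    for cfg in tool_configs:
        try:
            tools.extend(build_one(cfg))
        except Exception as e:
            if debug_mode:
                continue  # keep going with the toolkits that do work
            raise ToolException(f"Error initializing toolkit for tool '{cfg.get('name', 'unknown')}': {e}")
    return tools
```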
alita_sdk/runtime/tools/application.py

@@ -50,6 +50,8 @@ class Application(BaseTool):
     application: Any
     args_schema: Type[BaseModel] = applicationToolSchema
     return_type: str = "str"
+    client: Any
+    args_runnable: dict = {}
 
     @field_validator('name', mode='before')
     @classmethod
@@ -66,6 +68,11 @@ class Application(BaseTool):
         return self._run(*config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
+        if self.client and self.args_runnable:
+            # Recreate new LanggraphAgentRunnable in order to reflect the current input_mapping (it can be dynamic for pipelines).
+            # Actually, for pipelines agent toolkits LanggraphAgentRunnable is created (for LLMNode) before pipeline's schema parsing.
+            application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
+            self.application = self.client.application(**self.args_runnable, application_variables=application_variables)
         response = self.application.invoke(formulate_query(kwargs))
         if self.return_type == "str":
             return response["output"]
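Before invoking, `_run` now rebuilds the runnable from `args_runnable` so a pipeline's current input mapping is respected, reshaping the tool kwargs into the platform's named-variable format. A sketch of that reshaping with hypothetical kwargs:

```python
# Sketch: tool kwargs become {name: {"name": ..., "value": ...}} entries.
kwargs = {'topic': 'billing', 'limit': 5}  # hypothetical tool arguments
application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
assert application_variables == {
    'topic': {'name': 'topic', 'value': 'billing'},
    'limit': {'name': 'limit', 'value': 5},
}
```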
alita_sdk/runtime/tools/function.py

@@ -116,14 +116,21 @@ class FunctionTool(BaseTool):
             if not self.output_variables:
                 return {"messages": [{"role": "assistant", "content": dumps(tool_result)}]}
             else:
-                if self.output_variables[0] == "messages":
-                    return {
+                if "messages" in self.output_variables:
+                    messages_dict = {
                         "messages": [{
                             "role": "assistant",
-                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException) else str(
-                                tool_result)
+                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException)
+                            else str(tool_result)
                         }]
                     }
+                    for var in self.output_variables:
+                        if var != "messages":
+                            if isinstance(tool_result, dict) and var in tool_result:
+                                messages_dict[var] = tool_result[var]
+                            else:
+                                messages_dict[var] = tool_result
+                    return messages_dict
                 else:
                     return { self.output_variables[0]: tool_result }
         except ValidationError:
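Previously only `output_variables[0] == "messages"` received special handling; now whenever 'messages' appears anywhere in the declared outputs, every other declared variable is also populated, copied out of a dict result when present, otherwise mirroring the whole result. A simplified sketch (the `ToolException` branch is omitted):

```python
# Sketch of the broadened output mapping in FunctionTool._run.
from json import dumps

def map_outputs(tool_result, output_variables):
    out = {"messages": [{"role": "assistant", "content": dumps(tool_result)}]}
    for var in output_variables:
        if var != "messages":
            # prefer the matching key from a dict result, else mirror the result
            out[var] = tool_result[var] if isinstance(tool_result, dict) and var in tool_result else tool_result
    return out

result = map_outputs({'summary': 'ok', 'extra': 1}, ['messages', 'summary'])
assert result['summary'] == 'ok' and 'messages' in result
```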