alita-sdk 0.3.376__py3-none-any.whl → 0.3.435__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of alita-sdk has been flagged as potentially problematic.
- alita_sdk/configurations/bitbucket.py +95 -0
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/client.py +9 -4
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +8 -0
- alita_sdk/runtime/langchain/assistant.py +41 -38
- alita_sdk/runtime/langchain/constants.py +5 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +28 -12
- alita_sdk/runtime/langchain/langraph_agent.py +91 -27
- alita_sdk/runtime/langchain/utils.py +24 -4
- alita_sdk/runtime/models/mcp_models.py +57 -0
- alita_sdk/runtime/toolkits/__init__.py +24 -0
- alita_sdk/runtime/toolkits/application.py +8 -1
- alita_sdk/runtime/toolkits/mcp.py +787 -0
- alita_sdk/runtime/toolkits/tools.py +98 -50
- alita_sdk/runtime/tools/__init__.py +7 -2
- alita_sdk/runtime/tools/application.py +7 -0
- alita_sdk/runtime/tools/function.py +20 -28
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +104 -8
- alita_sdk/runtime/tools/llm.py +146 -114
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +79 -10
- alita_sdk/runtime/tools/sandbox.py +166 -63
- alita_sdk/runtime/tools/vectorstore.py +3 -2
- alita_sdk/runtime/tools/vectorstore_base.py +4 -3
- alita_sdk/runtime/utils/streamlit.py +34 -3
- alita_sdk/runtime/utils/toolkit_utils.py +5 -2
- alita_sdk/runtime/utils/utils.py +1 -0
- alita_sdk/tools/__init__.py +48 -31
- alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
- alita_sdk/tools/base_indexer_toolkit.py +75 -66
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/code_indexer_toolkit.py +13 -3
- alita_sdk/tools/confluence/api_wrapper.py +29 -7
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/elitea_base.py +7 -7
- alita_sdk/tools/gitlab/api_wrapper.py +11 -7
- alita_sdk/tools/jira/api_wrapper.py +1 -1
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/qtest/api_wrapper.py +522 -74
- alita_sdk/tools/sharepoint/api_wrapper.py +104 -33
- alita_sdk/tools/sharepoint/authorization_helper.py +175 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/utils/content_parser.py +27 -16
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +19 -6
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/RECORD +60 -55
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/langchain/langraph_agent.py

```diff
@@ -19,8 +19,9 @@ from langgraph.managed.base import is_managed_value
 from langgraph.prebuilt import InjectedStore
 from langgraph.store.base import BaseStore
 
+from .constants import PRINTER_NODE_RS, PRINTER
 from .mixedAgentRenderes import convert_message_to_json
-from .utils import create_state, propagate_the_input_mapping
+from .utils import create_state, propagate_the_input_mapping, safe_format
 from ..tools.function import FunctionTool
 from ..tools.indexer_tool import IndexerNode
 from ..tools.llm import LLMNode
```
```diff
@@ -232,6 +233,27 @@ class StateDefaultNode(Runnable):
                 result[key] = temp_value
         return result
 
+class PrinterNode(Runnable):
+    name = "PrinterNode"
+
+    def __init__(self, input_mapping: Optional[dict[str, dict]]):
+        self.input_mapping = input_mapping
+
+    def invoke(self, state: BaseStore, config: Optional[RunnableConfig] = None) -> dict:
+        logger.info(f"Printer Node - Current state variables: {state}")
+        result = {}
+        logger.debug(f"Initial text pattern: {self.input_mapping}")
+        mapping = propagate_the_input_mapping(self.input_mapping, [], state)
+        if mapping.get(PRINTER) is None:
+            raise ToolException(f"PrinterNode requires '{PRINTER}' field in input mapping")
+        formatted_output = mapping[PRINTER]
+        # add info label to the printer's output
+        if formatted_output:
+            formatted_output += f"\n\n-----\n*How to proceed?*\n* *to resume the pipeline - type anything...*"
+        logger.debug(f"Formatted output: {formatted_output}")
+        result[PRINTER_NODE_RS] = formatted_output
+        return result
+
 
 class StateModifierNode(Runnable):
     name = "StateModifierNode"
```
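The new PrinterNode resolves its input mapping against the current state, requires a value under the `PRINTER` key, and appends a resume hint before publishing the text under `PRINTER_NODE_RS`. A minimal usage sketch (not from the package) follows; the import paths are taken from this diff's file list, and it assumes `PRINTER` resolves to the `'printer'` key that `create_graph` uses as the default mapping further down, with a plain dict standing in for the graph state:

```python
from alita_sdk.runtime.langchain.constants import PRINTER_NODE_RS
from alita_sdk.runtime.langchain.langraph_agent import PrinterNode

# 'fixed' mappings pass through propagate_the_input_mapping untouched, so
# invoke() returns the text plus the appended "*How to proceed?*" footer.
node = PrinterNode(input_mapping={'printer': {'type': 'fixed', 'value': 'Draft ready for review'}})
result = node.invoke(state={})
print(result[PRINTER_NODE_RS])
```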
```diff
@@ -348,8 +370,8 @@ class StateModifierNode(Runnable):
         return result
 
 
-def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
-                          state_class=None):
+def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
+                          state_class=None, output_variables=None):
     # prepare output channels
     if interrupt_after is None:
         interrupt_after = []
```
```diff
@@ -466,11 +488,12 @@ def create_graph(
         elif node_type == 'agent':
             input_params = node.get('input', ['messages'])
             input_mapping = node.get('input_mapping',
-                                     …
+                                     {'messages': {'type': 'variable', 'value': 'messages'}})
+            output_vars = node.get('output', [])
             lg_builder.add_node(node_id, FunctionTool(
                 client=client, tool=tool,
                 name=node_id, return_type='str',
-                output_variables=…
+                output_variables=output_vars + ['messages'] if 'messages' not in output_vars else output_vars,
                 input_variables=input_params,
                 input_mapping= input_mapping
             ))
```
```diff
@@ -481,7 +504,8 @@ def create_graph(
             # wrap with mappings
             pipeline_name = node.get('tool', None)
             if not pipeline_name:
-                raise ValueError(…
+                raise ValueError(
+                    "Subgraph must have a 'tool' node: add required tool to the subgraph node")
             node_fn = SubgraphRunnable(
                 inner=tool.graph,
                 name=pipeline_name,
```
```diff
@@ -499,15 +523,6 @@ def create_graph(
                 structured_output=node.get('structured_output', False),
                 task=node.get('task')
             ))
-            # TODO: decide on struct output for agent nodes
-            # elif node_type == 'agent':
-            #     lg_builder.add_node(node_id, AgentNode(
-            #         client=client, tool=tool,
-            #         name=node['id'], return_type='dict',
-            #         output_variables=node.get('output', []),
-            #         input_variables=node.get('input', ['messages']),
-            #         task=node.get('task')
-            #     ))
         elif node_type == 'loop':
             lg_builder.add_node(node_id, LoopNode(
                 client=client, tool=tool,
```
```diff
@@ -520,7 +535,8 @@ def create_graph(
             loop_toolkit_name = node.get('loop_toolkit_name')
             loop_tool_name = node.get('loop_tool')
             if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
-                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(loop_tool_name)
+                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                    loop_tool_name)
                 for t in tools:
                     if t.name == loop_tool_name:
                         logger.debug(f"Loop tool discovered: {t}")
```
```diff
@@ -555,12 +571,13 @@ def create_graph(
                     break
         elif node_type == 'code':
             from ..tools.sandbox import create_sandbox_tool
-            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True…
-            …
+            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
+                                               alita_client=kwargs.get('alita_client', None))
+            code_data = node.get('code', {'type': 'fixed', 'value': "return 'Code block is empty'"})
             lg_builder.add_node(node_id, FunctionTool(
                 tool=sandbox_tool, name=node['id'], return_type='dict',
                 output_variables=node.get('output', []),
-                input_mapping={'code':…
+                input_mapping={'code': code_data},
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
                 alita_client=kwargs.get('alita_client', None)
```
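With this change a 'code' node carries its source under a mapping-shaped 'code' entry, defaulting to a fixed placeholder when absent. A hypothetical node definition (field names inferred from the `node.get` calls above, not from package docs; the id and code body are invented):

```python
# Hypothetical 'code' node definition; the sandbox tool executes the 'value' body.
code_node = {
    'id': 'compute_totals',
    'type': 'code',
    'input': ['messages'],
    'code': {'type': 'fixed', 'value': "return len('hello')"},
    'output': ['totals'],
}
```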
```diff
@@ -593,7 +610,7 @@ def create_graph(
             else:
                 # Use all available tools
                 available_tools = [tool for tool in tools if isinstance(tool, BaseTool)]
-
+
             lg_builder.add_node(node_id, LLMNode(
                 client=client,
                 input_mapping=node.get('input_mapping', {'messages': {'type': 'variable', 'value': 'messages'}}),
```
```diff
@@ -604,7 +621,9 @@ def create_graph(
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
                 available_tools=available_tools,
-                tool_names=tool_names))
+                tool_names=tool_names,
+                steps_limit=kwargs.get('steps_limit', 25)
+            ))
         elif node_type == 'router':
             # Add a RouterNode as an independent node
             lg_builder.add_node(node_id, RouterNode(
```
|
@@ -631,6 +650,22 @@ def create_graph(
|
|
|
631
650
|
input_variables=node.get('input', ['messages']),
|
|
632
651
|
output_variables=node.get('output', [])
|
|
633
652
|
))
|
|
653
|
+
elif node_type == 'printer':
|
|
654
|
+
lg_builder.add_node(node_id, PrinterNode(
|
|
655
|
+
input_mapping=node.get('input_mapping', {'printer': {'type': 'fixed', 'value': ''}}),
|
|
656
|
+
))
|
|
657
|
+
|
|
658
|
+
# add interrupts after printer node if specified
|
|
659
|
+
interrupt_after.append(clean_string(node_id))
|
|
660
|
+
|
|
661
|
+
# reset printer output variable to avoid carrying over
|
|
662
|
+
reset_node_id = f"{node_id}_reset"
|
|
663
|
+
lg_builder.add_node(reset_node_id, PrinterNode(
|
|
664
|
+
input_mapping={'printer': {'type': 'fixed', 'value': ''}}
|
|
665
|
+
))
|
|
666
|
+
lg_builder.add_edge(node_id, reset_node_id)
|
|
667
|
+
lg_builder.add_conditional_edges(reset_node_id, TransitionalEdge(clean_string(node['transition'])))
|
|
668
|
+
continue
|
|
634
669
|
if node.get('transition'):
|
|
635
670
|
next_step = clean_string(node['transition'])
|
|
636
671
|
logger.info(f'Adding transition: {next_step}')
|
|
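The wiring above gives each printer node a companion `<node_id>_reset` PrinterNode that blanks `PRINTER_NODE_RS`, registers an interrupt so the graph pauses after printing, and routes the transition out of the reset node. A hypothetical pipeline entry for such a node (keys inferred from the `node.get` calls above; the id, text, and transition target are invented):

```python
printer_node = {
    'id': 'progress_report',
    'type': 'printer',
    'input_mapping': {
        # 'fstring' values are formatted against the graph state; missing keys
        # fall back to the safe_format helper added in utils.py below
        'printer': {'type': 'fstring', 'value': 'Current draft:\n{messages}'}
    },
    'transition': 'review_step',
}
```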
```diff
@@ -777,20 +812,46 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             # Convert chat history dict messages to LangChain message objects
             chat_history = input.pop('chat_history')
             input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
-
+
+        # handler for LLM node: if no input (Chat perspective), then take last human message
+        if not input.get('input'):
+            if input.get('messages'):
+                input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
+                                       None)]
+
         # Append current input to existing messages instead of overwriting
         if input.get('input'):
            if isinstance(input['input'], str):
                current_message = input['input']
            else:
                current_message = input.get('input')[-1]
+
            # TODO: add handler after we add 2+ inputs (filterByType, etc.)
-
+            if isinstance(current_message, HumanMessage):
+                current_content = current_message.content
+                if isinstance(current_content, list):
+                    text_contents = [
+                        item['text'] if isinstance(item, dict) and item.get('type') == 'text'
+                        else item if isinstance(item, str)
+                        else None
+                        for item in current_content
+                    ]
+                    text_contents = [text for text in text_contents if text is not None]
+                    input['input'] = ". ".join(text_contents)
+                elif isinstance(current_content, str):
+                    # on regenerate case
+                    input['input'] = current_content
+                else:
+                    input['input'] = str(current_content)
+            elif isinstance(current_message, str):
+                input['input'] = current_message
+            else:
+                input['input'] = str(current_message)
            if input.get('messages'):
                # Ensure existing messages are LangChain objects
                input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
                # Append to existing messages
-                input['messages'].append(current_message)
+                # input['messages'].append(current_message)
            else:
                # No existing messages, create new list
                input['messages'] = [current_message]
```
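The new handler flattens multimodal HumanMessage content into a plain string for the 'input' channel: dict parts tagged `type == 'text'` and bare strings are kept, everything else (e.g. image parts) is dropped, and the pieces are joined with `". "`. For example:

```python
from langchain_core.messages import HumanMessage

msg = HumanMessage(content=[
    {'type': 'text', 'text': 'Describe this image'},
    {'type': 'image_url', 'image_url': {'url': 'data:image/png;base64,...'}},  # filtered out
    'and keep it short',
])
# After the handler above: input['input'] == "Describe this image. and keep it short"
```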
```diff
@@ -801,7 +862,12 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         else:
             result = super().invoke(input, config=config, *args, **kwargs)
         try:
-            …
+            if not result.get(PRINTER_NODE_RS):
+                output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
+                              result['messages'][-1].content)
+            else:
+                # used for printer node output - it will be reset by next `reset` node
+                output = result.get(PRINTER_NODE_RS)
         except:
             output = list(result.values())[-1]
         config_state = self.get_state(config)
```
```diff
@@ -809,8 +875,6 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if is_execution_finished:
             thread_id = None
 
-
-
         result_with_state = {
             "output": output,
             "thread_id": thread_id,
```
alita_sdk/runtime/langchain/utils.py

```diff
@@ -5,8 +5,9 @@ import re
 from pydantic import create_model, Field
 from typing import Tuple, TypedDict, Any, Optional, Annotated
 from langchain_core.messages import AnyMessage
-from …
-
+from langgraph.graph import add_messages
+
+from ...runtime.langchain.constants import ELITEA_RS, PRINTER_NODE_RS
 
 logger = logging.getLogger(__name__)
 
```
```diff
@@ -130,13 +131,15 @@ def parse_type(type_str):
 
 
 def create_state(data: Optional[dict] = None):
-    state_dict = {'input': str, 'router_output': str}
+    state_dict = {'input': str, 'router_output': str,
+                  ELITEA_RS: str, PRINTER_NODE_RS: str}  # Always include router_output
     types_dict = {}
     if not data:
         data = {'messages': 'list[str]'}
     for key, value in data.items():
         # support of old & new UI
         value = value['type'] if isinstance(value, dict) else value
+        value = 'str' if value == 'string' else value  # normalize string type (old state support)
         if key == 'messages':
             state_dict[key] = Annotated[list[AnyMessage], add_messages]
         elif value in ['str', 'int', 'float', 'bool', 'list', 'dict', 'number', 'dict']:
```
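create_state now seeds every state schema with the ELITEA_RS and PRINTER_NODE_RS channels and accepts the legacy 'string' type name. A small sketch (the shape of the return value depends on create_state's unchanged tail, which this diff does not show):

```python
from alita_sdk.runtime.langchain.utils import create_state

# Old UI payloads may declare {'type': 'string'}; both spellings now map to str.
state_schema = create_state({
    'messages': 'list[str]',       # becomes Annotated[list[AnyMessage], add_messages]
    'draft': {'type': 'string'},   # normalized to 'str'
})
```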
```diff
@@ -177,13 +180,30 @@ def propagate_the_input_mapping(input_mapping: dict[str, dict], input_variables:
         var_dict = create_params(input_variables, source)
 
         if value['type'] == 'fstring':
-            input_data[key] = value['value'].format(**var_dict)
+            try:
+                input_data[key] = value['value'].format(**var_dict)
+            except KeyError as e:
+                logger.error(f"KeyError in fstring formatting for key '{key}'. Attempt to find proper data in state.\n{e}")
+                try:
+                    # search for variables in state if not found in var_dict
+                    input_data[key] = safe_format(value['value'], state)
+                except KeyError as no_var_exception:
+                    logger.error(f"KeyError in fstring formatting for key '{key}' with state data.\n{no_var_exception}")
+                    # leave value as is if still not found (could be a constant string marked as fstring by mistake)
+                    input_data[key] = value['value']
         elif value['type'] == 'fixed':
             input_data[key] = value['value']
         else:
             input_data[key] = source.get(value['value'], "")
     return input_data
 
+
+def safe_format(template, mapping):
+    """Format a template string using a mapping, leaving placeholders unchanged if keys are missing."""
+
+    def replacer(match):
+        key = match.group(1)
+        return str(mapping.get(key, f'{{{key}}}'))
+    return re.sub(r'\{(\w+)\}', replacer, template)
 
 def create_pydantic_model(model_name: str, variables: dict[str, dict]):
     fields = {}
```
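safe_format is the fallback used when str.format raises KeyError against the node-level variables: it substitutes only the placeholders it can resolve from the state and leaves the rest intact, so for a plain dict it never raises itself. For example (import path taken from this diff's file list):

```python
from alita_sdk.runtime.langchain.utils import safe_format

state = {'input': 'summarize the report'}
# Known keys are substituted; unknown ones stay as literal placeholders.
print(safe_format("Task: {input}; attempt {retry_count}", state))
# -> "Task: summarize the report; attempt {retry_count}"
```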
alita_sdk/runtime/models/mcp_models.py (new file)

```diff
@@ -0,0 +1,57 @@
+"""
+Models for MCP (Model Context Protocol) configuration.
+Following MCP specification for remote HTTP servers only.
+"""
+
+from typing import Optional, List, Dict, Any
+from pydantic import BaseModel, Field, validator
+from urllib.parse import urlparse
+
+
+class McpConnectionConfig(BaseModel):
+    """
+    MCP connection configuration for remote HTTP servers.
+    Based on https://modelcontextprotocol.io/specification/2025-06-18
+    """
+
+    url: str = Field(description="MCP server HTTP URL (http:// or https://)")
+    headers: Optional[Dict[str, str]] = Field(
+        default=None,
+        description="HTTP headers for the connection (JSON object)"
+    )
+
+    @validator('url')
+    def validate_url(cls, v):
+        """Validate URL is HTTP/HTTPS."""
+        if not v:
+            raise ValueError("URL cannot be empty")
+
+        parsed = urlparse(v)
+        if parsed.scheme not in ['http', 'https']:
+            raise ValueError("URL must use http:// or https:// scheme for remote MCP servers")
+
+        if not parsed.netloc:
+            raise ValueError("URL must include host and port")
+
+        return v
+
+
+class McpToolkitConfig(BaseModel):
+    """Configuration for a single remote MCP server toolkit."""
+
+    server_name: str = Field(description="MCP server name/identifier")
+    connection: McpConnectionConfig = Field(description="MCP connection configuration")
+    timeout: int = Field(default=60, description="Request timeout in seconds", ge=1, le=3600)
+    selected_tools: List[str] = Field(default_factory=list, description="Specific tools to enable (empty = all)")
+    enable_caching: bool = Field(default=True, description="Enable tool schema caching")
+    cache_ttl: int = Field(default=300, description="Cache TTL in seconds", ge=60, le=3600)
+
+
+class McpToolMetadata(BaseModel):
+    """Metadata about an MCP tool."""
+
+    name: str = Field(description="Tool name")
+    description: str = Field(description="Tool description")
+    server: str = Field(description="Source server name")
+    input_schema: Dict[str, Any] = Field(description="Tool input schema")
+    enabled: bool = Field(default=True, description="Whether tool is enabled")
```
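A configuration sketch for the new models (the server name, URL, and token below are invented for illustration). Note that the url validator only checks that a netloc is present; despite the error text, a port is not actually required:

```python
from alita_sdk.runtime.models.mcp_models import McpConnectionConfig, McpToolkitConfig

toolkit = McpToolkitConfig(
    server_name="docs-server",                       # hypothetical server
    connection=McpConnectionConfig(
        url="https://mcp.example.com/mcp",           # must be http:// or https://
        headers={"Authorization": "Bearer <token>"},
    ),
    timeout=120,                # validated against ge=1 / le=3600
    selected_tools=["search"],  # empty list means "all tools"
)

# Non-HTTP schemes are rejected by validate_url via a pydantic ValidationError:
try:
    McpConnectionConfig(url="stdio://local-process")
except Exception as err:
    print(err)
```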
alita_sdk/runtime/toolkits/__init__.py (new file)

```diff
@@ -0,0 +1,24 @@
+"""
+Runtime toolkits module for Alita SDK.
+This module provides various toolkit implementations for LangGraph agents.
+"""
+
+from .application import ApplicationToolkit
+from .artifact import ArtifactToolkit
+from .datasource import DatasourcesToolkit
+from .prompt import PromptToolkit
+from .subgraph import SubgraphToolkit
+from .vectorstore import VectorStoreToolkit
+from .mcp import McpToolkit
+from ...tools.memory import MemoryToolkit
+
+__all__ = [
+    "ApplicationToolkit",
+    "ArtifactToolkit",
+    "DatasourcesToolkit",
+    "PromptToolkit",
+    "SubgraphToolkit",
+    "VectorStoreToolkit",
+    "McpToolkit",
+    "MemoryToolkit"
+]
```
alita_sdk/runtime/toolkits/application.py

```diff
@@ -39,7 +39,14 @@ class ApplicationToolkit(BaseToolkit):
             description=app_details.get("description"),
             application=app,
             args_schema=applicationToolSchema,
-            return_type='str')])
+            return_type='str',
+            client=client,
+            args_runnable={
+                "application_id": application_id,
+                "application_version_id": application_version_id,
+                "store": store,
+                "llm": client.get_llm(version_details['llm_settings']['model_name'], model_settings),
+            })])
 
     def get_tools(self):
         return self.tools
```