alita-sdk 0.3.205__py3-none-any.whl → 0.3.207__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/clients/client.py +314 -11
- alita_sdk/runtime/langchain/assistant.py +22 -21
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +1 -4
- alita_sdk/runtime/langchain/langraph_agent.py +6 -1
- alita_sdk/runtime/langchain/store_manager.py +4 -4
- alita_sdk/runtime/toolkits/application.py +5 -10
- alita_sdk/runtime/toolkits/tools.py +11 -21
- alita_sdk/runtime/tools/vectorstore.py +25 -11
- alita_sdk/runtime/utils/streamlit.py +505 -222
- alita_sdk/runtime/utils/toolkit_runtime.py +147 -0
- alita_sdk/runtime/utils/toolkit_utils.py +157 -0
- alita_sdk/runtime/utils/utils.py +5 -0
- alita_sdk/tools/__init__.py +2 -0
- alita_sdk/tools/ado/repos/repos_wrapper.py +20 -13
- alita_sdk/tools/bitbucket/api_wrapper.py +5 -5
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +54 -29
- alita_sdk/tools/elitea_base.py +9 -4
- alita_sdk/tools/gitlab/__init__.py +22 -10
- alita_sdk/tools/gitlab/api_wrapper.py +278 -253
- alita_sdk/tools/gitlab/tools.py +354 -376
- alita_sdk/tools/llm/llm_utils.py +0 -6
- alita_sdk/tools/memory/__init__.py +54 -10
- alita_sdk/tools/openapi/__init__.py +14 -3
- alita_sdk/tools/sharepoint/__init__.py +2 -1
- alita_sdk/tools/sharepoint/api_wrapper.py +11 -3
- alita_sdk/tools/testrail/api_wrapper.py +39 -16
- alita_sdk/tools/utils/content_parser.py +77 -13
- {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/RECORD +32 -40
- alita_sdk/community/analysis/__init__.py +0 -0
- alita_sdk/community/analysis/ado_analyse/__init__.py +0 -103
- alita_sdk/community/analysis/ado_analyse/api_wrapper.py +0 -261
- alita_sdk/community/analysis/github_analyse/__init__.py +0 -98
- alita_sdk/community/analysis/github_analyse/api_wrapper.py +0 -166
- alita_sdk/community/analysis/gitlab_analyse/__init__.py +0 -110
- alita_sdk/community/analysis/gitlab_analyse/api_wrapper.py +0 -172
- alita_sdk/community/analysis/jira_analyse/__init__.py +0 -141
- alita_sdk/community/analysis/jira_analyse/api_wrapper.py +0 -252
- alita_sdk/runtime/llms/alita.py +0 -259
- {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/top_level.txt +0 -0
--- a/alita_sdk/runtime/clients/client.py
+++ b/alita_sdk/runtime/clients/client.py
@@ -10,6 +10,7 @@ from langchain_core.messages import (
 )
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
+from langchain_openai import OpenAIEmbeddings, ChatOpenAI
 
 from ..langchain.assistant import Assistant as LangChainAssistant
 # from ..llamaindex.assistant import Assistant as LLamaAssistant
@@ -37,6 +38,7 @@ class AlitaClient:
 
         self.base_url = base_url.rstrip('/')
         self.api_path = '/api/v1'
+        self.llm_path = '/llm/v1'
         self.project_id = project_id
         self.auth_token = auth_token
         self.headers = {
@@ -152,6 +154,35 @@ class AlitaClient:
             return resp.json()
         return []
 
+    def get_llm(self, model_name: str, model_config: dict) -> ChatOpenAI:
+        """
+        Get a ChatOpenAI model instance based on the model name and configuration.
+
+        Args:
+            model_name: Name of the model to retrieve
+            model_config: Configuration parameters for the model
+
+        Returns:
+            An instance of ChatOpenAI configured with the provided parameters.
+        """
+        if not model_name:
+            raise ValueError("Model name must be provided")
+
+        logger.info(f"Creating ChatOpenAI model: {model_name} with config: {model_config}")
+
+        return ChatOpenAI(
+            base_url=f"{self.base_url}{self.llm_path}",
+            model=model_name,
+            api_key=self.auth_token,
+            stream_usage=model_config.get("stream_usage", True),
+            max_tokens=model_config.get("max_tokens", None),
+            top_p=model_config.get("top_p"),
+            temperature=model_config.get("temperature"),
+            max_retries=model_config.get("max_retries", 3),
+            seed=model_config.get("seed", None),
+        )
+
+
     def get_app_version_details(self, application_id: int, application_version_id: int) -> dict:
         url = f"{self.application_versions}/{application_id}/{application_version_id}"
         if self.configurations:
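For orientation, the new `get_llm` helper can be exercised roughly as below. This is a minimal sketch: the host, token, and `AlitaClient` constructor keywords are assumptions based on the `__init__` fields shown above, not values taken from this diff.

```python
from alita_sdk.runtime.clients.client import AlitaClient

# Hypothetical deployment details -- substitute your own.
client = AlitaClient(
    base_url="https://elitea.example.com",
    project_id=1,
    auth_token="YOUR_API_TOKEN",
)

# Returns a langchain_openai.ChatOpenAI bound to <base_url>/llm/v1.
llm = client.get_llm(
    model_name="gpt-4o-mini",
    model_config={"temperature": 0.1, "max_tokens": 512},
)
print(llm.invoke("ping").content)
```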
@@ -177,11 +208,12 @@ class AlitaClient:
         logger.info(f"Unsecret response: {data}")
         return data.get('value', None)
 
-    def application(self,
+    def application(self, application_id: int, application_version_id: int,
                     tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                     app_type=None, memory=None, runtime='langchain',
                     application_variables: Optional[dict] = None,
-                    version_details: Optional[dict] = None, store: Optional[BaseStore] = None
+                    version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
+                    llm: Optional[ChatOpenAI] = None):
         if tools is None:
             tools = []
         if chat_history is None:
@@ -200,7 +232,15 @@ class AlitaClient:
             for var in data.get('variables', {}):
                 if var['name'] in application_variables:
                     var.update(application_variables[var['name']])
-
+        if llm is None:
+            llm = self.get_llm(
+                model_name=data['llm_settings']['model_name'],
+                model_config = {
+                    "max_tokens": data['llm_settings']['max_tokens'],
+                    "top_p": data['llm_settings']['top_p'],
+                    "temperature": data['llm_settings']['temperature']
+                }
+            )
         if not app_type:
             app_type = data.get("agent_type", "react")
         if app_type == "alita":
@@ -212,10 +252,10 @@ class AlitaClient:
         elif app_type == 'autogen':
             app_type = "openai"
         if runtime == 'nonrunnable':
-            return LangChainAssistant(self, data,
+            return LangChainAssistant(self, data, llm, chat_history, app_type,
                                       tools=tools, memory=memory, store=store)
         if runtime == 'langchain':
-            return LangChainAssistant(self, data,
+            return LangChainAssistant(self, data, llm,
                                       chat_history, app_type,
                                       tools=tools, memory=memory, store=store).runnable()
         elif runtime == 'llama':
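`application` now accepts an optional pre-built `llm`; when omitted, the client derives one from the version's `llm_settings` via `get_llm`. Both call styles, sketched with placeholder IDs:

```python
# Let the client build the LLM from the app version's llm_settings.
agent = client.application(application_id=42, application_version_id=7)

# Or inject an explicitly configured model.
custom_llm = client.get_llm("gpt-4o-mini", {"temperature": 0.0})
agent = client.application(
    application_id=42,
    application_version_id=7,
    llm=custom_llm,
)  # runtime='langchain' by default, so a runnable comes back
```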
@@ -433,15 +473,15 @@ class AlitaClient:
             logger.warning(f"Error: Could not determine user ID for MCP tool: {e}")
             return None
 
-    def predict_agent(self,
+    def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
                       tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                       memory=None, runtime='langchain', variables: Optional[list] = None,
                       store: Optional[BaseStore] = None):
         """
         Create a predict-type agent with minimal configuration.
-
+
         Args:
-
+            llm: The LLM to use
             instructions: System instructions for the agent
             tools: Optional list of tools to provide to the agent
             chat_history: Optional chat history
|
|
449
489
|
runtime: Runtime type (default: 'langchain')
|
450
490
|
variables: Optional list of variables for the agent
|
451
491
|
store: Optional store for memory
|
452
|
-
|
492
|
+
|
453
493
|
Returns:
|
454
494
|
Runnable agent ready for execution
|
455
495
|
"""
|
@@ -459,7 +499,7 @@ class AlitaClient:
             chat_history = []
         if variables is None:
             variables = []
-
+
         # Create a minimal data structure for predict agent
         # All LLM settings are taken from the passed client instance
         agent_data = {
|
@@ -467,6 +507,269 @@ class AlitaClient:
|
|
467
507
|
'tools': tools, # Tools are handled separately in predict agents
|
468
508
|
'variables': variables
|
469
509
|
}
|
470
|
-
return LangChainAssistant(self, agent_data,
|
510
|
+
return LangChainAssistant(self, agent_data, llm,
|
471
511
|
chat_history, "predict", memory=memory, store=store).runnable()
|
512
|
+
|
513
|
+
def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
|
514
|
+
runtime_config: dict = None, llm_model: str = None,
|
515
|
+
llm_config: dict = None) -> dict:
|
516
|
+
"""
|
517
|
+
Test a single tool from a toolkit with given parameters and runtime callbacks.
|
518
|
+
|
519
|
+
This method initializes a toolkit, calls a specific tool, and supports runtime
|
520
|
+
callbacks for event dispatching, enabling tools to send custom events back to
|
521
|
+
the platform during execution.
|
522
|
+
|
523
|
+
Args:
|
524
|
+
toolkit_config: Configuration dictionary for the toolkit containing:
|
525
|
+
- toolkit_name: Name of the toolkit (e.g., 'github', 'jira')
|
526
|
+
- settings: Dictionary containing toolkit-specific settings
|
527
|
+
tool_name: Name of the specific tool to call
|
528
|
+
tool_params: Parameters to pass to the tool (default: empty dict)
|
529
|
+
runtime_config: Runtime configuration with callbacks for events, containing:
|
530
|
+
- callbacks: List of callback handlers for event processing
|
531
|
+
- configurable: Additional configuration parameters
|
532
|
+
- tags: Tags for the execution
|
533
|
+
llm_model: Name of the LLM model to use (default: 'gpt-4o-mini')
|
534
|
+
llm_config: Configuration for the LLM containing:
|
535
|
+
- max_tokens: Maximum tokens for response (default: 1000)
|
536
|
+
- temperature: Temperature for response generation (default: 0.1)
|
537
|
+
- top_p: Top-p value for response generation (default: 1.0)
|
538
|
+
|
539
|
+
Returns:
|
540
|
+
Dictionary containing:
|
541
|
+
- success: Boolean indicating if the operation was successful
|
542
|
+
- result: The actual result from the tool (if successful)
|
543
|
+
- error: Error message (if unsuccessful)
|
544
|
+
- tool_name: Name of the executed tool
|
545
|
+
- toolkit_config: Original toolkit configuration
|
546
|
+
- events_dispatched: List of custom events dispatched during execution
|
547
|
+
- llm_model: LLM model used for the test
|
548
|
+
- execution_time_seconds: Time taken to execute the tool in seconds
|
549
|
+
|
550
|
+
Example:
|
551
|
+
>>> from langchain_core.callbacks import BaseCallbackHandler
|
552
|
+
>>>
|
553
|
+
>>> class TestCallback(BaseCallbackHandler):
|
554
|
+
... def __init__(self):
|
555
|
+
... self.events = []
|
556
|
+
... def on_custom_event(self, name, data, **kwargs):
|
557
|
+
... self.events.append({'name': name, 'data': data})
|
558
|
+
>>>
|
559
|
+
>>> callback = TestCallback()
|
560
|
+
>>> runtime_config = {'callbacks': [callback]}
|
561
|
+
>>>
|
562
|
+
>>> config = {
|
563
|
+
... 'toolkit_name': 'github',
|
564
|
+
... 'settings': {'github_token': 'your_token'}
|
565
|
+
... }
|
566
|
+
>>> result = client.test_toolkit_tool(
|
567
|
+
... config, 'get_repository_info',
|
568
|
+
... {'repo_name': 'alita'}, runtime_config,
|
569
|
+
... llm_model='gpt-4o-mini',
|
570
|
+
... llm_config={'temperature': 0.1}
|
571
|
+
... )
|
572
|
+
"""
|
573
|
+
if tool_params is None:
|
574
|
+
tool_params = {}
|
575
|
+
if llm_model is None:
|
576
|
+
llm_model = 'gpt-4o-mini'
|
577
|
+
if llm_config is None:
|
578
|
+
llm_config = {
|
579
|
+
'max_tokens': 1024,
|
580
|
+
'temperature': 0.1,
|
581
|
+
'top_p': 1.0
|
582
|
+
}
|
583
|
+
|
584
|
+
try:
|
585
|
+
from ..utils.toolkit_utils import instantiate_toolkit_with_client
|
586
|
+
from langchain_core.runnables import RunnableConfig
|
587
|
+
import logging
|
588
|
+
import time
|
589
|
+
|
590
|
+
logger = logging.getLogger(__name__)
|
591
|
+
logger.info(f"Testing tool '{tool_name}' from toolkit '{toolkit_config.get('toolkit_name')}' with LLM '{llm_model}'")
|
592
|
+
|
593
|
+
# Create RunnableConfig for callback support
|
594
|
+
config = None
|
595
|
+
callbacks = []
|
596
|
+
events_dispatched = []
|
597
|
+
|
598
|
+
if runtime_config:
|
599
|
+
callbacks = runtime_config.get('callbacks', [])
|
600
|
+
if callbacks:
|
601
|
+
config = RunnableConfig(
|
602
|
+
callbacks=callbacks,
|
603
|
+
configurable=runtime_config.get('configurable', {}),
|
604
|
+
tags=runtime_config.get('tags', [])
|
605
|
+
)
|
606
|
+
|
607
|
+
# Create LLM instance using the client's get_llm method
|
608
|
+
try:
|
609
|
+
llm = self.get_llm(llm_model, llm_config)
|
610
|
+
logger.info(f"Created LLM instance: {llm_model} with config: {llm_config}")
|
611
|
+
except Exception as llm_error:
|
612
|
+
logger.error(f"Failed to create LLM instance: {str(llm_error)}")
|
613
|
+
return {
|
614
|
+
"success": False,
|
615
|
+
"error": f"Failed to create LLM instance '{llm_model}': {str(llm_error)}",
|
616
|
+
"tool_name": tool_name,
|
617
|
+
"toolkit_config": toolkit_config,
|
618
|
+
"llm_model": llm_model,
|
619
|
+
"events_dispatched": events_dispatched,
|
620
|
+
"execution_time_seconds": 0.0
|
621
|
+
}
|
622
|
+
|
623
|
+
# Instantiate the toolkit with client and LLM support
|
624
|
+
tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
|
625
|
+
|
626
|
+
if not tools:
|
627
|
+
return {
|
628
|
+
"success": False,
|
629
|
+
"error": f"Failed to instantiate toolkit '{toolkit_config.get('toolkit_name')}' or no tools found",
|
630
|
+
"tool_name": tool_name,
|
631
|
+
"toolkit_config": toolkit_config,
|
632
|
+
"llm_model": llm_model,
|
633
|
+
"events_dispatched": events_dispatched,
|
634
|
+
"execution_time_seconds": 0.0
|
635
|
+
}
|
636
|
+
|
637
|
+
# Find the specific tool
|
638
|
+
target_tool = None
|
639
|
+
for tool in tools:
|
640
|
+
if hasattr(tool, 'name') and tool.name == tool_name:
|
641
|
+
target_tool = tool
|
642
|
+
break
|
643
|
+
elif hasattr(tool, 'func') and hasattr(tool.func, '__name__') and tool.func.__name__ == tool_name:
|
644
|
+
target_tool = tool
|
645
|
+
break
|
646
|
+
|
647
|
+
if target_tool is None:
|
648
|
+
available_tools = []
|
649
|
+
for tool in tools:
|
650
|
+
if hasattr(tool, 'name'):
|
651
|
+
available_tools.append(tool.name)
|
652
|
+
elif hasattr(tool, 'func') and hasattr(tool.func, '__name__'):
|
653
|
+
available_tools.append(tool.func.__name__)
|
654
|
+
|
655
|
+
return {
|
656
|
+
"success": False,
|
657
|
+
"error": f"Tool '{tool_name}' not found. Available tools: {available_tools}",
|
658
|
+
"tool_name": tool_name,
|
659
|
+
"toolkit_config": toolkit_config,
|
660
|
+
"llm_model": llm_model,
|
661
|
+
"events_dispatched": events_dispatched,
|
662
|
+
"execution_time_seconds": 0.0
|
663
|
+
}
|
664
|
+
|
665
|
+
# Execute the tool with callback support
|
666
|
+
try:
|
667
|
+
logger.info(f"Executing tool '{tool_name}' with parameters: {tool_params}")
|
668
|
+
|
669
|
+
# Start timing the tool execution
|
670
|
+
start_time = time.time()
|
671
|
+
|
672
|
+
# Different tools might have different invocation patterns
|
673
|
+
if hasattr(target_tool, 'invoke'):
|
674
|
+
# Use config for tools that support RunnableConfig
|
675
|
+
if config is not None:
|
676
|
+
result = target_tool.invoke(tool_params, config=config)
|
677
|
+
else:
|
678
|
+
result = target_tool.invoke(tool_params)
|
679
|
+
elif hasattr(target_tool, 'run'):
|
680
|
+
result = target_tool.run(tool_params)
|
681
|
+
elif callable(target_tool):
|
682
|
+
result = target_tool(**tool_params)
|
683
|
+
else:
|
684
|
+
execution_time = time.time() - start_time
|
685
|
+
return {
|
686
|
+
"success": False,
|
687
|
+
"error": f"Tool '{tool_name}' is not callable",
|
688
|
+
"tool_name": tool_name,
|
689
|
+
"toolkit_config": toolkit_config,
|
690
|
+
"llm_model": llm_model,
|
691
|
+
"events_dispatched": events_dispatched,
|
692
|
+
"execution_time_seconds": execution_time
|
693
|
+
}
|
694
|
+
|
695
|
+
# Calculate execution time
|
696
|
+
execution_time = time.time() - start_time
|
697
|
+
|
698
|
+
# Extract events from callbacks if they support it
|
699
|
+
for callback in callbacks:
|
700
|
+
if hasattr(callback, 'events'):
|
701
|
+
events_dispatched.extend(callback.events)
|
702
|
+
elif hasattr(callback, 'get_events'):
|
703
|
+
events_dispatched.extend(callback.get_events())
|
704
|
+
elif hasattr(callback, 'dispatched_events'):
|
705
|
+
events_dispatched.extend(callback.dispatched_events)
|
706
|
+
|
707
|
+
logger.info(f"Tool '{tool_name}' executed successfully in {execution_time:.3f} seconds")
|
708
|
+
|
709
|
+
return {
|
710
|
+
"success": True,
|
711
|
+
"result": result,
|
712
|
+
"tool_name": tool_name,
|
713
|
+
"toolkit_config": toolkit_config,
|
714
|
+
"llm_model": llm_model,
|
715
|
+
"events_dispatched": events_dispatched,
|
716
|
+
"execution_time_seconds": execution_time
|
717
|
+
}
|
718
|
+
|
719
|
+
except Exception as tool_error:
|
720
|
+
# Calculate execution time even for failed executions
|
721
|
+
execution_time = time.time() - start_time
|
722
|
+
logger.error(f"Error executing tool '{tool_name}' after {execution_time:.3f} seconds: {str(tool_error)}")
|
723
|
+
|
724
|
+
# Still collect events even if tool execution failed
|
725
|
+
for callback in callbacks:
|
726
|
+
if hasattr(callback, 'events'):
|
727
|
+
events_dispatched.extend(callback.events)
|
728
|
+
elif hasattr(callback, 'get_events'):
|
729
|
+
events_dispatched.extend(callback.get_events())
|
730
|
+
elif hasattr(callback, 'dispatched_events'):
|
731
|
+
events_dispatched.extend(callback.dispatched_events)
|
732
|
+
|
733
|
+
return {
|
734
|
+
"success": False,
|
735
|
+
"error": f"Tool execution failed: {str(tool_error)}",
|
736
|
+
"tool_name": tool_name,
|
737
|
+
"toolkit_config": toolkit_config,
|
738
|
+
"llm_model": llm_model,
|
739
|
+
"events_dispatched": events_dispatched,
|
740
|
+
"execution_time_seconds": execution_time
|
741
|
+
}
|
742
|
+
|
743
|
+
except Exception as e:
|
744
|
+
logger = logging.getLogger(__name__)
|
745
|
+
logger.error(f"Error in test_toolkit_tool: {str(e)}")
|
746
|
+
return {
|
747
|
+
"success": False,
|
748
|
+
"error": f"Method execution failed: {str(e)}",
|
749
|
+
"tool_name": tool_name,
|
750
|
+
"toolkit_config": toolkit_config,
|
751
|
+
"llm_model": llm_model if 'llm_model' in locals() else None,
|
752
|
+
"events_dispatched": [],
|
753
|
+
"execution_time_seconds": 0.0
|
754
|
+
}
|
755
|
+
|
756
|
+
def _get_real_user_id(self) -> str:
|
757
|
+
"""Extract the real user ID from the auth token for MCP tool calls."""
|
758
|
+
try:
|
759
|
+
import base64
|
760
|
+
import json
|
761
|
+
# Assuming JWT token, extract user ID from payload
|
762
|
+
# This is a basic implementation - adjust based on your token format
|
763
|
+
token_parts = self.auth_token.split('.')
|
764
|
+
if len(token_parts) >= 2:
|
765
|
+
payload_part = token_parts[1]
|
766
|
+
# Add padding if needed
|
767
|
+
padding = len(payload_part) % 4
|
768
|
+
if padding:
|
769
|
+
payload_part += '=' * (4 - padding)
|
770
|
+
payload = json.loads(base64.b64decode(payload_part))
|
771
|
+
return payload.get('user_id') or payload.get('sub') or payload.get('uid')
|
772
|
+
except Exception as e:
|
773
|
+
logger.error(f"Error extracting user ID from token: {e}")
|
774
|
+
return None
|
472
775
|
|
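`predict_agent` likewise takes the LLM as an explicit first argument now. A minimal sketch (the invoke payload shape is an assumption; it depends on the assistant runnable, which this diff does not show):

```python
llm = client.get_llm("gpt-4o-mini", {"temperature": 0.2})
agent = client.predict_agent(
    llm,
    instructions="You are a terse release-notes assistant.",
)
# Payload shape assumed for illustration only.
result = agent.invoke({"input": "Summarize the 0.3.207 changes."})
```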
--- a/alita_sdk/runtime/langchain/assistant.py
+++ b/alita_sdk/runtime/langchain/assistant.py
@@ -36,29 +36,30 @@ class Assistant:
 
         logger.debug("Data for agent creation: %s", data)
         logger.info("App type: %s", app_type)
-
+
+        self.client = client
         # For predict agents, use the client as-is since it's already configured
-        if app_type == "predict":
-            self.client = client
-        else:
-            # For other agent types, configure client from llm_settings
-            self.client = copy(client)
-            self.client.max_tokens = data['llm_settings']['max_tokens']
-            self.client.temperature = data['llm_settings']['temperature']
-            self.client.top_p = data['llm_settings']['top_p']
-            self.client.top_k = data['llm_settings']['top_k']
-            self.client.model_name = data['llm_settings']['model_name']
-            self.client.integration_uid = data['llm_settings']['integration_uid']
+        # if app_type == "predict":
+        #     self.client = client
+        # else:
+        #     # For other agent types, configure client from llm_settings
+        #     self.client = copy(client)
+        #     self.client.max_tokens = data['llm_settings']['max_tokens']
+        #     self.client.temperature = data['llm_settings']['temperature']
+        #     self.client.top_p = data['llm_settings']['top_p']
+        #     self.client.top_k = data['llm_settings']['top_k']
+        #     self.client.model_name = data['llm_settings']['model_name']
+        #     self.client.integration_uid = data['llm_settings']['integration_uid']
 
-        model_type = data["llm_settings"]["indexer_config"]["ai_model"]
-        model_params = data["llm_settings"]["indexer_config"]["ai_model_params"]
-        #
-        target_pkg, target_name = model_type.rsplit(".", 1)
-        target_cls = getattr(
-            importlib.import_module(target_pkg),
-            target_name
-        )
-        self.client = target_cls(**model_params)
+        # model_type = data["llm_settings"]["indexer_config"]["ai_model"]
+        # model_params = data["llm_settings"]["indexer_config"]["ai_model_params"]
+        # #
+        # target_pkg, target_name = model_type.rsplit(".", 1)
+        # target_cls = getattr(
+        #     importlib.import_module(target_pkg),
+        #     target_name
+        # )
+        # self.client = target_cls(**model_params)
         # validate agents compatibility: non-pipeline agents cannot have pipelines as toolkits
         if app_type not in ["pipeline", "predict"]:
             tools_to_check = data.get('tools', [])
--- a/alita_sdk/runtime/langchain/interfaces/llm_processor.py
+++ b/alita_sdk/runtime/langchain/interfaces/llm_processor.py
@@ -50,9 +50,6 @@ def get_model(model_type: str, model_params: dict):
         return get_llm(model_type)(**model_params)
     if model_type == "PreloadedChatModel":
         return PreloadedChatModel(**model_params)
-    if model_type == "Alita":
-        from ...llms.alita import AlitaClient
-        return AlitaClient(**model_params)
    if model_type in chat_models:
        model = getattr(
            __import__("langchain_community.chat_models", fromlist=[model_type]),
@@ -185,7 +182,7 @@ def add_documents(vectorstore, documents):
         texts.append(document.page_content)
         for key in document.metadata:
             if isinstance(document.metadata[key], list):
-                document.metadata[key] = "; ".join(document.metadata[key])
+                document.metadata[key] = "; ".join([str(val) for val in document.metadata[key]])
             if isinstance(document.metadata[key], dict):
                 document.metadata[key] = dumps(document.metadata[key])
         metadata.append(document.metadata)
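The `add_documents` change guards list-valued metadata against non-string items: `str.join` raises `TypeError` on anything that is not a `str`, which the new `str(val)` coercion avoids. A self-contained illustration:

```python
tags = ["alpha", 7, True]

# Old behavior:
#   "; ".join(tags)
# TypeError: sequence item 1: expected str instance, int found

# New behavior coerces each value first:
print("; ".join(str(val) for val in tags))  # -> "alpha; 7; True"
```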
--- a/alita_sdk/runtime/langchain/langraph_agent.py
+++ b/alita_sdk/runtime/langchain/langraph_agent.py
@@ -500,7 +500,12 @@ def create_graph(
             }
 
             # Check if tools should be bound to this LLM node
-
+            connected_tools = node.get('tool_names', {})
+            tool_names = []
+            if isinstance(connected_tools, dict):
+                for toolkit, selected_tools in connected_tools.items():
+                    for tool in selected_tools:
+                        tool_names.append(f"{toolkit}___{tool}")
 
             # Filter tools if specific tool names are provided
             available_tools = []
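The `create_graph` change flattens a node's `tool_names` mapping into `<toolkit>___<tool>` strings before the tool filtering below. The loop in isolation, with sample data:

```python
connected_tools = {"github": ["get_issue", "create_pr"], "jira": ["search"]}

tool_names = []
if isinstance(connected_tools, dict):
    for toolkit, selected_tools in connected_tools.items():
        for tool in selected_tools:
            tool_names.append(f"{toolkit}___{tool}")

print(tool_names)
# ['github___get_issue', 'github___create_pr', 'jira___search']
```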
--- a/alita_sdk/runtime/langchain/store_manager.py
+++ b/alita_sdk/runtime/langchain/store_manager.py
@@ -3,9 +3,6 @@ import atexit
 import logging
 from urllib.parse import urlparse, unquote
 
-from psycopg import Connection
-from langgraph.store.postgres import PostgresStore
-
 logger = logging.getLogger(__name__)
 
 class StoreManager:
@@ -37,7 +34,10 @@ class StoreManager:
             "dbname": parsed.path.lstrip("/") if parsed.path else None
         }
 
-    def get_store(self, conn_str: str)
+    def get_store(self, conn_str: str):
+        from psycopg import Connection
+        from langgraph.store.postgres import PostgresStore
+
         store = self._stores.get(conn_str)
         if store is None:
             logger.info(f"Creating new PostgresStore for connection: {conn_str}")
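Moving the `psycopg` and `PostgresStore` imports inside `get_store` defers those hard dependencies until a store is actually requested, so importing `store_manager` no longer fails when `psycopg` is not installed. A usage sketch, assuming `StoreManager` can be constructed directly (its constructor is not part of this diff) and with a placeholder connection string:

```python
from alita_sdk.runtime.langchain.store_manager import StoreManager

manager = StoreManager()
# psycopg / langgraph.store.postgres are imported only here, on first use.
store = manager.get_store("postgresql://user:secret@db.example.com:5432/alita")
```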
--- a/alita_sdk/runtime/toolkits/application.py
+++ b/alita_sdk/runtime/toolkits/application.py
@@ -21,25 +21,20 @@ class ApplicationToolkit(BaseToolkit):
     )
 
     @classmethod
-    def get_toolkit(cls, client:
+    def get_toolkit(cls, client: 'AlitaClient', application_id: int, application_version_id: int,
                     selected_tools: list[str] = [], store: Optional[BaseStore] = None):
-        from ..llms.alita import AlitaChatModel
 
         app_details = client.get_app_details(application_id)
         version_details = client.get_app_version_details(application_id, application_version_id)
-
-            "deployment": client.base_url,
-            "model": version_details['llm_settings']['model_name'],
-            "api_key": app_api_key,
-            "project_id": client.project_id,
-            "integration_uid": version_details['llm_settings']['integration_uid'],
+        model_settings = {
             "max_tokens": version_details['llm_settings']['max_tokens'],
             "top_p": version_details['llm_settings']['top_p'],
-            "top_k": version_details['llm_settings']['top_k'],
             "temperature": version_details['llm_settings']['temperature'],
         }
 
-        app = client.application(
+        app = client.application(application_id, application_version_id, store=store,
+                                 llm=client.get_llm(version_details['llm_settings']['model_name'],
+                                                    model_settings))
         return cls(tools=[Application(name=app_details.get("name"),
                                       description=app_details.get("description"),
                                       application=app,
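With the `app_api_key` plumbing gone, `ApplicationToolkit.get_toolkit` needs only the client and IDs; the LLM is built internally through `client.get_llm`. A call sketch with placeholder IDs:

```python
toolkit = ApplicationToolkit.get_toolkit(
    client,                      # an AlitaClient
    application_id=42,
    application_version_id=7,
    selected_tools=[],
)
tools = toolkit.get_tools()
```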
--- a/alita_sdk/runtime/toolkits/tools.py
+++ b/alita_sdk/runtime/toolkits/tools.py
@@ -14,18 +14,15 @@ from .vectorstore import VectorStoreToolkit
 from ..tools.mcp_server_tool import McpServerTool
 # Import community tools
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
-
+from ...tools.memory import MemoryToolkit
 
 logger = logging.getLogger(__name__)
 
 
 def get_toolkits():
     core_toolkits = [
-        # PromptToolkit.toolkit_config_schema(),
-        # DatasourcesToolkit.toolkit_config_schema(),
-        # ApplicationToolkit.toolkit_config_schema(),
         ArtifactToolkit.toolkit_config_schema(),
-
+        MemoryToolkit.toolkit_config_schema(),
         VectorStoreToolkit.toolkit_config_schema()
     ]
 
@@ -37,12 +34,7 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
     tools = []
 
     for tool in tools_list:
-        if tool['type'] == 'prompt':
-            prompts.append([
-                int(tool['settings']['prompt_id']),
-                int(tool['settings']['prompt_version_id'])
-            ])
-        elif tool['type'] == 'datasource':
+        if tool['type'] == 'datasource':
             tools.extend(DatasourcesToolkit.get_toolkit(
                 alita_client,
                 datasource_ids=[int(tool['settings']['datasource_id'])],
@@ -54,7 +46,6 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                 alita_client,
                 application_id=int(tool['settings']['application_id']),
                 application_version_id=int(tool['settings']['application_version_id']),
-                app_api_key=alita_client.auth_token,
                 selected_tools=[]
             ).get_tools())
         elif tool['type'] == 'application' and tool.get('agent_type', '') == 'pipeline':
@@ -67,15 +58,14 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                 selected_tools=[],
                 llm=llm
             ))
-
-
-
-
-
-
-
-
-        # ).get_tools()
+        elif tool['type'] == 'memory':
+            if memory_store is None:
+                raise ToolException(f"Memory store is not provided for memory tool: {tool.get('name', tool.get('toolkit_name', 'unknown'))}")
+            tools += MemoryToolkit.get_toolkit(
+                namespace=tool['settings'].get('namespace', str(tool['id'])),
+                store=memory_store,
+                toolkit_name=tool.get('toolkit_name', '')
+            ).get_tools()
         elif tool['type'] == 'artifact':
             tools.extend(ArtifactToolkit.get_toolkit(
                 client=alita_client,
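For reference, a tools-list entry that would take the new `memory` branch might look like the sketch below. The field values are placeholders inferred from the keys the branch reads (`settings.namespace`, `id`, `toolkit_name`); `alita_client`, `llm`, and `store` stand for objects constructed elsewhere.

```python
memory_tool_config = {
    "type": "memory",
    "id": 123,                               # fallback namespace source
    "toolkit_name": "scratchpad",
    "settings": {"namespace": "project-42"},
}

# Raises ToolException unless a langgraph BaseStore is passed as memory_store.
tools = get_tools([memory_tool_config], alita_client, llm, memory_store=store)
```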