alita-sdk 0.3.181__py3-none-any.whl → 0.3.183__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/clients/client.py +2 -0
- alita_sdk/runtime/langchain/assistant.py +132 -59
- alita_sdk/runtime/langchain/langraph_agent.py +66 -9
- alita_sdk/runtime/llms/alita.py +12 -5
- alita_sdk/runtime/tools/llm.py +260 -41
- alita_sdk/runtime/tools/mcp_server_tool.py +15 -0
- alita_sdk/runtime/utils/streamlit.py +4 -8
- alita_sdk/tools/__init__.py +1 -0
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +55 -3
- alita_sdk/tools/ado/work_item/ado_wrapper.py +10 -6
- alita_sdk/tools/carrier/api_wrapper.py +3 -0
- alita_sdk/tools/carrier/backend_tests_tool.py +101 -7
- alita_sdk/tools/carrier/carrier_sdk.py +4 -0
- alita_sdk/tools/slack/__init__.py +57 -0
- alita_sdk/tools/slack/api_wrapper.py +190 -0
- {alita_sdk-0.3.181.dist-info → alita_sdk-0.3.183.dist-info}/METADATA +3 -2
- {alita_sdk-0.3.181.dist-info → alita_sdk-0.3.183.dist-info}/RECORD +20 -19
- alita_sdk/runtime/langchain/agents/react_agent.py +0 -157
- {alita_sdk-0.3.181.dist-info → alita_sdk-0.3.183.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.181.dist-info → alita_sdk-0.3.183.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.181.dist-info → alita_sdk-0.3.183.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/langchain/agents/react_agent.py (deleted)
@@ -1,157 +0,0 @@
- import logging
- from typing import Any, Optional, Callable
- from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
- from langchain_core.tools import BaseTool
- from langgraph.prebuilt import create_react_agent
- from langgraph.checkpoint.memory import MemorySaver
-
- logger = logging.getLogger(__name__)
-
- def create_react_agent_with_auto_continue(
-     prompt: Any,
-     model: Any,
-     tools: list[BaseTool],
-     memory: Optional[Any] = None,
-     debug: bool = False
- ) -> Any:
-     """
-     Create a LangGraph React agent with auto-continue capability for when responses get truncated.
-     This provides better handling of length-limited responses compared to traditional AgentExecutor.
-     Uses simple in-memory checkpointing for auto-continue functionality.
-
-     Args:
-         prompt: The prompt template to use for the agent
-         model: The language model to use
-         tools: List of tools available to the agent
-         memory: Optional memory store for checkpointing (will create MemorySaver if None)
-         debug: Whether to enable debug mode
-
-     Returns:
-         A configured React agent with auto-continue capability
-
-     Note: Requires LangGraph 0.5.x or higher that supports post_model_hook.
-     """
-     # Use simple in-memory checkpointer for auto-continue functionality if not provided
-     if memory is None:
-         memory = MemorySaver()
-
-     # Set up parameters for the agent
-     kwargs = {
-         "prompt": prompt,
-         "model": model,
-         "tools": tools,
-         "checkpointer": memory,
-         "post_model_hook": _create_auto_continue_hook()  # Auto-continue hook
-     }
-
-     # Create the base React agent with langgraph's prebuilt function
-     base_agent = create_react_agent(**kwargs)
-
-     return base_agent
-
- def _create_auto_continue_hook() -> Callable:
-     """
-     Create a post-model hook for LangGraph 0.5.x that detects truncated responses
-     and adds continuation prompts.
-     This checks if the last AI message was truncated and automatically continues if needed.
-     """
-     MAX_CONTINUATIONS = 3  # Maximum number of auto-continuations allowed
-
-     def post_model_hook(state):
-         messages = state.get("messages", [])
-
-         # Count how many auto-continue messages we've already sent
-         continuation_count = sum(
-             1 for msg in messages
-             if isinstance(msg, HumanMessage) and
-             "continue your previous response" in msg.content.lower()
-         )
-
-         # Don't continue if we've reached the limit
-         if continuation_count >= MAX_CONTINUATIONS:
-             return state
-
-         # Check if the last message is from AI and was truncated
-         if messages and isinstance(messages[-1], AIMessage):
-             last_ai_message = messages[-1]
-
-             # Check for truncation indicators
-             is_truncated = (
-                 hasattr(last_ai_message, 'response_metadata') and
-                 last_ai_message.response_metadata.get('finish_reason') == 'length'
-             ) or (
-                 # Fallback: check if message seems to end abruptly
-                 last_ai_message.content and
-                 not last_ai_message.content.rstrip().endswith(('.', '!', '?', ':', ';'))
-             )
-
-             # Add continuation request if truncated
-             if is_truncated:
-                 logger.info("Detected truncated response, adding continuation request")
-                 new_messages = messages.copy()
-                 new_messages.append(HumanMessage(content="Continue your previous response from where you left off"))
-                 return {"messages": new_messages}
-
-         return state
-
-     return post_model_hook
-
- def get_langgraph_agent_with_auto_continue(
-     prompt: Any,
-     model: Any,
-     tools: list[BaseTool],
-     memory: Optional[Any] = None,
-     debug: bool = False
- ) -> Any:
-     """
-     Create a LangGraph agent with auto-continue capability for when responses get truncated.
-     This provides better handling of length-limited responses compared to traditional AgentExecutor.
-     Uses simple in-memory checkpointing for auto-continue functionality.
-
-     Args:
-         prompt: The prompt template to use for the agent
-         model: The language model to use
-         tools: List of tools available to the agent
-         memory: Optional memory store for checkpointing (will create MemorySaver if None)
-         debug: Whether to enable debug mode
-
-     Returns:
-         A configured LangGraphAgentRunnable with auto-continue capability
-
-     Note: Requires LangGraph 0.5.x or higher that supports post_model_hook.
-     """
-     from ...langchain.langraph_agent import LangGraphAgentRunnable
-
-     # Use simple in-memory checkpointer for auto-continue functionality if not provided
-     if memory is None:
-         memory = MemorySaver()
-
-     # Create the base React agent with auto-continue capability
-     base_agent = create_react_agent_with_auto_continue(
-         prompt=prompt,
-         model=model,
-         tools=tools,
-         memory=memory,
-         debug=debug
-     )
-
-     # Wrap the base agent in our custom LangGraphAgentRunnable to handle input properly
-     # This ensures that our invoke() input handling logic is applied
-     agent = LangGraphAgentRunnable(
-         builder=base_agent.builder,
-         config_type=base_agent.builder.config_schema,
-         nodes=base_agent.nodes,
-         channels=base_agent.channels,
-         input_channels=base_agent.input_channels,
-         stream_mode=base_agent.stream_mode,
-         output_channels=base_agent.output_channels,
-         stream_channels=base_agent.stream_channels,
-         checkpointer=memory,
-         interrupt_before_nodes=base_agent.interrupt_before_nodes,
-         interrupt_after_nodes=base_agent.interrupt_after_nodes,
-         debug=debug,
-         store=base_agent.store,
-         schema_to_mapper=base_agent.schema_to_mapper
-     )
-
-     return agent
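For context, here is a minimal sketch of how the removed helper could be driven (not part of the diff). It assumes alita-sdk 0.3.181 with LangGraph 0.5.x, and that the wrapper accepts standard LangGraph message input; the model provider, prompt text, and thread id below are illustrative placeholders, not values from this package.

    from langchain_core.messages import HumanMessage
    from langchain_openai import ChatOpenAI  # assumed provider; any LangChain chat model should work
    from alita_sdk.runtime.langchain.agents.react_agent import (
        get_langgraph_agent_with_auto_continue,  # removed in 0.3.183
    )

    # Build the wrapped agent; an empty tools list keeps the sketch self-contained,
    # and omitting `memory` lets the helper create its own MemorySaver.
    agent = get_langgraph_agent_with_auto_continue(
        prompt="You are a helpful assistant.",
        model=ChatOpenAI(model="gpt-4o-mini"),  # placeholder model
        tools=[],
    )

    # A thread_id is required because the agent is compiled with a checkpointer;
    # it is what lets the post-model hook accumulate continuation rounds per thread.
    result = agent.invoke(
        {"messages": [HumanMessage(content="Summarize this very long report ...")]},
        config={"configurable": {"thread_id": "demo-thread"}},
    )

As the removed code shows, the hook then re-prompts with "Continue your previous response from where you left off" whenever the last AI message reports finish_reason == 'length' or ends without terminal punctuation, capped at three continuation rounds.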