alita-sdk 0.3.178__py3-none-any.whl → 0.3.179__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/langchain/assistant.py +93 -4
- {alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/RECORD +6 -6
- {alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/langchain/assistant.py

```diff
@@ -7,10 +7,14 @@ from langchain.agents import (
     create_json_chat_agent)
 from langgraph.store.base import BaseStore
 
+# Note: Traditional LangChain agents (OpenAI, XML, JSON) do not support
+# checkpointing natively. Only LangGraph agents support checkpointing.
+# For checkpointing in traditional agents, you need to migrate to LangGraph.
+
 from .agents.xml_chat import create_xml_chat_agent
 from .langraph_agent import create_graph
 from langchain_core.messages import (
-    BaseMessage, SystemMessage, HumanMessage
+    BaseMessage, SystemMessage, HumanMessage, AIMessage
 )
 from langchain_core.prompts import MessagesPlaceholder
 from .constants import REACT_ADDON, REACT_VARS, XML_ADDON
```
```diff
@@ -29,7 +33,8 @@ class Assistant:
                  app_type: str = "openai",
                  tools: Optional[list] = [],
                  memory: Optional[Any] = None,
-                 store: Optional[BaseStore] = None):
+                 store: Optional[BaseStore] = None,
+                 use_checkpointing: bool = False):
 
         self.client = copy(client)
         self.client.max_tokens = data['llm_settings']['max_tokens']
```
```diff
@@ -42,6 +47,7 @@ class Assistant:
         self.app_type = app_type
         self.memory = memory
         self.store = store
+        self.use_checkpointing = use_checkpointing
 
         logger.debug("Data for agent creation: %s", data)
         logger.info("App type: %s", app_type)
```
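The new `use_checkpointing` flag is additive and defaults to the previous behavior. A hypothetical construction sketch follows; the parameters before `app_type` are outside this hunk, so the `client` and `data` keyword names are assumptions inferred from the constructor body (`self.client = copy(client)`, `data['llm_settings']`), and the argument values are placeholders:

```python
# Hypothetical instantiation sketch; `client` and `data` names are
# inferred from the constructor body, not from the visible signature.
assistant = Assistant(
    client=llm_client,        # assumption: a LangChain-compatible chat model client
    data=agent_definition,    # assumption: dict containing 'llm_settings'
    app_type="openai",
    tools=[],
    memory=None,
    store=None,
    use_checkpointing=True,   # new in 0.3.179: opt in to the LangGraph path
)
```

When the flag is set, the agent-factory routing shown in the next hunk prefers the LangGraph path over the traditional AgentExecutor.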
```diff
@@ -118,12 +124,17 @@ class Assistant:
         if self.app_type == 'pipeline':
             return self.pipeline()
         elif self.app_type == 'openai':
+            # Check if checkpointing is enabled - if so, use LangGraph for auto-continue capability
+            if self.use_checkpointing:
+                return self.getLangGraphAgentWithAutoContinue()
             return self.getOpenAIToolsAgentExecutor()
         elif self.app_type == 'xml':
+            # Check if checkpointing is enabled - if so, use LangGraph for auto-continue capability
+            if self.use_checkpointing:
+                return self.getLangGraphAgentWithAutoContinue()
             return self.getXMLAgentExecutor()
         else:
-            self.
-            return self.getAgentExecutor()
+            return self.getLangGraphAgentWithAutoContinue()
 
     def _agent_executor(self, agent: Any):
         return AgentExecutor.from_agent_and_tools(agent=agent, tools=self.tools,
```
```diff
@@ -169,3 +180,81 @@ class Assistant:
 
     def predict(self, messages: list[BaseMessage]):
         return self.client.invoke(messages)
+
+    def getLangGraphAgentWithAutoContinue(self):
+        """
+        Create a LangGraph agent with auto-continue capability for when responses get truncated.
+        This provides better handling of length-limited responses compared to traditional AgentExecutor.
+        Uses simple in-memory checkpointing for auto-continue functionality.
+        """
+        from langgraph.prebuilt import create_react_agent
+        from langgraph.checkpoint.memory import MemorySaver
+
+        # Use simple in-memory checkpointer for auto-continue functionality
+        # This ensures clean, predictable behavior for continuation logic
+        memory = MemorySaver()
+
+        # Filter tools to only include BaseTool instances
+        simple_tools = [t for t in self.tools if isinstance(t, BaseTool)]
+
+        # Create the agent with checkpointing enabled
+        agent = create_react_agent(
+            prompt=self.prompt,
+            model=self.client,
+            tools=simple_tools,
+            checkpointer=memory,
+            state_modifier=self._create_auto_continue_state_modifier()
+        )
+
+        return agent
+
+    def _create_auto_continue_state_modifier(self):
+        """
+        Create a state modifier that implements auto-continue logic.
+        This checks if the last AI response was truncated and automatically continues if needed.
+        Limits auto-continue to prevent infinite loops.
+        """
+        MAX_CONTINUATIONS = 3  # Maximum number of auto-continuations allowed
+
+        def auto_continue_modifier(state):
+            messages = state.get("messages", [])
+
+            # Count how many auto-continue messages we've already sent
+            continuation_count = sum(
+                1 for msg in messages
+                if isinstance(msg, HumanMessage) and
+                "continue your previous response" in msg.content.lower()
+            )
+
+            # Don't continue if we've reached the limit
+            if continuation_count >= MAX_CONTINUATIONS:
+                return state
+
+            # Check if the last AI message was truncated
+            if messages and isinstance(messages[-1], AIMessage):
+                last_ai_message = messages[-1]
+
+                # Check for truncation indicators
+                is_truncated = (
+                    hasattr(last_ai_message, 'response_metadata') and
+                    last_ai_message.response_metadata.get('finish_reason') == 'length'
+                ) or (
+                    # Fallback: check if message seems to end abruptly
+                    last_ai_message.content and
+                    not last_ai_message.content.rstrip().endswith(('.', '!', '?', ':', ';'))
+                )
+
+                # Add continuation logic
+                if is_truncated:
+                    # Add a human message to continue
+                    continuation_msg = HumanMessage(
+                        content="Continue your previous response from where you left off"
+                    )
+                    return {
+                        **state,
+                        "messages": messages + [continuation_msg]
+                    }
+
+            return state
+
+        return auto_continue_modifier
```
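Because `getLangGraphAgentWithAutoContinue` wires in a `MemorySaver` checkpointer, callers must pass a `thread_id` when invoking the compiled graph; that is how LangGraph keys saved state between turns. A minimal driver sketch, assuming an `Assistant` instance named `assistant` built with `use_checkpointing=True`:

```python
from langchain_core.messages import HumanMessage

# `assistant` is assumed to be an Assistant constructed with
# use_checkpointing=True (see the constructor hunk above).
agent = assistant.getLangGraphAgentWithAutoContinue()

# Checkpointers key persisted state by thread_id; reuse the same id to
# continue the same conversation across invocations.
config = {"configurable": {"thread_id": "session-1"}}

result = agent.invoke(
    {"messages": [HumanMessage(content="Summarize the 0.3.179 changes")]},
    config=config,
)

# The returned state holds the accumulated message history, including any
# auto-inserted "Continue your previous response..." turns.
for msg in result["messages"]:
    print(f"{type(msg).__name__}: {msg.content[:80]}")
```

Note that the punctuation-based fallback in `auto_continue_modifier` can misfire on replies that legitimately end without terminal punctuation (e.g. a code block), so the `MAX_CONTINUATIONS` cap is doing real work here.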
{alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/METADATA

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.178
+Version: 0.3.179
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
```
{alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/RECORD

```diff
@@ -17,7 +17,7 @@ alita_sdk/runtime/clients/client.py,sha256=6ezOJ92CSw6b2PVs4uFMQKQdp40uT1awoFEqW
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/assistant.py,sha256=
+alita_sdk/runtime/langchain/assistant.py,sha256=VE6Gq7O1ErJV0eTHgM-7TOxdATd62aFimUKprz92o-I,11526
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
```
```diff
@@ -295,8 +295,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=UHVQUVqcBc3SZvDfO78HSuBzwAsRw
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=rq4jOb3lRW2GXvAguk4H1KinO5f-zpygzhBJf-E1Ucw,2773
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=iOMxyE7vOc_LwFB_nBMiSFXkNtvbptA4i-BrTlo7M0A,5854
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=IYUJoMFOMA70knLhLtAnuGoy3OK80RuqeQZ710oyIxE,3631
-alita_sdk-0.3.
-alita_sdk-0.3.
-alita_sdk-0.3.
-alita_sdk-0.3.
-alita_sdk-0.3.
+alita_sdk-0.3.179.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.179.dist-info/METADATA,sha256=GkBo37E1OM9ej52nimuYYnsfys22QnRSuuR4rDutZgg,18753
+alita_sdk-0.3.179.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.179.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.179.dist-info/RECORD,,
```
{alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/WHEEL: file without changes

{alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/licenses/LICENSE: file without changes

{alita_sdk-0.3.178.dist-info → alita_sdk-0.3.179.dist-info}/top_level.txt: file without changes
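The sha256 values in RECORD are not hex digests: per PEP 376/427 they are urlsafe base64 encodings of the raw digest with the `=` padding stripped. A minimal sketch for checking an entry against an unpacked wheel; the relative file path is an assumption about where the wheel was extracted:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Compute a wheel RECORD digest: 'sha256=' + urlsafe base64 of the
    raw digest, with trailing '=' padding stripped (PEP 376/427 style)."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# Check the updated module against its RECORD entry above; the relative
# path assumes the 0.3.179 wheel has been unpacked into the current directory.
expected = "sha256=VE6Gq7O1ErJV0eTHgM-7TOxdATd62aFimUKprz92o-I"
assert record_hash("alita_sdk/runtime/langchain/assistant.py") == expected
```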