alita-sdk 0.3.178__py3-none-any.whl → 0.3.180__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
alita_sdk/runtime/langchain/assistant.py

@@ -7,10 +7,14 @@ from langchain.agents import (
     create_json_chat_agent)
 from langgraph.store.base import BaseStore
 
+# Note: Traditional LangChain agents (OpenAI, XML, JSON) do not support
+# checkpointing natively. Only LangGraph agents support checkpointing.
+# For checkpointing in traditional agents, you need to migrate to LangGraph.
+
 from .agents.xml_chat import create_xml_chat_agent
 from .langraph_agent import create_graph
 from langchain_core.messages import (
-    BaseMessage, SystemMessage, HumanMessage
+    BaseMessage, SystemMessage, HumanMessage, AIMessage
 )
 from langchain_core.prompts import MessagesPlaceholder
 from .constants import REACT_ADDON, REACT_VARS, XML_ADDON
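
The note added above is the rationale for the rest of this change: checkpointing in LangGraph belongs to the compiled graph, not to the agent loop, which is why the classic OpenAI/XML/JSON executors cannot offer it. A minimal illustrative sketch of native LangGraph checkpointing (not part of this package; it only assumes langgraph and langchain-core are installed):

```python
# Illustrative only: what "LangGraph agents support checkpointing" means.
from langchain_core.messages import AIMessage, HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, MessagesState, StateGraph


def call_model(state: MessagesState):
    # Placeholder node; a real graph would invoke an LLM here.
    return {"messages": [AIMessage(content="(model reply)")]}


builder = StateGraph(MessagesState)
builder.add_node("model", call_model)
builder.add_edge(START, "model")
builder.add_edge("model", END)

# The checkpointer is attached at compile time, so state is persisted per
# thread and later invocations can resume the same conversation.
graph = builder.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": "demo-thread"}}
graph.invoke({"messages": [HumanMessage(content="hello")]}, config)
```

Classic AgentExecutor-based agents have no equivalent hook, which is why enabling checkpointing below swaps the executor for a LangGraph agent.
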
@@ -29,7 +33,8 @@ class Assistant:
                  app_type: str = "openai",
                  tools: Optional[list] = [],
                  memory: Optional[Any] = None,
-                 store: Optional[BaseStore] = None):
+                 store: Optional[BaseStore] = None,
+                 use_checkpointing: bool = False):
 
         self.client = copy(client)
         self.client.max_tokens = data['llm_settings']['max_tokens']
@@ -42,6 +47,7 @@ class Assistant:
         self.app_type = app_type
         self.memory = memory
         self.store = store
+        self.use_checkpointing = use_checkpointing
 
         logger.debug("Data for agent creation: %s", data)
         logger.info("App type: %s", app_type)
@@ -118,12 +124,17 @@ class Assistant:
         if self.app_type == 'pipeline':
             return self.pipeline()
         elif self.app_type == 'openai':
+            # Check if checkpointing is enabled - if so, use LangGraph for auto-continue capability
+            if self.use_checkpointing:
+                return self.getLangGraphAgentWithAutoContinue()
             return self.getOpenAIToolsAgentExecutor()
         elif self.app_type == 'xml':
+            # Check if checkpointing is enabled - if so, use LangGraph for auto-continue capability
+            if self.use_checkpointing:
+                return self.getLangGraphAgentWithAutoContinue()
             return self.getXMLAgentExecutor()
         else:
-            self.tools = [EchoTool()] + self.tools
-            return self.getAgentExecutor()
+            return self.getLangGraphAgentWithAutoContinue()
 
     def _agent_executor(self, agent: Any):
         return AgentExecutor.from_agent_and_tools(agent=agent, tools=self.tools,
@@ -169,3 +180,84 @@ class Assistant:
 
     def predict(self, messages: list[BaseMessage]):
         return self.client.invoke(messages)
+
+    def getLangGraphAgentWithAutoContinue(self):
+        """
+        Create a LangGraph agent with auto-continue capability for when responses get truncated.
+        This provides better handling of length-limited responses compared to traditional AgentExecutor.
+        Uses simple in-memory checkpointing for auto-continue functionality.
+
+        Note: Requires LangGraph 0.5.x or higher that supports post_model_hook.
+        """
+
+        from langgraph.prebuilt import create_react_agent
+        from langgraph.checkpoint.memory import MemorySaver
+
+        # Use simple in-memory checkpointer for auto-continue functionality
+        memory = MemorySaver()
+
+        # Filter tools to only include BaseTool instances
+        simple_tools = [t for t in self.tools if isinstance(t, BaseTool)]
+
+        # Set up parameters for the agent
+        kwargs = {
+            "prompt": self.prompt,
+            "model": self.client,
+            "tools": simple_tools,
+            "checkpointer": memory,
+            "post_model_hook": self._create_auto_continue_hook()  # Auto-continue hook
+        }
+
+        # Create the agent with the new hook-based API
+        agent = create_react_agent(**kwargs)
+        return agent
+
+    # The state_modifier approach has been removed in favor of post_model_hook which
+    # is the recommended approach for LangGraph 0.5.x and higher
+
+    def _create_auto_continue_hook(self):
+        """
+        Create a post-model hook for LangGraph 0.5.x that detects truncated responses
+        and adds continuation prompts.
+        This checks if the last AI message was truncated and automatically continues if needed.
+        """
+        MAX_CONTINUATIONS = 3  # Maximum number of auto-continuations allowed
+
+        def post_model_hook(state):
+            messages = state.get("messages", [])
+
+            # Count how many auto-continue messages we've already sent
+            continuation_count = sum(
+                1 for msg in messages
+                if isinstance(msg, HumanMessage) and
+                "continue your previous response" in msg.content.lower()
+            )
+
+            # Don't continue if we've reached the limit
+            if continuation_count >= MAX_CONTINUATIONS:
+                return state
+
+            # Check if the last message is from AI and was truncated
+            if messages and isinstance(messages[-1], AIMessage):
+                last_ai_message = messages[-1]
+
+                # Check for truncation indicators
+                is_truncated = (
+                    hasattr(last_ai_message, 'response_metadata') and
+                    last_ai_message.response_metadata.get('finish_reason') == 'length'
+                ) or (
+                    # Fallback: check if message seems to end abruptly
+                    last_ai_message.content and
+                    not last_ai_message.content.rstrip().endswith(('.', '!', '?', ':', ';'))
+                )
+
+                # Add continuation request if truncated
+                if is_truncated:
+                    logger.info("Detected truncated response, adding continuation request")
+                    new_messages = messages.copy()
+                    new_messages.append(HumanMessage(content="Continue your previous response from where you left off"))
+                    return {"messages": new_messages}
+
+            return state
+
+        return post_model_hook
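
Taken together, the method above returns a compiled LangGraph graph rather than an AgentExecutor, so callers invoke it with a message state and a thread_id (required by the MemorySaver checkpointer). A hedged usage sketch, where `assistant` stands in for an Assistant constructed with use_checkpointing=True and the prompt text is invented:

```python
# Illustrative sketch, not part of the diff; `assistant` is assumed to be an
# Assistant instance built with use_checkpointing=True.
from langchain_core.messages import HumanMessage

agent = assistant.getLangGraphAgentWithAutoContinue()

# MemorySaver keys saved state on the configurable thread_id, so each
# conversation gets its own thread.
config = {"configurable": {"thread_id": "conversation-1"}}

result = agent.invoke(
    {"messages": [HumanMessage(content="Walk me through the indexer module")]},
    config,
)

# If the model's last reply looks truncated (finish_reason == "length", or an
# abrupt ending as a fallback), the post-model hook appends up to three
# "Continue your previous response..." requests before the run finishes, so
# the final state may contain more messages than were sent.
print(result["messages"][-1].content)
```
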
alita_sdk-0.3.180.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.178
+Version: 0.3.180
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
alita_sdk-0.3.180.dist-info/RECORD

@@ -17,7 +17,7 @@ alita_sdk/runtime/clients/client.py,sha256=6ezOJ92CSw6b2PVs4uFMQKQdp40uT1awoFEqW
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/assistant.py,sha256=QJEMiEOrFMJ4GpnK24U2pKFblrvdQpKFdfhZsI2wAUI,7507
+alita_sdk/runtime/langchain/assistant.py,sha256=eY36htHBvB7n0sW4cr79HZ-FsTh-rQIQga66th_Tpe0,11759
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
@@ -295,8 +295,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=UHVQUVqcBc3SZvDfO78HSuBzwAsRw
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=rq4jOb3lRW2GXvAguk4H1KinO5f-zpygzhBJf-E1Ucw,2773
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=iOMxyE7vOc_LwFB_nBMiSFXkNtvbptA4i-BrTlo7M0A,5854
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=IYUJoMFOMA70knLhLtAnuGoy3OK80RuqeQZ710oyIxE,3631
-alita_sdk-0.3.178.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.178.dist-info/METADATA,sha256=eWCrlvpO3Mjv3AQ7AMghklM9k8wTzrQN2k-jp5ymvGg,18753
-alita_sdk-0.3.178.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.178.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.178.dist-info/RECORD,,
+alita_sdk-0.3.180.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.180.dist-info/METADATA,sha256=1_Zfqgg2jYnpzujyCog3Z8_bNx07CjpO1ZjHGqlWzLE,18753
+alita_sdk-0.3.180.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.180.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.180.dist-info/RECORD,,