alita-sdk 0.3.522__py3-none-any.whl → 0.3.528__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of alita-sdk might be problematic.

alita_sdk/runtime/clients/client.py

@@ -13,6 +13,7 @@ from langchain_core.messages import (
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
 from langchain_openai import OpenAIEmbeddings, ChatOpenAI
+from langchain_anthropic import ChatAnthropic
 
 from ..langchain.assistant import Assistant as LangChainAssistant
 # from ..llamaindex.assistant import Assistant as LLamaAssistant
@@ -219,21 +220,25 @@ class AlitaClient:
             request_timeout=self.model_timeout
         )
 
-    def get_llm(self, model_name: str, model_config: dict) -> ChatOpenAI:
+    def get_llm(self, model_name: str, model_config: dict):
         """
-        Get a ChatOpenAI model instance based on the model name and configuration.
+        Get a ChatOpenAI or ChatAnthropic model instance based on the model name and configuration.
 
         Args:
            model_name: Name of the model to retrieve
            model_config: Configuration parameters for the model
 
        Returns:
-           An instance of ChatOpenAI configured with the provided parameters.
+           An instance of ChatOpenAI or ChatAnthropic configured with the provided parameters.
        """
        if not model_name:
            raise ValueError("Model name must be provided")
 
-       logger.info(f"Creating ChatOpenAI model: {model_name} with config: {model_config}")
+       # Determine if this is an Anthropic model
+       model_name_lower = model_name.lower()
+       is_anthropic = "anthropic" in model_name_lower or "claude" in model_name_lower
+
+       logger.info(f"Creating {'ChatAnthropic' if is_anthropic else 'ChatOpenAI'} model: {model_name} with config: {model_config}")
 
        try:
            from tools import this # pylint: disable=E0401,C0415
@@ -256,25 +261,48 @@ class AlitaClient:
            # default number for a case when auto is selected for an agent
            llm_max_tokens = 4000
 
-       target_kwargs = {
-           "base_url": f"{self.base_url}{self.llm_path}",
-           "model": model_name,
-           "api_key": self.auth_token,
-           "streaming": model_config.get("streaming", True),
-           "stream_usage": model_config.get("stream_usage", True),
-           "max_tokens": llm_max_tokens,
-           "temperature": model_config.get("temperature"),
-           "reasoning_effort": model_config.get("reasoning_effort"),
-           "max_retries": model_config.get("max_retries", 3),
-           "seed": model_config.get("seed", None),
-           "openai_organization": str(self.project_id),
-       }
-
-       if use_responses_api:
-           target_kwargs["use_responses_api"] = True
+       if is_anthropic:
+           # ChatAnthropic configuration
+           target_kwargs = {
+               "base_url": f"{self.base_url}{self.llm_path}",
+               "model": model_name,
+               "api_key": self.auth_token,
+               "streaming": model_config.get("streaming", True),
+               "max_tokens": llm_max_tokens,
+               "effort": model_config.get("reasoning_effort"),
+               "temperature": model_config.get("temperature"),
+               "max_retries": model_config.get("max_retries", 3),
+               "default_headers": {"openai-organization": str(self.project_id)},
+           }
 
-       return ChatOpenAI(**target_kwargs)
+           # Add http_client if provided
+           if "http_client" in model_config:
+               target_kwargs["http_client"] = model_config["http_client"]
+
+           llm = ChatAnthropic(**target_kwargs)
+       else:
+           # ChatOpenAI configuration
+           target_kwargs = {
+               "base_url": f"{self.base_url}{self.llm_path}",
+               "model": model_name,
+               "api_key": self.auth_token,
+               "streaming": model_config.get("streaming", True),
+               "stream_usage": model_config.get("stream_usage", True),
+               "max_tokens": llm_max_tokens,
+               "temperature": model_config.get("temperature"),
+               "reasoning_effort": model_config.get("reasoning_effort"),
+               "max_retries": model_config.get("max_retries", 3),
+               "seed": model_config.get("seed", None),
+               "openai_organization": str(self.project_id),
+           }
 
+           if use_responses_api:
+               target_kwargs["use_responses_api"] = True
+
+           llm = ChatOpenAI(**target_kwargs)
+
+       return llm
+
    def generate_image(self,
                       prompt: str,
                       n: int = 1,
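
Taken together, the client.py hunks make get_llm dual-provider: the model name alone decides which client class is constructed, and both clients point at the same Alita gateway URL. A minimal sketch of just the routing decision, with placeholder values for the gateway URL and token (in the SDK these come from AlitaClient state):

from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI

def pick_chat_class(model_name: str):
    # Same substring test the new get_llm uses to route between providers.
    name = model_name.lower()
    return ChatAnthropic if ("anthropic" in name or "claude" in name) else ChatOpenAI

llm_cls = pick_chat_class("claude-3-5-sonnet-20241022-v2:0")
llm = llm_cls(
    base_url="https://example.invalid/llm",  # placeholder gateway URL
    model="claude-3-5-sonnet-20241022-v2:0",
    api_key="dummy-token",                   # placeholder auth token
    max_tokens=4000,
)
print(type(llm).__name__)  # ChatAnthropic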
alita_sdk/runtime/clients/sandbox_client.py

@@ -6,7 +6,6 @@ import requests
 from typing import Any
 from json import dumps
 import chardet
-from ...tools import instantiate_toolkit
 
 logger = logging.getLogger(__name__)
 
@@ -49,27 +48,6 @@ class SandboxArtifact:
            return f'{data['error']}. {data['content'] if data['content'] else ''}'
        detected = chardet.detect(data)
        return data
-       # TODO: add proper handling for binary files (images, pdf, etc.) for sandbox
-       # if detected['encoding'] is not None:
-       #     try:
-       #         return data.decode(detected['encoding'])
-       #     except Exception:
-       #         logger.error('Error while default encoding')
-       #         return parse_file_content(file_name=artifact_name,
-       #                                   file_content=data,
-       #                                   is_capture_image=is_capture_image,
-       #                                   page_number=page_number,
-       #                                   sheet_name=sheet_name,
-       #                                   excel_by_sheets=excel_by_sheets,
-       #                                   llm=llm)
-       # else:
-       #     return parse_file_content(file_name=artifact_name,
-       #                               file_content=data,
-       #                               is_capture_image=is_capture_image,
-       #                               page_number=page_number,
-       #                               sheet_name=sheet_name,
-       #                               excel_by_sheets=excel_by_sheets,
-       #                               llm=llm)
 
    def delete(self, artifact_name: str, bucket_name=None):
        if not bucket_name:
@@ -185,19 +163,6 @@ class SandboxClient:
        data = requests.get(url, headers=self.headers, verify=False).json()
        return data
 
-   def toolkit(self, toolkit_id: int):
-       url = f"{self.base_url}{self.api_path}/tool/prompt_lib/{self.project_id}/{toolkit_id}"
-       response = requests.get(url, headers=self.headers, verify=False)
-       if not response.ok:
-           raise ValueError(f"Failed to fetch toolkit {toolkit_id}: {response.text}")
-
-       tool_data = response.json()
-       if 'settings' not in tool_data:
-           tool_data['settings'] = {}
-       tool_data['settings']['alita'] = self
-
-       return instantiate_toolkit(tool_data)
-
    def get_list_of_apps(self):
        apps = []
        limit = 10
alita_sdk/runtime/langchain/langraph_agent.py

@@ -937,7 +937,7 @@ class LangGraphAgentRunnable(CompiledStateGraph):
                "with no accompanying text."
            )
 
-       logging.info(f"Input: {thread_id} - {input}")
+       logger.info(f"Input: {thread_id} - {input}")
        try:
            if self.checkpointer and self.checkpointer.get_tuple(config):
                if config.pop("should_continue", False):
alita_sdk/runtime/tools/llm.py

@@ -14,6 +14,35 @@ from ..langchain.utils import create_pydantic_model, propagate_the_input_mapping
 logger = logging.getLogger(__name__)
 
 
+# def _is_thinking_model(llm_client: Any) -> bool:
+#     """
+#     Check if a model uses extended thinking capability by reading cached metadata.
+
+#     Thinking models require special message formatting where assistant messages
+#     must start with thinking blocks before tool_use blocks.
+
+#     This function reads the `_supports_reasoning` attribute that should be set
+#     when the LLM client is created (by checking the model's supports_reasoning field).
+
+#     Args:
+#         llm_client: LLM client instance with optional _supports_reasoning attribute
+
+#     Returns:
+#         True if the model is a thinking model, False otherwise
+#     """
+#     if not llm_client:
+#         return False
+
+#     # Check if supports_reasoning was cached on the client
+#     supports_reasoning = getattr(llm_client, '_supports_reasoning', False)
+
+#     if supports_reasoning:
+#         model_name = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'unknown')
+#         logger.debug(f"Model '{model_name}' is a thinking/reasoning model (cached from API metadata)")
+
+#     return supports_reasoning
+
+
 class LLMNode(BaseTool):
    """Enhanced LLM node with chat history and tool binding support"""
 
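Although _is_thinking_model ships commented out, its intended contract is clear from the docstring: whichever code constructs the LLM client caches the model's supports_reasoning metadata as a _supports_reasoning attribute, and this helper only reads it back. A runnable sketch of that contract (SimpleNamespace is a hypothetical stand-in for a real client object):

from types import SimpleNamespace

def _is_thinking_model(llm_client) -> bool:
    # Mirrors the commented-out helper: a missing flag means "not a thinking model".
    if not llm_client:
        return False
    return getattr(llm_client, "_supports_reasoning", False)

# Hypothetical client with the metadata cached at construction time.
client = SimpleNamespace(model_name="claude-3-7-sonnet", _supports_reasoning=True)
print(_is_thinking_model(client))             # True
print(_is_thinking_model(SimpleNamespace()))  # False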
@@ -242,7 +271,12 @@ class LLMNode(BaseTool):
            return {"messages": new_messages}
 
        except Exception as e:
+           # Enhanced error logging with model diagnostics
+           model_info = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'unknown')
            logger.error(f"Error in LLM Node: {format_exc()}")
+           logger.error(f"Model being used: {model_info}")
+           logger.error(f"Error type: {type(e).__name__}")
+
            error_msg = f"Error: {e}"
            new_messages = messages + [AIMessage(content=error_msg)]
            return {"messages": new_messages}
@@ -403,6 +437,20 @@ class LLMNode(BaseTool):
    async def __perform_tool_calling(self, completion, messages, llm_client, config):
        # Handle iterative tool-calling and execution
        logger.info(f"__perform_tool_calling called with {len(completion.tool_calls) if hasattr(completion, 'tool_calls') else 0} tool calls")
+
+       # Check if this is a thinking model - they require special message handling
+       # model_name = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', '')
+       # if _is_thinking_model(llm_client):
+       #     logger.warning(
+       #         f"⚠️ THINKING/REASONING MODEL DETECTED: '{model_name}'\n"
+       #         f"Tool execution with thinking models may fail due to message format requirements.\n"
+       #         f"Thinking models require 'thinking_blocks' to be preserved between turns, which this "
+       #         f"framework cannot do.\n"
+       #         f"Recommendation: Use standard model variants (e.g., claude-3-5-sonnet-20241022-v2:0) "
+       #         f"instead of thinking/reasoning variants for tool calling.\n"
+       #         f"See: https://docs.litellm.ai/docs/reasoning_content"
+       #     )
+
        new_messages = messages + [completion]
        iteration = 0
 
@@ -511,6 +559,29 @@ class LLMNode(BaseTool):
                except Exception as e:
                    error_str = str(e).lower()
 
+                   # Check for thinking model message format errors
+                   is_thinking_format_error = any(indicator in error_str for indicator in [
+                       'expected `thinking`',
+                       'expected `redacted_thinking`',
+                       'thinking block',
+                       'must start with a thinking block',
+                       'when `thinking` is enabled'
+                   ])
+
+                   # Check for non-recoverable errors that should fail immediately
+                   # These indicate configuration or permission issues, not content size issues
+                   is_non_recoverable = any(indicator in error_str for indicator in [
+                       'model identifier is invalid',
+                       'authentication',
+                       'unauthorized',
+                       'access denied',
+                       'permission denied',
+                       'invalid credentials',
+                       'api key',
+                       'quota exceeded',
+                       'rate limit'
+                   ])
+
                    # Check for context window / token limit errors
                    is_context_error = any(indicator in error_str for indicator in [
                        'context window', 'context_window', 'token limit', 'too long',
@@ -518,17 +589,76 @@ class LLMNode(BaseTool):
                        'contextwindowexceedederror', 'max_tokens', 'content too large'
                    ])
 
-                   # Check for Bedrock/Claude output limit errors
-                   # These often manifest as "model identifier is invalid" when output exceeds limits
+                   # Check for Bedrock/Claude output limit errors (recoverable by truncation)
                    is_output_limit_error = any(indicator in error_str for indicator in [
-                       'model identifier is invalid',
-                       'bedrockexception',
                        'output token',
                        'response too large',
                        'max_tokens_to_sample',
-                       'output_token_limit'
+                       'output_token_limit',
+                       'output exceeds'
                    ])
 
+                   # Handle thinking model format errors
+                   if is_thinking_format_error:
+                       model_info = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'unknown')
+                       logger.error(f"Thinking model message format error during tool execution iteration {iteration}")
+                       logger.error(f"Model: {model_info}")
+                       logger.error(f"Error details: {e}")
+
+                       error_msg = (
+                           f"⚠️ THINKING MODEL FORMAT ERROR\n\n"
+                           f"The model '{model_info}' uses extended thinking and requires specific message formatting.\n\n"
+                           f"**Issue**: When 'thinking' is enabled, assistant messages must start with thinking blocks "
+                           f"before any tool_use blocks. This framework cannot preserve thinking_blocks during iterative "
+                           f"tool execution.\n\n"
+                           f"**Root Cause**: Anthropic's Messages API is stateless - clients must manually preserve and "
+                           f"resend thinking_blocks with every tool response. LangChain's message abstraction doesn't "
+                           f"include thinking_blocks, so they are lost between turns.\n\n"
+                           f"**Solutions**:\n"
+                           f"1. **Recommended**: Use non-thinking model variants:\n"
+                           f"   - claude-3-5-sonnet-20241022-v2:0 (instead of thinking variants)\n"
+                           f"   - anthropic.claude-3-5-sonnet-20241022-v2:0 (Bedrock)\n"
+                           f"2. Disable extended thinking: Set reasoning_effort=None or remove thinking config\n"
+                           f"3. Use LiteLLM directly with modify_params=True (handles thinking_blocks automatically)\n"
+                           f"4. Avoid tool calling with thinking models (use for reasoning tasks only)\n\n"
+                           f"**Technical Context**: {str(e)}\n\n"
+                           f"References:\n"
+                           f"- https://docs.claude.com/en/docs/build-with-claude/extended-thinking\n"
+                           f"- https://docs.litellm.ai/docs/reasoning_content (See 'Tool Calling with thinking' section)"
+                       )
+                       new_messages.append(AIMessage(content=error_msg))
+                       raise ValueError(error_msg)
+
+                   # Handle non-recoverable errors immediately
+                   if is_non_recoverable:
+                       # Enhanced error logging with model information for better diagnostics
+                       model_info = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'unknown')
+                       logger.error(f"Non-recoverable error during tool execution iteration {iteration}")
+                       logger.error(f"Model: {model_info}")
+                       logger.error(f"Error details: {e}")
+                       logger.error(f"Error type: {type(e).__name__}")
+
+                       # Provide detailed error message for debugging
+                       error_details = []
+                       error_details.append(f"Model configuration error: {str(e)}")
+                       error_details.append(f"Model identifier: {model_info}")
+
+                       # Check for common Bedrock model ID issues
+                       if 'model identifier is invalid' in error_str:
+                           error_details.append("\nPossible causes:")
+                           error_details.append("1. Model not available in the configured AWS region")
+                           error_details.append("2. Model not enabled in your AWS Bedrock account")
+                           error_details.append("3. LiteLLM model group prefix not stripped (check for prefixes like '1_')")
+                           error_details.append("4. Incorrect model version or typo in model name")
+                           error_details.append("\nPlease verify:")
+                           error_details.append("- AWS Bedrock console shows this model as available")
+                           error_details.append("- LiteLLM router configuration is correct")
+                           error_details.append("- Model ID doesn't contain unexpected prefixes")
+
+                       error_msg = "\n".join(error_details)
+                       new_messages.append(AIMessage(content=error_msg))
+                       break
+
                    if is_context_error or is_output_limit_error:
                        error_type = "output limit" if is_output_limit_error else "context window"
                        logger.warning(f"{error_type.title()} exceeded during tool execution iteration {iteration}")
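
All four predicates are plain substring tests over the lowercased exception text, and the handlers run in a fixed order: thinking-format first, then non-recoverable, then context/output-limit. A minimal sketch of that dispatch under those assumptions, with the indicator lists abbreviated from the hunks above:

def classify_tool_error(e: Exception) -> str:
    # Abbreviated indicator lists; the full sets are in the diff above.
    error_str = str(e).lower()
    thinking = ('expected `thinking`', 'must start with a thinking block')
    non_recoverable = ('model identifier is invalid', 'unauthorized', 'rate limit')
    recoverable = ('context window', 'token limit', 'output token', 'response too large')
    # Order matters: the first matching class wins, mirroring the diff's flow.
    if any(s in error_str for s in thinking):
        return 'thinking-format'     # raised to the caller
    if any(s in error_str for s in non_recoverable):
        return 'non-recoverable'     # loop breaks with a diagnostic message
    if any(s in error_str for s in recoverable):
        return 'truncate-and-retry'  # oversized tool result is truncated
    return 'other'

print(classify_tool_error(ValueError("The provided model identifier is invalid")))  # non-recoverable
print(classify_tool_error(ValueError("input exceeds the context window")))          # truncate-and-retry

Note that this ordering is also a behavior change: "model identifier is invalid" and "bedrockexception" used to be treated as recoverable output-limit errors, but the new code fails fast on them instead of retrying.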
@@ -595,9 +725,27 @@ class LLMNode(BaseTool):
                            )
                            new_messages[last_tool_msg_idx] = truncated_msg
 
-                           logger.info(f"Truncated large tool result from '{last_tool_name}' and continuing")
-                           # Continue to next iteration - the model will see the truncation message
-                           continue
+                           logger.info(f"Truncated large tool result from '{last_tool_name}' and retrying LLM call")
+
+                           # CRITICAL FIX: Call LLM again with truncated message to get fresh completion
+                           # This prevents duplicate tool_call_ids that occur when we continue with
+                           # the same current_completion that still has the original tool_calls
+                           try:
+                               current_completion = llm_client.invoke(new_messages, config=config)
+                               new_messages.append(current_completion)
+
+                               # Continue to process any new tool calls in the fresh completion
+                               if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
+                                   logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls after truncation")
+                                   continue
+                               else:
+                                   logger.info("LLM completed after truncation without requesting more tools")
+                                   break
+                           except Exception as retry_error:
+                               logger.error(f"Error retrying LLM after truncation: {retry_error}")
+                               error_msg = f"Failed to retry after truncation: {str(retry_error)}"
+                               new_messages.append(AIMessage(content=error_msg))
+                               break
                        else:
                            # Couldn't find tool message, add error and break
                            if is_output_limit_error:
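
The replaced `continue` is the heart of this hunk: previously the loop kept the stale completion, so its tool_calls were executed a second time and produced duplicate tool_call_ids. The new path re-invokes the model on the truncated history and branches on the fresh completion. A condensed, runnable sketch of that decision (_StubLLM is a hypothetical stand-in for a LangChain chat model):

from langchain_core.messages import AIMessage

def retry_after_truncation(llm_client, new_messages, config=None):
    # Mirrors the new recovery path: after truncating the oversized ToolMessage,
    # ask the model again so the next iteration sees fresh tool_call_ids.
    try:
        current_completion = llm_client.invoke(new_messages, config=config)
        new_messages.append(current_completion)
        if getattr(current_completion, "tool_calls", None):
            return "continue"  # process the new tool calls
        return "break"         # model finished without requesting more tools
    except Exception as retry_error:
        new_messages.append(AIMessage(content=f"Failed to retry after truncation: {retry_error}"))
        return "break"

class _StubLLM:
    # Hypothetical stand-in: always answers with a plain completion, no tool calls.
    def invoke(self, messages, config=None):
        return AIMessage(content="done")

print(retry_after_truncation(_StubLLM(), []))  # break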
dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.522
+Version: 0.3.528
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
dist-info/RECORD

@@ -93,18 +93,18 @@ alita_sdk/configurations/zephyr_essential.py,sha256=TiZedsBlfIDroflipvoqxjJeEWPo
 alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
 alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
 alita_sdk/runtime/clients/artifact.py,sha256=7C1e9RtftqOJd3Mo5gNDnBuYg1Z9xTqjxmfdWeJH5Cc,4014
-alita_sdk/runtime/clients/client.py,sha256=8DGrto3RcURZQE2qUTFJ3boDat_YvedqmbTTHpRXyDg,52069
+alita_sdk/runtime/clients/client.py,sha256=LUQ-pH3tmp_f4uh_8ss0KP1c-wyr34ZJMT9Qyonpg6Y,53394
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/mcp_discovery.py,sha256=aFJ0wYQ8EAmXa9qLUusHZfQXkNec1wbgkqHdVeSFX-g,11697
 alita_sdk/runtime/clients/mcp_manager.py,sha256=DRbqiO761l7UgOdv_keHbD2g0oZodtPHejpArXYZIoE,9050
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
-alita_sdk/runtime/clients/sandbox_client.py,sha256=4GLoCFZXtTYKM3SFMJAfFO7QNE38c1V7DI1b88uOySY,17227
+alita_sdk/runtime/clients/sandbox_client.py,sha256=z8emjrMe9hkEKOnr19qDxRzXG1ug-XJXBKQzGtjXEAs,15390
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/langchain/assistant.py,sha256=yVTosONjQYUHbzhtTWG53odpXbWCQLLe18oaqniqvx8,18447
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=tbVA-OPRDzEMspO9raOj_jb57Yt-TUYulG6FOXCmu78,17150
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
-alita_sdk/runtime/langchain/langraph_agent.py,sha256=4rWJ6tQXIzVHgF9zzDL3kiR67rvBAxrJxpglJ6Z_2w0,59364
+alita_sdk/runtime/langchain/langraph_agent.py,sha256=vQ5HPzgngNgZ6amco7PPx0Gn0TdaW8dvYVexPT68bB8,59363
 alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
 alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
 alita_sdk/runtime/langchain/store_manager.py,sha256=i8Fl11IXJhrBXq1F1ukEVln57B1IBe-tqSUvfUmBV4A,2218
@@ -177,7 +177,7 @@ alita_sdk/runtime/tools/function.py,sha256=HSMO1nBTRKMvWC_m0M8TOLGaZ2k_7ksPgLqzu
 alita_sdk/runtime/tools/graph.py,sha256=7jImBBSEdP5Mjnn2keOiyUwdGDFhEXLUrgUiugO3mgA,3503
 alita_sdk/runtime/tools/image_generation.py,sha256=Kls9D_ke_SK7xmVr7I9SlQcAEBJc86gf66haN0qIj9k,7469
 alita_sdk/runtime/tools/indexer_tool.py,sha256=whSLPevB4WD6dhh2JDXEivDmTvbjiMV1MrPl9cz5eLA,4375
-alita_sdk/runtime/tools/llm.py,sha256=jcOwqdYH8VIlnxn775bxYJ2haFUB5GbySUYD4WPlA5o,35700
+alita_sdk/runtime/tools/llm.py,sha256=lLDqsOef6-zakNcZdd9_5iJyZ3-wBXunPEH0h9qsnyY,44774
 alita_sdk/runtime/tools/loop.py,sha256=uds0WhZvwMxDVFI6MZHrcmMle637cQfBNg682iLxoJA,8335
 alita_sdk/runtime/tools/loop_output.py,sha256=U4hO9PCQgWlXwOq6jdmCGbegtAxGAPXObSxZQ3z38uk,8069
 alita_sdk/runtime/tools/mcp_inspect_tool.py,sha256=38X8euaxDbEGjcfp6ElvExZalpZun6QEr6ZEW4nU5pQ,11496
@@ -427,9 +427,9 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=gZTEanHf9pRCiZaKobF4Wbm33wUxxXoIjOr544TcXas,2903
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.522.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.522.dist-info/METADATA,sha256=wfMuqtLZNiFVBOEI0aUg4OV61OhqNGmnpp5j4LNKxo8,24266
-alita_sdk-0.3.522.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.522.dist-info/entry_points.txt,sha256=VijN0h4alp1WXm8tfS3P7vuGxN4a5RZqHjXAoEIBZnI,49
-alita_sdk-0.3.522.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.522.dist-info/RECORD,,
+alita_sdk-0.3.528.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.528.dist-info/METADATA,sha256=eTapGprJ7IEFsGKfI9BndpjRPD-eh0oTLK2yS7cLvlw,24266
+alita_sdk-0.3.528.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.528.dist-info/entry_points.txt,sha256=VijN0h4alp1WXm8tfS3P7vuGxN4a5RZqHjXAoEIBZnI,49
+alita_sdk-0.3.528.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.528.dist-info/RECORD,,