alita-sdk 0.3.528__py3-none-any.whl → 0.3.532__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their respective public registries, and is provided for informational purposes only.

Potentially problematic release.

@@ -44,6 +44,7 @@ class AlitaClient:
  self.base_url = base_url.rstrip('/')
  self.api_path = '/api/v1'
  self.llm_path = '/llm/v1'
+ self.allm_path = '/llm'
  self.project_id = project_id
  self.auth_token = auth_token
  self.headers = {
@@ -75,6 +76,10 @@ class AlitaClient:
  self.configurations: list = configurations or []
  self.model_timeout = kwargs.get('model_timeout', 120)
  self.model_image_generation = kwargs.get('model_image_generation')
+
+ # Cache for generated images to avoid token consumption
+ # This is used by image_generation and artifact toolkits to pass data via reference
+ self._generated_images_cache: Dict[str, Dict[str, Any]] = {}

  def get_mcp_toolkits(self):
  if user_id := self._get_real_user_id():
@@ -264,17 +269,32 @@ class AlitaClient:
  if is_anthropic:
  # ChatAnthropic configuration
  target_kwargs = {
- "base_url": f"{self.base_url}{self.llm_path}",
+ "base_url": f"{self.base_url}{self.allm_path}",
  "model": model_name,
  "api_key": self.auth_token,
  "streaming": model_config.get("streaming", True),
  "max_tokens": llm_max_tokens,
- "effort": model_config.get("reasoning_effort"),
  "temperature": model_config.get("temperature"),
  "max_retries": model_config.get("max_retries", 3),
- "default_headers": {"openai-organization": str(self.project_id)},
+ "default_headers": {"openai-organization": str(self.project_id),
+ "Authorization": f"Bearer {self.auth_token}"},
  }
-
+
+ # TODO: Check on ChatAnthropic client when they get "effort" support back
+ if model_config.get("reasoning_effort"):
+ if model_config["reasoning_effort"].lower() == "low":
+ target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 2048}
+ target_kwargs['temperature'] = 1
+ target_kwargs["max_tokens"] = 2048 + target_kwargs["max_tokens"]
+ elif model_config["reasoning_effort"].lower() == "medium":
+ target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 4096}
+ target_kwargs['temperature'] = 1
+ target_kwargs["max_tokens"] = 4096 + target_kwargs["max_tokens"]
+ elif model_config["reasoning_effort"].lower() == "high":
+ target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 9092}
+ target_kwargs['temperature'] = 1
+ target_kwargs["max_tokens"] = 9092 + target_kwargs["max_tokens"]
+
  # Add http_client if provided
  if "http_client" in model_config:
  target_kwargs["http_client"] = model_config["http_client"]
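Note on the reasoning_effort branch above: with the "effort" kwarg removed pending renewed ChatAnthropic support (per the TODO), the SDK now maps each effort level to a fixed Anthropic thinking budget, forces temperature to 1 (required when extended thinking is enabled), and pads max_tokens by the budget. A minimal sketch of that mapping, using the budget values from the diff; the helper name is hypothetical, not an SDK function:

    # Sketch of the effort -> thinking-budget mapping introduced above.
    # Budget values (2048/4096/9092) come from the diff; apply_reasoning_effort
    # is an illustrative name, not part of alita-sdk.
    THINKING_BUDGETS = {"low": 2048, "medium": 4096, "high": 9092}

    def apply_reasoning_effort(target_kwargs: dict, effort: str | None) -> dict:
        budget = THINKING_BUDGETS.get((effort or "").lower())
        if budget:
            target_kwargs["thinking"] = {"type": "enabled", "budget_tokens": budget}
            target_kwargs["temperature"] = 1  # extended thinking requires temperature=1
            target_kwargs["max_tokens"] += budget  # reserve headroom for thinking tokens
        return target_kwargs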
@@ -300,7 +320,6 @@ class AlitaClient:
  target_kwargs["use_responses_api"] = True

  llm = ChatOpenAI(**target_kwargs)
-
  return llm

  def generate_image(self,
@@ -1,3 +1,4 @@
+ import base64
  import hashlib
  import io
  import json
@@ -14,7 +15,7 @@ from pydantic import create_model, Field, model_validator
  from ...tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
  from ...tools.utils.available_tools_decorator import extend_with_parent_available_tools
  from ...tools.elitea_base import extend_with_file_operations, BaseCodeToolApiWrapper
- from ...runtime.utils.utils import IndexerKeywords
+ from ...runtime.utils.utils import IndexerKeywords, resolve_image_from_cache


  class ArtifactWrapper(NonCodeIndexerToolkit):
@@ -63,23 +64,30 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
  if was_modified:
  logging.warning(f"Filename sanitized: '{filename}' -> '{sanitized_filename}'")

+ # Auto-detect and extract base64 from image_url structures (from image_generation tool)
+ # Returns tuple: (processed_data, is_from_image_generation)
+ filedata, is_from_image_generation = self._extract_base64_if_needed(filedata)
+
  if sanitized_filename.endswith(".xlsx"):
  data = json.loads(filedata)
  filedata = self.create_xlsx_filedata(data)

  result = self.artifact.create(sanitized_filename, filedata, bucket_name)

- # Dispatch custom event for file creation
- dispatch_custom_event("file_modified", {
- "message": f"File '{filename}' created successfully",
- "filename": filename,
- "tool_name": "createFile",
- "toolkit": "artifact",
- "operation_type": "create",
- "meta": {
- "bucket": bucket_name or self.bucket
- }
- })
+ # Skip file_modified event for images from image_generation tool
+ # These are already tracked in the tool output and don't need duplicate events
+ if not is_from_image_generation:
+ # Dispatch custom event for file creation
+ dispatch_custom_event("file_modified", {
+ "message": f"File '{filename}' created successfully",
+ "filename": filename,
+ "tool_name": "createFile",
+ "toolkit": "artifact",
+ "operation_type": "create",
+ "meta": {
+ "bucket": bucket_name or self.bucket
+ }
+ })

  return result

@@ -109,6 +117,43 @@ class ArtifactWrapper(NonCodeIndexerToolkit):

  sanitized = sanitized_name + extension
  return sanitized, (sanitized != original)
+
+ def _extract_base64_if_needed(self, filedata: str) -> tuple[str | bytes, bool]:
+ """
+ Resolve cached_image_id references from cache and decode to binary data.
+
+ Requires JSON format with cached_image_id field: {"cached_image_id": "img_xxx"}
+ LLM must extract specific cached_image_id from generate_image response.
+
+ Returns:
+ tuple: (processed_data, is_from_image_generation)
+ - processed_data: Original filedata or resolved binary image data
+ - is_from_image_generation: True if data came from image_generation cache
+ """
+ if not filedata or not isinstance(filedata, str):
+ return filedata, False
+
+ # Require JSON format - fail fast if not JSON
+ if '{' not in filedata:
+ return filedata, False
+
+ try:
+ data = json.loads(filedata)
+ except json.JSONDecodeError:
+ # Not valid JSON, return as-is (regular file content)
+ return filedata, False
+
+ if not isinstance(data, dict):
+ return filedata, False
+
+ # Only accept direct cached_image_id format: {"cached_image_id": "img_xxx"}
+ # LLM must parse generate_image response and extract specific cached_image_id
+ if 'cached_image_id' in data:
+ binary_data = resolve_image_from_cache(self.alita, data['cached_image_id'])
+ return binary_data, True  # Mark as from image_generation
+
+ # If JSON doesn't have cached_image_id, treat as regular file content
+ return filedata, False

  def create_xlsx_filedata(self, data: dict[str, list[list]]) -> bytes:
  try:
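For reference, a quick sketch of how the new helper behaves, assuming wrapper is an ArtifactWrapper whose alita client holds a populated _generated_images_cache (the id below is made up):

    # A JSON reference is resolved through the client cache into raw image bytes:
    filedata, from_cache = wrapper._extract_base64_if_needed(
        '{"cached_image_id": "img_abc123def456"}')
    # -> (b'...decoded PNG bytes...', True)

    # Anything that is not a JSON object with cached_image_id passes through untouched:
    filedata, from_cache = wrapper._extract_base64_if_needed("plain text content")
    # -> ("plain text content", False)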
@@ -377,15 +422,19 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
  "createFile",
  filename=(str, Field(description="Filename")),
  filedata=(str, Field(description="""Stringified content of the file.
- Example for .xlsx filedata format:
- {
- "Sheet1":[
- ["Name", "Age", "City"],
- ["Alice", 25, "New York"],
- ["Bob", 30, "San Francisco"],
- ["Charlie", 35, "Los Angeles"]
- ]
- }
+
+ Supports three input formats:
+
+ 1. CACHED IMAGE REFERENCE (for generated/cached images):
+ Pass JSON with cached_image_id field: {"cached_image_id": "img_xxx"}
+ The tool will automatically resolve and decode the image from cache.
+ This is typically used when another tool returns an image reference.
+
+ 2. EXCEL FILES (.xlsx extension):
+ Pass JSON with sheet structure: {"Sheet1": [["Name", "Age"], ["Alice", 25], ["Bob", 30]]}
+
+ 3. TEXT/OTHER FILES:
+ Pass the plain text string directly.
  """)),
  bucket_name=bucket_name
  )
@@ -1,7 +1,9 @@
  """
  Image generation tool for Alita SDK.
  """
+ import json
  import logging
+ import uuid
  from typing import Optional, Type, Any, List, Literal
  from langchain_core.tools import BaseTool, BaseToolkit
  from pydantic import BaseModel, Field, create_model, ConfigDict
@@ -76,7 +78,12 @@ class ImageGenerationTool(BaseTool):
  """Tool for generating images using the Alita client."""

  name: str = "generate_image"
- description: str = "Generate images from text prompts using AI models"
+ description: str = (
+ "Generate images from text prompts using AI models. "
+ "Returns a JSON object with 'cached_image_id' field containing a reference to the generated image data. "
+ "The cached_image_id can be used to save or process the image. "
+ "The actual image data is stored temporarily and can be retrieved using the cached_image_id reference."
+ )
  args_schema: Type[BaseModel] = ImageGenerationInput
  alita_client: Any = None

@@ -85,10 +92,10 @@
  self.alita_client = client

  def _run(self, prompt: str, n: int = 1, size: str = "auto",
- quality: str = "auto", style: Optional[str] = None) -> list:
+ quality: str = "auto", style: Optional[str] = None) -> str:
  """Generate an image based on the provided parameters."""
  try:
- logger.info(f"Generating image with prompt: {prompt[:50]}...")
+ logger.debug(f"Generating image with prompt: {prompt[:50]}...")

  result = self.alita_client.generate_image(
  prompt=prompt,
@@ -98,57 +105,56 @@
  style=style
  )

- # Return multimodal content format for LLM consumption
+ # Return simple JSON structure with reference ID instead of full base64
  if 'data' in result:
  images = result['data']
- content_chunks = []

- # Add a text description of what was generated
- if len(images) == 1:
- content_chunks.append({
- "type": "text",
- "text": f"Generated image for prompt: '{prompt}'"
- })
- else:
- content_chunks.append({
- "type": "text",
- "text": f"Generated {len(images)} images for "
- f"prompt: '{prompt}'"
+ # Process all images with unified structure
+ images_list = []
+ for idx, image_data in enumerate(images, 1):
+ if not image_data.get('b64_json'):
+ continue
+
+ cached_image_id = f"img_{uuid.uuid4().hex[:12]}"
+
+ # Store in cache
+ if hasattr(self.alita_client, '_generated_images_cache'):
+ self.alita_client._generated_images_cache[cached_image_id] = {
+ 'base64_data': image_data['b64_json']
+ }
+ logger.debug(f"Stored generated image in cache with ID: {cached_image_id}")
+
+ images_list.append({
+ "image_number": idx,
+ "image_type": "png",
+ "cached_image_id": cached_image_id
  })

- # Add image content for each generated image
- for image_data in images:
- if image_data.get('url'):
- content_chunks.append({
- "type": "image_url",
- "image_url": {
- "url": image_data['url']
- }
- })
- elif image_data.get('b64_json'):
- content_chunks.append({
- "type": "image_url",
- "image_url": {
- "url": f"data:image/png;base64,"
- f"{image_data['b64_json']}"
- }
- })
+ if not images_list:
+ return json.dumps({
+ "status": "error",
+ "message": "No base64 image data found"
+ })

- return content_chunks
+ return json.dumps({
+ "status": "success",
+ "prompt": prompt,
+ "total_images": len(images_list),
+ "images": images_list
+ })

- # Fallback to text response if no images in result
- return [{
- "type": "text",
- "text": f"Image generation completed but no images "
- f"returned: {result}"
- }]
+ # Fallback to error response if no images in result
+ return json.dumps({
+ "status": "error",
+ "message": f"Image generation completed but no images returned: {result}"
+ })

  except Exception as e:
  logger.error(f"Error generating image: {e}")
- return [{
- "type": "text",
- "text": f"Error generating image: {str(e)}"
- }]
+ return json.dumps({
+ "status": "error",
+ "message": f"Error generating image: {str(e)}"
+ })

  async def _arun(self, prompt: str, n: int = 1, size: str = "256x256",
  quality: str = "auto",
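With this change a successful _run call returns a compact JSON string instead of multimodal content chunks; roughly the shape below (prompt and id values are illustrative):

    # Example of the JSON payload returned on success (values made up):
    {
        "status": "success",
        "prompt": "a red fox in the snow",
        "total_images": 1,
        "images": [
            {"image_number": 1, "image_type": "png", "cached_image_id": "img_3f9a1c2b7d4e"}
        ]
    }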
@@ -6,6 +6,8 @@ from typing import Any, Optional, List, Union, Literal
  from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
  from langchain_core.runnables import RunnableConfig
  from langchain_core.tools import BaseTool, ToolException
+ from langchain_core.exceptions import OutputParserException
+ from langchain_core.callbacks import dispatch_custom_event
  from pydantic import Field

  from ..langchain.constants import ELITEA_RS
@@ -219,20 +221,54 @@ class LLMNode(BaseTool):
  try:
  llm = self.__get_struct_output_model(llm_client, struct_model)
  completion = llm.invoke(messages, config=config)
- except ValueError as e:
+ except (ValueError, OutputParserException) as e:
  logger.error(f"Error invoking structured output model: {format_exc()}")
- logger.info("Attemping to fall back to json mode")
- # Fallback to regular LLM with JSON extraction
- completion = self.__get_struct_output_model(llm_client, struct_model,
- method="json_mode").invoke(messages, config=config)
+ logger.info("Attempting to fall back to json mode")
+ try:
+ # Fallback to regular LLM with JSON extraction
+ completion = self.__get_struct_output_model(llm_client, struct_model,
+ method="json_mode").invoke(messages, config=config)
+ except (ValueError, OutputParserException) as e2:
+ logger.error(f"json_mode fallback also failed: {format_exc()}")
+ logger.info("Attempting to fall back to function_calling")
+ # Final fallback to function_calling method
+ completion = self.__get_struct_output_model(llm_client, struct_model,
+ method="json_schema").invoke(messages, config=config)
  result = completion.model_dump()

  # Ensure messages are properly formatted
  if result.get('messages') and isinstance(result['messages'], list):
  result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
  else:
- result['messages'] = messages + [
- AIMessage(content=result.get(ELITEA_RS, '') or initial_completion.content)]
+ # Extract content from initial_completion, handling thinking blocks
+ fallback_content = result.get(ELITEA_RS, '')
+ if not fallback_content and initial_completion:
+ content_parts = self._extract_content_from_completion(initial_completion)
+ fallback_content = content_parts.get('text') or ''
+ thinking = content_parts.get('thinking')
+
+ # Dispatch thinking event if present
+ if thinking:
+ try:
+ model_name = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'LLM')
+ dispatch_custom_event(
+ name="thinking_step",
+ data={
+ "message": thinking,
+ "tool_name": f"LLM ({model_name})",
+ "toolkit": "reasoning",
+ },
+ config=config,
+ )
+ except Exception as e:
+ logger.warning(f"Failed to dispatch thinking event: {e}")
+
+ if not fallback_content:
+ # Final fallback to raw content
+ content = initial_completion.content
+ fallback_content = content if isinstance(content, str) else str(content)
+
+ result['messages'] = messages + [AIMessage(content=fallback_content)]

  return result
  else:
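The structured-output path above now degrades through three methods instead of failing after the first. A condensed, illustrative sketch of that control flow (not the SDK's code; note the final attempt is logged as "function_calling" but actually passes method="json_schema"):

    # Condensed fallback chain: default method -> json_mode -> json_schema,
    # re-raising only if the last attempt also fails.
    methods = [None, "json_mode", "json_schema"]
    for i, method in enumerate(methods):
        try:
            kwargs = {"method": method} if method else {}
            llm = self.__get_struct_output_model(llm_client, struct_model, **kwargs)
            completion = llm.invoke(messages, config=config)
            break
        except (ValueError, OutputParserException):
            if i == len(methods) - 1:
                raise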
@@ -250,24 +286,89 @@ class LLMNode(BaseTool):
  if self.output_variables:
  if self.output_variables[0] == 'messages':
  return output_msgs
- output_msgs[self.output_variables[0]] = current_completion.content if current_completion else None
+ # Extract content properly from thinking-enabled responses
+ if current_completion:
+ content_parts = self._extract_content_from_completion(current_completion)
+ text_content = content_parts.get('text')
+ thinking = content_parts.get('thinking')
+
+ # Dispatch thinking event if present
+ if thinking:
+ try:
+ model_name = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'LLM')
+ dispatch_custom_event(
+ name="thinking_step",
+ data={
+ "message": thinking,
+ "tool_name": f"LLM ({model_name})",
+ "toolkit": "reasoning",
+ },
+ config=config,
+ )
+ except Exception as e:
+ logger.warning(f"Failed to dispatch thinking event: {e}")
+
+ if text_content:
+ output_msgs[self.output_variables[0]] = text_content
+ else:
+ # Fallback to raw content
+ content = current_completion.content
+ output_msgs[self.output_variables[0]] = content if isinstance(content, str) else str(content)
+ else:
+ output_msgs[self.output_variables[0]] = None

  return output_msgs
  else:
- # Regular text response
- content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
+ # Regular text response - handle both simple strings and thinking-enabled responses
+ content_parts = self._extract_content_from_completion(completion)
+ thinking = content_parts.get('thinking')
+ text_content = content_parts.get('text') or ''
+
+ # Fallback to string representation if no content extracted
+ if not text_content:
+ if hasattr(completion, 'content'):
+ content = completion.content
+ text_content = content.strip() if isinstance(content, str) else str(content)
+ else:
+ text_content = str(completion)
+
+ # Dispatch thinking step event to chat if present
+ if thinking:
+ logger.info(f"Model thinking: {thinking[:200]}..." if len(thinking) > 200 else f"Model thinking: {thinking}")
+
+ # Dispatch custom event for thinking step to be displayed in chat
+ try:
+ model_name = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'LLM')
+ dispatch_custom_event(
+ name="thinking_step",
+ data={
+ "message": thinking,
+ "tool_name": f"LLM ({model_name})",
+ "toolkit": "reasoning",
+ },
+ config=config,
+ )
+ except Exception as e:
+ logger.warning(f"Failed to dispatch thinking event: {e}")
+
+ # Build the AI message with both thinking and text
+ # Store thinking in additional_kwargs for potential future use
+ ai_message_kwargs = {'content': text_content}
+ if thinking:
+ ai_message_kwargs['additional_kwargs'] = {'thinking': thinking}
+ ai_message = AIMessage(**ai_message_kwargs)

  # Try to extract JSON if output variables are specified (but exclude 'messages' which is handled separately)
  json_output_vars = [var for var in (self.output_variables or []) if var != 'messages']
  if json_output_vars:
  # set response to be the first output variable for non-structured output
- response_data = {json_output_vars[0]: content}
- new_messages = messages + [AIMessage(content=content)]
+ response_data = {json_output_vars[0]: text_content}
+ new_messages = messages + [ai_message]
  response_data['messages'] = new_messages
  return response_data

  # Simple text response (either no output variables or JSON parsing failed)
- new_messages = messages + [AIMessage(content=content)]
+ new_messages = messages + [ai_message]
  return {"messages": new_messages}

  except Exception as e:
@@ -285,6 +386,56 @@ class LLMNode(BaseTool):
  # Legacy support for old interface
  return self.invoke(kwargs, **kwargs)

+ @staticmethod
+ def _extract_content_from_completion(completion) -> dict:
+ """Extract thinking and text content from LLM completion.
+
+ Handles Anthropic's extended thinking format where content is a list
+ of blocks with types: 'thinking' and 'text'.
+
+ Args:
+ completion: LLM completion object with content attribute
+
+ Returns:
+ dict with 'thinking' and 'text' keys
+ """
+ result = {'thinking': None, 'text': None}
+
+ if not hasattr(completion, 'content'):
+ return result
+
+ content = completion.content
+
+ # Handle list of content blocks (Anthropic extended thinking format)
+ if isinstance(content, list):
+ thinking_blocks = []
+ text_blocks = []
+
+ for block in content:
+ if isinstance(block, dict):
+ block_type = block.get('type', '')
+ if block_type == 'thinking':
+ thinking_blocks.append(block.get('thinking', ''))
+ elif block_type == 'text':
+ text_blocks.append(block.get('text', ''))
+ elif hasattr(block, 'type'):
+ # Handle object format
+ if block.type == 'thinking':
+ thinking_blocks.append(getattr(block, 'thinking', ''))
+ elif block.type == 'text':
+ text_blocks.append(getattr(block, 'text', ''))
+
+ if thinking_blocks:
+ result['thinking'] = '\n\n'.join(thinking_blocks)
+ if text_blocks:
+ result['text'] = '\n\n'.join(text_blocks)
+
+ # Handle simple string content
+ elif isinstance(content, str):
+ result['text'] = content
+
+ return result
+
  def _run_async_in_sync_context(self, coro):
  """Run async coroutine from sync context.

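For context, an Anthropic extended-thinking completion carries its content as a list of typed blocks, which the helper above splits apart. A small illustrative example (block text made up):

    # Example content list from an extended-thinking response (values made up):
    completion_content = [
        {"type": "thinking", "thinking": "The user wants a short summary, so..."},
        {"type": "text", "text": "Here is the summary you asked for."},
    ]
    # _extract_content_from_completion returns:
    # {'thinking': 'The user wants a short summary, so...',
    #  'text': 'Here is the summary you asked for.'}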
@@ -313,7 +313,8 @@ class AlitaStreamlitCallback(BaseCallbackHandler):
  if self.debug:
  log.debug("on_llm_end(%s, %s)", response, kwargs)
  llm_run_id = str(run_id)
- if self.callback_state.get(llm_run_id):
+ # Check if callback_state exists and is not None before accessing
+ if self.callback_state is not None and self.callback_state.get(llm_run_id):
  status_widget = self.callback_state[llm_run_id]
  self._safe_streamlit_call(
  status_widget.update,
@@ -1,5 +1,8 @@
+ import base64
+ import logging
  import re
  from enum import Enum
+ from typing import Any

  # DEPRECATED: Tool names no longer use prefixes
  # Kept for backward compatibility only
@@ -32,3 +35,34 @@ def clean_node_str(s: str) -> str:
  """Cleans a node string by removing all non-alphanumeric characters except underscores and spaces."""
  cleaned_string = re.sub(r'[^\w\s]', '', s)
  return cleaned_string
+
+
+ def resolve_image_from_cache(client: Any, cached_image_id: str) -> bytes:
+ """
+ Resolve cached_image_id from client's image cache and return decoded binary data.
+
+ Args:
+ client: AlitaClient instance with _generated_images_cache attribute
+ cached_image_id: The cached image ID to resolve
+
+ Returns:
+ bytes: Decoded binary image data
+
+ Raises:
+ ValueError: If cached_image_id not found or decoding fails
+ """
+ cache = getattr(client, '_generated_images_cache', {})
+
+ if cached_image_id not in cache:
+ raise ValueError(f"Image reference '{cached_image_id}' not found. The image may have expired.")
+
+ cached_data = cache[cached_image_id]
+ base64_data = cached_data.get('base64_data', '')
+ logging.debug(f"Resolved cached_image_id '{cached_image_id}' from cache (length: {len(base64_data)} chars)")
+ # Decode base64 to binary data for image files
+ try:
+ binary_data = base64.b64decode(base64_data)
+ logging.debug(f"Decoded base64 to binary data ({len(binary_data)} bytes)")
+ return binary_data
+ except Exception as e:
+ raise ValueError(f"Failed to decode image data for '{cached_image_id}': {e}")
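A minimal round-trip through the new helper, assuming a client object with a populated cache (the class and id below are made up for illustration):

    import base64

    class FakeClient:
        # Stand-in for AlitaClient; only the cache attribute matters here.
        _generated_images_cache = {
            "img_abc123def456": {"base64_data": base64.b64encode(b"\x89PNG\r\n").decode()}
        }

    png_bytes = resolve_image_from_cache(FakeClient(), "img_abc123def456")  # -> b'\x89PNG\r\n'
    resolve_image_from_cache(FakeClient(), "img_missing")  # raises ValueError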
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: alita_sdk
- Version: 0.3.528
+ Version: 0.3.532
  Summary: SDK for building langchain agents using resources from Alita
  Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
  License-Expression: Apache-2.0
@@ -31,7 +31,7 @@ Requires-Dist: langchain_core<0.4.0,>=0.3.76; extra == "runtime"
  Requires-Dist: langchain<0.4.0,>=0.3.22; extra == "runtime"
  Requires-Dist: langchain_community<0.4.0,>=0.3.7; extra == "runtime"
  Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "runtime"
- Requires-Dist: langchain-anthropic<0.4.0,>=0.3.10; extra == "runtime"
+ Requires-Dist: langchain-anthropic<0.4.0,>=0.3.20; extra == "runtime"
  Requires-Dist: anthropic>=0.57.0; extra == "runtime"
  Requires-Dist: langgraph<0.5,>=0.4.8; extra == "runtime"
  Requires-Dist: langgraph-prebuilt==0.5.2; extra == "runtime"
@@ -93,7 +93,7 @@ alita_sdk/configurations/zephyr_essential.py,sha256=TiZedsBlfIDroflipvoqxjJeEWPo
  alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
  alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
  alita_sdk/runtime/clients/artifact.py,sha256=7C1e9RtftqOJd3Mo5gNDnBuYg1Z9xTqjxmfdWeJH5Cc,4014
- alita_sdk/runtime/clients/client.py,sha256=LUQ-pH3tmp_f4uh_8ss0KP1c-wyr34ZJMT9Qyonpg6Y,53394
+ alita_sdk/runtime/clients/client.py,sha256=qsXM5wfQ6BaT44sH9wW3M4YkbFUK7WGLcWT8U0SHWGA,54748
  alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
  alita_sdk/runtime/clients/mcp_discovery.py,sha256=aFJ0wYQ8EAmXa9qLUusHZfQXkNec1wbgkqHdVeSFX-g,11697
  alita_sdk/runtime/clients/mcp_manager.py,sha256=DRbqiO761l7UgOdv_keHbD2g0oZodtPHejpArXYZIoE,9050
@@ -170,14 +170,14 @@ alita_sdk/runtime/toolkits/vectorstore.py,sha256=H-HQsHhLm-vQWS3kvwkh-OHrOWKuylB
  alita_sdk/runtime/tools/__init__.py,sha256=Fx7iHqkzA90-KfjdcUUzMUI_7kDarjuTsSpSzOW2pN0,568
  alita_sdk/runtime/tools/agent.py,sha256=m98QxOHwnCRTT9j18Olbb5UPS8-ZGeQaGiUyZJSyFck,3162
  alita_sdk/runtime/tools/application.py,sha256=RCGe-mRfj8372gTFkEX2xBvcYhw7IKdU1t50lXaBPOY,3701
- alita_sdk/runtime/tools/artifact.py,sha256=1ZeqgHwDToFr98SX4aUWwvr5iVltdpWjooRd0euO7pg,21081
+ alita_sdk/runtime/tools/artifact.py,sha256=wZ6nPracF-SWkH52YtZWs2pePQavIDutUZ6BQpr5THU,23625
  alita_sdk/runtime/tools/datasource.py,sha256=pvbaSfI-ThQQnjHG-QhYNSTYRnZB0rYtZFpjCfpzxYI,2443
  alita_sdk/runtime/tools/echo.py,sha256=spw9eCweXzixJqHnZofHE1yWiSUa04L4VKycf3KCEaM,486
  alita_sdk/runtime/tools/function.py,sha256=HSMO1nBTRKMvWC_m0M8TOLGaZ2k_7ksPgLqzuRh6kV4,7083
  alita_sdk/runtime/tools/graph.py,sha256=7jImBBSEdP5Mjnn2keOiyUwdGDFhEXLUrgUiugO3mgA,3503
- alita_sdk/runtime/tools/image_generation.py,sha256=Kls9D_ke_SK7xmVr7I9SlQcAEBJc86gf66haN0qIj9k,7469
+ alita_sdk/runtime/tools/image_generation.py,sha256=waxxFIAgmh9-COcljL9uZ7e_s7EL9OWveUxYk0ulEUM,7855
  alita_sdk/runtime/tools/indexer_tool.py,sha256=whSLPevB4WD6dhh2JDXEivDmTvbjiMV1MrPl9cz5eLA,4375
- alita_sdk/runtime/tools/llm.py,sha256=lLDqsOef6-zakNcZdd9_5iJyZ3-wBXunPEH0h9qsnyY,44774
+ alita_sdk/runtime/tools/llm.py,sha256=j2d7Pd0TUMYCGNSD7B440N7gpbzuJVVshftRlqe8Jgw,52911
  alita_sdk/runtime/tools/loop.py,sha256=uds0WhZvwMxDVFI6MZHrcmMle637cQfBNg682iLxoJA,8335
  alita_sdk/runtime/tools/loop_output.py,sha256=U4hO9PCQgWlXwOq6jdmCGbegtAxGAPXObSxZQ3z38uk,8069
  alita_sdk/runtime/tools/mcp_inspect_tool.py,sha256=38X8euaxDbEGjcfp6ElvExZalpZun6QEr6ZEW4nU5pQ,11496
@@ -193,7 +193,7 @@ alita_sdk/runtime/tools/vectorstore_base.py,sha256=GUO7Gxgy4GKTttsOrsPQTUb_I5EDe
  alita_sdk/runtime/tools/planning/__init__.py,sha256=15eWTtz4oMB5vnKsLEFPW7lVY7y1Fxk3edo2bNf0ooE,821
  alita_sdk/runtime/tools/planning/models.py,sha256=bcwfjEnDTqirTT9bjHEDF8o3UYIAD8IqiqrZsca8gfw,8816
  alita_sdk/runtime/tools/planning/wrapper.py,sha256=om-4f3qMzkqBcBmINQ469IykBubm_UwJ-WZsEchehto,22412
- alita_sdk/runtime/utils/AlitaCallback.py,sha256=t2KYBrLLjjpw79KWJJo9PLjwRj4PZWCc3PbQ-6FXfLk,12020
+ alita_sdk/runtime/utils/AlitaCallback.py,sha256=G-UU30P_Q9jiCr7eBZUDVRZ7Z6qlBPX5f0Cvt5tx528,12130
  alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
  alita_sdk/runtime/utils/evaluate.py,sha256=iM1P8gzBLHTuSCe85_Ng_h30m52hFuGuhNXJ7kB1tgI,1872
@@ -206,7 +206,7 @@ alita_sdk/runtime/utils/save_dataframe.py,sha256=i-E1wp-t4wb17Zq3nA3xYwgSILjoXNi
  alita_sdk/runtime/utils/streamlit.py,sha256=0TotNKnvMPHuwBdhMEpM5DhIedQQa1AUz9BlmXFBhAU,107179
  alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7rUxp2MRR4tmYR8,5136
  alita_sdk/runtime/utils/toolkit_utils.py,sha256=g1Au_nzJgde2NW732GACZGSIQOt7o0mjAbrRxG6GVwA,6579
- alita_sdk/runtime/utils/utils.py,sha256=6XkmoWjG_ZCIycU4qPhUSr9EdTrknhWbedbqE1fk1iU,1128
+ alita_sdk/runtime/utils/utils.py,sha256=d0RLiKfBnobC3PrEFPvZt3uUx3Jie2rR32Fp-3hkWCU,2380
  alita_sdk/tools/__init__.py,sha256=jzj502O3yO40cjs37Uzqcbd6fG3pFmoU1TLw1-j4_3M,13011
  alita_sdk/tools/base_indexer_toolkit.py,sha256=AXygnaQZFEEeq6kkJbWIzUF1i31HoWdL7yidcDy_iKk,34305
  alita_sdk/tools/code_indexer_toolkit.py,sha256=4uQHnv7sHzECmOWbeqoVPT4prt_hv91gYxWxvvRdOjg,9219
@@ -427,9 +427,9 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
  alita_sdk/tools/zephyr_squad/__init__.py,sha256=gZTEanHf9pRCiZaKobF4Wbm33wUxxXoIjOr544TcXas,2903
  alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
  alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
- alita_sdk-0.3.528.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- alita_sdk-0.3.528.dist-info/METADATA,sha256=eTapGprJ7IEFsGKfI9BndpjRPD-eh0oTLK2yS7cLvlw,24266
- alita_sdk-0.3.528.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- alita_sdk-0.3.528.dist-info/entry_points.txt,sha256=VijN0h4alp1WXm8tfS3P7vuGxN4a5RZqHjXAoEIBZnI,49
- alita_sdk-0.3.528.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
- alita_sdk-0.3.528.dist-info/RECORD,,
+ alita_sdk-0.3.532.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ alita_sdk-0.3.532.dist-info/METADATA,sha256=bAsh58nb6kyLbv_sI2BbOdxFiYfqGcmyhJrOpnlXo70,24266
+ alita_sdk-0.3.532.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ alita_sdk-0.3.532.dist-info/entry_points.txt,sha256=VijN0h4alp1WXm8tfS3P7vuGxN4a5RZqHjXAoEIBZnI,49
+ alita_sdk-0.3.532.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+ alita_sdk-0.3.532.dist-info/RECORD,,