alita-sdk 0.3.528__py3-none-any.whl → 0.3.554__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (46)
  1. alita_sdk/community/__init__.py +8 -4
  2. alita_sdk/configurations/__init__.py +1 -0
  3. alita_sdk/configurations/openapi.py +111 -0
  4. alita_sdk/runtime/clients/client.py +185 -10
  5. alita_sdk/runtime/langchain/langraph_agent.py +2 -2
  6. alita_sdk/runtime/langchain/utils.py +46 -0
  7. alita_sdk/runtime/skills/__init__.py +91 -0
  8. alita_sdk/runtime/skills/callbacks.py +498 -0
  9. alita_sdk/runtime/skills/discovery.py +540 -0
  10. alita_sdk/runtime/skills/executor.py +610 -0
  11. alita_sdk/runtime/skills/input_builder.py +371 -0
  12. alita_sdk/runtime/skills/models.py +330 -0
  13. alita_sdk/runtime/skills/registry.py +355 -0
  14. alita_sdk/runtime/skills/skill_runner.py +330 -0
  15. alita_sdk/runtime/toolkits/__init__.py +2 -0
  16. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  17. alita_sdk/runtime/toolkits/tools.py +76 -9
  18. alita_sdk/runtime/tools/__init__.py +3 -1
  19. alita_sdk/runtime/tools/artifact.py +70 -21
  20. alita_sdk/runtime/tools/image_generation.py +50 -44
  21. alita_sdk/runtime/tools/llm.py +363 -44
  22. alita_sdk/runtime/tools/loop.py +3 -1
  23. alita_sdk/runtime/tools/loop_output.py +3 -1
  24. alita_sdk/runtime/tools/skill_router.py +776 -0
  25. alita_sdk/runtime/tools/tool.py +3 -1
  26. alita_sdk/runtime/tools/vectorstore.py +7 -2
  27. alita_sdk/runtime/tools/vectorstore_base.py +7 -2
  28. alita_sdk/runtime/utils/AlitaCallback.py +2 -1
  29. alita_sdk/runtime/utils/utils.py +34 -0
  30. alita_sdk/tools/__init__.py +41 -1
  31. alita_sdk/tools/ado/work_item/ado_wrapper.py +33 -2
  32. alita_sdk/tools/base_indexer_toolkit.py +36 -24
  33. alita_sdk/tools/confluence/api_wrapper.py +5 -6
  34. alita_sdk/tools/confluence/loader.py +4 -2
  35. alita_sdk/tools/openapi/__init__.py +280 -120
  36. alita_sdk/tools/openapi/api_wrapper.py +883 -0
  37. alita_sdk/tools/openapi/tool.py +20 -0
  38. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  39. alita_sdk/tools/servicenow/__init__.py +9 -9
  40. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  41. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/METADATA +2 -2
  42. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/RECORD +46 -33
  43. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/WHEEL +0 -0
  44. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/entry_points.txt +0 -0
  45. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/licenses/LICENSE +0 -0
  46. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/tools/image_generation.py

@@ -1,7 +1,9 @@
 """
 Image generation tool for Alita SDK.
 """
+import json
 import logging
+import uuid
 from typing import Optional, Type, Any, List, Literal
 from langchain_core.tools import BaseTool, BaseToolkit
 from pydantic import BaseModel, Field, create_model, ConfigDict
@@ -76,7 +78,12 @@ class ImageGenerationTool(BaseTool):
     """Tool for generating images using the Alita client."""

     name: str = "generate_image"
-    description: str = "Generate images from text prompts using AI models"
+    description: str = (
+        "Generate images from text prompts using AI models. "
+        "Returns a JSON object with 'cached_image_id' field containing a reference to the generated image data. "
+        "The cached_image_id can be used to save or process the image. "
+        "The actual image data is stored temporarily and can be retrieved using the cached_image_id reference."
+    )
     args_schema: Type[BaseModel] = ImageGenerationInput
     alita_client: Any = None

@@ -85,10 +92,10 @@
         self.alita_client = client

     def _run(self, prompt: str, n: int = 1, size: str = "auto",
-             quality: str = "auto", style: Optional[str] = None) -> list:
+             quality: str = "auto", style: Optional[str] = None) -> str:
         """Generate an image based on the provided parameters."""
         try:
-            logger.info(f"Generating image with prompt: {prompt[:50]}...")
+            logger.debug(f"Generating image with prompt: {prompt[:50]}...")

             result = self.alita_client.generate_image(
                 prompt=prompt,
@@ -98,57 +105,56 @@
                 style=style
             )

-            # Return multimodal content format for LLM consumption
+            # Return simple JSON structure with reference ID instead of full base64
             if 'data' in result:
                 images = result['data']
-                content_chunks = []

-                # Add a text description of what was generated
-                if len(images) == 1:
-                    content_chunks.append({
-                        "type": "text",
-                        "text": f"Generated image for prompt: '{prompt}'"
-                    })
-                else:
-                    content_chunks.append({
-                        "type": "text",
-                        "text": f"Generated {len(images)} images for "
-                                f"prompt: '{prompt}'"
+                # Process all images with unified structure
+                images_list = []
+                for idx, image_data in enumerate(images, 1):
+                    if not image_data.get('b64_json'):
+                        continue
+
+                    cached_image_id = f"img_{uuid.uuid4().hex[:12]}"
+
+                    # Store in cache
+                    if hasattr(self.alita_client, '_generated_images_cache'):
+                        self.alita_client._generated_images_cache[cached_image_id] = {
+                            'base64_data': image_data['b64_json']
+                        }
+                        logger.debug(f"Stored generated image in cache with ID: {cached_image_id}")
+
+                    images_list.append({
+                        "image_number": idx,
+                        "image_type": "png",
+                        "cached_image_id": cached_image_id
                     })

-                # Add image content for each generated image
-                for image_data in images:
-                    if image_data.get('url'):
-                        content_chunks.append({
-                            "type": "image_url",
-                            "image_url": {
-                                "url": image_data['url']
-                            }
-                        })
-                    elif image_data.get('b64_json'):
-                        content_chunks.append({
-                            "type": "image_url",
-                            "image_url": {
-                                "url": f"data:image/png;base64,"
-                                       f"{image_data['b64_json']}"
-                            }
-                        })
+                if not images_list:
+                    return json.dumps({
+                        "status": "error",
+                        "message": "No base64 image data found"
+                    })

-                return content_chunks
+                return json.dumps({
+                    "status": "success",
+                    "prompt": prompt,
+                    "total_images": len(images_list),
+                    "images": images_list
+                })

-            # Fallback to text response if no images in result
-            return [{
-                "type": "text",
-                "text": f"Image generation completed but no images "
-                        f"returned: {result}"
-            }]
+            # Fallback to error response if no images in result
+            return json.dumps({
+                "status": "error",
+                "message": f"Image generation completed but no images returned: {result}"
+            })

         except Exception as e:
             logger.error(f"Error generating image: {e}")
-            return [{
-                "type": "text",
-                "text": f"Error generating image: {str(e)}"
-            }]
+            return json.dumps({
+                "status": "error",
+                "message": f"Error generating image: {str(e)}"
+            })

     async def _arun(self, prompt: str, n: int = 1, size: str = "256x256",
                     quality: str = "auto",
alita_sdk/runtime/tools/llm.py

@@ -6,6 +6,7 @@ from typing import Any, Optional, List, Union, Literal
 from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
+from langchain_core.callbacks import dispatch_custom_event
 from pydantic import Field

 from ..langchain.constants import ELITEA_RS
@@ -42,6 +43,17 @@ logger = logging.getLogger(__name__)

 # return supports_reasoning

+JSON_INSTRUCTION_TEMPLATE = (
+    "\n\n**IMPORTANT: You MUST respond with ONLY a valid JSON object.**\n\n"
+    "Required JSON fields:\n{field_descriptions}\n\n"
+    "Example format:\n"
+    "{{\n{example_fields}\n}}\n\n"
+    "Rules:\n"
+    "1. Output ONLY the JSON object - no markdown, no explanations, no extra text\n"
+    "2. Ensure all required fields are present\n"
+    "3. Use proper JSON syntax with double quotes for strings\n"
+    "4. Do not wrap the JSON in code blocks or backticks"
+)

 class LLMNode(BaseTool):
     """Enhanced LLM node with chat history and tool binding support"""
@@ -65,6 +77,221 @@
     steps_limit: Optional[int] = Field(default=25, description='Maximum steps for tool execution')
     tool_execution_timeout: Optional[int] = Field(default=900, description='Timeout (seconds) for tool execution. Default is 15 minutes.')

+    def _prepare_structured_output_params(self) -> dict:
+        """
+        Prepare structured output parameters from structured_output_dict.
+
+        Returns:
+            Dictionary with parameter definitions for creating Pydantic model
+        """
+        struct_params = {
+            key: {
+                "type": 'list[str]' if 'list' in value else value,
+                "description": ""
+            }
+            for key, value in (self.structured_output_dict or {}).items()
+        }
+        # Add default output field for proper response to user
+        struct_params[ELITEA_RS] = {
+            'description': 'final output to user (summarized output from LLM)',
+            'type': 'str',
+            "default": None
+        }
+        return struct_params
+
+    def _invoke_with_structured_output(self, llm_client: Any, messages: List, struct_model: Any, config: RunnableConfig):
+        """
+        Invoke LLM with structured output, handling tool calls if present.
+
+        Args:
+            llm_client: LLM client instance
+            messages: List of conversation messages
+            struct_model: Pydantic model for structured output
+            config: Runnable configuration
+
+        Returns:
+            Tuple of (completion, initial_completion, final_messages)
+        """
+        initial_completion = llm_client.invoke(messages, config=config)
+
+        if hasattr(initial_completion, 'tool_calls') and initial_completion.tool_calls:
+            # Handle tool calls first, then apply structured output
+            new_messages, _ = self._run_async_in_sync_context(
+                self.__perform_tool_calling(initial_completion, messages, llm_client, config)
+            )
+            llm = self.__get_struct_output_model(llm_client, struct_model)
+            completion = llm.invoke(new_messages, config=config)
+            return completion, initial_completion, new_messages
+        else:
+            # Direct structured output without tool calls
+            llm = self.__get_struct_output_model(llm_client, struct_model)
+            completion = llm.invoke(messages, config=config)
+            return completion, initial_completion, messages
+
+    def _build_json_instruction(self, struct_model: Any) -> str:
+        """
+        Build JSON instruction message for fallback handling.
+
+        Args:
+            struct_model: Pydantic model with field definitions
+
+        Returns:
+            Formatted JSON instruction string
+        """
+        field_descriptions = []
+        for name, field in struct_model.model_fields.items():
+            field_type = field.annotation.__name__ if hasattr(field.annotation, '__name__') else str(field.annotation)
+            field_desc = field.description or field_type
+            field_descriptions.append(f" - {name} ({field_type}): {field_desc}")
+
+        example_fields = ",\n".join([
+            f' "{k}": <{field.annotation.__name__ if hasattr(field.annotation, "__name__") else "value"}>'
+            for k, field in struct_model.model_fields.items()
+        ])
+
+        return JSON_INSTRUCTION_TEMPLATE.format(
+            field_descriptions="\n".join(field_descriptions),
+            example_fields=example_fields
+        )
+
+    def _create_fallback_completion(self, content: str, struct_model: Any) -> Any:
+        """
+        Create a fallback completion object when JSON parsing fails.
+
+        Args:
+            content: Plain text content from LLM
+            struct_model: Pydantic model to construct
+
+        Returns:
+            Pydantic model instance with fallback values
+        """
+        result_dict = {}
+        for k, field in struct_model.model_fields.items():
+            if k == ELITEA_RS:
+                result_dict[k] = content
+            elif field.is_required():
+                # Set default values for required fields based on type
+                result_dict[k] = field.default if field.default is not None else None
+            else:
+                result_dict[k] = field.default
+        return struct_model.model_construct(**result_dict)
+
+    def _handle_structured_output_fallback(self, llm_client: Any, messages: List, struct_model: Any,
+                                           config: RunnableConfig, original_error: Exception) -> Any:
+        """
+        Handle structured output fallback through multiple strategies.
+
+        Tries fallback methods in order:
+        1. json_mode with explicit instructions
+        2. function_calling method
+        3. Plain text with JSON extraction
+
+        Args:
+            llm_client: LLM client instance
+            messages: Original conversation messages
+            struct_model: Pydantic model for structured output
+            config: Runnable configuration
+            original_error: The original ValueError that triggered fallback
+
+        Returns:
+            Completion with structured output (best effort)
+
+        Raises:
+            Propagates exceptions from LLM invocation
+        """
+        logger.error(f"Error invoking structured output model: {format_exc()}")
+        logger.info("Attempting to fall back to json mode")
+
+        # Build JSON instruction once
+        json_instruction = self._build_json_instruction(struct_model)
+
+        # Add instruction to messages
+        modified_messages = messages.copy()
+        if modified_messages and isinstance(modified_messages[-1], HumanMessage):
+            modified_messages[-1] = HumanMessage(
+                content=modified_messages[-1].content + json_instruction
+            )
+        else:
+            modified_messages.append(HumanMessage(content=json_instruction))
+
+        # Try json_mode with explicit instructions
+        try:
+            completion = self.__get_struct_output_model(
+                llm_client, struct_model, method="json_mode"
+            ).invoke(modified_messages, config=config)
+            return completion
+        except Exception as json_mode_error:
+            logger.warning(f"json_mode also failed: {json_mode_error}")
+            logger.info("Falling back to function_calling method")

+        # Try function_calling as a third fallback
+        try:
+            completion = self.__get_struct_output_model(
+                llm_client, struct_model, method="function_calling"
+            ).invoke(modified_messages, config=config)
+            return completion
+        except Exception as function_calling_error:
+            logger.error(f"function_calling also failed: {function_calling_error}")
+            logger.info("Final fallback: using plain LLM response")
+
+        # Last resort: get plain text response and wrap in structure
+        plain_completion = llm_client.invoke(modified_messages, config=config)
+        content = plain_completion.content.strip() if hasattr(plain_completion, 'content') else str(plain_completion)
+
+        # Try to extract JSON from the response
+        import json
+        import re
+
+        json_match = re.search(r'\{.*\}', content, re.DOTALL)
+        if json_match:
+            try:
+                parsed_json = json.loads(json_match.group(0))
+                # Validate it has expected fields and wrap in pydantic model
+                completion = struct_model(**parsed_json)
+                return completion
+            except (json.JSONDecodeError, Exception) as parse_error:
+                logger.warning(f"Could not parse extracted JSON: {parse_error}")
+                return self._create_fallback_completion(content, struct_model)
+        else:
+            # No JSON found, create response with content in elitea_response
+            return self._create_fallback_completion(content, struct_model)
+
+    def _format_structured_output_result(self, result: dict, messages: List, initial_completion: Any) -> dict:
+        """
+        Format structured output result with properly formatted messages.
+
+        Args:
+            result: Result dictionary from model_dump()
+            messages: Original conversation messages
+            initial_completion: Initial completion before tool calls
+
+        Returns:
+            Formatted result dictionary with messages
+        """
+        # Ensure messages are properly formatted
+        if result.get('messages') and isinstance(result['messages'], list):
+            result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
+        else:
+            # Extract content from initial_completion, handling thinking blocks
+            fallback_content = result.get(ELITEA_RS, '')
+            if not fallback_content and initial_completion:
+                content_parts = self._extract_content_from_completion(initial_completion)
+                fallback_content = content_parts.get('text') or ''
+                thinking = content_parts.get('thinking')
+
+                # Log thinking if present
+                if thinking:
+                    logger.debug(f"Thinking content present in structured output: {thinking[:100]}...")
+
+                if not fallback_content:
+                    # Final fallback to raw content
+                    content = initial_completion.content
+                    fallback_content = content if isinstance(content, str) else str(content)
+
+            result['messages'] = messages + [AIMessage(content=fallback_content)]
+
+        return result
+
     def get_filtered_tools(self) -> List[BaseTool]:
         """
         Filter available tools based on tool_names list.
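
Taken together, these helpers give structured output a four-step ladder: native structured output, then json_mode with explicit instructions, then function_calling, then a plain completion with regex JSON extraction. The last-resort parse can be exercised on its own; a self-contained sketch mirroring the hunk's regex and json.loads step:

import json
import re

def extract_json_best_effort(content: str):
    # Same approach as the hunk: take the outermost brace-delimited span
    # and attempt to parse it; return None when nothing parses.
    match = re.search(r'\{.*\}', content, re.DOTALL)
    if not match:
        return None
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return None

print(extract_json_best_effort('Sure, here it is: {"elitea_response": "done"}'))
# {'elitea_response': 'done'}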
@@ -162,8 +389,6 @@
             if func_args.get('system') is None or func_args.get('task') is None:
                 raise ToolException(f"LLMNode requires 'system' and 'task' parameters in input mapping. "
                                     f"Actual params: {func_args}")
-                raise ToolException(f"LLMNode requires 'system' and 'task' parameters in input mapping. "
-                                    f"Actual params: {func_args}")
             # cast to str in case user passes variable different from str
             messages = [SystemMessage(content=str(func_args.get('system'))), *func_args.get('chat_history', []), HumanMessage(content=str(func_args.get('task')))]
             # Remove pre-last item if last two messages are same type and content
@@ -195,44 +420,23 @@
        try:
            if self.structured_output and self.output_variables:
                # Handle structured output
-                struct_params = {
-                    key: {
-                        "type": 'list[str]' if 'list' in value else value,
-                        "description": ""
-                    }
-                    for key, value in (self.structured_output_dict or {}).items()
-                }
-                # Add default output field for proper response to user
-                struct_params['elitea_response'] = {
-                    'description': 'final output to user (summarized output from LLM)', 'type': 'str',
-                    "default": None}
+                struct_params = self._prepare_structured_output_params()
                struct_model = create_pydantic_model(f"LLMOutput", struct_params)
-                initial_completion = llm_client.invoke(messages, config=config)
-                if hasattr(initial_completion, 'tool_calls') and initial_completion.tool_calls:
-                    new_messages, _ = self._run_async_in_sync_context(
-                        self.__perform_tool_calling(initial_completion, messages, llm_client, config)
+
+                try:
+                    completion, initial_completion, final_messages = self._invoke_with_structured_output(
+                        llm_client, messages, struct_model, config
                    )
-                    llm = self.__get_struct_output_model(llm_client, struct_model)
-                    completion = llm.invoke(new_messages, config=config)
-                    result = completion.model_dump()
-                else:
-                    try:
-                        llm = self.__get_struct_output_model(llm_client, struct_model)
-                        completion = llm.invoke(messages, config=config)
-                    except ValueError as e:
-                        logger.error(f"Error invoking structured output model: {format_exc()}")
-                        logger.info("Attemping to fall back to json mode")
-                        # Fallback to regular LLM with JSON extraction
-                        completion = self.__get_struct_output_model(llm_client, struct_model,
-                                                                    method="json_mode").invoke(messages, config=config)
-                    result = completion.model_dump()
-
-                # Ensure messages are properly formatted
-                if result.get('messages') and isinstance(result['messages'], list):
-                    result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
-                else:
-                    result['messages'] = messages + [
-                        AIMessage(content=result.get(ELITEA_RS, '') or initial_completion.content)]
+                except ValueError as e:
+                    # Handle fallback for structured output failures
+                    completion = self._handle_structured_output_fallback(
+                        llm_client, messages, struct_model, config, e
+                    )
+                    initial_completion = None
+                    final_messages = messages
+
+                result = completion.model_dump()
+                result = self._format_structured_output_result(result, final_messages, initial_completion or completion)

                return result
            else:
@@ -250,24 +454,89 @@
                if self.output_variables:
                    if self.output_variables[0] == 'messages':
                        return output_msgs
-                    output_msgs[self.output_variables[0]] = current_completion.content if current_completion else None
+                    # Extract content properly from thinking-enabled responses
+                    if current_completion:
+                        content_parts = self._extract_content_from_completion(current_completion)
+                        text_content = content_parts.get('text')
+                        thinking = content_parts.get('thinking')
+
+                        # Dispatch thinking event if present
+                        if thinking:
+                            try:
+                                model_name = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'LLM')
+                                dispatch_custom_event(
+                                    name="thinking_step",
+                                    data={
+                                        "message": thinking,
+                                        "tool_name": f"LLM ({model_name})",
+                                        "toolkit": "reasoning",
+                                    },
+                                    config=config,
+                                )
+                            except Exception as e:
+                                logger.warning(f"Failed to dispatch thinking event: {e}")
+
+                        if text_content:
+                            output_msgs[self.output_variables[0]] = text_content
+                        else:
+                            # Fallback to raw content
+                            content = current_completion.content
+                            output_msgs[self.output_variables[0]] = content if isinstance(content, str) else str(content)
+                    else:
+                        output_msgs[self.output_variables[0]] = None

                    return output_msgs
                else:
-                    # Regular text response
-                    content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
+                    # Regular text response - handle both simple strings and thinking-enabled responses
+                    content_parts = self._extract_content_from_completion(completion)
+                    thinking = content_parts.get('thinking')
+                    text_content = content_parts.get('text') or ''
+
+                    # Fallback to string representation if no content extracted
+                    if not text_content:
+                        if hasattr(completion, 'content'):
+                            content = completion.content
+                            text_content = content.strip() if isinstance(content, str) else str(content)
+                        else:
+                            text_content = str(completion)
+
+                    # Dispatch thinking step event to chat if present
+                    if thinking:
+                        logger.info(f"Model thinking: {thinking[:200]}..." if len(thinking) > 200 else f"Model thinking: {thinking}")
+
+                        # Dispatch custom event for thinking step to be displayed in chat
+                        try:
+                            model_name = getattr(llm_client, 'model_name', None) or getattr(llm_client, 'model', 'LLM')
+                            dispatch_custom_event(
+                                name="thinking_step",
+                                data={
+                                    "message": thinking,
+                                    "tool_name": f"LLM ({model_name})",
+                                    "toolkit": "reasoning",
+                                },
+                                config=config,
+                            )
+                        except Exception as e:
+                            logger.warning(f"Failed to dispatch thinking event: {e}")
+
+                    # Build the AI message with both thinking and text
+                    # Store thinking in additional_kwargs for potential future use
+                    ai_message_kwargs = {'content': text_content}
+                    if thinking:
+                        ai_message_kwargs['additional_kwargs'] = {'thinking': thinking}
+                    ai_message = AIMessage(**ai_message_kwargs)

                    # Try to extract JSON if output variables are specified (but exclude 'messages' which is handled separately)
                    json_output_vars = [var for var in (self.output_variables or []) if var != 'messages']
                    if json_output_vars:
                        # set response to be the first output variable for non-structured output
-                        response_data = {json_output_vars[0]: content}
-                        new_messages = messages + [AIMessage(content=content)]
+                        response_data = {json_output_vars[0]: text_content}
+                        new_messages = messages + [ai_message]
                        response_data['messages'] = new_messages
                        return response_data

                    # Simple text response (either no output variables or JSON parsing failed)
-                    new_messages = messages + [AIMessage(content=content)]
+                    new_messages = messages + [ai_message]
                    return {"messages": new_messages}

        except Exception as e:
@@ -285,6 +554,56 @@
        # Legacy support for old interface
        return self.invoke(kwargs, **kwargs)

+    @staticmethod
+    def _extract_content_from_completion(completion) -> dict:
+        """Extract thinking and text content from LLM completion.
+
+        Handles Anthropic's extended thinking format where content is a list
+        of blocks with types: 'thinking' and 'text'.
+
+        Args:
+            completion: LLM completion object with content attribute
+
+        Returns:
+            dict with 'thinking' and 'text' keys
+        """
+        result = {'thinking': None, 'text': None}
+
+        if not hasattr(completion, 'content'):
+            return result
+
+        content = completion.content
+
+        # Handle list of content blocks (Anthropic extended thinking format)
+        if isinstance(content, list):
+            thinking_blocks = []
+            text_blocks = []
+
+            for block in content:
+                if isinstance(block, dict):
+                    block_type = block.get('type', '')
+                    if block_type == 'thinking':
+                        thinking_blocks.append(block.get('thinking', ''))
+                    elif block_type == 'text':
+                        text_blocks.append(block.get('text', ''))
+                elif hasattr(block, 'type'):
+                    # Handle object format
+                    if block.type == 'thinking':
+                        thinking_blocks.append(getattr(block, 'thinking', ''))
+                    elif block.type == 'text':
+                        text_blocks.append(getattr(block, 'text', ''))
+
+            if thinking_blocks:
+                result['thinking'] = '\n\n'.join(thinking_blocks)
+            if text_blocks:
+                result['text'] = '\n\n'.join(text_blocks)
+
+        # Handle simple string content
+        elif isinstance(content, str):
+            result['text'] = content
+
+        return result
+
    def _run_async_in_sync_context(self, coro):
        """Run async coroutine from sync context.

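For reference, a minimal sketch of _extract_content_from_completion against an Anthropic-style block list; the fake completion object is hypothetical, and the import path assumes the file layout from the list above:

from alita_sdk.runtime.tools.llm import LLMNode

class FakeCompletion:
    # Content shaped like Anthropic's extended-thinking block format.
    content = [
        {"type": "thinking", "thinking": "User wants a greeting."},
        {"type": "text", "text": "Hello!"},
    ]

parts = LLMNode._extract_content_from_completion(FakeCompletion())
# parts == {'thinking': 'User wants a greeting.', 'text': 'Hello!'}
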
alita_sdk/runtime/tools/loop.py

@@ -102,7 +102,9 @@ Input Data:
        logger.debug(f"LoopNode input: {predict_input}")
        completion = self.client.invoke(predict_input, config=config)
        logger.debug(f"LoopNode pure output: {completion}")
-        loop_data = _old_extract_json(completion.content.strip())
+        from ..langchain.utils import extract_text_from_completion
+        content_text = extract_text_from_completion(completion)
+        loop_data = _old_extract_json(content_text.strip())
        logger.debug(f"LoopNode output: {loop_data}")
        if self.return_type == "str":
            accumulated_response = ''
alita_sdk/runtime/tools/loop_output.py

@@ -93,7 +93,9 @@ Answer must be JSON only extractable by JSON.LOADS."""
        else:
            input_[-1].content += self.unstructured_output
        completion = self.client.invoke(input_, config=config)
-        result = _extract_json(completion.content.strip())
+        from ..langchain.utils import extract_text_from_completion
+        content_text = extract_text_from_completion(completion)
+        result = _extract_json(content_text.strip())
        try:
            tool_result: dict | List[dict] = self.tool.invoke(result, config=config, kwargs=kwargs)
            dispatch_custom_event(
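
Both loop nodes now funnel completion content through extract_text_from_completion from the new alita_sdk/runtime/langchain/utils.py (+46 lines, body not shown in this diff). A hypothetical equivalent, assuming it handles the same string and block-list shapes as LLMNode._extract_content_from_completion above:

def extract_text_from_completion(completion) -> str:
    # Sketch only: collapse a completion's content to plain text so the
    # downstream JSON extractors keep working with thinking-enabled models.
    content = getattr(completion, "content", completion)
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        texts = [block.get("text", "") for block in content
                 if isinstance(block, dict) and block.get("type") == "text"]
        return "\n\n".join(texts)
    return str(content)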