iatoolkit-0.71.4-py3-none-any.whl → iatoolkit-1.4.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. iatoolkit/__init__.py +19 -7
  2. iatoolkit/base_company.py +1 -71
  3. iatoolkit/cli_commands.py +9 -21
  4. iatoolkit/common/exceptions.py +2 -0
  5. iatoolkit/common/interfaces/__init__.py +0 -0
  6. iatoolkit/common/interfaces/asset_storage.py +34 -0
  7. iatoolkit/common/interfaces/database_provider.py +38 -0
  8. iatoolkit/common/model_registry.py +159 -0
  9. iatoolkit/common/routes.py +53 -32
  10. iatoolkit/common/util.py +17 -12
  11. iatoolkit/company_registry.py +55 -14
  12. iatoolkit/{iatoolkit.py → core.py} +102 -72
  13. iatoolkit/infra/{mail_app.py → brevo_mail_app.py} +15 -37
  14. iatoolkit/infra/llm_providers/__init__.py +0 -0
  15. iatoolkit/infra/llm_providers/deepseek_adapter.py +278 -0
  16. iatoolkit/infra/{gemini_adapter.py → llm_providers/gemini_adapter.py} +11 -17
  17. iatoolkit/infra/{openai_adapter.py → llm_providers/openai_adapter.py} +41 -7
  18. iatoolkit/infra/llm_proxy.py +235 -134
  19. iatoolkit/infra/llm_response.py +5 -0
  20. iatoolkit/locales/en.yaml +134 -4
  21. iatoolkit/locales/es.yaml +293 -162
  22. iatoolkit/repositories/database_manager.py +92 -22
  23. iatoolkit/repositories/document_repo.py +7 -0
  24. iatoolkit/repositories/filesystem_asset_repository.py +36 -0
  25. iatoolkit/repositories/llm_query_repo.py +36 -22
  26. iatoolkit/repositories/models.py +86 -95
  27. iatoolkit/repositories/profile_repo.py +64 -13
  28. iatoolkit/repositories/vs_repo.py +31 -28
  29. iatoolkit/services/auth_service.py +1 -1
  30. iatoolkit/services/branding_service.py +1 -1
  31. iatoolkit/services/company_context_service.py +96 -39
  32. iatoolkit/services/configuration_service.py +329 -67
  33. iatoolkit/services/dispatcher_service.py +51 -227
  34. iatoolkit/services/document_service.py +10 -1
  35. iatoolkit/services/embedding_service.py +9 -6
  36. iatoolkit/services/excel_service.py +50 -2
  37. iatoolkit/services/file_processor_service.py +0 -5
  38. iatoolkit/services/history_manager_service.py +208 -0
  39. iatoolkit/services/jwt_service.py +1 -1
  40. iatoolkit/services/knowledge_base_service.py +412 -0
  41. iatoolkit/services/language_service.py +8 -2
  42. iatoolkit/services/license_service.py +82 -0
  43. iatoolkit/{infra/llm_client.py → services/llm_client_service.py} +42 -29
  44. iatoolkit/services/load_documents_service.py +18 -47
  45. iatoolkit/services/mail_service.py +171 -25
  46. iatoolkit/services/profile_service.py +69 -36
  47. iatoolkit/services/{prompt_manager_service.py → prompt_service.py} +136 -25
  48. iatoolkit/services/query_service.py +229 -203
  49. iatoolkit/services/sql_service.py +116 -34
  50. iatoolkit/services/tool_service.py +246 -0
  51. iatoolkit/services/user_feedback_service.py +18 -6
  52. iatoolkit/services/user_session_context_service.py +121 -51
  53. iatoolkit/static/images/iatoolkit_core.png +0 -0
  54. iatoolkit/static/images/iatoolkit_logo.png +0 -0
  55. iatoolkit/static/js/chat_feedback_button.js +1 -1
  56. iatoolkit/static/js/chat_help_content.js +4 -4
  57. iatoolkit/static/js/chat_main.js +61 -9
  58. iatoolkit/static/js/chat_model_selector.js +227 -0
  59. iatoolkit/static/js/chat_onboarding_button.js +1 -1
  60. iatoolkit/static/js/chat_reload_button.js +4 -1
  61. iatoolkit/static/styles/chat_iatoolkit.css +59 -3
  62. iatoolkit/static/styles/chat_public.css +28 -0
  63. iatoolkit/static/styles/documents.css +598 -0
  64. iatoolkit/static/styles/landing_page.css +223 -7
  65. iatoolkit/static/styles/llm_output.css +34 -1
  66. iatoolkit/system_prompts/__init__.py +0 -0
  67. iatoolkit/system_prompts/query_main.prompt +28 -3
  68. iatoolkit/system_prompts/sql_rules.prompt +47 -12
  69. iatoolkit/templates/_company_header.html +30 -5
  70. iatoolkit/templates/_login_widget.html +3 -3
  71. iatoolkit/templates/base.html +13 -0
  72. iatoolkit/templates/chat.html +45 -3
  73. iatoolkit/templates/forgot_password.html +3 -2
  74. iatoolkit/templates/onboarding_shell.html +1 -2
  75. iatoolkit/templates/signup.html +3 -0
  76. iatoolkit/views/base_login_view.py +8 -3
  77. iatoolkit/views/change_password_view.py +1 -1
  78. iatoolkit/views/chat_view.py +76 -0
  79. iatoolkit/views/forgot_password_view.py +9 -4
  80. iatoolkit/views/history_api_view.py +3 -3
  81. iatoolkit/views/home_view.py +4 -2
  82. iatoolkit/views/init_context_api_view.py +1 -1
  83. iatoolkit/views/llmquery_api_view.py +4 -3
  84. iatoolkit/views/load_company_configuration_api_view.py +49 -0
  85. iatoolkit/views/{file_store_api_view.py → load_document_api_view.py} +15 -11
  86. iatoolkit/views/login_view.py +25 -8
  87. iatoolkit/views/logout_api_view.py +10 -2
  88. iatoolkit/views/prompt_api_view.py +1 -1
  89. iatoolkit/views/rag_api_view.py +216 -0
  90. iatoolkit/views/root_redirect_view.py +22 -0
  91. iatoolkit/views/signup_view.py +12 -4
  92. iatoolkit/views/static_page_view.py +27 -0
  93. iatoolkit/views/users_api_view.py +33 -0
  94. iatoolkit/views/verify_user_view.py +1 -1
  95. iatoolkit-1.4.2.dist-info/METADATA +268 -0
  96. iatoolkit-1.4.2.dist-info/RECORD +133 -0
  97. iatoolkit-1.4.2.dist-info/licenses/LICENSE_COMMUNITY.md +15 -0
  98. iatoolkit/repositories/tasks_repo.py +0 -52
  99. iatoolkit/services/history_service.py +0 -37
  100. iatoolkit/services/search_service.py +0 -55
  101. iatoolkit/services/tasks_service.py +0 -188
  102. iatoolkit/templates/about.html +0 -13
  103. iatoolkit/templates/index.html +0 -145
  104. iatoolkit/templates/login_simulation.html +0 -45
  105. iatoolkit/views/external_login_view.py +0 -73
  106. iatoolkit/views/index_view.py +0 -14
  107. iatoolkit/views/login_simulation_view.py +0 -93
  108. iatoolkit/views/tasks_api_view.py +0 -72
  109. iatoolkit/views/tasks_review_api_view.py +0 -55
  110. iatoolkit-0.71.4.dist-info/METADATA +0 -276
  111. iatoolkit-0.71.4.dist-info/RECORD +0 -122
  112. {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/WHEEL +0 -0
  113. {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/licenses/LICENSE +0 -0
  114. {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/top_level.txt +0 -0
iatoolkit/infra/llm_providers/deepseek_adapter.py (new file)
@@ -0,0 +1,278 @@
+ # deepseek_adapter.py
+ # Copyright (c) 2024 Fernando Libedinsky
+ # Product: IAToolkit
+ #
+ # IAToolkit is open source software.
+
+ import logging
+ from typing import Dict, List, Optional, Any
+
+ from iatoolkit.infra.llm_response import LLMResponse, ToolCall, Usage
+ from iatoolkit.common.exceptions import IAToolkitException
+ import json
+
+ class DeepseekAdapter:
+     """
+     Adapter for DeepSeek using the OpenAI-compatible Chat Completions API.
+     It translates IAToolkit's common request/response format into
+     DeepSeek chat.completions calls.
+     """
+
+     def __init__(self, deepseek_client):
+         # deepseek_client is an OpenAI client configured with base_url="https://api.deepseek.com"
+         self.client = deepseek_client
+
+     # ------------------------------------------------------------------
+     # Public entry point
+     # ------------------------------------------------------------------
+
+     def create_response(self, model: str, input: List[Dict], **kwargs) -> LLMResponse:
+         """
+         Entry point called by LLMProxy.
+
+         :param model: DeepSeek model name (e.g. "deepseek-chat").
+         :param input: Common IAToolkit input list. It may contain:
+                       - normal messages: {"role": "...", "content": "..."}
+                       - function outputs: {"type": "function_call_output",
+                         "call_id": "...", "output": "..."}
+         :param kwargs: extra options (tools, tool_choice, context_history, etc.).
+         """
+         tools = kwargs.get("tools") or []
+         tool_choice = kwargs.get("tool_choice", "auto")
+         context_history = kwargs.get("context_history") or []
+
+         try:
+             # 1) Build messages from history (if any)
+             messages: List[Dict[str, Any]] = []
+             if context_history:
+                 history_messages = self._build_messages_from_input(context_history)
+                 messages.extend(history_messages)
+
+             # 2) Append current turn messages
+             current_messages = self._build_messages_from_input(input)
+             messages.extend(current_messages)
+
+             # Detect if this input already contains function_call_output items.
+             # That means we are in the "second phase" after executing tools.
+             has_function_outputs = any(
+                 item.get("type") == "function_call_output" for item in input
+             )
+
+             # 3) Build the tools payload
+             tools_payload = self._build_tools_payload(tools)
+
+             # If we already have function_call_output messages and the caller did not force
+             # a specific tool_choice (e.g. "required" for a SQL retry), we disable tools and
+             # tool_choice to avoid infinite tool-calling loops (especially with iat_sql_query).
+             if has_function_outputs and tool_choice == "auto":
+                 logging.debug(
+                     "[DeepseekAdapter] Detected function_call_output in input; "
+                     "disabling tools and tool_choice to avoid tool loop."
+                 )
+                 tools_payload = None
+                 tool_choice = None
+
+             logging.debug(f"[DeepseekAdapter] messages={messages}")
+             logging.debug(f"[DeepseekAdapter] tools={tools_payload}, tool_choice={tool_choice}")
+
+             # Build kwargs for the API call, skipping empty parameters
+             call_kwargs: Dict[str, Any] = {
+                 "model": model,
+                 "messages": messages,
+             }
+             if tools_payload:
+                 call_kwargs["tools"] = tools_payload
+             if tool_choice:
+                 call_kwargs["tool_choice"] = tool_choice
+
+             logging.debug(f"[DeepseekAdapter] Calling DeepSeek chat.completions API: {json.dumps(messages, indent=2)}")
+             response = self.client.chat.completions.create(**call_kwargs)
+
+             return self._map_deepseek_chat_response(response)
+
+         except IAToolkitException:
+             # Re-raise IAToolkit exceptions as-is
+             raise
+         except Exception as ex:
+             logging.exception("Unexpected error calling DeepSeek")
+             raise IAToolkitException(
+                 IAToolkitException.ErrorType.LLM_ERROR,
+                 f"DeepSeek error: {ex}"
+             ) from ex
+
+     # ------------------------------------------------------------------
+     # Helpers to build the request
+     # ------------------------------------------------------------------
+
+     def _build_messages_from_input(self, input_items: List[Dict]) -> List[Dict]:
+         """
+         Transform IAToolkit 'input' items into Chat Completions 'messages'.
+
+         We handle:
+         - Standard messages with 'role' and 'content'.
+         - function_call_output items, converted into user messages containing
+           the tool result so the model can use them to answer.
+         """
+         messages: List[Dict[str, Any]] = []
+
+         for item in input_items:
+             # Tool call outputs are mapped to user messages with the tool result.
+             if item.get("type") == "function_call_output":
+                 output = item.get("output", "")
+                 if not output:
+                     logging.warning(
+                         "[DeepseekAdapter] function_call_output item without 'output': %s",
+                         item
+                     )
+                     continue
+
+                 messages.append(
+                     {
+                         "role": "user",
+                         "content": f"Tool result:\n{output}",
+                     }
+                 )
+                 continue
+
+             role = item.get("role")
+             content = item.get("content")
+
+             # Skip tool-role messages completely for DeepSeek
+             if role == "tool":
+                 logging.warning(f"[DeepseekAdapter] Skipping tool-role message: {item}")
+                 continue
+
+             if not role:
+                 logging.warning(f"[DeepseekAdapter] Skipping message without role: {item}")
+                 continue
+
+             messages.append({"role": role, "content": content})
+
+         return messages
+
+     def _build_tools_payload(self, tools: List[Dict]) -> Optional[List[Dict]]:
+         """
+         Transform IAToolkit tool definitions into the DeepSeek/OpenAI chat tools format.
+
+         Expected internal tool format:
+             {
+                 "type": "function",
+                 "name": ...,
+                 "description": ...,
+                 "parameters": {...},
+                 "strict": True/False
+             }
+         Or already in OpenAI tools format with a "function" key.
+         """
+         if not tools:
+             return None
+
+         tools_payload: List[Dict[str, Any]] = []
+
+         for tool in tools:
+             # If it's already in OpenAI 'function' format, reuse it
+             if "function" in tool:
+                 func_def = tool["function"]
+             else:
+                 # Build the function definition from the flattened structure
+                 func_def = {
+                     "name": tool.get("name"),
+                     "description": tool.get("description", ""),
+                     "parameters": tool.get("parameters", {}) or {},
+                 }
+
+             # Ensure parameters is a dict
+             if "parameters" in func_def and not isinstance(func_def["parameters"], dict):
+                 logging.warning(
+                     "Tool parameters must be a dict; got %s",
+                     type(func_def["parameters"])
+                 )
+                 func_def["parameters"] = {}
+
+             ds_tool: Dict[str, Any] = {
+                 "type": tool.get("type", "function"),
+                 "function": func_def,
+             }
+
+             if tool.get("strict") is True:
+                 ds_tool["strict"] = True
+
+             tools_payload.append(ds_tool)
+
+         return tools_payload or None
+
+     # ------------------------------------------------------------------
+     # Mapping DeepSeek response -> LLMResponse
+     # ------------------------------------------------------------------
+
+     def _map_deepseek_chat_response(self, response: Any) -> LLMResponse:
+         """
+         Map a DeepSeek Chat Completion response to our common LLMResponse.
+         Handles both plain assistant messages and tool_calls.
+         """
+         # We only look at the first choice
+         if not response.choices:
+             raise IAToolkitException(
+                 IAToolkitException.ErrorType.LLM_ERROR,
+                 "DeepSeek response has no choices."
+             )
+
+         choice = response.choices[0]
+         message = choice.message
+
+         # Usage mapping
+         usage = Usage(
+             input_tokens=getattr(getattr(response, "usage", None), "prompt_tokens", 0) or 0,
+             output_tokens=getattr(getattr(response, "usage", None), "completion_tokens", 0) or 0,
+             total_tokens=getattr(getattr(response, "usage", None), "total_tokens", 0) or 0,
+         )
+
+         # Capture reasoning content (specific to deepseek-reasoner)
+         reasoning_content = getattr(message, "reasoning_content", "") or ""
+
+         # If the model produced tool calls, this list is filled
+         tool_calls_out: List[ToolCall] = []
+
+         tool_calls = getattr(message, "tool_calls", None) or []
+         if not tool_calls:
+             # No tool calls: standard assistant message
+             output_text = getattr(message, "content", "") or ""
+             status = "completed"
+         else:
+             logging.debug(f"[DeepSeek] RAW tool_calls: {tool_calls}")
+
+             for tc in tool_calls:
+                 func = getattr(tc, "function", None)
+                 if not func:
+                     continue
+
+                 name = getattr(func, "name", "")
+                 arguments = getattr(func, "arguments", "") or "{}"
+
+                 # DeepSeek/OpenAI return arguments as a JSON string
+                 logging.debug(
+                     f"[DeepSeek] ToolCall generated -> id={getattr(tc, 'id', '')} "
+                     f"name={name} arguments_raw={arguments}"
+                 )
+                 tool_calls_out.append(
+                     ToolCall(
+                         call_id=getattr(tc, "id", ""),
+                         type="function_call",
+                         name=name,
+                         arguments=arguments,
+                     )
+                 )
+
+             status = "tool_calls"
+             output_text = ""  # caller will inspect tool_calls in .output
+
+         return LLMResponse(
+             id=getattr(response, "id", "deepseek-unknown"),
+             model=getattr(response, "model", "deepseek-unknown"),
+             status=status,
+             output_text=output_text,
+             output=tool_calls_out,
+             usage=usage,
+             reasoning_content=reasoning_content
+         )
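
For orientation, a minimal usage sketch of the new adapter follows, based only on the contracts visible above: an OpenAI SDK client pointed at DeepSeek's OpenAI-compatible endpoint, and the LLMResponse/ToolCall fields set in _map_deepseek_chat_response. The tool definition, prompt, and tool result are illustrative, not taken from the package:

    from openai import OpenAI
    from iatoolkit.infra.llm_providers.deepseek_adapter import DeepseekAdapter

    # Client configured as __init__ expects (OpenAI-compatible endpoint).
    client = OpenAI(api_key="...", base_url="https://api.deepseek.com")
    adapter = DeepseekAdapter(client)

    # Flattened IAToolkit-style tool definition (illustrative values).
    tools = [{
        "type": "function",
        "name": "iat_sql_query",
        "description": "Run a read-only SQL query.",
        "parameters": {
            "type": "object",
            "properties": {"sql": {"type": "string"}},
            "required": ["sql"],
        },
    }]

    # Phase one: the model may answer directly or request a tool call.
    response = adapter.create_response(
        model="deepseek-chat",
        input=[{"role": "user", "content": "How many orders were placed today?"}],
        tools=tools,
        tool_choice="auto",
    )

    if response.status == "tool_calls":
        # Phase two: feed the executed tool result back as a
        # function_call_output item. With tool_choice="auto" the adapter
        # now disables tools, preventing a tool-calling loop.
        final = adapter.create_response(
            model="deepseek-chat",
            input=[{
                "type": "function_call_output",
                "call_id": response.output[0].call_id,
                "output": "42",
            }],
            tools=tools,
            tool_choice="auto",
        )
        print(final.output_text)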
iatoolkit/infra/{gemini_adapter.py → llm_providers/gemini_adapter.py}
@@ -13,13 +13,11 @@ import json
  import uuid
 
  class GeminiAdapter:
-     """Adaptador para la API de Gemini"""
 
      def __init__(self, gemini_client):
-         """Inicializar con cliente Gemini ya configurado"""
          self.client = gemini_client
 
-         # Configuración de seguridad - permitir contenido que podría ser bloqueado por defecto
+         # security configuration - allow content that might be blocked by default
          self.safety_settings = {
              HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
              HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
@@ -37,38 +35,36 @@ class GeminiAdapter:
          reasoning: Optional[Dict] = None,
          tool_choice: str = "auto",
      ) -> LLMResponse:
-         """Llamada a la API de Gemini y mapeo a estructura común"""
          try:
-             # Inicializar el modelo de Gemini usando el cliente configurado
+             # init the model with the configured client
              gemini_model = self.client.GenerativeModel(
                  model_name=self._map_model_name(model),
                  safety_settings=self.safety_settings
             )
 
-             # Preparar el contenido para Gemini
+             # prepare the content for gemini
             if context_history:
-                 # Concatenar el historial de conversación con el input actual
+                 # concat the history with the current input
                 contents = self._prepare_gemini_contents(context_history + input)
             else:
-                 # Usar solo el input actual si no hay historial
                 contents = self._prepare_gemini_contents(input)
 
-             # Preparar herramientas si están disponibles
+             # prepare tools
             gemini_tools = self._prepare_gemini_tools(tools) if tools else None
 
-             # Configurar generación
+             # config generation
             generation_config = self._prepare_generation_config(text, tool_choice)
 
-             # Llamar a Gemini
+             # call gemini
             if gemini_tools:
-                 # Con herramientas
+                 # with tools
                 response = gemini_model.generate_content(
                     contents,
                     tools=gemini_tools,
                     generation_config=generation_config
                 )
             else:
-                 # Sin herramientas
+                 # without tools
                 response = gemini_model.generate_content(
                     contents,
                     generation_config=generation_config
@@ -102,9 +98,7 @@
 
             raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
 
-     # ... rest of the methods keep the same ...
      def _map_model_name(self, model: str) -> str:
-         """Mapear nombre del modelo a formato de Gemini"""
          model_mapping = {
             "gemini-pro": "gemini-2.5-pro",
             "gemini": "gemini-2.5-pro",
@@ -115,7 +109,7 @@
         return model_mapping.get(model.lower(), model)
 
      def _prepare_gemini_contents(self, input: List[Dict]) -> List[Dict]:
-         """Convertir mensajes de formato OpenAI a formato Gemini"""
+         # convert input messages to Gemini format
         gemini_contents = []
 
         for message in input:
@@ -143,7 +137,7 @@
         return gemini_contents
 
      def _prepare_gemini_tools(self, tools: List[Dict]) -> List[Dict]:
-         """Convertir herramientas de formato OpenAI a formato Gemini"""
+         # convert tools to Gemini format
         if not tools:
             return None
 
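The _map_model_name hunk above pins the short aliases "gemini-pro" and "gemini" to "gemini-2.5-pro" and passes unrecognized names through unchanged. A standalone sketch of that lookup pattern (the assertions are illustrative, not from the package):

    # Alias table as shown in the hunk; unrecognized names pass through.
    MODEL_MAPPING = {
        "gemini-pro": "gemini-2.5-pro",
        "gemini": "gemini-2.5-pro",
    }

    def map_model_name(model: str) -> str:
        # Case-insensitive alias lookup with pass-through default,
        # mirroring model_mapping.get(model.lower(), model) above.
        return MODEL_MAPPING.get(model.lower(), model)

    assert map_model_name("Gemini-Pro") == "gemini-2.5-pro"
    assert map_model_name("gemini-2.0-flash") == "gemini-2.0-flash"  # unmapped: passed through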
iatoolkit/infra/{openai_adapter.py → llm_providers/openai_adapter.py}
@@ -7,7 +7,8 @@ import logging
  from typing import Dict, List, Optional
  from iatoolkit.infra.llm_response import LLMResponse, ToolCall, Usage
  from iatoolkit.common.exceptions import IAToolkitException
-
+ import html
+ from typing import List
 
  class OpenAIAdapter:
      """Adaptador para la API de OpenAI"""
@@ -53,10 +54,6 @@ class OpenAIAdapter:
             error_message = f"Error calling OpenAI API: {str(e)}"
             logging.error(error_message)
 
-             # En caso de error de contexto
-             if "context_length_exceeded" in str(e):
-                 error_message = 'Tu consulta supera el limite de contexto. Reinicia el contexto con el boton de la barra superior.'
-
             raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
 
      def _map_openai_response(self, openai_response) -> LLMResponse:
@@ -80,11 +77,48 @@
             total_tokens=openai_response.usage.total_tokens if openai_response.usage else 0
         )
 
+         # Reasoning content extracted from Responses output items (type="reasoning")
+         reasoning_list = self._extract_reasoning_content(openai_response)
+         reasoning_str = "\n".join(reasoning_list)
+
         return LLMResponse(
             id=openai_response.id,
             model=openai_response.model,
             status=openai_response.status,
             output_text=getattr(openai_response, 'output_text', ''),
             output=tool_calls,
-             usage=usage
-         )
+             usage=usage,
+             reasoning_content=reasoning_str
+         )
+
+     def _extract_reasoning_content(self, openai_response) -> List[str]:
+         """
+         Extract reasoning summaries (preferred) or reasoning content fragments
+         from the Responses API output.
+
+         Format required by the caller:
+             1. reason is ...
+             2. reason is ...
+         """
+         reasons: List[str] = []
+
+         output_items = getattr(openai_response, "output", None) or []
+         for item in output_items:
+             if getattr(item, "type", None) != "reasoning":
+                 continue
+
+             # 1) Preferred: reasoning summaries (requires reasoning={"summary": "auto"} or similar)
+             summary = getattr(item, "summary", None) or []
+             for s in summary:
+                 text = getattr(s, "text", None)
+                 if text:
+                     reasons.append(str(text).strip())
+
+             # 2) Fallback: some responses may carry reasoning content in "content"
+             #    (e.g., content parts like {"type": "reasoning_text", "text": "..."}).
+             content = getattr(item, "content", None) or []
+             for c in content:
+                 text = getattr(c, "text", None)
+                 if text:
+                     reasons.append(str(text).strip())
+
+         return reasons
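
To make the extraction order concrete, here is a sketch that exercises _extract_reasoning_content against a stubbed Responses-style object. SimpleNamespace stands in for the SDK's response classes, and summaries only appear when the request enables them (e.g. reasoning={"summary": "auto"}, as noted in the comment above); the stub shapes and texts are illustrative:

    from types import SimpleNamespace

    # Stubbed response: one reasoning item with two summary parts,
    # plus a non-reasoning item that the loop skips.
    fake_response = SimpleNamespace(
        output=[
            SimpleNamespace(
                type="reasoning",
                summary=[
                    SimpleNamespace(text="reason is the query needs a JOIN"),
                    SimpleNamespace(text="reason is the date filter is ambiguous"),
                ],
                content=None,  # no fallback content parts in this stub
            ),
            SimpleNamespace(type="message"),
        ]
    )

    # Assuming an already-constructed OpenAIAdapter instance:
    reasons = adapter._extract_reasoning_content(fake_response)
    # -> ["reason is the query needs a JOIN", "reason is the date filter is ambiguous"]
    reasoning_str = "\n".join(reasons)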