iatoolkit 0.71.4__py3-none-any.whl → 1.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iatoolkit/__init__.py +19 -7
- iatoolkit/base_company.py +1 -71
- iatoolkit/cli_commands.py +9 -21
- iatoolkit/common/exceptions.py +2 -0
- iatoolkit/common/interfaces/__init__.py +0 -0
- iatoolkit/common/interfaces/asset_storage.py +34 -0
- iatoolkit/common/interfaces/database_provider.py +38 -0
- iatoolkit/common/model_registry.py +159 -0
- iatoolkit/common/routes.py +53 -32
- iatoolkit/common/util.py +17 -12
- iatoolkit/company_registry.py +55 -14
- iatoolkit/{iatoolkit.py → core.py} +102 -72
- iatoolkit/infra/{mail_app.py → brevo_mail_app.py} +15 -37
- iatoolkit/infra/llm_providers/__init__.py +0 -0
- iatoolkit/infra/llm_providers/deepseek_adapter.py +278 -0
- iatoolkit/infra/{gemini_adapter.py → llm_providers/gemini_adapter.py} +11 -17
- iatoolkit/infra/{openai_adapter.py → llm_providers/openai_adapter.py} +41 -7
- iatoolkit/infra/llm_proxy.py +235 -134
- iatoolkit/infra/llm_response.py +5 -0
- iatoolkit/locales/en.yaml +134 -4
- iatoolkit/locales/es.yaml +293 -162
- iatoolkit/repositories/database_manager.py +92 -22
- iatoolkit/repositories/document_repo.py +7 -0
- iatoolkit/repositories/filesystem_asset_repository.py +36 -0
- iatoolkit/repositories/llm_query_repo.py +36 -22
- iatoolkit/repositories/models.py +86 -95
- iatoolkit/repositories/profile_repo.py +64 -13
- iatoolkit/repositories/vs_repo.py +31 -28
- iatoolkit/services/auth_service.py +1 -1
- iatoolkit/services/branding_service.py +1 -1
- iatoolkit/services/company_context_service.py +96 -39
- iatoolkit/services/configuration_service.py +329 -67
- iatoolkit/services/dispatcher_service.py +51 -227
- iatoolkit/services/document_service.py +10 -1
- iatoolkit/services/embedding_service.py +9 -6
- iatoolkit/services/excel_service.py +50 -2
- iatoolkit/services/file_processor_service.py +0 -5
- iatoolkit/services/history_manager_service.py +208 -0
- iatoolkit/services/jwt_service.py +1 -1
- iatoolkit/services/knowledge_base_service.py +412 -0
- iatoolkit/services/language_service.py +8 -2
- iatoolkit/services/license_service.py +82 -0
- iatoolkit/{infra/llm_client.py → services/llm_client_service.py} +42 -29
- iatoolkit/services/load_documents_service.py +18 -47
- iatoolkit/services/mail_service.py +171 -25
- iatoolkit/services/profile_service.py +69 -36
- iatoolkit/services/{prompt_manager_service.py → prompt_service.py} +136 -25
- iatoolkit/services/query_service.py +229 -203
- iatoolkit/services/sql_service.py +116 -34
- iatoolkit/services/tool_service.py +246 -0
- iatoolkit/services/user_feedback_service.py +18 -6
- iatoolkit/services/user_session_context_service.py +121 -51
- iatoolkit/static/images/iatoolkit_core.png +0 -0
- iatoolkit/static/images/iatoolkit_logo.png +0 -0
- iatoolkit/static/js/chat_feedback_button.js +1 -1
- iatoolkit/static/js/chat_help_content.js +4 -4
- iatoolkit/static/js/chat_main.js +61 -9
- iatoolkit/static/js/chat_model_selector.js +227 -0
- iatoolkit/static/js/chat_onboarding_button.js +1 -1
- iatoolkit/static/js/chat_reload_button.js +4 -1
- iatoolkit/static/styles/chat_iatoolkit.css +59 -3
- iatoolkit/static/styles/chat_public.css +28 -0
- iatoolkit/static/styles/documents.css +598 -0
- iatoolkit/static/styles/landing_page.css +223 -7
- iatoolkit/static/styles/llm_output.css +34 -1
- iatoolkit/system_prompts/__init__.py +0 -0
- iatoolkit/system_prompts/query_main.prompt +28 -3
- iatoolkit/system_prompts/sql_rules.prompt +47 -12
- iatoolkit/templates/_company_header.html +30 -5
- iatoolkit/templates/_login_widget.html +3 -3
- iatoolkit/templates/base.html +13 -0
- iatoolkit/templates/chat.html +45 -3
- iatoolkit/templates/forgot_password.html +3 -2
- iatoolkit/templates/onboarding_shell.html +1 -2
- iatoolkit/templates/signup.html +3 -0
- iatoolkit/views/base_login_view.py +8 -3
- iatoolkit/views/change_password_view.py +1 -1
- iatoolkit/views/chat_view.py +76 -0
- iatoolkit/views/forgot_password_view.py +9 -4
- iatoolkit/views/history_api_view.py +3 -3
- iatoolkit/views/home_view.py +4 -2
- iatoolkit/views/init_context_api_view.py +1 -1
- iatoolkit/views/llmquery_api_view.py +4 -3
- iatoolkit/views/load_company_configuration_api_view.py +49 -0
- iatoolkit/views/{file_store_api_view.py → load_document_api_view.py} +15 -11
- iatoolkit/views/login_view.py +25 -8
- iatoolkit/views/logout_api_view.py +10 -2
- iatoolkit/views/prompt_api_view.py +1 -1
- iatoolkit/views/rag_api_view.py +216 -0
- iatoolkit/views/root_redirect_view.py +22 -0
- iatoolkit/views/signup_view.py +12 -4
- iatoolkit/views/static_page_view.py +27 -0
- iatoolkit/views/users_api_view.py +33 -0
- iatoolkit/views/verify_user_view.py +1 -1
- iatoolkit-1.4.2.dist-info/METADATA +268 -0
- iatoolkit-1.4.2.dist-info/RECORD +133 -0
- iatoolkit-1.4.2.dist-info/licenses/LICENSE_COMMUNITY.md +15 -0
- iatoolkit/repositories/tasks_repo.py +0 -52
- iatoolkit/services/history_service.py +0 -37
- iatoolkit/services/search_service.py +0 -55
- iatoolkit/services/tasks_service.py +0 -188
- iatoolkit/templates/about.html +0 -13
- iatoolkit/templates/index.html +0 -145
- iatoolkit/templates/login_simulation.html +0 -45
- iatoolkit/views/external_login_view.py +0 -73
- iatoolkit/views/index_view.py +0 -14
- iatoolkit/views/login_simulation_view.py +0 -93
- iatoolkit/views/tasks_api_view.py +0 -72
- iatoolkit/views/tasks_review_api_view.py +0 -55
- iatoolkit-0.71.4.dist-info/METADATA +0 -276
- iatoolkit-0.71.4.dist-info/RECORD +0 -122
- {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/WHEEL +0 -0
- {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/licenses/LICENSE +0 -0
- {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/top_level.txt +0 -0
iatoolkit/infra/llm_providers/deepseek_adapter.py
@@ -0,0 +1,278 @@
+# deepseek_adapter.py
+# Copyright (c) 2024 Fernando Libedinsky
+# Product: IAToolkit
+#
+# IAToolkit is open source software.
+
+import logging
+from typing import Dict, List, Optional, Any
+
+from iatoolkit.infra.llm_response import LLMResponse, ToolCall, Usage
+from iatoolkit.common.exceptions import IAToolkitException
+import json
+
+class DeepseekAdapter:
+    """
+    Adapter for DeepSeek using the OpenAI-compatible Chat Completions API.
+    It translates IAToolkit's common request/response format into
+    DeepSeek chat.completions calls.
+    """
+
+    def __init__(self, deepseek_client):
+        # deepseek_client is an OpenAI client configured with base_url="https://api.deepseek.com"
+        self.client = deepseek_client
+
+    # ------------------------------------------------------------------
+    # Public entry point
+    # ------------------------------------------------------------------
+
+    def create_response(self, model: str, input: List[Dict], **kwargs) -> LLMResponse:
+        """
+        Entry point called by LLMProxy.
+
+        :param model: DeepSeek model name (e.g. "deepseek-chat").
+        :param input: Common IAToolkit input list. It may contain:
+            - normal messages: {"role": "...", "content": "..."}
+            - function outputs: {"type": "function_call_output",
+              "call_id": "...", "output": "..."}
+        :param kwargs: extra options (tools, tool_choice, context_history, etc.).
+        """
+        tools = kwargs.get("tools") or []
+        tool_choice = kwargs.get("tool_choice", "auto")
+        context_history = kwargs.get("context_history") or []
+
+        try:
+            # 1) Build messages from history (if any)
+            messages: List[Dict[str, Any]] = []
+            if context_history:
+                history_messages = self._build_messages_from_input(context_history)
+                messages.extend(history_messages)
+
+            # 2) Append current turn messages
+            current_messages = self._build_messages_from_input(input)
+            messages.extend(current_messages)
+
+            # Detect if this input already contains function_call_output items.
+            # That means we are in the "second phase" after executing tools.
+            has_function_outputs = any(
+                item.get("type") == "function_call_output" for item in input
+            )
+
+            # 3) Build the tools payload
+            tools_payload = self._build_tools_payload(tools)
+
+            # If we already have function_call_output messages and the caller did not force
+            # a specific tool_choice (e.g. "required" for SQL retry), we disable tools and
+            # tool_choice to avoid infinite tool-calling loops (especially with iat_sql_query).
+            if has_function_outputs and tool_choice == "auto":
+                logging.debug(
+                    "[DeepseekAdapter] Detected function_call_output in input; "
+                    "disabling tools and tool_choice to avoid tool loop."
+                )
+                tools_payload = None
+                tool_choice = None
+
+            logging.debug(f"[DeepseekAdapter] messages={messages}")
+            logging.debug(f"[DeepseekAdapter] tools={tools_payload}, tool_choice={tool_choice}")
+
+            # Build kwargs for the API call, skipping empty parameters
+            call_kwargs: Dict[str, Any] = {
+                "model": model,
+                "messages": messages,
+            }
+            if tools_payload:
+                call_kwargs["tools"] = tools_payload
+            if tool_choice:
+                call_kwargs["tool_choice"] = tool_choice
+
+            logging.debug(f"[DeepseekAdapter] Calling DeepSeek chat.completions API...: {json.dumps(messages, indent=2)}")
+            response = self.client.chat.completions.create(**call_kwargs)
+
+            return self._map_deepseek_chat_response(response)
+
+        except IAToolkitException:
+            # Re-raise IAToolkit exceptions as-is
+            raise
+        except Exception as ex:
+            logging.exception("Unexpected error calling DeepSeek")
+            raise IAToolkitException(
+                IAToolkitException.ErrorType.LLM_ERROR,
+                f"DeepSeek error: {ex}"
+            ) from ex
+
+    # ------------------------------------------------------------------
+    # Helpers to build the request
+    # ------------------------------------------------------------------
+
+    def _build_messages_from_input(self, input_items: List[Dict]) -> List[Dict]:
+        """
+        Transform IAToolkit 'input' items into ChatCompletion 'messages'.
+
+        We handle:
+        - Standard messages with 'role' and 'content'.
+        - function_call_output items by converting them into user messages
+          containing the tool result, so the model can use them to answer.
+        """
+        messages: List[Dict[str, Any]] = []
+
+        for item in input_items:
+            # Tool call outputs are mapped to user messages carrying the tool result.
+            if item.get("type") == "function_call_output":
+                output = item.get("output", "")
+                if not output:
+                    logging.warning(
+                        "[DeepseekAdapter] function_call_output item without 'output': %s",
+                        item
+                    )
+                    continue
+
+                messages.append(
+                    {
+                        "role": "user",
+                        "content": f"Tool result:\n{output}",
+                    }
+                )
+                continue
+
+            role = item.get("role")
+            content = item.get("content")
+
+            # Skip tool-role messages completely for DeepSeek
+            if role == "tool":
+                logging.warning(f"[DeepseekAdapter] Skipping tool-role message: {item}")
+                continue
+
+            if not role:
+                logging.warning(f"[DeepseekAdapter] Skipping message without role: {item}")
+                continue
+
+            messages.append({"role": role, "content": content})
+
+        return messages
+
+    def _build_tools_payload(self, tools: List[Dict]) -> Optional[List[Dict]]:
+        """
+        Transform IAToolkit tool definitions into the DeepSeek/OpenAI chat tools format.
+
+        Expected internal tool format:
+            {
+                "type": "function",
+                "name": ...,
+                "description": ...,
+                "parameters": {...},
+                "strict": True/False
+            }
+        Or already in OpenAI tools format with a "function" key.
+        """
+        if not tools:
+            return None
+
+        tools_payload: List[Dict[str, Any]] = []
+
+        for tool in tools:
+            # If it's already in OpenAI 'function' format, reuse it
+            if "function" in tool:
+                func_def = tool["function"]
+            else:
+                # Build the function definition from the flattened structure
+                func_def = {
+                    "name": tool.get("name"),
+                    "description": tool.get("description", ""),
+                    "parameters": tool.get("parameters", {}) or {},
+                }
+
+            # Ensure parameters is a dict
+            if "parameters" in func_def and not isinstance(func_def["parameters"], dict):
+                logging.warning(
+                    "Tool parameters must be a dict; got %s",
+                    type(func_def["parameters"])
+                )
+                func_def["parameters"] = {}
+
+            ds_tool: Dict[str, Any] = {
+                "type": tool.get("type", "function"),
+                "function": func_def,
+            }
+
+            if tool.get("strict") is True:
+                ds_tool["strict"] = True
+
+            tools_payload.append(ds_tool)
+
+        return tools_payload or None
+
+    # ------------------------------------------------------------------
+    # Mapping DeepSeek response -> LLMResponse
+    # ------------------------------------------------------------------
+
+    def _map_deepseek_chat_response(self, response: Any) -> LLMResponse:
+        """
+        Map a DeepSeek Chat Completion response to our common LLMResponse.
+        Handles both plain assistant messages and tool_calls.
+        """
+        # We only look at the first choice
+        if not response.choices:
+            raise IAToolkitException(
+                IAToolkitException.ErrorType.LLM_ERROR,
+                "DeepSeek response has no choices."
+            )
+
+        choice = response.choices[0]
+        message = choice.message
+
+        # Usage mapping
+        usage = Usage(
+            input_tokens=getattr(getattr(response, "usage", None), "prompt_tokens", 0) or 0,
+            output_tokens=getattr(getattr(response, "usage", None), "completion_tokens", 0) or 0,
+            total_tokens=getattr(getattr(response, "usage", None), "total_tokens", 0) or 0,
+        )
+
+        # Capture reasoning content (specific to deepseek-reasoner)
+        reasoning_content = getattr(message, "reasoning_content", "") or ""
+
+        # If the model produced tool calls, this list is filled
+        tool_calls_out: List[ToolCall] = []
+
+        tool_calls = getattr(message, "tool_calls", None) or []
+        if not tool_calls:
+            # No tool calls: standard assistant message
+            output_text = getattr(message, "content", "") or ""
+            status = "completed"
+
+        else:
+            logging.debug(f"[DeepSeek] RAW tool_calls: {tool_calls}")
+
+            for tc in tool_calls:
+                func = getattr(tc, "function", None)
+                if not func:
+                    continue
+
+                name = getattr(func, "name", "")
+                arguments = getattr(func, "arguments", "") or "{}"
+
+                # DeepSeek/OpenAI return arguments as a JSON string
+                logging.debug(
+                    f"[DeepSeek] ToolCall generated -> id={getattr(tc, 'id', '')} "
+                    f"name={name} arguments_raw={arguments}"
+                )
+                tool_calls_out.append(
+                    ToolCall(
+                        call_id=getattr(tc, "id", ""),
+                        type="function_call",
+                        name=name,
+                        arguments=arguments,
+                    )
+                )
+
+            status = "tool_calls"
+            output_text = ""  # caller will inspect tool_calls in .output
+
+        return LLMResponse(
+            id=getattr(response, "id", "deepseek-unknown"),
+            model=getattr(response, "model", "deepseek-unknown"),
+            status=status,
+            output_text=output_text,
+            output=tool_calls_out,
+            usage=usage,
+            reasoning_content=reasoning_content
+        )
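For context, a minimal usage sketch of the new adapter (not part of the diff): it assumes the standard `openai` Python client, and the API key and `get_weather` tool are placeholders. The constructor comment above confirms the adapter expects an OpenAI client pointed at DeepSeek's endpoint.

```python
from openai import OpenAI
from iatoolkit.infra.llm_providers.deepseek_adapter import DeepseekAdapter

# OpenAI-compatible client pointed at DeepSeek, as the constructor expects
client = OpenAI(api_key="sk-...", base_url="https://api.deepseek.com")
adapter = DeepseekAdapter(client)

# Flattened IAToolkit tool format; _build_tools_payload wraps it into the
# {"type": "function", "function": {...}} shape the chat API expects.
tools = [{
    "type": "function",
    "name": "get_weather",
    "description": "Return the current weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}]

response = adapter.create_response(
    "deepseek-chat",
    [{"role": "user", "content": "What is the weather in Madrid?"}],
    tools=tools,
)

if response.status == "tool_calls":
    for call in response.output:  # ToolCall items mapped by the adapter
        print(call.name, call.arguments)
else:
    print(response.output_text)
```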
iatoolkit/infra/llm_providers/gemini_adapter.py
@@ -13,13 +13,11 @@ import json
 import uuid
 
 class GeminiAdapter:
-    """Adapter for the Gemini API"""
 
     def __init__(self, gemini_client):
-        """Initialize with an already configured Gemini client"""
         self.client = gemini_client
 
-        #
+        # security configuration - allow content that might be blocked by default
         self.safety_settings = {
             HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
             HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
@@ -37,38 +35,36 @@ class GeminiAdapter:
         reasoning: Optional[Dict] = None,
         tool_choice: str = "auto",
     ) -> LLMResponse:
-        """Call the Gemini API and map the result to the common structure"""
         try:
-            #
+            # init the model with the configured client
             gemini_model = self.client.GenerativeModel(
                 model_name=self._map_model_name(model),
                 safety_settings=self.safety_settings
             )
 
-            #
+            # prepare the content for gemini
             if context_history:
-                #
+                # concat the history with the current input
                 contents = self._prepare_gemini_contents(context_history + input)
             else:
-                # Use only the current input if there is no history
                 contents = self._prepare_gemini_contents(input)
 
-            #
+            # prepare tools
             gemini_tools = self._prepare_gemini_tools(tools) if tools else None
 
-            #
+            # config generation
             generation_config = self._prepare_generation_config(text, tool_choice)
 
-            #
+            # call gemini
             if gemini_tools:
-                #
+                # with tools
                 response = gemini_model.generate_content(
                     contents,
                     tools=gemini_tools,
                     generation_config=generation_config
                 )
             else:
-                #
+                # without tools
                 response = gemini_model.generate_content(
                     contents,
                     generation_config=generation_config
@@ -102,9 +98,7 @@ class GeminiAdapter:
 
         raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
 
-    # ... rest of the methods keep the same ...
     def _map_model_name(self, model: str) -> str:
-        """Map the model name to Gemini format"""
         model_mapping = {
             "gemini-pro": "gemini-2.5-pro",
             "gemini": "gemini-2.5-pro",
@@ -115,7 +109,7 @@ class GeminiAdapter:
         return model_mapping.get(model.lower(), model)
 
     def _prepare_gemini_contents(self, input: List[Dict]) -> List[Dict]:
-
+        # convert input messages to Gemini format
        gemini_contents = []
 
         for message in input:
@@ -143,7 +137,7 @@ class GeminiAdapter:
         return gemini_contents
 
     def _prepare_gemini_tools(self, tools: List[Dict]) -> List[Dict]:
-
+        # convert tools to Gemini format
         if not tools:
             return None
 
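A minimal construction sketch (an assumption, not part of the diff): since the adapter calls `self.client.GenerativeModel(...)`, the injected client appears to be the configured `google.generativeai` module itself.

```python
import google.generativeai as genai
from iatoolkit.infra.llm_providers.gemini_adapter import GeminiAdapter

genai.configure(api_key="...")  # placeholder key
adapter = GeminiAdapter(genai)  # adapter will call genai.GenerativeModel(...)

# Alias handling from _map_model_name: known aliases are upgraded, and
# unknown names fall through via model_mapping.get(model.lower(), model).
print(adapter._map_model_name("gemini-pro"))  # -> "gemini-2.5-pro"
```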
iatoolkit/infra/llm_providers/openai_adapter.py
@@ -7,7 +7,8 @@ import logging
 from typing import Dict, List, Optional
 from iatoolkit.infra.llm_response import LLMResponse, ToolCall, Usage
 from iatoolkit.common.exceptions import IAToolkitException
-
+import html
+from typing import List
 
 class OpenAIAdapter:
     """Adapter for the OpenAI API"""
@@ -53,10 +54,6 @@ class OpenAIAdapter:
             error_message = f"Error calling OpenAI API: {str(e)}"
             logging.error(error_message)
 
-            # In case of a context-length error
-            if "context_length_exceeded" in str(e):
-                error_message = 'Your query exceeds the context limit. Reset the context with the button in the top bar.'
-
             raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
 
     def _map_openai_response(self, openai_response) -> LLMResponse:
@@ -80,11 +77,48 @@ class OpenAIAdapter:
             total_tokens=openai_response.usage.total_tokens if openai_response.usage else 0
         )
 
+        # Reasoning content extracted from Responses output items (type="reasoning")
+        reasoning_list = self._extract_reasoning_content(openai_response)
+        reasoning_str = "\n".join(reasoning_list)
+
         return LLMResponse(
             id=openai_response.id,
             model=openai_response.model,
             status=openai_response.status,
             output_text=getattr(openai_response, 'output_text', ''),
             output=tool_calls,
-            usage=usage
-        )
+            usage=usage,
+            reasoning_content=reasoning_str
+        )
+
+    def _extract_reasoning_content(self, openai_response) -> List[str]:
+        """
+        Extract reasoning summaries (preferred) or reasoning content fragments from Responses API output.
+
+        Format required by the caller:
+            1. reason is ...
+            2. reason is ...
+        """
+        reasons: List[str] = []
+
+        output_items = getattr(openai_response, "output", None) or []
+        for item in output_items:
+            if getattr(item, "type", None) != "reasoning":
+                continue
+
+            # 1) Preferred: reasoning summaries (requires reasoning={"summary": "auto"} or similar)
+            summary = getattr(item, "summary", None) or []
+            for s in summary:
+                text = getattr(s, "text", None)
+                if text:
+                    reasons.append(str(text).strip())
+
+            # 2) Fallback: some responses may carry reasoning content in "content"
+            #    (e.g., content parts like {"type": "reasoning_text", "text": "..."}).
+            content = getattr(item, "content", None) or []
+            for c in content:
+                text = getattr(c, "text", None)
+                if text:
+                    reasons.append(str(text).strip())
+
+        return reasons
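The new `_extract_reasoning_content` helper prefers reasoning summaries, which the Responses API only emits when they are requested. A minimal sketch of such a request (the model name is a placeholder; any reasoning-capable model applies):

```python
from openai import OpenAI

client = OpenAI()
response = client.responses.create(
    model="o4-mini",  # placeholder: a reasoning-capable model
    input="Why is the sky blue?",
    reasoning={"summary": "auto"},  # request summaries, per the comment above
)

# Output items with type == "reasoning" then carry summary entries,
# which _extract_reasoning_content joins into reasoning_content.
for item in response.output:
    if item.type == "reasoning":
        for s in item.summary:
            print(s.text)
```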