iatoolkit 0.91.1__py3-none-any.whl → 1.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. iatoolkit/__init__.py +6 -4
  2. iatoolkit/base_company.py +0 -16
  3. iatoolkit/cli_commands.py +3 -14
  4. iatoolkit/common/exceptions.py +1 -0
  5. iatoolkit/common/interfaces/__init__.py +0 -0
  6. iatoolkit/common/interfaces/asset_storage.py +34 -0
  7. iatoolkit/common/interfaces/database_provider.py +38 -0
  8. iatoolkit/common/model_registry.py +159 -0
  9. iatoolkit/common/routes.py +42 -5
  10. iatoolkit/common/util.py +11 -12
  11. iatoolkit/company_registry.py +5 -0
  12. iatoolkit/core.py +51 -20
  13. iatoolkit/infra/llm_providers/__init__.py +0 -0
  14. iatoolkit/infra/llm_providers/deepseek_adapter.py +278 -0
  15. iatoolkit/infra/{gemini_adapter.py → llm_providers/gemini_adapter.py} +11 -17
  16. iatoolkit/infra/{openai_adapter.py → llm_providers/openai_adapter.py} +41 -7
  17. iatoolkit/infra/llm_proxy.py +235 -134
  18. iatoolkit/infra/llm_response.py +5 -0
  19. iatoolkit/locales/en.yaml +124 -2
  20. iatoolkit/locales/es.yaml +122 -0
  21. iatoolkit/repositories/database_manager.py +44 -19
  22. iatoolkit/repositories/document_repo.py +7 -0
  23. iatoolkit/repositories/filesystem_asset_repository.py +36 -0
  24. iatoolkit/repositories/llm_query_repo.py +2 -0
  25. iatoolkit/repositories/models.py +72 -79
  26. iatoolkit/repositories/profile_repo.py +59 -3
  27. iatoolkit/repositories/vs_repo.py +22 -24
  28. iatoolkit/services/company_context_service.py +88 -39
  29. iatoolkit/services/configuration_service.py +157 -68
  30. iatoolkit/services/dispatcher_service.py +21 -3
  31. iatoolkit/services/file_processor_service.py +0 -5
  32. iatoolkit/services/history_manager_service.py +43 -24
  33. iatoolkit/services/knowledge_base_service.py +412 -0
  34. iatoolkit/{infra/llm_client.py → services/llm_client_service.py} +38 -29
  35. iatoolkit/services/load_documents_service.py +18 -47
  36. iatoolkit/services/profile_service.py +32 -4
  37. iatoolkit/services/prompt_service.py +32 -30
  38. iatoolkit/services/query_service.py +51 -26
  39. iatoolkit/services/sql_service.py +105 -74
  40. iatoolkit/services/tool_service.py +26 -11
  41. iatoolkit/services/user_session_context_service.py +115 -63
  42. iatoolkit/static/js/chat_main.js +44 -4
  43. iatoolkit/static/js/chat_model_selector.js +227 -0
  44. iatoolkit/static/js/chat_onboarding_button.js +1 -1
  45. iatoolkit/static/js/chat_reload_button.js +4 -1
  46. iatoolkit/static/styles/chat_iatoolkit.css +58 -2
  47. iatoolkit/static/styles/llm_output.css +34 -1
  48. iatoolkit/system_prompts/query_main.prompt +26 -2
  49. iatoolkit/templates/base.html +13 -0
  50. iatoolkit/templates/chat.html +44 -2
  51. iatoolkit/templates/onboarding_shell.html +0 -1
  52. iatoolkit/views/base_login_view.py +7 -2
  53. iatoolkit/views/chat_view.py +76 -0
  54. iatoolkit/views/load_company_configuration_api_view.py +49 -0
  55. iatoolkit/views/load_document_api_view.py +14 -10
  56. iatoolkit/views/login_view.py +8 -3
  57. iatoolkit/views/rag_api_view.py +216 -0
  58. iatoolkit/views/users_api_view.py +33 -0
  59. {iatoolkit-0.91.1.dist-info → iatoolkit-1.4.2.dist-info}/METADATA +4 -4
  60. {iatoolkit-0.91.1.dist-info → iatoolkit-1.4.2.dist-info}/RECORD +64 -56
  61. iatoolkit/repositories/tasks_repo.py +0 -52
  62. iatoolkit/services/search_service.py +0 -55
  63. iatoolkit/services/tasks_service.py +0 -188
  64. iatoolkit/views/tasks_api_view.py +0 -72
  65. iatoolkit/views/tasks_review_api_view.py +0 -55
  66. {iatoolkit-0.91.1.dist-info → iatoolkit-1.4.2.dist-info}/WHEEL +0 -0
  67. {iatoolkit-0.91.1.dist-info → iatoolkit-1.4.2.dist-info}/licenses/LICENSE +0 -0
  68. {iatoolkit-0.91.1.dist-info → iatoolkit-1.4.2.dist-info}/licenses/LICENSE_COMMUNITY.md +0 -0
  69. {iatoolkit-0.91.1.dist-info → iatoolkit-1.4.2.dist-info}/top_level.txt +0 -0
iatoolkit/core.py CHANGED
@@ -9,20 +9,26 @@ from flask_injector import FlaskInjector
9
9
  from flask_bcrypt import Bcrypt
10
10
  from flask_cors import CORS
11
11
  from iatoolkit.common.exceptions import IAToolkitException
12
- from typing import Optional, Dict, Any
13
12
  from iatoolkit.repositories.database_manager import DatabaseManager
13
+ from iatoolkit.common.interfaces.asset_storage import AssetRepository
14
+ from iatoolkit.company_registry import get_registered_companies
14
15
  from werkzeug.middleware.proxy_fix import ProxyFix
15
16
  from injector import Binder, Injector, singleton
17
+ from typing import Optional, Dict, Any
16
18
  from urllib.parse import urlparse
17
19
  import redis
18
20
  import logging
19
21
  import os
20
22
 
21
23
  from iatoolkit import __version__ as IATOOLKIT_VERSION
24
+ from iatoolkit.services.configuration_service import ConfigurationService
22
25
 
23
26
  # global variable for the unique instance of IAToolkit
24
27
  _iatoolkit_instance: Optional['IAToolkit'] = None
25
28
 
29
+ def is_bound(injector: Injector, cls) -> bool:
30
+ return cls in injector.binder._bindings
31
+
26
32
  class IAToolkit:
27
33
  """
28
34
  IAToolkit main class
@@ -49,8 +55,8 @@ class IAToolkit:
49
55
  self.config = config or {}
50
56
  self.app = None
51
57
  self.db_manager = None
52
- self._injector = None
53
- self.version = IATOOLKIT_VERSION # default version
58
+ self._injector = Injector() # init empty injector
59
+ self.version = IATOOLKIT_VERSION
54
60
  self.license = "Community Edition"
55
61
 
56
62
  @classmethod
@@ -61,7 +67,7 @@ class IAToolkit:
61
67
  _iatoolkit_instance = cls()
62
68
  return _iatoolkit_instance
63
69
 
64
- def create_iatoolkit(self):
70
+ def create_iatoolkit(self, start: bool = True):
65
71
  """
66
72
  Creates, configures, and returns the Flask application instance.
67
73
  this is the main entry point for the application factory.
@@ -77,8 +83,8 @@ class IAToolkit:
77
83
  # Step 2: Set up the core components that DI depends on
78
84
  self._setup_database()
79
85
 
80
- # Step 3: Create the Injector and configure all dependencies in one place
81
- self._injector = Injector(self._configure_core_dependencies)
86
+ # Step 3: Configure dependencies using the existing injector
87
+ self._configure_core_dependencies(self._injector)
82
88
 
83
89
  # Step 4: Register routes using the fully configured injector
84
90
  self._register_routes()
@@ -98,6 +104,7 @@ class IAToolkit:
98
104
 
99
105
  # Step 8: Finalize setup within the application context
100
106
  self._setup_redis_sessions()
107
+
101
108
  self._setup_cors()
102
109
  self._setup_additional_services()
103
110
  self._setup_cli_commands()
@@ -107,11 +114,21 @@ class IAToolkit:
107
114
  # Step 9: define the download_dir
108
115
  self._setup_download_dir()
109
116
 
117
+ # register data sources
118
+ if start:
119
+ self.register_data_sources()
120
+
110
121
  logging.info(f"🎉 IAToolkit {self.license} version {self.version} correctly initialized.")
111
122
  self._initialized = True
112
123
 
113
124
  return self.app
114
125
 
126
+ def register_data_sources(self):
127
+ # load the company configurations
128
+ configuration_service = self._injector.get(ConfigurationService)
129
+ for company in get_registered_companies():
130
+ configuration_service.register_data_sources(company)
131
+
115
132
  def _get_config_value(self, key: str, default=None):
116
133
  # get a value from the config dict or the environment variable
117
134
  return self.config.get(key, os.getenv(key, default))
@@ -181,11 +198,11 @@ class IAToolkit:
181
198
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
182
199
 
183
200
  def _setup_database(self):
184
- database_uri = self._get_config_value('DATABASE_URI')
201
+ database_uri = self._get_config_value('DATABASE_URI') or self._get_config_value('DATABASE_URL')
185
202
  if not database_uri:
186
203
  raise IAToolkitException(
187
204
  IAToolkitException.ErrorType.CONFIG_ERROR,
188
- "DATABASE_URI is requires (config dict or env. variable)"
205
+ "DATABASE_URI is required (config dict or env. variable)"
189
206
  )
190
207
 
191
208
  self.db_manager = DatabaseManager(database_url=database_uri, schema='iatoolkit')
@@ -240,8 +257,9 @@ class IAToolkit:
240
257
  extra_origins = []
241
258
  all_company_instances = get_company_registry().get_all_company_instances()
242
259
  for company_name, company_instance in all_company_instances.items():
243
- cors_origin = company_instance.company.parameters.get('cors_origin', [])
244
- extra_origins += cors_origin
260
+ if company_instance.company:
261
+ cors_origin = company_instance.company.parameters.get('cors_origin', [])
262
+ extra_origins += cors_origin
245
263
 
246
264
  all_origins = default_origins + extra_origins
247
265
 
@@ -256,8 +274,11 @@ class IAToolkit:
256
274
 
257
275
  logging.info(f"✅ CORS configured for: {all_origins}")
258
276
 
259
- def _configure_core_dependencies(self, binder: Binder):
277
+ def _configure_core_dependencies(self, injector: Injector):
260
278
  """⚙️ Configures all system dependencies."""
279
+
280
+ # get the binder from injector
281
+ binder = injector.binder
261
282
  try:
262
283
  # Core dependencies
263
284
  binder.bind(Flask, to=self.app)
@@ -282,17 +303,19 @@ class IAToolkit:
282
303
  from iatoolkit.repositories.profile_repo import ProfileRepo
283
304
  from iatoolkit.repositories.llm_query_repo import LLMQueryRepo
284
305
  from iatoolkit.repositories.vs_repo import VSRepo
285
- from iatoolkit.repositories.tasks_repo import TaskRepo
306
+ from iatoolkit.repositories.filesystem_asset_repository import FileSystemAssetRepository
286
307
 
287
308
  binder.bind(DocumentRepo, to=DocumentRepo)
288
309
  binder.bind(ProfileRepo, to=ProfileRepo)
289
310
  binder.bind(LLMQueryRepo, to=LLMQueryRepo)
290
311
  binder.bind(VSRepo, to=VSRepo)
291
- binder.bind(TaskRepo, to=TaskRepo)
312
+
313
+ # this class can be set up before by iatoolkit enterprise
314
+ if not is_bound(self._injector, AssetRepository):
315
+ binder.bind(AssetRepository, to=FileSystemAssetRepository)
292
316
 
293
317
  def _bind_services(self, binder: Binder):
294
318
  from iatoolkit.services.query_service import QueryService
295
- from iatoolkit.services.tasks_service import TaskService
296
319
  from iatoolkit.services.benchmark_service import BenchmarkService
297
320
  from iatoolkit.services.document_service import DocumentService
298
321
  from iatoolkit.services.prompt_service import PromptService
@@ -309,9 +332,12 @@ class IAToolkit:
309
332
  from iatoolkit.services.embedding_service import EmbeddingService
310
333
  from iatoolkit.services.history_manager_service import HistoryManagerService
311
334
  from iatoolkit.services.tool_service import ToolService
335
+ from iatoolkit.services.llm_client_service import llmClient
336
+ from iatoolkit.services.auth_service import AuthService
337
+ from iatoolkit.services.sql_service import SqlService
338
+ from iatoolkit.services.knowledge_base_service import KnowledgeBaseService
312
339
 
313
340
  binder.bind(QueryService, to=QueryService)
314
- binder.bind(TaskService, to=TaskService)
315
341
  binder.bind(BenchmarkService, to=BenchmarkService)
316
342
  binder.bind(DocumentService, to=DocumentService)
317
343
  binder.bind(PromptService, to=PromptService)
@@ -328,21 +354,23 @@ class IAToolkit:
328
354
  binder.bind(EmbeddingService, to=EmbeddingService)
329
355
  binder.bind(HistoryManagerService, to=HistoryManagerService)
330
356
  binder.bind(ToolService, to=ToolService)
357
+ binder.bind(llmClient, to=llmClient)
358
+ binder.bind(AuthService, to=AuthService)
359
+ binder.bind(SqlService, to=SqlService)
360
+ binder.bind(KnowledgeBaseService, to=KnowledgeBaseService)
331
361
 
332
362
  def _bind_infrastructure(self, binder: Binder):
333
- from iatoolkit.infra.llm_client import llmClient
334
363
  from iatoolkit.infra.llm_proxy import LLMProxy
335
364
  from iatoolkit.infra.google_chat_app import GoogleChatApp
336
365
  from iatoolkit.infra.brevo_mail_app import BrevoMailApp
337
- from iatoolkit.services.auth_service import AuthService
338
366
  from iatoolkit.common.util import Utility
367
+ from iatoolkit.common.model_registry import ModelRegistry
339
368
 
340
369
  binder.bind(LLMProxy, to=LLMProxy)
341
- binder.bind(llmClient, to=llmClient)
342
370
  binder.bind(GoogleChatApp, to=GoogleChatApp)
343
371
  binder.bind(BrevoMailApp, to=BrevoMailApp)
344
- binder.bind(AuthService, to=AuthService)
345
372
  binder.bind(Utility, to=Utility)
373
+ binder.bind(ModelRegistry, to=ModelRegistry)
346
374
 
347
375
  def _setup_additional_services(self):
348
376
  Bcrypt(self.app)
@@ -406,11 +434,13 @@ class IAToolkit:
406
434
  'app_name': 'IAToolkit',
407
435
  'user_identifier': SessionManager.get('user_identifier'),
408
436
  'company_short_name': SessionManager.get('company_short_name'),
437
+ 'user_role': user_profile.get('user_role'),
409
438
  'user_is_local': user_profile.get('user_is_local'),
410
439
  'user_email': user_profile.get('user_email'),
411
440
  'iatoolkit_base_url': request.url_root,
412
441
  'flashed_messages': get_flashed_messages(with_categories=True),
413
- 't': translate_for_template
442
+ 't': translate_for_template,
443
+ 'google_analytics_id': self._get_config_value('GOOGLE_ANALYTICS_ID', ''),
414
444
  }
415
445
 
416
446
  def _get_default_static_folder(self) -> str:
@@ -474,6 +504,7 @@ class IAToolkit:
474
504
  logging.info(f"✅ download dir created in: {download_dir}")
475
505
 
476
506
 
507
+
477
508
  def current_iatoolkit() -> IAToolkit:
478
509
  return IAToolkit.get_instance()
479
510
 
File without changes
@@ -0,0 +1,278 @@
1
+ # deepseek_adapter.py
2
+ # Copyright (c) 2024 Fernando Libedinsky
3
+ # Product: IAToolkit
4
+ #
5
+ # IAToolkit is open source software.
6
+
7
+ import logging
8
+ from typing import Dict, List, Optional, Any
9
+
10
+ from iatoolkit.infra.llm_response import LLMResponse, ToolCall, Usage
11
+ from iatoolkit.common.exceptions import IAToolkitException
12
+ import json
13
+
14
+ class DeepseekAdapter:
15
+ """
16
+ Adapter for DeepSeek using the OpenAI-compatible Chat Completions API.
17
+ It translates IAToolkit's common request/response format into
18
+ DeepSeek chat.completions calls.
19
+ """
20
+
21
+ def __init__(self, deepseek_client):
22
+ # deepseek_client is an OpenAI client configured with base_url="https://api.deepseek.com"
23
+ self.client = deepseek_client
24
+
25
+ # ------------------------------------------------------------------
26
+ # Public entry point
27
+ # ------------------------------------------------------------------
28
+
29
+ def create_response(self, model: str, input: List[Dict], **kwargs) -> LLMResponse:
30
+ """
31
+ Entry point called by LLMProxy.
32
+
33
+ :param model: DeepSeek model name (e.g. "deepseek-chat").
34
+ :param input: Common IAToolkit input list. It may contain:
35
+ - normal messages: {"role": "...", "content": "..."}
36
+ - function outputs: {"type": "function_call_output",
37
+ "call_id": "...", "output": "..."}
38
+ :param kwargs: extra options (tools, tool_choice, context_history, etc.).
39
+ """
40
+ tools = kwargs.get("tools") or []
41
+ tool_choice = kwargs.get("tool_choice", "auto")
42
+ context_history = kwargs.get("context_history") or []
43
+
44
+ try:
45
+ # 1) Build messages from history (if any)
46
+ messages: List[Dict[str, Any]] = []
47
+ if context_history:
48
+ history_messages = self._build_messages_from_input(context_history)
49
+ messages.extend(history_messages)
50
+
51
+ # 2) Append current turn messages
52
+ current_messages = self._build_messages_from_input(input)
53
+ messages.extend(current_messages)
54
+
55
+ # Detect if this input already contains function_call_output items.
56
+ # That means we are in the "second phase" after executing tools.
57
+ has_function_outputs = any(
58
+ item.get("type") == "function_call_output" for item in input
59
+ )
60
+
61
+ # 3) Build the tools payload
62
+ tools_payload = self._build_tools_payload(tools)
63
+
64
+ # If we already have function_call_output messages and the caller did not force
65
+ # a specific tool_choice (e.g. "required" for SQL retry), we disable tools and
66
+ # tool_choice to avoid infinite tool-calling loops (especially with iat_sql_query).
67
+ if has_function_outputs and tool_choice == "auto":
68
+ logging.debug(
69
+ "[DeepseekAdapter] Detected function_call_output in input; "
70
+ "disabling tools and tool_choice to avoid tool loop."
71
+ )
72
+ tools_payload = None
73
+ tool_choice = None
74
+
75
+ logging.debug(f"[DeepseekAdapter] messages={messages}")
76
+ logging.debug(f"[DeepseekAdapter] tools={tools_payload}, tool_choice={tool_choice}")
77
+
78
+ # Build kwargs for API call, skipping empty parameters
79
+ call_kwargs: Dict[str, Any] = {
80
+ "model": model,
81
+ "messages": messages,
82
+ }
83
+ if tools_payload:
84
+ call_kwargs["tools"] = tools_payload
85
+ if tool_choice:
86
+ call_kwargs["tool_choice"] = tool_choice
87
+
88
+ logging.debug(f"[DeepseekAdapter] Calling DeepSeek chat.completions API...: {json.dumps(messages, indent=2)}")
89
+ response = self.client.chat.completions.create(**call_kwargs)
90
+
91
+ return self._map_deepseek_chat_response(response)
92
+
93
+ except IAToolkitException:
94
+ # Re-raise IAToolkit exceptions as is
95
+ raise
96
+ except Exception as ex:
97
+ logging.exception("Unexpected error calling DeepSeek")
98
+ raise IAToolkitException(
99
+ IAToolkitException.ErrorType.LLM_ERROR,
100
+ f"DeepSeek error: {ex}"
101
+ ) from ex
102
+
103
+ # ------------------------------------------------------------------
104
+ # Helpers to build the request
105
+ # ------------------------------------------------------------------
106
+
107
+ def _build_messages_from_input(self, input_items: List[Dict]) -> List[Dict]:
108
+ """
109
+ Transform IAToolkit 'input' items into ChatCompletion 'messages'.
110
+
111
+ We handle:
112
+ - Standard messages with 'role' and 'content'.
113
+ - function_call_output items by converting them into assistant messages
114
+ containing the tool result, so the model can use them to answer.
115
+ """
116
+ messages: List[Dict[str, Any]] = []
117
+
118
+ for item in input_items:
119
+ # Tool call outputs are mapped to assistant messages with the tool result.
120
+ if item.get("type") == "function_call_output":
121
+ output = item.get("output", "")
122
+ if not output:
123
+ logging.warning(
124
+ "[DeepseekAdapter] function_call_output item without 'output': %s",
125
+ item
126
+ )
127
+ continue
128
+
129
+ messages.append(
130
+ {
131
+ "role": "user",
132
+ "content": f"Tool result:\n{output}",
133
+ }
134
+ )
135
+ continue
136
+
137
+ role = item.get("role")
138
+ content = item.get("content")
139
+
140
+ # Skip tool-role messages completely for DeepSeek
141
+ if role == "tool":
142
+ logging.warning(f"[DeepseekAdapter] Skipping tool-role message: {item}")
143
+ continue
144
+
145
+ if not role:
146
+ logging.warning(f"[DeepseekAdapter] Skipping message without role: {item}")
147
+ continue
148
+
149
+ messages.append({"role": role, "content": content})
150
+
151
+ return messages
152
+
153
+ def _build_tools_payload(self, tools: List[Dict]) -> Optional[List[Dict]]:
154
+ """
155
+ Transform IAToolkit tool definitions into DeepSeek/OpenAI chat tools format.
156
+
157
+ Expected internal tool format:
158
+ {
159
+ "type": "function",
160
+ "name": ...,
161
+ "description": ...,
162
+ "parameters": {...},
163
+ "strict": True/False
164
+ }
165
+ Or already in OpenAI tools format with "function" key.
166
+ """
167
+ if not tools:
168
+ return None
169
+
170
+ tools_payload: List[Dict[str, Any]] = []
171
+
172
+ for tool in tools:
173
+ # If it's already in OpenAI 'function' format, reuse it
174
+ if "function" in tool:
175
+ func_def = tool["function"]
176
+ else:
177
+ # Build function definition from flattened structure
178
+ func_def = {
179
+ "name": tool.get("name"),
180
+ "description": tool.get("description", ""),
181
+ "parameters": tool.get("parameters", {}) or {},
182
+ }
183
+
184
+ # Ensure parameters is a dict
185
+ if "parameters" in func_def and not isinstance(func_def["parameters"], dict):
186
+ logging.warning(
187
+ "Tool parameters must be a dict; got %s",
188
+ type(func_def["parameters"])
189
+ )
190
+ func_def["parameters"] = {}
191
+
192
+ ds_tool: Dict[str, Any] = {
193
+ "type": tool.get("type", "function"),
194
+ "function": func_def,
195
+ }
196
+
197
+ if tool.get("strict") is True:
198
+ ds_tool["strict"] = True
199
+
200
+ tools_payload.append(ds_tool)
201
+
202
+ return tools_payload or None
203
+
204
+ # ------------------------------------------------------------------
205
+ # Mapping DeepSeek response -> LLMResponse
206
+ # ------------------------------------------------------------------
207
+
208
+ def _map_deepseek_chat_response(self, response: Any) -> LLMResponse:
209
+ """
210
+ Map DeepSeek Chat Completion response to our common LLMResponse.
211
+ Handles both plain assistant messages and tool_calls.
212
+ """
213
+ # We only look at the first choice
214
+ if not response.choices:
215
+ raise IAToolkitException(
216
+ IAToolkitException.ErrorType.LLM_ERROR,
217
+ "DeepSeek response has no choices."
218
+ )
219
+
220
+ choice = response.choices[0]
221
+ message = choice.message
222
+
223
+ # Usage mapping
224
+ usage = Usage(
225
+ input_tokens=getattr(getattr(response, "usage", None), "prompt_tokens", 0) or 0,
226
+ output_tokens=getattr(getattr(response, "usage", None), "completion_tokens", 0) or 0,
227
+ total_tokens=getattr(getattr(response, "usage", None), "total_tokens", 0) or 0,
228
+ )
229
+
230
+ # Capture reasoning content (specific to deepseek-reasoner)
231
+ reasoning_content = getattr(message, "reasoning_content", "") or ""
232
+
233
+ # If the model produced tool calls, fills this list
234
+ tool_calls_out: List[ToolCall] = []
235
+
236
+ tool_calls = getattr(message, "tool_calls", None) or []
237
+ if not tool_calls:
238
+ # No tool calls: standard assistant message
239
+ output_text = getattr(message, "content", "") or ""
240
+ status = "completed"
241
+
242
+ else:
243
+ logging.debug(f"[DeepSeek] RAW tool_calls: {tool_calls}")
244
+
245
+ for tc in tool_calls:
246
+ func = getattr(tc, "function", None)
247
+ if not func:
248
+ continue
249
+
250
+ name = getattr(func, "name", "")
251
+ arguments = getattr(func, "arguments", "") or "{}"
252
+
253
+ # DeepSeek/OpenAI return arguments as JSON string
254
+ logging.debug(
255
+ f"[DeepSeek] ToolCall generated -> id={getattr(tc, 'id', '')} "
256
+ f"name={name} arguments_raw={arguments}"
257
+ )
258
+ tool_calls_out.append(
259
+ ToolCall(
260
+ call_id=getattr(tc, "id", ""),
261
+ type="function_call",
262
+ name=name,
263
+ arguments=arguments,
264
+ )
265
+ )
266
+
267
+ status = "tool_calls"
268
+ output_text = "" # caller will inspect tool_calls in .output
269
+
270
+ return LLMResponse(
271
+ id=getattr(response, "id", "deepseek-unknown"),
272
+ model=getattr(response, "model", "deepseek-unknown"),
273
+ status=status,
274
+ output_text=output_text,
275
+ output=tool_calls_out,
276
+ usage=usage,
277
+ reasoning_content=reasoning_content
278
+ )
@@ -13,13 +13,11 @@ import json
13
13
  import uuid
14
14
 
15
15
  class GeminiAdapter:
16
- """Adaptador para la API de Gemini"""
17
16
 
18
17
  def __init__(self, gemini_client):
19
- """Inicializar con cliente Gemini ya configurado"""
20
18
  self.client = gemini_client
21
19
 
22
- # Configuración de seguridad - permitir contenido que podría ser bloqueado por defecto
20
+ # security configuration - allow content that might be blocked by default
23
21
  self.safety_settings = {
24
22
  HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
25
23
  HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
@@ -37,38 +35,36 @@ class GeminiAdapter:
37
35
  reasoning: Optional[Dict] = None,
38
36
  tool_choice: str = "auto",
39
37
  ) -> LLMResponse:
40
- """Llamada a la API de Gemini y mapeo a estructura común"""
41
38
  try:
42
- # Inicializar el modelo de Gemini usando el cliente configurado
39
+ # init the model with the configured client
43
40
  gemini_model = self.client.GenerativeModel(
44
41
  model_name=self._map_model_name(model),
45
42
  safety_settings=self.safety_settings
46
43
  )
47
44
 
48
- # Preparar el contenido para Gemini
45
+ # prepare the content for gemini
49
46
  if context_history:
50
- # Concatenar el historial de conversación con el input actual
47
+ # concat the history with the current input
51
48
  contents = self._prepare_gemini_contents(context_history + input)
52
49
  else:
53
- # Usar solo el input actual si no hay historial
54
50
  contents = self._prepare_gemini_contents(input)
55
51
 
56
- # Preparar herramientas si están disponibles
52
+ # prepare tools
57
53
  gemini_tools = self._prepare_gemini_tools(tools) if tools else None
58
54
 
59
- # Configurar generación
55
+ # config generation
60
56
  generation_config = self._prepare_generation_config(text, tool_choice)
61
57
 
62
- # Llamar a Gemini
58
+ # call gemini
63
59
  if gemini_tools:
64
- # Con herramientas
60
+ # with tools
65
61
  response = gemini_model.generate_content(
66
62
  contents,
67
63
  tools=gemini_tools,
68
64
  generation_config=generation_config
69
65
  )
70
66
  else:
71
- # Sin herramientas
67
+ # without tools
72
68
  response = gemini_model.generate_content(
73
69
  contents,
74
70
  generation_config=generation_config
@@ -102,9 +98,7 @@ class GeminiAdapter:
102
98
 
103
99
  raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
104
100
 
105
- # ... rest of the methods keep the same ...
106
101
  def _map_model_name(self, model: str) -> str:
107
- """Mapear nombre del modelo a formato de Gemini"""
108
102
  model_mapping = {
109
103
  "gemini-pro": "gemini-2.5-pro",
110
104
  "gemini": "gemini-2.5-pro",
@@ -115,7 +109,7 @@ class GeminiAdapter:
115
109
  return model_mapping.get(model.lower(), model)
116
110
 
117
111
  def _prepare_gemini_contents(self, input: List[Dict]) -> List[Dict]:
118
- """Convertir mensajes de formato OpenAI a formato Gemini"""
112
+ # convert input messages to Gemini format
119
113
  gemini_contents = []
120
114
 
121
115
  for message in input:
@@ -143,7 +137,7 @@ class GeminiAdapter:
143
137
  return gemini_contents
144
138
 
145
139
  def _prepare_gemini_tools(self, tools: List[Dict]) -> List[Dict]:
146
- """Convertir herramientas de formato OpenAI a formato Gemini"""
140
+ # convert tools to Gemini format
147
141
  if not tools:
148
142
  return None
149
143
 
@@ -7,7 +7,8 @@ import logging
7
7
  from typing import Dict, List, Optional
8
8
  from iatoolkit.infra.llm_response import LLMResponse, ToolCall, Usage
9
9
  from iatoolkit.common.exceptions import IAToolkitException
10
-
10
+ import html
11
+ from typing import List
11
12
 
12
13
  class OpenAIAdapter:
13
14
  """Adaptador para la API de OpenAI"""
@@ -53,10 +54,6 @@ class OpenAIAdapter:
53
54
  error_message = f"Error calling OpenAI API: {str(e)}"
54
55
  logging.error(error_message)
55
56
 
56
- # En caso de error de contexto
57
- if "context_length_exceeded" in str(e):
58
- error_message = 'Tu consulta supera el limite de contexto. Reinicia el contexto con el boton de la barra superior.'
59
-
60
57
  raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
61
58
 
62
59
  def _map_openai_response(self, openai_response) -> LLMResponse:
@@ -80,11 +77,48 @@ class OpenAIAdapter:
80
77
  total_tokens=openai_response.usage.total_tokens if openai_response.usage else 0
81
78
  )
82
79
 
80
+ # Reasoning content extracted from Responses output items (type="reasoning")
81
+ reasoning_list = self._extract_reasoning_content(openai_response)
82
+ reasoning_str = "\n".join(reasoning_list)
83
+
83
84
  return LLMResponse(
84
85
  id=openai_response.id,
85
86
  model=openai_response.model,
86
87
  status=openai_response.status,
87
88
  output_text=getattr(openai_response, 'output_text', ''),
88
89
  output=tool_calls,
89
- usage=usage
90
- )
90
+ usage=usage,
91
+ reasoning_content=reasoning_str
92
+ )
93
+
94
+ def _extract_reasoning_content(self, openai_response) -> List[str]:
95
+ """
96
+ Extract reasoning summaries (preferred) or reasoning content fragments from Responses API output.
97
+
98
+ Format required by caller:
99
+ 1. reason is ...
100
+ 2. reason is ...
101
+ """
102
+ reasons: List[str] = []
103
+
104
+ output_items = getattr(openai_response, "output", None) or []
105
+ for item in output_items:
106
+ if getattr(item, "type", None) != "reasoning":
107
+ continue
108
+
109
+ # 1) Preferred: reasoning summaries (requires reasoning={"summary":"auto"} or similar)
110
+ summary = getattr(item, "summary", None) or []
111
+ for s in summary:
112
+ text = getattr(s, "text", None)
113
+ if text:
114
+ reasons.append(str(text).strip())
115
+
116
+ # 2) Fallback: some responses may carry reasoning content in "content"
117
+ # (e.g., content parts like {"type":"reasoning_text","text":"..."}).
118
+ content = getattr(item, "content", None) or []
119
+ for c in content:
120
+ text = getattr(c, "text", None)
121
+ if text:
122
+ reasons.append(str(text).strip())
123
+
124
+ return reasons