iatoolkit 0.71.4__py3-none-any.whl → 0.91.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. iatoolkit/__init__.py +15 -5
  2. iatoolkit/base_company.py +4 -58
  3. iatoolkit/cli_commands.py +6 -7
  4. iatoolkit/common/exceptions.py +1 -0
  5. iatoolkit/common/routes.py +12 -28
  6. iatoolkit/common/util.py +7 -1
  7. iatoolkit/company_registry.py +50 -14
  8. iatoolkit/{iatoolkit.py → core.py} +54 -55
  9. iatoolkit/infra/{mail_app.py → brevo_mail_app.py} +15 -37
  10. iatoolkit/infra/llm_client.py +9 -5
  11. iatoolkit/locales/en.yaml +10 -2
  12. iatoolkit/locales/es.yaml +171 -162
  13. iatoolkit/repositories/database_manager.py +59 -14
  14. iatoolkit/repositories/llm_query_repo.py +34 -22
  15. iatoolkit/repositories/models.py +16 -18
  16. iatoolkit/repositories/profile_repo.py +5 -10
  17. iatoolkit/repositories/vs_repo.py +9 -4
  18. iatoolkit/services/auth_service.py +1 -1
  19. iatoolkit/services/branding_service.py +1 -1
  20. iatoolkit/services/company_context_service.py +19 -11
  21. iatoolkit/services/configuration_service.py +219 -46
  22. iatoolkit/services/dispatcher_service.py +31 -225
  23. iatoolkit/services/document_service.py +10 -1
  24. iatoolkit/services/embedding_service.py +9 -6
  25. iatoolkit/services/excel_service.py +50 -2
  26. iatoolkit/services/history_manager_service.py +189 -0
  27. iatoolkit/services/jwt_service.py +1 -1
  28. iatoolkit/services/language_service.py +8 -2
  29. iatoolkit/services/license_service.py +82 -0
  30. iatoolkit/services/mail_service.py +171 -25
  31. iatoolkit/services/profile_service.py +37 -32
  32. iatoolkit/services/{prompt_manager_service.py → prompt_service.py} +110 -1
  33. iatoolkit/services/query_service.py +192 -191
  34. iatoolkit/services/sql_service.py +63 -12
  35. iatoolkit/services/tool_service.py +231 -0
  36. iatoolkit/services/user_feedback_service.py +18 -6
  37. iatoolkit/services/user_session_context_service.py +18 -0
  38. iatoolkit/static/images/iatoolkit_core.png +0 -0
  39. iatoolkit/static/images/iatoolkit_logo.png +0 -0
  40. iatoolkit/static/js/chat_feedback_button.js +1 -1
  41. iatoolkit/static/js/chat_help_content.js +4 -4
  42. iatoolkit/static/js/chat_main.js +17 -5
  43. iatoolkit/static/js/chat_onboarding_button.js +1 -1
  44. iatoolkit/static/styles/chat_iatoolkit.css +1 -1
  45. iatoolkit/static/styles/chat_public.css +28 -0
  46. iatoolkit/static/styles/documents.css +598 -0
  47. iatoolkit/static/styles/landing_page.css +223 -7
  48. iatoolkit/system_prompts/__init__.py +0 -0
  49. iatoolkit/system_prompts/query_main.prompt +2 -1
  50. iatoolkit/system_prompts/sql_rules.prompt +47 -12
  51. iatoolkit/templates/_company_header.html +30 -5
  52. iatoolkit/templates/_login_widget.html +3 -3
  53. iatoolkit/templates/chat.html +1 -1
  54. iatoolkit/templates/forgot_password.html +3 -2
  55. iatoolkit/templates/onboarding_shell.html +1 -1
  56. iatoolkit/templates/signup.html +3 -0
  57. iatoolkit/views/base_login_view.py +1 -1
  58. iatoolkit/views/change_password_view.py +1 -1
  59. iatoolkit/views/forgot_password_view.py +9 -4
  60. iatoolkit/views/history_api_view.py +3 -3
  61. iatoolkit/views/home_view.py +4 -2
  62. iatoolkit/views/init_context_api_view.py +1 -1
  63. iatoolkit/views/llmquery_api_view.py +4 -3
  64. iatoolkit/views/{file_store_api_view.py → load_document_api_view.py} +1 -1
  65. iatoolkit/views/login_view.py +17 -5
  66. iatoolkit/views/logout_api_view.py +10 -2
  67. iatoolkit/views/prompt_api_view.py +1 -1
  68. iatoolkit/views/root_redirect_view.py +22 -0
  69. iatoolkit/views/signup_view.py +12 -4
  70. iatoolkit/views/static_page_view.py +27 -0
  71. iatoolkit/views/verify_user_view.py +1 -1
  72. iatoolkit-0.91.1.dist-info/METADATA +268 -0
  73. iatoolkit-0.91.1.dist-info/RECORD +125 -0
  74. iatoolkit-0.91.1.dist-info/licenses/LICENSE_COMMUNITY.md +15 -0
  75. iatoolkit/services/history_service.py +0 -37
  76. iatoolkit/templates/about.html +0 -13
  77. iatoolkit/templates/index.html +0 -145
  78. iatoolkit/templates/login_simulation.html +0 -45
  79. iatoolkit/views/external_login_view.py +0 -73
  80. iatoolkit/views/index_view.py +0 -14
  81. iatoolkit/views/login_simulation_view.py +0 -93
  82. iatoolkit-0.71.4.dist-info/METADATA +0 -276
  83. iatoolkit-0.71.4.dist-info/RECORD +0 -122
  84. {iatoolkit-0.71.4.dist-info → iatoolkit-0.91.1.dist-info}/WHEEL +0 -0
  85. {iatoolkit-0.71.4.dist-info → iatoolkit-0.91.1.dist-info}/licenses/LICENSE +0 -0
  86. {iatoolkit-0.71.4.dist-info → iatoolkit-0.91.1.dist-info}/top_level.txt +0 -0
iatoolkit/services/query_service.py
@@ -5,19 +5,18 @@
 
 from iatoolkit.infra.llm_client import llmClient
 from iatoolkit.services.profile_service import ProfileService
-from iatoolkit.repositories.document_repo import DocumentRepo
 from iatoolkit.repositories.profile_repo import ProfileRepo
+from iatoolkit.services.tool_service import ToolService
 from iatoolkit.services.document_service import DocumentService
 from iatoolkit.services.company_context_service import CompanyContextService
 from iatoolkit.services.i18n_service import I18nService
 from iatoolkit.services.configuration_service import ConfigurationService
-from iatoolkit.repositories.llm_query_repo import LLMQueryRepo
 from iatoolkit.repositories.models import Task
 from iatoolkit.services.dispatcher_service import Dispatcher
-from iatoolkit.services.prompt_manager_service import PromptService
+from iatoolkit.services.prompt_service import PromptService
 from iatoolkit.services.user_session_context_service import UserSessionContextService
+from iatoolkit.services.history_manager_service import HistoryManagerService
 from iatoolkit.common.util import Utility
-from iatoolkit.common.exceptions import IAToolkitException
 from injector import inject
 import base64
 import logging
@@ -25,34 +24,40 @@ from typing import Optional
 import json
 import time
 import hashlib
-import os
+from dataclasses import dataclass
 
 
-GEMINI_MAX_TOKENS_CONTEXT_HISTORY = 200000
+@dataclass
+class HistoryHandle:
+    """Encapsulates the state needed to manage history for a single turn."""
+    company_short_name: str
+    user_identifier: str
+    type: str
+    request_params: dict = None
+
 
 class QueryService:
     @inject
     def __init__(self,
+                 dispatcher: Dispatcher,
+                 tool_service: ToolService,
                  llm_client: llmClient,
                  profile_service: ProfileService,
                  company_context_service: CompanyContextService,
                  document_service: DocumentService,
-                 document_repo: DocumentRepo,
-                 llmquery_repo: LLMQueryRepo,
                  profile_repo: ProfileRepo,
                  prompt_service: PromptService,
                  i18n_service: I18nService,
-                 util: Utility,
-                 dispatcher: Dispatcher,
                  session_context: UserSessionContextService,
-                 configuration_service: ConfigurationService
+                 configuration_service: ConfigurationService,
+                 history_manager: HistoryManagerService,
+                 util: Utility,
                  ):
         self.profile_service = profile_service
         self.company_context_service = company_context_service
         self.document_service = document_service
-        self.document_repo = document_repo
-        self.llmquery_repo = llmquery_repo
         self.profile_repo = profile_repo
+        self.tool_service = tool_service
        self.prompt_service = prompt_service
         self.i18n_service = i18n_service
         self.util = util
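The `HistoryHandle` dataclass is the pivot of this refactor: one instance per turn, carrying who is asking and which history strategy applies. A minimal sketch of the intended lifecycle; the `type` value and the `request_params` keys shown here are inferred from later hunks in this diff, not guaranteed by this one:

```python
from dataclasses import dataclass

@dataclass
class HistoryHandle:
    company_short_name: str
    user_identifier: str
    type: str                    # server-side vs. client-side history marker
    request_params: dict = None  # filled in later by the history manager

# Sketch: the manager populates request_params with either a previous
# response id (server-side history) or a message list (client-side).
handle = HistoryHandle(company_short_name="acme",
                       user_identifier="user-42",
                       type="server_side")
handle.request_params = {"previous_response_id": "resp_123"}
```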
@@ -60,35 +65,106 @@ class QueryService:
         self.session_context = session_context
         self.configuration_service = configuration_service
         self.llm_client = llm_client
+        self.history_manager = history_manager
 
-        # get the model from the environment variable
-        self.default_model = os.getenv("LLM_MODEL", "")
-        if not self.default_model:
-            raise IAToolkitException(IAToolkitException.ErrorType.API_KEY,
-                                     "missing ENV variable 'LLM_MODEL' configuration.")
 
-    def init_context(self, company_short_name: str,
-                     user_identifier: str,
-                     model: str = None) -> dict:
+    def _resolve_model(self, company_short_name: str, model: Optional[str]) -> str:
+        # Priority: 1. Explicit model -> 2. Company config
+        effective_model = model
+        if not effective_model:
+            llm_config = self.configuration_service.get_configuration(company_short_name, 'llm')
+            if llm_config and llm_config.get('model'):
+                effective_model = llm_config['model']
+        return effective_model
+
+    def _get_history_type(self, model: str) -> str:
+        return HistoryManagerService.TYPE_SERVER_SIDE if self.util.is_openai_model(
+            model) else HistoryManagerService.TYPE_CLIENT_SIDE
+
+
+    def _build_user_facing_prompt(self, company, user_identifier: str,
+                                  client_data: dict, files: list,
+                                  prompt_name: Optional[str], question: str) -> str:
+        # get the user profile data from the session context
+        user_profile = self.profile_service.get_profile_by_identifier(company.short_name, user_identifier)
+
+        # combine client_data with user_profile
+        final_client_data = (user_profile or {}).copy()
+        final_client_data.update(client_data)
+
+        # Load attached files into the context
+        files_context = self.load_files_for_context(files)
+
+        # Initialize prompt_content. It will be an empty string for direct questions.
+        main_prompt = ""
+        # We use a local variable for the question to avoid modifying the argument reference if it were mutable;
+        # although strings are immutable, this keeps the logic clean regarding what 'question' means in each context.
+        effective_question = question
+
+        if prompt_name:
+            question_dict = {'prompt': prompt_name, 'data': final_client_data}
+            effective_question = json.dumps(question_dict)
+            prompt_content = self.prompt_service.get_prompt_content(company, prompt_name)
+
+            # Render the user requested prompt
+            main_prompt = self.util.render_prompt_from_string(
+                template_string=prompt_content,
+                question=effective_question,
+                client_data=final_client_data,
+                user_identifier=user_identifier,
+                company=company,
+            )
 
-        # 1. Execute the forced rebuild sequence using the unified identifier.
-        self.session_context.clear_all_context(company_short_name, user_identifier)
-        logging.info(f"Context for {company_short_name}/{user_identifier} has been cleared.")
+        # This is the final user-facing prompt for this specific turn
+        user_turn_prompt = f"{main_prompt}\n{files_context}"
+        if not prompt_name:
+            user_turn_prompt += f"\n### La pregunta que debes responder es: {effective_question}"
+        else:
+            user_turn_prompt += f'\n### Contexto Adicional: El usuario ha aportado este contexto puede ayudar: {effective_question}'
 
-        # 2. LLM context is clean, now we can load it again
-        self.prepare_context(
-            company_short_name=company_short_name,
-            user_identifier=user_identifier
-        )
+        return user_turn_prompt, effective_question
 
-        # 3. communicate the new context to the LLM
-        response = self.set_context_for_llm(
-            company_short_name=company_short_name,
+    def _ensure_valid_history(self, company, user_identifier: str,
+                              effective_model: str, user_turn_prompt: str,
+                              ignore_history: bool) -> tuple[Optional[HistoryHandle], Optional[dict]]:
+        """
+        Manages the history strategy and rebuilds context if necessary.
+        Returns: (HistoryHandle, error_response)
+        """
+        history_type = self._get_history_type(effective_model)
+
+        # Initialize the handle with base context info
+        handle = HistoryHandle(
+            company_short_name=company.short_name,
             user_identifier=user_identifier,
-            model=model
+            type=history_type
         )
 
-        return response
+        # pass the handle to populate request_params
+        needs_rebuild = self.history_manager.populate_request_params(
+            handle, user_turn_prompt, ignore_history
+        )
+
+        if needs_rebuild:
+            logging.warning(f"No valid history for {company.short_name}/{user_identifier}. Rebuilding context...")
+
+            # try to rebuild the context
+            self.prepare_context(company_short_name=company.short_name, user_identifier=user_identifier)
+            self.set_context_for_llm(company_short_name=company.short_name, user_identifier=user_identifier,
+                                     model=effective_model)
+
+            # Retry populating params with the same handle
+            needs_rebuild = self.history_manager.populate_request_params(
+                handle, user_turn_prompt, ignore_history
+            )
+
+        if needs_rebuild:
+            error_key = 'errors.services.context_rebuild_failed'
+            error_message = self.i18n_service.t(error_key, company_short_name=company.short_name,
+                                                user_identifier=user_identifier)
+            return None, {'error': True, "error_message": error_message}
+
+        return handle, None
 
     def _build_context_and_profile(self, company_short_name: str, user_identifier: str) -> tuple:
         # this method read the user/company context from the database and renders the system prompt
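Note the behavioral change hiding in `_resolve_model`: the 0.71.4 fallback to the `LLM_MODEL` environment variable is gone, along with the startup exception when it was unset. A standalone sketch of the new two-step resolution (the function name and sample model strings here are illustrative, not from the package):

```python
# Sketch of the new resolution chain. With no explicit model and no
# company-level 'llm' config, resolution now yields None instead of
# falling back to os.getenv("LLM_MODEL") or raising at construction.
def resolve_model(explicit_model, llm_config):
    if explicit_model:
        return explicit_model
    if llm_config and llm_config.get('model'):
        return llm_config['model']
    return None

assert resolve_model('gpt-4.1', {'model': 'gemini-2.0-flash'}) == 'gpt-4.1'
assert resolve_model(None, {'model': 'gemini-2.0-flash'}) == 'gemini-2.0-flash'
assert resolve_model(None, None) is None
```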
@@ -106,7 +182,7 @@ class QueryService:
             question=None,
             client_data=user_profile,
             company=company,
-            service_list=self.dispatcher.get_company_services(company)
+            service_list=self.tool_service.get_tools_for_llm(company)
         )
 
         # get the company context: schemas, database models, .md files
@@ -117,6 +193,31 @@ class QueryService:
 
         return final_system_context, user_profile
 
+
+    def init_context(self, company_short_name: str,
+                     user_identifier: str,
+                     model: str = None) -> dict:
+
+        # 1. Execute the forced rebuild sequence using the unified identifier.
+        self.session_context.clear_all_context(company_short_name, user_identifier)
+        logging.info(f"Context for {company_short_name}/{user_identifier} has been cleared.")
+
+        # 2. LLM context is clean, now we can load it again
+        self.prepare_context(
+            company_short_name=company_short_name,
+            user_identifier=user_identifier
+        )
+
+        # 3. communicate the new context to the LLM
+        response = self.set_context_for_llm(
+            company_short_name=company_short_name,
+            user_identifier=user_identifier,
+            model=model
+        )
+
+        return response
+
+
     def prepare_context(self, company_short_name: str, user_identifier: str) -> dict:
         # prepare the context and decide if it needs to be rebuilt
         # save the generated context in the session context for later use
@@ -134,21 +235,22 @@
         # calculate the context version
         current_version = self._compute_context_version_from_string(final_system_context)
 
+        # get the current version from the session cache
         try:
             prev_version = self.session_context.get_context_version(company_short_name, user_identifier)
         except Exception:
             prev_version = None
 
-        rebuild_is_needed = not (prev_version and prev_version == current_version and
-                                 self._has_valid_cached_context(company_short_name, user_identifier))
-
-        if rebuild_is_needed:
-            # Save the prepared context and its version for `finalize_context_rebuild` to use.
-            self.session_context.save_prepared_context(company_short_name,
-                                                       user_identifier,
-                                                       final_system_context,
-                                                       current_version)
+        # Determine if we need to persist the prepared context again.
+        # If versions match, we assume the artifact is likely safe, but forcing a save
+        # on version mismatch ensures data consistency.
+        rebuild_is_needed = (prev_version != current_version)
 
+        # Save the prepared context and its version for `set_context_for_llm` to use.
+        self.session_context.save_prepared_context(company_short_name,
+                                                   user_identifier,
+                                                   final_system_context,
+                                                   current_version)
         return {'rebuild_needed': rebuild_is_needed}
 
     def set_context_for_llm(self,
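`prepare_context` now flags a rebuild purely on a version mismatch and always re-saves the prepared context. `_compute_context_version_from_string` itself is not shown in this diff; a plausible stand-in, assuming it digests the rendered system context (the module imports `hashlib`, and the real method's `return "unknown"` fallback is visible in a later hunk):

```python
import hashlib

def compute_context_version(final_system_context: str) -> str:
    # Hypothetical stand-in for _compute_context_version_from_string:
    # a stable digest, so any change to schemas, prompts or profile
    # data produces a new version and sets rebuild_needed=True.
    try:
        return hashlib.sha256(final_system_context.encode('utf-8')).hexdigest()
    except Exception:
        return "unknown"

assert compute_context_version("ctx") == compute_context_version("ctx")
assert compute_context_version("ctx") != compute_context_version("ctx2")
```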
@@ -163,14 +265,7 @@ class QueryService:
             return
 
         # --- Model Resolution ---
-        # Priority: 1. Explicit model -> 2. Company config -> 3. Global default
-        effective_model = model
-        if not effective_model:
-            llm_config = self.configuration_service.get_configuration(company_short_name, 'llm')
-            if llm_config and llm_config.get('model'):
-                effective_model = llm_config['model']
-
-        effective_model = effective_model or self.default_model
+        effective_model = self._resolve_model(company_short_name, model)
 
         # blocking logic to avoid multiple requests for the same user/company at the same time
         lock_key = f"lock:context:{company_short_name}/{user_identifier}"
@@ -181,37 +276,29 @@
 
         try:
             start_time = time.time()
-            company = self.profile_repo.get_company_by_short_name(company_short_name)
 
             # get the prepared context and version from the session cache
-            prepared_context, version_to_save = self.session_context.get_and_clear_prepared_context(company_short_name,
-                                                                                                    user_identifier)
+            prepared_context, version_to_save = self.session_context.get_and_clear_prepared_context(company_short_name, user_identifier)
             if not prepared_context:
                 return
 
             logging.info(f"sending context to LLM model {effective_model} for: {company_short_name}/{user_identifier}...")
 
-            # clean only the chat history and the last response ID for this user/company
-            self.session_context.clear_llm_history(company_short_name, user_identifier)
-
-            response_id = ''
-            if self.util.is_gemini_model(effective_model):
-                context_history = [{"role": "user", "content": prepared_context}]
-                self.session_context.save_context_history(company_short_name, user_identifier, context_history)
-            elif self.util.is_openai_model(effective_model):
-                # Here is the call to the LLM client for settling the company/user context
-                response_id = self.llm_client.set_company_context(
-                    company=company,
-                    company_base_context=prepared_context,
-                    model=effective_model
-                )
-                self.session_context.save_last_response_id(company_short_name, user_identifier, response_id)
+            # --- Use Strategy Pattern for History/Context Initialization ---
+            history_type = self._get_history_type(effective_model)
+            response_data = self.history_manager.initialize_context(
+                company_short_name, user_identifier, history_type, prepared_context, company, effective_model
+            )
 
             if version_to_save:
                 self.session_context.save_context_version(company_short_name, user_identifier, version_to_save)
 
             logging.info(
                 f"Context for: {company_short_name}/{user_identifier} settled in {int(time.time() - start_time)} sec.")
+
+            # Return data (e.g., response_id) if the manager generated any
+            return response_data
+
         except Exception as e:
             logging.exception(f"Error in finalize_context_rebuild for {company_short_name}: {e}")
             raise e
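The inline `is_gemini_model` / `is_openai_model` branching is gone; both paths now sit behind `HistoryManagerService`. A schematic sketch of that strategy split, under explicit assumptions: the constant names and the `initialize_context` signature come from this diff, but the constant values and method body below are guesses:

```python
class HistoryManagerServiceSketch:
    # Assumed shape of the new service, for illustration only.
    TYPE_SERVER_SIDE = "server_side"  # OpenAI-style: chain on a response_id
    TYPE_CLIENT_SIDE = "client_side"  # Gemini-style: replay a message list

    def initialize_context(self, company_short_name, user_identifier,
                           history_type, prepared_context, company, model):
        if history_type == self.TYPE_CLIENT_SIDE:
            # seed the session with [{"role": "user", "content": prepared_context}]
            return None
        # server-side: send the context once and keep the returned id
        return {"response_id": "<id returned by the LLM client>"}
```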
@@ -219,7 +306,6 @@
             # release the lock
             self.session_context.release_lock(lock_key)
 
-        return {'response_id': response_id }
 
     def llm_query(self,
                   company_short_name: str,
@@ -228,7 +314,7 @@
                   prompt_name: str = None,
                   question: str = '',
                   client_data: dict = {},
-                  response_id: str = '',
+                  ignore_history: bool = False,
                   files: list = [],
                   model: Optional[str] = None) -> dict:
         try:
@@ -242,86 +328,48 @@
                     "error_message": self.i18n_service.t('services.start_query')}
 
             # --- Model Resolution ---
-            # Priority: 1. Explicit model -> 2. Company config -> 3. Global default
-            effective_model = model
-            if not effective_model:
-                llm_config = self.configuration_service.get_configuration(company_short_name, 'llm')
-                if llm_config and llm_config.get('model'):
-                    effective_model = llm_config['model']
-
-            effective_model = effective_model or self.default_model
-
-            # get the previous response_id and context history
-            previous_response_id = None
-            context_history = self.session_context.get_context_history(company.short_name, user_identifier) or []
-
-            if self.util.is_openai_model(effective_model):
-                if response_id:
-                    # context is getting from this response_id
-                    previous_response_id = response_id
-                else:
-                    # use the full user history context
-                    previous_response_id = self.session_context.get_last_response_id(company.short_name, user_identifier)
-                    if not previous_response_id:
-                        return {'error': True,
-                                "error_message": self.i18n_service.t('errors.services.missing_response_id', company_short_name=company.short_name, user_identifier=user_identifier)
-                                }
-            elif self.util.is_gemini_model(effective_model):
-                # check the length of the context_history and remove old messages
-                self._trim_context_history(context_history)
-
-            # get the user profile data from the session context
-            user_profile = self.profile_service.get_profile_by_identifier(company.short_name, user_identifier)
-
-            # combine client_data with user_profile
-            final_client_data = (user_profile or {}).copy()
-            final_client_data.update(client_data)
-
-            # Load attached files into the context
-            files_context = self.load_files_for_context(files)
-
-            # Initialize prompt_content. It will be an empty string for direct questions.
-            main_prompt = ""
-            if prompt_name:
-                # For task-based queries, wrap data into a JSON string and get the specific prompt template
-                question_dict = {'prompt': prompt_name, 'data': final_client_data }
-                question = json.dumps(question_dict)
-                prompt_content = self.prompt_service.get_prompt_content(company, prompt_name)
-
-                # Render the main user prompt using the appropriate template (or an empty one)
-                main_prompt = self.util.render_prompt_from_string(
-                    template_string=prompt_content,
-                    question=question,
-                    client_data=final_client_data,
-                    user_identifier=user_identifier,
-                    company=company,
-                )
+            effective_model = self._resolve_model(company_short_name, model)
 
-            # This is the final user-facing prompt for this specific turn
-            user_turn_prompt = f"{main_prompt}\n{files_context}"
-            if not prompt_name:
-                user_turn_prompt += f"\n### La pregunta que debes responder es: {question}"
-            else:
-                user_turn_prompt += f'\n### Contexto Adicional: El usuario ha aportado este contexto puede ayudar: {question}'
+            # --- Build User-Facing Prompt ---
+            user_turn_prompt, effective_question = self._build_user_facing_prompt(
+                company=company,
+                user_identifier=user_identifier,
+                client_data=client_data,
+                files=files,
+                prompt_name=prompt_name,
+                question=question
+            )
 
-            # add to the history context
-            if self.util.is_gemini_model(effective_model):
-                context_history.append({"role": "user", "content": user_turn_prompt})
+            # --- History Management (Strategy Pattern) ---
+            history_handle, error_response = self._ensure_valid_history(
+                company=company,
+                user_identifier=user_identifier,
+                effective_model=effective_model,
+                user_turn_prompt=user_turn_prompt,
+                ignore_history=ignore_history
+            )
+            if error_response:
+                return error_response
 
-            # service list for the function calls
-            tools = self.dispatcher.get_company_services(company)
+            # get the tools available for this company
+            tools = self.tool_service.get_tools_for_llm(company)
 
             # openai structured output instructions
             output_schema = {}
 
+            # Safely extract parameters for invoke using the handle
+            # The handle is guaranteed to have request_params populated if no error returned
+            previous_response_id = history_handle.request_params.get('previous_response_id')
+            context_history = history_handle.request_params.get('context_history')
+
             # Now send the instructions to the llm
             response = self.llm_client.invoke(
                 company=company,
                 user_identifier=user_identifier,
                 model=effective_model,
                 previous_response_id=previous_response_id,
-                context_history=context_history if self.util.is_gemini_model(effective_model) else None,
-                question=question,
+                context_history=context_history,
+                question=effective_question,
                 context=user_turn_prompt,
                 tools=tools,
                 text=output_schema
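From a caller's perspective, the breaking change in `llm_query` is the removal of the `response_id` parameter in favor of `ignore_history`: conversation state is now owned entirely by the service. A hedged usage sketch, where `query_service` stands for an injected `QueryService` instance and the keyword values are placeholders:

```python
# 0.71.4 style (no longer supported): resume from an explicit id.
#   query_service.llm_query("acme", user_id, question="...", response_id="resp_123")

# 0.91.1 style: the service resolves history itself; callers can only
# opt out of accumulated history for a single turn.
response = query_service.llm_query(
    company_short_name="acme",
    user_identifier="user-42",
    question="Summarize my open tickets",
    ignore_history=False,  # True = answer this turn without prior history
)
if response.get('error'):
    print(response.get('error_message'))
```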
@@ -330,11 +378,10 @@
             if not response.get('valid_response'):
                 response['error'] = True
 
-            # save last_response_id for the history chain
-            if "response_id" in response:
-                self.session_context.save_last_response_id(company.short_name, user_identifier, response["response_id"])
-            if self.util.is_gemini_model(effective_model):
-                self.session_context.save_context_history(company.short_name, user_identifier, context_history)
+            # save history using the manager passing the handle
+            self.history_manager.update_history(
+                history_handle, user_turn_prompt, response
+            )
 
             return response
         except Exception as e:
@@ -348,23 +395,6 @@
         except Exception:
             return "unknown"
 
-    def _has_valid_cached_context(self, company_short_name: str, user_identifier: str) -> bool:
-        """
-        Checks whether a reusable context state exists in the session.
-        - OpenAI: last_response_id present.
-        - Gemini: context_history with at least 1 message.
-        """
-        try:
-            if self.util.is_openai_model(self.default_model):
-                prev_id = self.session_context.get_last_response_id(company_short_name, user_identifier)
-                return bool(prev_id)
-            if self.util.is_gemini_model(self.default_model):
-                history = self.session_context.get_context_history(company_short_name, user_identifier) or []
-                return len(history) >= 1
-            return False
-        except Exception as e:
-            logging.warning(f"error verifying context cache: {e}")
-            return False
 
 
     def load_files_for_context(self, files: list) -> str:
@@ -381,7 +411,7 @@
         """
         for document in files:
             # Support both 'file_id' and 'filename' for robustness
-            filename = document.get('file_id') or document.get('filename')
+            filename = document.get('file_id') or document.get('filename') or document.get('name')
             if not filename:
                 context += "\n<error>Documento adjunto sin nombre ignorado.</error>\n"
                 continue
@@ -410,32 +440,3 @@
 
         return context
 
-    def _trim_context_history(self, context_history: list):
-        """
-        Checks the size of the context history and removes the oldest messages
-        if it exceeds a threshold, always keeping the system message (index 0).
-        """
-        if not context_history or len(context_history) <= 1:
-            return  # nothing to trim
-
-        # calculate total tokens
-        try:
-            total_tokens = sum(self.llm_client.count_tokens(json.dumps(message)) for message in context_history)
-        except Exception as e:
-            logging.error(f"error counting tokens for history: {e}.")
-            return
-
-        # If the limit is exceeded, remove old messages (starting with the second one)
-        while total_tokens > GEMINI_MAX_TOKENS_CONTEXT_HISTORY and len(context_history) > 1:
-            try:
-                # Remove the oldest message after the system prompt
-                removed_message = context_history.pop(1)
-                removed_tokens = self.llm_client.count_tokens(json.dumps(removed_message))
-                total_tokens -= removed_tokens
-                logging.warning(
-                    f"history tokens ({total_tokens + removed_tokens} tokens) exceed the limit of: {GEMINI_MAX_TOKENS_CONTEXT_HISTORY}. "
-                    f"new context: {total_tokens} tokens."
-                )
-            except IndexError:
-                # Occurs if only the system message remains; the loop should stop.
-                break
iatoolkit/services/sql_service.py
@@ -8,6 +8,7 @@ from iatoolkit.common.util import Utility
 from iatoolkit.services.i18n_service import I18nService
 from iatoolkit.common.exceptions import IAToolkitException
 from sqlalchemy import text
+from sqlalchemy.exc import SQLAlchemyError
 from injector import inject, singleton
 import json
 import logging
@@ -30,7 +31,7 @@ class SqlService:
         # Cache for database connections
         self._db_connections: dict[str, DatabaseManager] = {}
 
-    def register_database(self, db_name: str, db_uri: str):
+    def register_database(self, db_uri: str, db_name: str, schema: str | None = None):
         """
         Creates and caches a DatabaseManager instance for a given database name and URI.
         If a database with the same name is already registered, it does nothing.
@@ -38,10 +39,10 @@
         if db_name in self._db_connections:
             return
 
-        logging.debug(f"Registering and creating connection for database: '{db_name}'")
+        logging.info(f"Registering and creating connection for database: '{db_name}' (schema: {schema})")
 
         # create the database connection and save it on the cache
-        db_manager = DatabaseManager(db_uri, register_pgvector=False)
+        db_manager = DatabaseManager(db_uri, schema=schema, register_pgvector=False)
         self._db_connections[db_name] = db_manager
 
     def get_database_manager(self, db_name: str) -> DatabaseManager:
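Upgraders should note that `register_database` swaps its first two parameters: it was `(db_name, db_uri)` and is now `(db_uri, db_name, schema=None)`, so existing positional callers would silently pass the URI as the name. A usage sketch with placeholder values, assuming `sql_service` is an injected `SqlService` instance:

```python
# 0.71.4: sql_service.register_database("sales", "postgresql://...")
# 0.91.1: the URI comes first and an optional schema can be pinned.
sql_service.register_database(
    db_uri="postgresql+psycopg2://user:pass@db-host:5432/sales",
    db_name="sales",
    schema="reporting",  # optional; forwarded to DatabaseManager
)
```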
@@ -57,23 +58,53 @@
                 f"Database '{db_name}' is not registered with the SqlService."
             )
 
-    def exec_sql(self, database: str, query: str) -> str:
+    def exec_sql(self, company_short_name: str,
+                 database: str,
+                 query: str,
+                 format: str = 'json',
+                 commit: bool = False):
         """
-        Executes a raw SQL statement against a registered database and returns the result as a JSON string.
+        Executes a raw SQL statement against a registered database.
+
+        Args:
+            company_short_name: The company identifier (for logging/context).
+            database: The logical name of the database to query.
+            query: The SQL statement to execute.
+            format: The output format ('json' or 'dict'). Only relevant for SELECT queries.
+            commit: Whether to commit the transaction immediately after execution.
+                    Use True for INSERT/UPDATE/DELETE statements.
+
+        Returns:
+            - A JSON string or list of dicts for SELECT queries.
+            - A dictionary {'rowcount': N} for non-returning statements (INSERT/UPDATE) if not using RETURNING.
         """
         try:
             # 1. Get the database manager from the cache
             db_manager = self.get_database_manager(database)
+            session = db_manager.get_session()
 
             # 2. Execute the SQL statement
-            result = db_manager.get_session().execute(text(query))
-            cols = result.keys()
-            rows_context = [dict(zip(cols, row)) for row in result.fetchall()]
+            result = session.execute(text(query))
+
+            # 3. Handle Commit
+            if commit:
+                session.commit()
+
+            # 4. Process Results
+            # Check if the query returns rows (e.g., SELECT or INSERT ... RETURNING)
+            if result.returns_rows:
+                cols = result.keys()
+                rows_context = [dict(zip(cols, row)) for row in result.fetchall()]
 
-            # seialize the result
-            sql_result_json = json.dumps(rows_context, default=self.util.serialize)
+                if format == 'dict':
+                    return rows_context
+
+                # serialize the result
+                return json.dumps(rows_context, default=self.util.serialize)
+
+            # For statements that don't return rows (standard UPDATE/DELETE)
+            return {'rowcount': result.rowcount}
 
-            return sql_result_json
         except IAToolkitException:
             # Re-raise exceptions from get_database_manager to preserve the specific error
             raise
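The reworked `exec_sql` keeps the read path but adds write support. A usage sketch under the new signature; `sql_service`, the company and database names, and the SQL itself are placeholders:

```python
# Read: JSON string by default; format='dict' returns Python dicts.
rows = sql_service.exec_sql("acme", "sales",
                            "SELECT id, total FROM orders LIMIT 5",
                            format='dict')

# Write: non-returning statements yield {'rowcount': N};
# commit=True persists the change in the same call.
result = sql_service.exec_sql("acme", "sales",
                              "UPDATE orders SET status = 'shipped' WHERE id = 7",
                              commit=True)
assert 'rowcount' in result
```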
@@ -89,4 +120,24 @@
 
         logging.error(f"Error executing SQL statement: {error_message}")
         raise IAToolkitException(IAToolkitException.ErrorType.DATABASE_ERROR,
-                                 error_message) from e
+                                 error_message) from e
+
+    def commit(self, database: str):
+        """
+        Commits the current transaction for a registered database.
+        Useful when multiple exec_sql calls are part of a single transaction.
+        """
+
+        # Get the database manager from the cache
+        db_manager = self.get_database_manager(database)
+        try:
+            db_manager.get_session().commit()
+        except SQLAlchemyError as db_error:
+            db_manager.get_session().rollback()
+            logging.error(f"Error de base de datos: {str(db_error)}")
+            raise db_error
+        except Exception as e:
+            logging.error(f"error while commiting sql: '{str(e)}'")
+            raise IAToolkitException(
+                IAToolkitException.ErrorType.DATABASE_ERROR, str(e)
+            )