iatoolkit 1.7.0__py3-none-any.whl → 1.15.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (41)
  1. iatoolkit/__init__.py +1 -1
  2. iatoolkit/common/routes.py +16 -3
  3. iatoolkit/common/util.py +8 -123
  4. iatoolkit/core.py +1 -0
  5. iatoolkit/infra/connectors/file_connector.py +10 -2
  6. iatoolkit/infra/connectors/google_drive_connector.py +3 -0
  7. iatoolkit/infra/connectors/local_file_connector.py +3 -0
  8. iatoolkit/infra/connectors/s3_connector.py +24 -1
  9. iatoolkit/infra/llm_providers/deepseek_adapter.py +17 -1
  10. iatoolkit/infra/llm_providers/gemini_adapter.py +117 -18
  11. iatoolkit/infra/llm_providers/openai_adapter.py +175 -18
  12. iatoolkit/infra/llm_response.py +13 -0
  13. iatoolkit/locales/en.yaml +82 -4
  14. iatoolkit/locales/es.yaml +79 -4
  15. iatoolkit/repositories/llm_query_repo.py +51 -18
  16. iatoolkit/repositories/models.py +16 -7
  17. iatoolkit/services/company_context_service.py +294 -133
  18. iatoolkit/services/configuration_service.py +140 -121
  19. iatoolkit/services/dispatcher_service.py +1 -4
  20. iatoolkit/services/knowledge_base_service.py +26 -4
  21. iatoolkit/services/llm_client_service.py +58 -2
  22. iatoolkit/services/prompt_service.py +251 -164
  23. iatoolkit/services/query_service.py +37 -18
  24. iatoolkit/services/storage_service.py +92 -0
  25. iatoolkit/static/js/chat_filepond.js +188 -63
  26. iatoolkit/static/js/chat_main.js +105 -52
  27. iatoolkit/static/styles/chat_iatoolkit.css +96 -0
  28. iatoolkit/system_prompts/query_main.prompt +24 -41
  29. iatoolkit/templates/chat.html +15 -6
  30. iatoolkit/views/base_login_view.py +1 -1
  31. iatoolkit/views/categories_api_view.py +111 -0
  32. iatoolkit/views/chat_view.py +1 -1
  33. iatoolkit/views/configuration_api_view.py +1 -1
  34. iatoolkit/views/login_view.py +1 -1
  35. iatoolkit/views/prompt_api_view.py +88 -7
  36. {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/METADATA +1 -1
  37. {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/RECORD +41 -39
  38. {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/WHEEL +0 -0
  39. {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/licenses/LICENSE +0 -0
  40. {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/licenses/LICENSE_COMMUNITY.md +0 -0
  41. {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/top_level.txt +0 -0
iatoolkit/services/prompt_service.py (+251 -164)

@@ -4,12 +4,14 @@
 # IAToolkit is open source software.
 
 from injector import inject
+from iatoolkit import current_iatoolkit
 from iatoolkit.common.interfaces.asset_storage import AssetRepository, AssetType
 from iatoolkit.repositories.llm_query_repo import LLMQueryRepo
 from iatoolkit.services.i18n_service import I18nService
 from iatoolkit.repositories.profile_repo import ProfileRepo
 from collections import defaultdict
-from iatoolkit.repositories.models import Prompt, PromptCategory, Company
+from iatoolkit.repositories.models import (Prompt, PromptCategory,
+                                           Company, PromptType)
 from iatoolkit.common.exceptions import IAToolkitException
 import importlib.resources
 import logging
@@ -17,9 +19,9 @@ import os
 
 # iatoolkit system prompts definitions
 _SYSTEM_PROMPTS = [
-    {'name': 'query_main', 'description': 'iatoolkit main prompt'},
-    {'name': 'format_styles', 'description': 'output format styles'},
-    {'name': 'sql_rules', 'description': 'instructions for SQL queries'}
+    {'name': 'query_main', 'description': 'iatoolkit main prompt', 'order': 1},
+    {'name': 'format_styles', 'description': 'output format styles', 'order': 2},
+    {'name': 'sql_rules', 'description': 'instructions for SQL queries', 'order': 3},
 ]
 
 class PromptService:
@@ -34,14 +36,197 @@ class PromptService:
         self.profile_repo = profile_repo
         self.i18n_service = i18n_service
 
-    def sync_company_prompts(self, company_short_name: str, prompts_config: list, categories_config: list):
+    def get_prompts(self, company_short_name: str, include_all: bool = False) -> dict:
+        try:
+            # validate company
+            company = self.profile_repo.get_company_by_short_name(company_short_name)
+            if not company:
+                return {"error": self.i18n_service.t('errors.company_not_found', company_short_name=company_short_name)}
+
+            # get all the company prompts
+            # If include_all is True, repo should return everything for the company
+            # Otherwise, it should return only active prompts
+            all_prompts = self.llm_query_repo.get_prompts(company, include_all=include_all)
+
+            # Deduplicate prompts by id
+            all_prompts = list({p.id: p for p in all_prompts}.values())
+
+            # group by category
+            prompts_by_category = defaultdict(list)
+            for prompt in all_prompts:
+                # Filter logic moved here or in repo.
+                # If include_all is False, we only want active prompts (and maybe only specific types)
+                if not include_all:
+
+                    # Standard user view: excludes system/agent hidden prompts if any?
+                    if prompt.prompt_type != PromptType.COMPANY.value:
+                        continue
+
+                # Grouping logic
+                cat_key = (0, "Uncategorized") # Default
+                if prompt.category:
+                    cat_key = (prompt.category.order, prompt.category.name)
+
+                prompts_by_category[cat_key].append(prompt)
+
+            # sort each category by order
+            for cat_key in prompts_by_category:
+                prompts_by_category[cat_key].sort(key=lambda p: p.order)
+
+            categorized_prompts = []
+
+            # sort categories by order
+            sorted_categories = sorted(prompts_by_category.items(), key=lambda item: item[0][0])
+
+            for (cat_order, cat_name), prompts in sorted_categories:
+                categorized_prompts.append({
+                    'category_name': cat_name,
+                    'category_order': cat_order,
+                    'prompts': [
+                        {
+                            'prompt': p.name,
+                            'description': p.description,
+                            'type': p.prompt_type,
+                            'active': p.active,
+                            'custom_fields': p.custom_fields,
+                            'order': p.order
+                        }
+                        for p in prompts
+                    ]
+                })
+
+            return {'message': categorized_prompts}
+
+        except Exception as e:
+            logging.error(f"error in get_prompts: {e}")
+            return {'error': str(e)}
+
+
+    def get_prompt_content(self, company: Company, prompt_name: str):
+        try:
+            # get the prompt from database
+            prompt = self.llm_query_repo.get_prompt_by_name(company, prompt_name)
+            if not prompt:
+                raise IAToolkitException(IAToolkitException.ErrorType.DOCUMENT_NOT_FOUND,
+                                         f"prompt not found '{prompt}' for company '{company.short_name}'")
+
+            try:
+                # read the prompt content from asset repository
+                user_prompt_content = self.asset_repo.read_text(
+                    company.short_name,
+                    AssetType.PROMPT,
+                    prompt.filename
+                )
+            except FileNotFoundError:
+                raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
+                                         f"prompt file '{prompt.filename}' does not exist for company '{company.short_name}'")
+            except Exception as e:
+                raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
+                                         f"error while reading prompt: '{prompt_name}': {e}")
+
+            return user_prompt_content
+
+        except IAToolkitException:
+            raise
+        except Exception as e:
+            logging.exception(
+                f"error loading prompt '{prompt_name}' content for '{company.short_name}': {e}")
+            raise IAToolkitException(IAToolkitException.ErrorType.PROMPT_ERROR,
+                                     f'error loading prompt "{prompt_name}" content for company {company.short_name}: {str(e)}')
+
+    def save_prompt(self, company_short_name: str, prompt_name: str, data: dict):
+        """
+        Create or Update a prompt.
+        1. Saves the Jinja content to the .prompt asset file.
+        2. Updates the Database.
+        """
+        company = self.profile_repo.get_company_by_short_name(company_short_name)
+        if not company:
+            raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
+                                     f"Company {company_short_name} not found")
+
+        # Validate category if present
+        category_id = None
+        if 'category' in data:
+            # simple lookup, assuming category names are unique per company
+            cat = self.llm_query_repo.get_category_by_name(company.id, data['category'])
+            if cat:
+                category_id = cat.id
+
+        # 1. save the phisical part of the prompt (content)
+        if 'content' in data:
+            filename = f"{prompt_name}.prompt"
+            filename = filename.lower().replace(' ', '_')
+            self.asset_repo.write_text(company_short_name, AssetType.PROMPT, filename, data['content'])
+
+        # 2. update the prompt in the database
+        new_prompt = Prompt(
+            company_id=company.id,
+            name=prompt_name,
+            description=data.get('description', ''),
+            order=data.get('order', 1),
+            category_id=category_id,
+            active=data.get('active', True),
+            prompt_type=data.get('prompt_type', 'company'),
+            filename=f"{prompt_name.lower().replace(' ', '_')}.prompt",
+            custom_fields=data.get('custom_fields', [])
+        )
+        self.llm_query_repo.create_or_update_prompt(new_prompt)
+
+    def delete_prompt(self, company_short_name: str, prompt_name: str):
+        """
+        Deletes a prompt:
+        1. Removes from DB.
+        2. Removes from YAML config.
+        3. (Optional) Deletes/Archives physical file.
+        """
+        company = self.profile_repo.get_company_by_short_name(company_short_name)
+        if not company:
+            raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME, f"Company not found")
+
+        prompt_db = self.llm_query_repo.get_prompt_by_name(company, prompt_name)
+        if not prompt_db:
+            raise IAToolkitException(IAToolkitException.ErrorType.DOCUMENT_NOT_FOUND, f"Prompt {prompt_name} not found")
+
+        # 1. Remove from DB
+        self.llm_query_repo.delete_prompt(prompt_db)
+
+    def get_system_prompt(self):
+        try:
+            system_prompt_content = []
+
+            # read all the system prompts from the database
+            system_prompts = self.llm_query_repo.get_system_prompts()
+
+            for prompt in system_prompts:
+                try:
+                    content = importlib.resources.read_text('iatoolkit.system_prompts', prompt.filename)
+                    system_prompt_content.append(content)
+                except FileNotFoundError:
+                    logging.warning(f"Prompt file does not exist in the package: {prompt.filename}")
+                except Exception as e:
+                    raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
+                                             f"error reading system prompt '{prompt.filename}': {e}")
+
+            # join the system prompts into a single string
+            return "\n".join(system_prompt_content)
+
+        except IAToolkitException:
+            raise
+        except Exception as e:
+            logging.exception(
+                f"Error al obtener el contenido del prompt de sistema: {e}")
+            raise IAToolkitException(IAToolkitException.ErrorType.PROMPT_ERROR,
+                                     f'error reading the system prompts": {str(e)}')
+
+    def sync_company_prompts(self, company_short_name: str, prompt_list: list, categories_config: list):
         """
         Synchronizes prompt categories and prompts from YAML config to Database.
         Strategies:
         - Categories: Create or Update existing based on name.
         - Prompts: Create or Update existing based on name. Soft-delete or Delete unused.
         """
-        if not prompts_config:
+        if not prompt_list:
             return
 
         company = self.profile_repo.get_company_by_short_name(company_short_name)
@@ -49,6 +234,13 @@
             raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
                                      f'Company {company_short_name} not found')
 
+        # Register system prompts
+        self._register_system_prompts(company)
+
+        # community edition has its own prompt management
+        if not current_iatoolkit().is_community:
+            return
+
         try:
             # 1. Sync Categories
             category_map = {}
@@ -66,7 +258,7 @@
             # 2. Sync Prompts
             defined_prompt_names = set()
 
-            for prompt_data in prompts_config:
+            for prompt_data in prompt_list:
                 category_name = prompt_data.get('category')
                 if not category_name or category_name not in category_map:
                     logging.warning(
@@ -86,7 +278,7 @@
                     order=prompt_data.get('order'),
                     category_id=category_obj.id,
                     active=prompt_data.get('active', True),
-                    is_system_prompt=False,
+                    prompt_type=prompt_data.get('prompt_type', PromptType.COMPANY.value).lower(),
                     filename=filename,
                     custom_fields=prompt_data.get('custom_fields', [])
                 )
@@ -106,30 +298,42 @@
             self.llm_query_repo.rollback()
             raise IAToolkitException(IAToolkitException.ErrorType.DATABASE_ERROR, str(e))
 
-    def register_system_prompts(self):
+    def _register_system_prompts(self, company: Company):
         """
         Synchronizes system prompts defined in Dispatcher/Code to Database.
         """
+
+        # if there are system prompts already registered, skip
+        # if self.llm_query_repo.get_system_prompts(): return
+
+        sys_category = PromptCategory(company_id=company.id, name="System", order=0)
+        self.llm_query_repo.create_or_update_prompt_category(sys_category)
+
        try:
            defined_names = set()

            for i, prompt_data in enumerate(_SYSTEM_PROMPTS):
                prompt_name = prompt_data['name']
                defined_names.add(prompt_name)
+               prompt_filename = f"{prompt_name}.prompt"

                new_prompt = Prompt(
-                   company_id=None, # System prompts have no company
+                   company_id=company.id,
                    name=prompt_name,
                    description=prompt_data['description'],
-                   order=i + 1,
-                   category_id=None,
+                   order=prompt_data['order'],
+                   category_id=sys_category.id,
                    active=True,
-                   is_system_prompt=True,
-                   filename=f"{prompt_name}.prompt",
+                   prompt_type=PromptType.SYSTEM.value,
+                   filename=prompt_filename,
                    custom_fields=[]
                )
                self.llm_query_repo.create_or_update_prompt(new_prompt)

+               # add prompt to company assets
+               prompt_content = importlib.resources.read_text('iatoolkit.system_prompts', prompt_filename)
+               self.asset_repo.write_text(company.short_name, AssetType.PROMPT, prompt_filename, prompt_content)
+
            # Cleanup old system prompts
            existing_sys_prompts = self.llm_query_repo.get_system_prompts()
            for p in existing_sys_prompts:
@@ -142,162 +346,45 @@
             self.llm_query_repo.rollback()
             raise IAToolkitException(IAToolkitException.ErrorType.DATABASE_ERROR, str(e))
 
-    def create_prompt(self,
-                      prompt_name: str,
-                      description: str,
-                      order: int,
-                      company: Company = None,
-                      category: PromptCategory = None,
-                      active: bool = True,
-                      is_system_prompt: bool = False,
-                      custom_fields: list = []
-                      ):
+    def sync_prompt_categories(self, company_short_name: str, categories_config: list):
         """
-        Direct creation method (used by sync or direct calls).
-        Validates file existence before creating DB entry.
+        Syncs only the prompt categories based on a simple list of names.
+        The order in the list determines the 'order' field in DB.
+        Removes categories not present in the list.
+        Finally, updates the YAML configuration.
         """
-        prompt_filename = prompt_name.lower() + '.prompt'
-        if is_system_prompt:
-            if not importlib.resources.files('iatoolkit.system_prompts').joinpath(prompt_filename).is_file():
-                raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
-                                         f'missing system prompt file: {prompt_filename}')
-        else:
-            if not self.asset_repo.exists(company.short_name, AssetType.PROMPT, prompt_filename):
-                raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
-                                         f'missing prompt file: {prompt_filename} in prompts/')
-
-        if custom_fields:
-            for f in custom_fields:
-                if ('data_key' not in f) or ('label' not in f):
-                    raise IAToolkitException(IAToolkitException.ErrorType.INVALID_PARAMETER,
-                                             f'The field "custom_fields" must contain the following keys: data_key y label')
-
-                # add default value for data_type
-                if 'type' not in f:
-                    f['type'] = 'text'
-
-        prompt = Prompt(
-            company_id=company.id if company else None,
-            name=prompt_name,
-            description=description,
-            order=order,
-            category_id=category.id if category and not is_system_prompt else None,
-            active=active,
-            filename=prompt_filename,
-            is_system_prompt=is_system_prompt,
-            custom_fields=custom_fields
-        )
-
-        try:
-            self.llm_query_repo.create_or_update_prompt(prompt)
-        except Exception as e:
-            raise IAToolkitException(IAToolkitException.ErrorType.DATABASE_ERROR,
-                                     f'error creating prompt "{prompt_name}": {str(e)}')
+        company = self.profile_repo.get_company_by_short_name(company_short_name)
+        if not company:
+            raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
+                                     f'Company {company_short_name} not found')
 
-    def get_prompt_content(self, company: Company, prompt_name: str):
         try:
-            # get the user prompt
-            user_prompt = self.llm_query_repo.get_prompt_by_name(company, prompt_name)
-            if not user_prompt:
-                raise IAToolkitException(IAToolkitException.ErrorType.DOCUMENT_NOT_FOUND,
-                                         f"prompt not found '{prompt_name}' for company '{company.short_name}'")
+            processed_categories_ids = []
 
-            try:
-                user_prompt_content = self.asset_repo.read_text(
-                    company.short_name,
-                    AssetType.PROMPT,
-                    user_prompt.filename
+            # 1. Update/Create Categories
+            for idx, cat_name in enumerate(categories_config):
+                # Order is 0-based index or 1-based, consistent with current usage (seems 0 or 1 is fine, usually 0 for arrays)
+                new_cat = PromptCategory(
+                    company_id=company.id,
+                    name=cat_name,
+                    order=idx
                 )
-            except FileNotFoundError:
-                raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
-                                         f"prompt file '{user_prompt.filename}' does not exist for company '{company.short_name}'")
-            except Exception as e:
-                raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
-                                         f"error while reading prompt: '{prompt_name}': {e}")
-
-            return user_prompt_content
-
-        except IAToolkitException:
-            raise
-        except Exception as e:
-            logging.exception(
-                f"error loading prompt '{prompt_name}' content for '{company.short_name}': {e}")
-            raise IAToolkitException(IAToolkitException.ErrorType.PROMPT_ERROR,
-                                     f'error loading prompt "{prompt_name}" content for company {company.short_name}: {str(e)}')
+                persisted_cat = self.llm_query_repo.create_or_update_prompt_category(new_cat)
+                processed_categories_ids.append(persisted_cat.id)
+
+            # 2. Delete missing categories
+            # We fetch all categories for the company and delete those not in processed_ids
+            all_categories = self.llm_query_repo.get_all_categories(company.id)
+            for cat in all_categories:
+                if cat.id not in processed_categories_ids:
+                    # Depending on logic, we might want to check if they have prompts assigned.
+                    # Usually, sync logic implies "force state", so we delete.
+                    # SQLAlchemy cascading might handle prompts or set them to null depending on model config.
+                    self.llm_query_repo.session.delete(cat)
 
-    def get_system_prompt(self):
-        try:
-            system_prompt_content = []
-
-            # read all the system prompts from the database
-            system_prompts = self.llm_query_repo.get_system_prompts()
-
-            for prompt in system_prompts:
-                try:
-                    content = importlib.resources.read_text('iatoolkit.system_prompts', prompt.filename)
-                    system_prompt_content.append(content)
-                except FileNotFoundError:
-                    logging.warning(f"Prompt file does not exist in the package: {prompt.filename}")
-                except Exception as e:
-                    raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
-                                             f"error reading system prompt '{prompt.filename}': {e}")
-
-            # join the system prompts into a single string
-            return "\n".join(system_prompt_content)
-
-        except IAToolkitException:
-            raise
-        except Exception as e:
-            logging.exception(
-                f"Error al obtener el contenido del prompt de sistema: {e}")
-            raise IAToolkitException(IAToolkitException.ErrorType.PROMPT_ERROR,
-                                     f'error reading the system prompts": {str(e)}')
-
-    def get_user_prompts(self, company_short_name: str) -> dict:
-        try:
-            # validate company
-            company = self.profile_repo.get_company_by_short_name(company_short_name)
-            if not company:
-                return {"error": self.i18n_service.t('errors.company_not_found', company_short_name=company_short_name)}
-
-            # get all the prompts
-            all_prompts = self.llm_query_repo.get_prompts(company)
-
-            # group by category
-            prompts_by_category = defaultdict(list)
-            for prompt in all_prompts:
-                if prompt.active:
-                    if prompt.category:
-                        cat_key = (prompt.category.order, prompt.category.name)
-                        prompts_by_category[cat_key].append(prompt)
-
-            # sort each category by order
-            for cat_key in prompts_by_category:
-                prompts_by_category[cat_key].sort(key=lambda p: p.order)
-
-            categorized_prompts = []
-
-            # sort categories by order
-            sorted_categories = sorted(prompts_by_category.items(), key=lambda item: item[0][0])
-
-            for (cat_order, cat_name), prompts in sorted_categories:
-                categorized_prompts.append({
-                    'category_name': cat_name,
-                    'category_order': cat_order,
-                    'prompts': [
-                        {
-                            'prompt': p.name,
-                            'description': p.description,
-                            'custom_fields': p.custom_fields,
-                            'order': p.order
-                        }
-                        for p in prompts
-                    ]
-                })
-
-            return {'message': categorized_prompts}
+            self.llm_query_repo.commit()
 
         except Exception as e:
-            logging.error(f"error in get_prompts: {e}")
-            return {'error': str(e)}
-
+            self.llm_query_repo.rollback()
+            logging.exception(f"Error syncing prompt categories: {e}")
+            raise IAToolkitException(IAToolkitException.ErrorType.DATABASE_ERROR, str(e))
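
Taken together, the prompt_service.py hunks replace the old get_user_prompts / create_prompt / register_system_prompts flow with get_prompts(include_all=...), save_prompt, delete_prompt, and a per-company _register_system_prompts driven by PromptType. The sketch below is illustrative only: it assumes a PromptService instance already wired up by the injector, and the company name, prompt name, and payload values are invented; only the method names, signatures, and dictionary keys are taken from the diff above.

```python
# Hypothetical usage sketch of the 1.15.3 PromptService surface (not toolkit code).
def demo_prompt_service(prompt_service) -> None:
    # Create or update a prompt: writes a lower-cased "<name>.prompt" asset
    # and upserts the corresponding DB row (see save_prompt in the diff).
    prompt_service.save_prompt(
        company_short_name="acme",            # invented company
        prompt_name="Weekly Report",          # invented prompt
        data={
            "description": "Summarize weekly sales",
            "category": "Reports",            # resolved via get_category_by_name
            "order": 1,
            "active": True,
            "prompt_type": "company",
            "content": "Summarize the sales for {{ week }}.",
            "custom_fields": [{"data_key": "week", "label": "Week"}],
        },
    )

    # End users see only active COMPANY prompts grouped by category;
    # include_all=True also returns the other prompt types for admin views.
    user_view = prompt_service.get_prompts("acme")
    admin_view = prompt_service.get_prompts("acme", include_all=True)
    print(user_view.get("message"), admin_view.get("message"))

    # Remove the prompt's DB row (the physical .prompt file is left in place).
    prompt_service.delete_prompt("acme", "Weekly Report")
```

Note that get_prompts reports failures as {'error': ...} rather than raising, so callers are expected to branch on the returned keys.
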
iatoolkit/services/query_service.py (+37 -18)

@@ -99,7 +99,7 @@ class QueryService:
         final_client_data.update(client_data)
 
         # Load attached files into the context
-        files_context = self.load_files_for_context(files)
+        files_context, images = self.load_files_for_context(files)
 
         # Initialize prompt_content. It will be an empty string for direct questions.
         main_prompt = ""
@@ -128,7 +128,7 @@
         else:
             user_turn_prompt += f'\n### Contexto Adicional: El usuario ha aportado este contexto puede ayudar: {effective_question}'
 
-        return user_turn_prompt, effective_question
+        return user_turn_prompt, effective_question, images
 
     def _ensure_valid_history(self, company,
                               user_identifier: str,
@@ -356,7 +356,7 @@
         effective_model = self._resolve_model(company_short_name, model)
 
         # --- Build User-Facing Prompt ---
-        user_turn_prompt, effective_question = self._build_user_facing_prompt(
+        user_turn_prompt, effective_question, images = self._build_user_facing_prompt(
             company=company,
             user_identifier=user_identifier,
             client_data=client_data,
@@ -397,7 +397,8 @@
             question=effective_question,
             context=user_turn_prompt,
             tools=tools,
-            text=output_schema
+            text=output_schema,
+            images=images,
         )
 
         if not response.get('valid_response'):
@@ -421,24 +422,23 @@
         return "unknown"
 
 
-    def load_files_for_context(self, files: list) -> str:
+    def load_files_for_context(self, files: list) -> tuple[str, list]:
         """
-        Processes a list of attached files, decodes their content,
-        and formats them into a string context for the LLM.
+        Processes a list of attached files.
+        Decodes text documents into context string and separates images for multimodal processing.
        """
        if not files:
-           return ''
+           return '', []
+
+       context_parts = []
+       images = []
+       text_files_count = 0

-       context = f"""
-       A continuación encontraras una lista de documentos adjuntos
-       enviados por el usuario que hace la pregunta,
-       en total son: {len(files)} documentos adjuntos
-       """
        for document in files:
            # Support both 'file_id' and 'filename' for robustness
            filename = document.get('file_id') or document.get('filename') or document.get('name')
            if not filename:
-               context += "\n<error>Documento adjunto sin nombre ignorado.</error>\n"
+               context_parts.append("\n<error>Documento adjunto sin nombre ignorado.</error>\n")
                continue

            # Support both 'base64' and 'content' for robustness
@@ -446,7 +446,12 @@
 
             if not base64_content:
                 # Handles the case where a file is referenced but no content is provided
-                context += f"\n<error>El archivo '{filename}' no fue encontrado y no pudo ser cargado.</error>\n"
+                context_parts.append(f"\n<error>El archivo '{filename}' no fue encontrado y no pudo ser cargado.</error>\n")
+                continue
+
+            # Detect if the file is an image
+            if self._is_image(filename):
+                images.append({'name': filename, 'base64': base64_content})
                 continue
 
             try:
@@ -456,12 +461,26 @@
 
                 file_content = base64.b64decode(base64_content)
                 document_text = self.document_service.file_to_txt(filename, file_content)
-                context += f"\n<document name='{filename}'>\n{document_text}\n</document>\n"
+                context_parts.append(f"\n<document name='{filename}'>\n{document_text}\n</document>\n")
+                text_files_count += 1
             except Exception as e:
                 # Catches errors from b64decode or file_to_txt
                 logging.error(f"Failed to process file {filename}: {e}")
-                context += f"\n<error>Error al procesar el archivo {filename}: {str(e)}</error>\n"
+                context_parts.append(f"\n<error>Error al procesar el archivo {filename}: {str(e)}</error>\n")
                 continue
 
-        return context
+        context = ""
+        if text_files_count > 0:
+            context = f"""
+            A continuación encontraras una lista de documentos adjuntos
+            enviados por el usuario que hace la pregunta,
+            en total son: {text_files_count} documentos adjuntos
+            """ + "".join(context_parts)
+        elif context_parts:
+            # If only errors were collected
+            context = "".join(context_parts)
+
+        return context, images
 
+    def _is_image(self, filename: str) -> bool:
+        return filename.lower().endswith(('.png', '.jpg', '.jpeg', '.webp', '.gif'))
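
In query_service.py, load_files_for_context now returns a (context, images) tuple: attachments whose name ends in an image extension are no longer decoded into the text context but collected separately and threaded through _build_user_facing_prompt into the LLM call as images=. The snippet below is a self-contained sketch of that splitting rule only, not the toolkit's implementation; split_attachments and the sample payload are hypothetical, while the extension tuple and the file_id/filename/name fallback mirror _is_image and the diff above.

```python
# Stand-alone sketch of the attachment-splitting rule introduced in 1.15.3
# (hypothetical helper; the extension tuple mirrors _is_image in the diff).
import base64

IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.webp', '.gif')

def split_attachments(files: list) -> tuple:
    """Return (documents, images) using the same filename fallback and extension check."""
    documents, images = [], []
    for f in files:
        name = (f.get('file_id') or f.get('filename') or f.get('name') or '').lower()
        (images if name.endswith(IMAGE_EXTENSIONS) else documents).append(f)
    return documents, images

# Invented payload: one text document and one image attachment.
files = [
    {'filename': 'report.pdf', 'base64': base64.b64encode(b'%PDF-1.4 ...').decode()},
    {'filename': 'chart.png', 'base64': base64.b64encode(b'fake-png-bytes').decode()},
]
docs, imgs = split_attachments(files)
print([d['filename'] for d in docs])  # ['report.pdf'] -> decoded into the text context
print([i['filename'] for i in imgs])  # ['chart.png']  -> forwarded via images=
```

In the real code path each image entry carries {'name', 'base64'}, which is presumably what the updated LLM adapters in this release consume for multimodal requests.
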