iatoolkit 0.66.2__py3-none-any.whl → 0.71.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. iatoolkit/__init__.py +2 -6
  2. iatoolkit/base_company.py +3 -31
  3. iatoolkit/cli_commands.py +1 -1
  4. iatoolkit/common/routes.py +5 -1
  5. iatoolkit/common/session_manager.py +2 -0
  6. iatoolkit/company_registry.py +1 -2
  7. iatoolkit/iatoolkit.py +13 -13
  8. iatoolkit/infra/llm_client.py +8 -12
  9. iatoolkit/infra/llm_proxy.py +38 -10
  10. iatoolkit/locales/en.yaml +25 -2
  11. iatoolkit/locales/es.yaml +27 -4
  12. iatoolkit/repositories/database_manager.py +8 -3
  13. iatoolkit/repositories/document_repo.py +1 -1
  14. iatoolkit/repositories/models.py +6 -8
  15. iatoolkit/repositories/profile_repo.py +0 -4
  16. iatoolkit/repositories/vs_repo.py +26 -20
  17. iatoolkit/services/auth_service.py +2 -2
  18. iatoolkit/services/branding_service.py +11 -7
  19. iatoolkit/services/company_context_service.py +155 -0
  20. iatoolkit/services/configuration_service.py +133 -0
  21. iatoolkit/services/dispatcher_service.py +75 -70
  22. iatoolkit/services/document_service.py +5 -2
  23. iatoolkit/services/embedding_service.py +145 -0
  24. iatoolkit/services/excel_service.py +15 -11
  25. iatoolkit/services/file_processor_service.py +4 -12
  26. iatoolkit/services/history_service.py +7 -7
  27. iatoolkit/services/i18n_service.py +4 -4
  28. iatoolkit/services/jwt_service.py +7 -9
  29. iatoolkit/services/language_service.py +29 -23
  30. iatoolkit/services/load_documents_service.py +100 -113
  31. iatoolkit/services/mail_service.py +9 -4
  32. iatoolkit/services/profile_service.py +10 -7
  33. iatoolkit/services/prompt_manager_service.py +20 -16
  34. iatoolkit/services/query_service.py +112 -43
  35. iatoolkit/services/search_service.py +11 -4
  36. iatoolkit/services/sql_service.py +57 -25
  37. iatoolkit/services/user_feedback_service.py +15 -13
  38. iatoolkit/static/js/chat_history_button.js +3 -5
  39. iatoolkit/static/js/chat_main.js +2 -17
  40. iatoolkit/static/js/chat_onboarding_button.js +6 -0
  41. iatoolkit/static/styles/chat_iatoolkit.css +69 -158
  42. iatoolkit/static/styles/chat_modal.css +1 -37
  43. iatoolkit/static/styles/onboarding.css +7 -0
  44. iatoolkit/system_prompts/query_main.prompt +2 -10
  45. iatoolkit/templates/change_password.html +1 -1
  46. iatoolkit/templates/chat.html +12 -4
  47. iatoolkit/templates/chat_modals.html +4 -0
  48. iatoolkit/templates/error.html +1 -1
  49. iatoolkit/templates/login_simulation.html +17 -6
  50. iatoolkit/templates/onboarding_shell.html +4 -1
  51. iatoolkit/views/base_login_view.py +7 -8
  52. iatoolkit/views/change_password_view.py +2 -3
  53. iatoolkit/views/embedding_api_view.py +65 -0
  54. iatoolkit/views/external_login_view.py +1 -1
  55. iatoolkit/views/file_store_api_view.py +1 -1
  56. iatoolkit/views/forgot_password_view.py +2 -4
  57. iatoolkit/views/help_content_api_view.py +9 -9
  58. iatoolkit/views/history_api_view.py +1 -1
  59. iatoolkit/views/home_view.py +2 -2
  60. iatoolkit/views/init_context_api_view.py +18 -17
  61. iatoolkit/views/llmquery_api_view.py +3 -2
  62. iatoolkit/views/login_simulation_view.py +14 -2
  63. iatoolkit/views/login_view.py +9 -9
  64. iatoolkit/views/signup_view.py +2 -4
  65. iatoolkit/views/verify_user_view.py +2 -4
  66. {iatoolkit-0.66.2.dist-info → iatoolkit-0.71.4.dist-info}/METADATA +40 -22
  67. iatoolkit-0.71.4.dist-info/RECORD +122 -0
  68. iatoolkit-0.71.4.dist-info/licenses/LICENSE +21 -0
  69. iatoolkit/services/help_content_service.py +0 -30
  70. iatoolkit/services/onboarding_service.py +0 -43
  71. iatoolkit-0.66.2.dist-info/RECORD +0 -119
  72. {iatoolkit-0.66.2.dist-info → iatoolkit-0.71.4.dist-info}/WHEEL +0 -0
  73. {iatoolkit-0.66.2.dist-info → iatoolkit-0.71.4.dist-info}/top_level.txt +0 -0
@@ -1,12 +1,13 @@
  # iatoolkit/services/language_service.py

  import logging
- from injector import inject
+ from injector import inject, singleton
  from flask import g, request
  from iatoolkit.repositories.profile_repo import ProfileRepo
+ from iatoolkit.services.configuration_service import ConfigurationService
  from iatoolkit.common.session_manager import SessionManager

-
+ @singleton
  class LanguageService:
  """
  Determines the correct language for the current request
@@ -14,8 +15,13 @@ class LanguageService:
  and caches it in the Flask 'g' object for the request's lifecycle.
  """

+ FALLBACK_LANGUAGE = 'es'
+
  @inject
- def __init__(self, profile_repo: ProfileRepo):
+ def __init__(self,
+ config_service: ConfigurationService,
+ profile_repo: ProfileRepo):
+ self.config_service = config_service
  self.profile_repo = profile_repo

  def _get_company_short_name(self) -> str | None:
@@ -49,29 +55,29 @@ class LanguageService:
  if 'lang' in g:
  return g.lang

- from iatoolkit.services.i18n_service import I18nService
- lang = I18nService.FALLBACK_LANGUAGE
-
  try:
+ # Priority 1: User's preferred language
+ user_identifier = SessionManager.get('user_identifier')
+ if user_identifier:
+ user = self.profile_repo.get_user_by_email(user_identifier)
+ if user and user.preferred_language:
+ logging.debug(f"Language determined by user preference: {user.preferred_language}")
+ g.lang = user.preferred_language
+ return g.lang
+
+ # Priority 2: Company's default language
  company_short_name = self._get_company_short_name()
  if company_short_name:
- # Prioridad 1: Preferencia del Usuario
- user_identifier = SessionManager.get('user_identifier')
- if user_identifier:
- # Usamos el repositorio para obtener el objeto User
- user = self.profile_repo.get_user_by_email(
- user_identifier) # Asumiendo que el email es el identificador
- if user and user.preferred_language:
- g.lang = user.preferred_language
- return g.lang
-
- # Prioridad 2: Idioma por defecto de la Compañía (si no se encontró preferencia de usuario)
- company = self.profile_repo.get_company_by_short_name(company_short_name)
- if company and company.default_language:
- lang = company.default_language
+ locale = self.config_service.get_configuration(company_short_name, 'locale')
+ if locale:
+ company_language = locale.split('_')[0]
+ g.lang = company_language
+ return g.lang
  except Exception as e:
- logging.debug(f"Could not determine language, falling back to default. Reason: {e}")
+ logging.info(f"Could not determine language, falling back to default. Reason: {e}")
  pass

- g.lang = lang
- return lang
+ # Priority 3: System-wide fallback
+ logging.info(f"Language determined by system fallback: {self.FALLBACK_LANGUAGE}")
+ g.lang = self.FALLBACK_LANGUAGE
+ return g.lang
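
For orientation, the rewritten get_language flow above now resolves the language in three ordered steps: the user's stored preference, then the language part of the company's 'locale' configuration entry, then the new FALLBACK_LANGUAGE constant. The following is a minimal standalone sketch of that resolution order only, not package code; plain values stand in for the ProfileRepo and ConfigurationService lookups.

  # Illustrative sketch of the three-step resolution order (not library code).
  FALLBACK_LANGUAGE = 'es'

  def resolve_language(preferred_language: str | None, company_locale: str | None) -> str:
      # Priority 1: the user's stored preference.
      if preferred_language:
          return preferred_language
      # Priority 2: the company's configured locale, e.g. 'en_US' -> 'en'.
      if company_locale:
          return company_locale.split('_')[0]
      # Priority 3: system-wide fallback.
      return FALLBACK_LANGUAGE

  assert resolve_language('en', 'es_CL') == 'en'
  assert resolve_language(None, 'en_US') == 'en'
  assert resolve_language(None, None) == 'es'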
iatoolkit/services/load_documents_service.py

@@ -1,50 +1,41 @@
  # Copyright (c) 2024 Fernando Libedinsky
  # Product: IAToolkit
- #
- # IAToolkit is open source software.

  from iatoolkit.repositories.vs_repo import VSRepo
  from iatoolkit.repositories.document_repo import DocumentRepo
- from iatoolkit.repositories.profile_repo import ProfileRepo
- from iatoolkit.repositories.llm_query_repo import LLMQueryRepo
-
  from iatoolkit.repositories.models import Document, VSDoc, Company
  from iatoolkit.services.document_service import DocumentService
+ from iatoolkit.services.configuration_service import ConfigurationService
  from langchain.text_splitter import RecursiveCharacterTextSplitter
  from iatoolkit.infra.connectors.file_connector_factory import FileConnectorFactory
  from iatoolkit.services.file_processor_service import FileProcessorConfig, FileProcessor
- from iatoolkit.services.dispatcher_service import Dispatcher
  from iatoolkit.common.exceptions import IAToolkitException
  import logging
  import base64
- from injector import inject
- from typing import Dict
+ from injector import inject, singleton
+ import os


+ @singleton
  class LoadDocumentsService:
  """
  Orchestrates the process of loading, processing, and storing documents
- from various sources for different companies.
+ from various sources defined in the company's configuration.
  """
  @inject
  def __init__(self,
+ config_service: ConfigurationService,
  file_connector_factory: FileConnectorFactory,
  doc_service: DocumentService,
  doc_repo: DocumentRepo,
  vector_store: VSRepo,
- profile_repo: ProfileRepo,
- dispatcher: Dispatcher,
- llm_query_repo: LLMQueryRepo
  ):
+ self.config_service = config_service
  self.doc_service = doc_service
  self.doc_repo = doc_repo
- self.profile_repo = profile_repo
- self.llm_query_repo = llm_query_repo
  self.vector_store = vector_store
  self.file_connector_factory = file_connector_factory
- self.dispatcher = dispatcher

- # lower warnings
  logging.getLogger().setLevel(logging.ERROR)

  self.splitter = RecursiveCharacterTextSplitter(
@@ -53,135 +44,131 @@ class LoadDocumentsService:
  separators=["\n\n", "\n", "."]
  )

- def load_company_files(self,
- company: Company,
- connector_config: Dict,
- predefined_metadata: Dict = None,
- filters: Dict = None):
+ def load_sources(self,
+ company: Company,
+ sources_to_load: list[str] = None,
+ filters: dict = None) -> int:
  """
- Loads all the company files from a connector
+ Loads documents from one or more configured sources for a company.

  Args:
  company (Company): The company to load files for.
- connector_config (Dict): The configuration for the file connector.
- predefined_metadata (Dict, optional): Metadata to be added to all documents from this source.
- filters (Dict, optional): Filters to apply to the files.
+ sources_to_load (list[str], optional): A list of specific source names to load.
+ If None, all configured sources will be loaded.
+ filters (dict, optional): Filters to apply when listing files (e.g., file extension).

  Returns:
- int: The number of processed files.
+ int: The total number of processed files.
  """
- if not connector_config:
- raise IAToolkitException(IAToolkitException.ErrorType.MISSING_PARAMETER,
- f"Falta configurar conector")
+ knowledge_base_config = self.config_service.get_configuration(company.short_name, 'knowledge_base')
+ if not knowledge_base_config:
+ raise IAToolkitException(IAToolkitException.ErrorType.CONFIG_ERROR,
+ f"Missing 'knowledge_base' configuration for company '{company.short_name}'.")
+
+ if not sources_to_load:
+ raise IAToolkitException(IAToolkitException.ErrorType.PARAM_NOT_FILLED,
+ f"Missing sources to load for company '{company.short_name}'.")
+
+ base_connector_config = self._get_base_connector_config(knowledge_base_config)
+ all_sources = knowledge_base_config.get('document_sources', {})
+
+ total_processed_files = 0
+ for source_name in sources_to_load:
+ source_config = all_sources.get(source_name)
+ if not source_config:
+ logging.warning(f"Source '{source_name}' not found in configuration for company '{company.short_name}'. Skipping.")
+ continue
+
+ try:
+ logging.info(f"Processing source '{source_name}' for company '{company.short_name}'...")
+
+ # Combine the base connector configuration with the specific path from the source.
+ full_connector_config = base_connector_config.copy()
+ full_connector_config['path'] = source_config.get('path')
+
+ # Prepare the context for the callback function.
+ context = {
+ 'company': company,
+ 'metadata': source_config.get('metadata', {})
+ }
+
+ processor_config = FileProcessorConfig(
+ callback=self._file_processing_callback,
+ context=context,
+ filters=filters or {"filename_contains": ".pdf"},
+ continue_on_error=True,
+ echo=True
+ )

- try:
- if not filters:
- filters = {"filename_contains": ".pdf"}
-
- # Pasar metadata predefinida como parte del contexto al procesador
- # para que esté disponible en la función load_file_callback
- context = {
- 'company': company,
- 'metadata': {}
- }
-
- if predefined_metadata:
- context['metadata'] = predefined_metadata
-
- # config the processor
- processor_config = FileProcessorConfig(
- callback=self.load_file_callback,
- context=context,
- filters=filters,
- continue_on_error=True,
- echo=True
- )
+ connector = self.file_connector_factory.create(full_connector_config)
+ processor = FileProcessor(connector, processor_config)
+ processor.process_files()

- connector = self.file_connector_factory.create(connector_config)
- processor = FileProcessor(connector, processor_config)
+ total_processed_files += processor.processed_files
+ logging.info(f"Finished processing source '{source_name}'. Processed {processor.processed_files} files.")

- # process the files
- processor.process_files()
+ except Exception as e:
+ logging.exception(f"Failed to process source '{source_name}' for company '{company.short_name}': {e}")

- return processor.processed_files
- except Exception as e:
- logging.exception("Loading files error: %s", str(e))
- return {"error": str(e)}
+ return total_processed_files

- def load_file_callback(self, company: Company, filename: str, content: bytes, context: dict = {}):
- """
- Processes a single file: extracts text, generates metadata, and saves it
- to the relational database and the vector store.
- This method is intended to be used as the 'action' for FileProcessor.
+ def _get_base_connector_config(self, knowledge_base_config: dict) -> dict:
+ """Determines and returns the appropriate base connector configuration (dev vs prod)."""
+ connectors = knowledge_base_config.get('connectors', {})
+ env = os.getenv('FLASK_ENV', 'dev')

- Args:
- company (Company): The company associated with the file.
- filename (str): The name of the file.
- content (bytes): The binary content of the file.
- context (dict, optional): A context dictionary, may contain predefined metadata.
- """
+ if env == 'dev':
+ return connectors.get('development', {'type': 'local'})
+ else:
+ prod_config = connectors.get('production')
+ if not prod_config:
+ raise IAToolkitException(IAToolkitException.ErrorType.CONFIG_ERROR,
+ "Production connector configuration is missing.")
+ # The S3 connector itself is responsible for reading AWS environment variables.
+ # No need to pass credentials explicitly here.
+ return prod_config

+ def _file_processing_callback(self, company: Company, filename: str, content: bytes, context: dict = None):
+ """
+ Callback method to process a single file. It extracts text, merges metadata,
+ and saves the document to both relational and vector stores.
+ """
  if not company:
- raise IAToolkitException(IAToolkitException.ErrorType.MISSING_PARAMETER,
- f"Falta configurar empresa")
+ raise IAToolkitException(IAToolkitException.ErrorType.MISSING_PARAMETER, "Missing company object in callback.")

- # check if file exist in repositories
- if self.doc_repo.get(company_id=company.id,filename=filename):
+ if self.doc_repo.get(company_id=company.id, filename=filename):
+ logging.debug(f"File '{filename}' already exists for company '{company.id}'. Skipping.")
  return

  try:
- # extract text from the document
  document_content = self.doc_service.file_to_txt(filename, content)
- content_base64 = base64.b64encode(content).decode('utf-8')

- # generate metada based on the filename structure
- dynamic_metadata = self.dispatcher.get_metadata_from_filename(company_name=company.short_name, filename=filename)
+ # Get predefined metadata from the context passed by the processor.
+ predefined_metadata = context.get('metadata', {}) if context else {}

- # Obtener metadatos del contexto si existen
- context_metadata = context.get('metadata', {}).copy() if context else {}
-
- # Fusionar los metadatos. El orden de prioridad es:
- # 1. dynamic_metadata (tiene mayor prioridad)
- # 2. context_metadata (del parámetro context)
- # Los valores en dynamic_metadata tendrán precedencia sobre los de context_metadata
- final_meta = {**context_metadata, **dynamic_metadata}
-
- # save the file in the document repositories
+ # Save the document to the relational database.
+ session = self.doc_repo.session
  new_document = Document(
  company_id=company.id,
  filename=filename,
  content=document_content,
- content_b64=content_base64,
- meta=final_meta
+ content_b64=base64.b64encode(content).decode('utf-8'),
+ meta=predefined_metadata
  )
-
- # insert the document into the Database (without commit)
- session = self.doc_repo.session
  session.add(new_document)
- session.flush() # get the ID without commit
-
- # split the content, and create the chunk list
- splitted_content = self.splitter.split_text(document_content)
- chunk_list = [
- VSDoc(
- company_id=company.id,
- document_id=new_document.id,
- text=text
- )
- for text in splitted_content
- ]
+ session.flush() # Flush to get the new_document.id without committing.

- # save to vector store
- self.vector_store.add_document(chunk_list)
+ # Split into chunks and prepare for vector store.
+ chunks = self.splitter.split_text(document_content)
+ vs_docs = [VSDoc(company_id=company.id, document_id=new_document.id, text=text) for text in chunks]

- # confirm the transaction
- session.commit()
+ # Add document chunks to the vector store.
+ self.vector_store.add_document(company.short_name, vs_docs)

+ session.commit()
  return new_document
  except Exception as e:
  self.doc_repo.session.rollback()
-
- # if something fails, throw exception
- logging.exception("Error procesando el archivo %s: %s", filename, str(e))
+ logging.exception(f"Error processing file '{filename}': {e}")
  raise IAToolkitException(IAToolkitException.ErrorType.LOAD_DOCUMENT_ERROR,
- f"Error al procesar el archivo {filename}")
+ f"Error while processing file: {filename}")
iatoolkit/services/mail_service.py

@@ -4,6 +4,7 @@
  # IAToolkit is open source software.

  from iatoolkit.infra.mail_app import MailApp
+ from iatoolkit.services.i18n_service import I18nService
  from injector import inject
  from pathlib import Path
  from iatoolkit.common.exceptions import IAToolkitException
@@ -13,18 +14,22 @@ TEMP_DIR = Path("static/temp")

  class MailService:
  @inject
- def __init__(self, mail_app: MailApp):
+ def __init__(self,
+ mail_app: MailApp,
+ i18n_service: I18nService):
  self.mail_app = mail_app
+ self.i18n_service = i18n_service
+

  def _read_token_bytes(self, token: str) -> bytes:
  # Defensa simple contra path traversal
  if not token or "/" in token or "\\" in token or token.startswith("."):
  raise IAToolkitException(IAToolkitException.ErrorType.MAIL_ERROR,
- "attachment_token inválido")
+ "attachment_token invalid")
  path = TEMP_DIR / token
  if not path.is_file():
  raise IAToolkitException(IAToolkitException.ErrorType.MAIL_ERROR,
- f"Adjunto no encontrado: {token}")
+ f"attach file not found: {token}")
  return path.read_bytes()

  def send_mail(self, **kwargs):
@@ -59,4 +64,4 @@ class MailService:
  body=body,
  attachments=norm_attachments)

- return 'mail enviado'
+ return self.i18n_service.t('services.mail_sent')
iatoolkit/services/profile_service.py

@@ -49,15 +49,15 @@ class ProfileService:

  company = self.profile_repo.get_company_by_short_name(company_short_name)
  if not company:
- return {'success': False, "message": "Empresa no encontrada"}
+ return {'success': False, "message": "missing company"}

  # check that user belongs to company
  if company not in user.companies:
- return {'success': False, "message": "Usuario no esta autorizado para esta empresa"}
+ return {'success': False, "message": self.i18n_service.t('errors.services.user_not_authorized')}

  if not user.verified:
  return {'success': False,
- "message": "Tu cuenta no ha sido verificada. Por favor, revisa tu correo."}
+ "message": self.i18n_service.t('errors.services.account_not_verified')}

  # 1. Build the local user profile dictionary here.
  # the user_profile variables are used on the LLM templates also (see in query_main.prompt)
@@ -74,7 +74,7 @@ class ProfileService:

  # 3. create the web session
  self.set_session_for_user(company.short_name, user_identifier)
- return {'success': True, "user_identifier": user_identifier, "message": "Login exitoso"}
+ return {'success': True, "user_identifier": user_identifier, "message": "Login ok"}
  except Exception as e:
  logging.error(f"Error in login: {e}")
  return {'success': False, "message": str(e)}
@@ -95,6 +95,9 @@ class ProfileService:
  user_identifier=user_identifier,
  user_profile=external_user_profile)

+ # 3. make sure the flask session is clean
+ SessionManager.clear()
+
  def save_user_profile(self, company: Company, user_identifier: str, user_profile: dict):
  """
  Private helper: Takes a pre-built profile, saves it to Redis, and sets the Flask cookie.
@@ -151,7 +154,7 @@ class ProfileService:
  except Exception as e:
  # Log the error and return a generic failure message.
  logging.error(f"Failed to update language for {user_identifier}: {e}")
- return {'success': False, 'error_message': self.i18n_service.t('errors.general.unexpected_error')}
+ return {'success': False, 'error_message': self.i18n_service.t('errors.general.unexpected_error', error=str(e))}


  def get_profile_by_identifier(self, company_short_name: str, user_identifier: str) -> dict:
@@ -230,7 +233,7 @@ class ProfileService:

  return {"message": self.i18n_service.t('flash_messages.signup_success')}
  except Exception as e:
- return {"error": self.i18n_service.t('errors.general.unexpected_error')}
+ return {"error": self.i18n_service.t('errors.general.unexpected_error', error=str(e))}

  def update_user(self, email: str, **kwargs) -> User:
  return self.profile_repo.update_user(email, **kwargs)
@@ -324,7 +327,7 @@ class ProfileService:
  def new_api_key(self, company_short_name: str):
  company = self.get_company_by_short_name(company_short_name)
  if not company:
- return {"error": f"la empresa {company_short_name} no existe"}
+ return {"error": self.i18n_service.t('errors.company_not_found', company_short_name=company_short_name)}

  length = 40 # lenght of the api key
  alphabet = string.ascii_letters + string.digits
iatoolkit/services/prompt_manager_service.py

@@ -5,21 +5,25 @@

  from injector import inject
  from iatoolkit.repositories.llm_query_repo import LLMQueryRepo
-
- import logging
+ from iatoolkit.services.i18n_service import I18nService
  from iatoolkit.repositories.profile_repo import ProfileRepo
  from collections import defaultdict
  from iatoolkit.repositories.models import Prompt, PromptCategory, Company
  import os
  from iatoolkit.common.exceptions import IAToolkitException
  import importlib.resources
+ import logging


  class PromptService:
  @inject
- def __init__(self, llm_query_repo: LLMQueryRepo, profile_repo: ProfileRepo):
+ def __init__(self,
+ llm_query_repo: LLMQueryRepo,
+ profile_repo: ProfileRepo,
+ i18n_service: I18nService):
  self.llm_query_repo = llm_query_repo
  self.profile_repo = profile_repo
+ self.i18n_service = i18n_service

  def create_prompt(self,
  prompt_name: str,
@@ -36,20 +40,20 @@ class PromptService:
  if is_system_prompt:
  if not importlib.resources.files('iatoolkit.system_prompts').joinpath(prompt_filename).is_file():
  raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
- f'No existe el archivo de prompt de sistemas: {prompt_filename}')
+ f'missing system prompt file: {prompt_filename}')
  else:
  template_dir = f'companies/{company.short_name}/prompts'

  relative_prompt_path = os.path.join(template_dir, prompt_filename)
  if not os.path.exists(relative_prompt_path):
  raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
- f'No existe el archivo de prompt: {relative_prompt_path}')
+ f'missing prompt file: {relative_prompt_path}')

  if custom_fields:
  for f in custom_fields:
  if ('data_key' not in f) or ('label' not in f):
  raise IAToolkitException(IAToolkitException.ErrorType.INVALID_PARAMETER,
- f'El campo custom_fields debe contener los campos: data_key y label')
+ f'The field "custom_fields" must contain the following keys: data_key y label')

  # add default value for data_type
  if 'type' not in f:
@@ -82,20 +86,20 @@ class PromptService:
  user_prompt = self.llm_query_repo.get_prompt_by_name(company, prompt_name)
  if not user_prompt:
  raise IAToolkitException(IAToolkitException.ErrorType.DOCUMENT_NOT_FOUND,
- f"No se encontró el prompt '{prompt_name}' para la empresa '{company.short_name}'")
+ f"prompt not found '{prompt_name}' for company '{company.short_name}'")

  prompt_file = f'companies/{company.short_name}/prompts/{user_prompt.filename}'
  absolute_filepath = os.path.join(execution_dir, prompt_file)
  if not os.path.exists(absolute_filepath):
  raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
- f"El archivo para el prompt '{prompt_name}' no existe: {absolute_filepath}")
+ f"prompt file '{prompt_name}' does not exist: {absolute_filepath}")

  try:
  with open(absolute_filepath, 'r', encoding='utf-8') as f:
  user_prompt_content = f.read()
  except Exception as e:
  raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
- f"Error leyendo el archivo de prompt '{prompt_name}' en {absolute_filepath}: {e}")
+ f"error while reading prompt: '{prompt_name}' in this pathname {absolute_filepath}: {e}")

  return user_prompt_content

@@ -105,9 +109,9 @@
  raise
  except Exception as e:
  logging.exception(
- f"Error al obtener el contenido del prompt para la empresa '{company.short_name}' y prompt '{prompt_name}': {e}")
+ f"error loading prompt '{prompt_name}' content for '{company.short_name}': {e}")
  raise IAToolkitException(IAToolkitException.ErrorType.PROMPT_ERROR,
- f'Error al obtener el contenido del prompt "{prompt_name}" para la empresa {company.short_name}: {str(e)}')
+ f'error loading prompt "{prompt_name}" content for company {company.short_name}: {str(e)}')

  def get_system_prompt(self):
  try:
@@ -121,10 +125,10 @@
  content = importlib.resources.read_text('iatoolkit.system_prompts', prompt.filename)
  system_prompt_content.append(content)
  except FileNotFoundError:
- logging.warning(f"El archivo para el prompt de sistema no existe en el paquete: {prompt.filename}")
+ logging.warning(f"Prompt file does not exist in the package: {prompt.filename}")
  except Exception as e:
  raise IAToolkitException(IAToolkitException.ErrorType.FILE_IO_ERROR,
- f"Error leyendo el archivo de prompt del sistema '{prompt.filename}': {e}")
+ f"error reading system prompt '{prompt.filename}': {e}")

  # join the system prompts into a single string
  return "\n".join(system_prompt_content)
@@ -135,14 +139,14 @@ class PromptService:
  logging.exception(
  f"Error al obtener el contenido del prompt de sistema: {e}")
  raise IAToolkitException(IAToolkitException.ErrorType.PROMPT_ERROR,
- f'Error al obtener el contenido de los prompts de sistema": {str(e)}')
+ f'error reading the system prompts": {str(e)}')

  def get_user_prompts(self, company_short_name: str) -> dict:
  try:
  # validate company
  company = self.profile_repo.get_company_by_short_name(company_short_name)
  if not company:
- return {'error': f'No existe la empresa: {company_short_name}'}
+ return {"error": self.i18n_service.t('errors.company_not_found', company_short_name=company_short_name)}

  # get all the prompts
  all_prompts = self.llm_query_repo.get_prompts(company)
@@ -183,6 +187,6 @@ class PromptService:
  return {'message': categorized_prompts}

  except Exception as e:
- logging.error(f"Error en get_prompts: {e}")
+ logging.error(f"error in get_prompts: {e}")
  return {'error': str(e)}