iatoolkit 0.71.4__py3-none-any.whl → 1.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. iatoolkit/__init__.py +19 -7
  2. iatoolkit/base_company.py +1 -71
  3. iatoolkit/cli_commands.py +9 -21
  4. iatoolkit/common/exceptions.py +2 -0
  5. iatoolkit/common/interfaces/__init__.py +0 -0
  6. iatoolkit/common/interfaces/asset_storage.py +34 -0
  7. iatoolkit/common/interfaces/database_provider.py +38 -0
  8. iatoolkit/common/model_registry.py +159 -0
  9. iatoolkit/common/routes.py +53 -32
  10. iatoolkit/common/util.py +17 -12
  11. iatoolkit/company_registry.py +55 -14
  12. iatoolkit/{iatoolkit.py → core.py} +102 -72
  13. iatoolkit/infra/{mail_app.py → brevo_mail_app.py} +15 -37
  14. iatoolkit/infra/llm_providers/__init__.py +0 -0
  15. iatoolkit/infra/llm_providers/deepseek_adapter.py +278 -0
  16. iatoolkit/infra/{gemini_adapter.py → llm_providers/gemini_adapter.py} +11 -17
  17. iatoolkit/infra/{openai_adapter.py → llm_providers/openai_adapter.py} +41 -7
  18. iatoolkit/infra/llm_proxy.py +235 -134
  19. iatoolkit/infra/llm_response.py +5 -0
  20. iatoolkit/locales/en.yaml +134 -4
  21. iatoolkit/locales/es.yaml +293 -162
  22. iatoolkit/repositories/database_manager.py +92 -22
  23. iatoolkit/repositories/document_repo.py +7 -0
  24. iatoolkit/repositories/filesystem_asset_repository.py +36 -0
  25. iatoolkit/repositories/llm_query_repo.py +36 -22
  26. iatoolkit/repositories/models.py +86 -95
  27. iatoolkit/repositories/profile_repo.py +64 -13
  28. iatoolkit/repositories/vs_repo.py +31 -28
  29. iatoolkit/services/auth_service.py +1 -1
  30. iatoolkit/services/branding_service.py +1 -1
  31. iatoolkit/services/company_context_service.py +96 -39
  32. iatoolkit/services/configuration_service.py +329 -67
  33. iatoolkit/services/dispatcher_service.py +51 -227
  34. iatoolkit/services/document_service.py +10 -1
  35. iatoolkit/services/embedding_service.py +9 -6
  36. iatoolkit/services/excel_service.py +50 -2
  37. iatoolkit/services/file_processor_service.py +0 -5
  38. iatoolkit/services/history_manager_service.py +208 -0
  39. iatoolkit/services/jwt_service.py +1 -1
  40. iatoolkit/services/knowledge_base_service.py +412 -0
  41. iatoolkit/services/language_service.py +8 -2
  42. iatoolkit/services/license_service.py +82 -0
  43. iatoolkit/{infra/llm_client.py → services/llm_client_service.py} +42 -29
  44. iatoolkit/services/load_documents_service.py +18 -47
  45. iatoolkit/services/mail_service.py +171 -25
  46. iatoolkit/services/profile_service.py +69 -36
  47. iatoolkit/services/{prompt_manager_service.py → prompt_service.py} +136 -25
  48. iatoolkit/services/query_service.py +229 -203
  49. iatoolkit/services/sql_service.py +116 -34
  50. iatoolkit/services/tool_service.py +246 -0
  51. iatoolkit/services/user_feedback_service.py +18 -6
  52. iatoolkit/services/user_session_context_service.py +121 -51
  53. iatoolkit/static/images/iatoolkit_core.png +0 -0
  54. iatoolkit/static/images/iatoolkit_logo.png +0 -0
  55. iatoolkit/static/js/chat_feedback_button.js +1 -1
  56. iatoolkit/static/js/chat_help_content.js +4 -4
  57. iatoolkit/static/js/chat_main.js +61 -9
  58. iatoolkit/static/js/chat_model_selector.js +227 -0
  59. iatoolkit/static/js/chat_onboarding_button.js +1 -1
  60. iatoolkit/static/js/chat_reload_button.js +4 -1
  61. iatoolkit/static/styles/chat_iatoolkit.css +59 -3
  62. iatoolkit/static/styles/chat_public.css +28 -0
  63. iatoolkit/static/styles/documents.css +598 -0
  64. iatoolkit/static/styles/landing_page.css +223 -7
  65. iatoolkit/static/styles/llm_output.css +34 -1
  66. iatoolkit/system_prompts/__init__.py +0 -0
  67. iatoolkit/system_prompts/query_main.prompt +28 -3
  68. iatoolkit/system_prompts/sql_rules.prompt +47 -12
  69. iatoolkit/templates/_company_header.html +30 -5
  70. iatoolkit/templates/_login_widget.html +3 -3
  71. iatoolkit/templates/base.html +13 -0
  72. iatoolkit/templates/chat.html +45 -3
  73. iatoolkit/templates/forgot_password.html +3 -2
  74. iatoolkit/templates/onboarding_shell.html +1 -2
  75. iatoolkit/templates/signup.html +3 -0
  76. iatoolkit/views/base_login_view.py +8 -3
  77. iatoolkit/views/change_password_view.py +1 -1
  78. iatoolkit/views/chat_view.py +76 -0
  79. iatoolkit/views/forgot_password_view.py +9 -4
  80. iatoolkit/views/history_api_view.py +3 -3
  81. iatoolkit/views/home_view.py +4 -2
  82. iatoolkit/views/init_context_api_view.py +1 -1
  83. iatoolkit/views/llmquery_api_view.py +4 -3
  84. iatoolkit/views/load_company_configuration_api_view.py +49 -0
  85. iatoolkit/views/{file_store_api_view.py → load_document_api_view.py} +15 -11
  86. iatoolkit/views/login_view.py +25 -8
  87. iatoolkit/views/logout_api_view.py +10 -2
  88. iatoolkit/views/prompt_api_view.py +1 -1
  89. iatoolkit/views/rag_api_view.py +216 -0
  90. iatoolkit/views/root_redirect_view.py +22 -0
  91. iatoolkit/views/signup_view.py +12 -4
  92. iatoolkit/views/static_page_view.py +27 -0
  93. iatoolkit/views/users_api_view.py +33 -0
  94. iatoolkit/views/verify_user_view.py +1 -1
  95. iatoolkit-1.4.2.dist-info/METADATA +268 -0
  96. iatoolkit-1.4.2.dist-info/RECORD +133 -0
  97. iatoolkit-1.4.2.dist-info/licenses/LICENSE_COMMUNITY.md +15 -0
  98. iatoolkit/repositories/tasks_repo.py +0 -52
  99. iatoolkit/services/history_service.py +0 -37
  100. iatoolkit/services/search_service.py +0 -55
  101. iatoolkit/services/tasks_service.py +0 -188
  102. iatoolkit/templates/about.html +0 -13
  103. iatoolkit/templates/index.html +0 -145
  104. iatoolkit/templates/login_simulation.html +0 -45
  105. iatoolkit/views/external_login_view.py +0 -73
  106. iatoolkit/views/index_view.py +0 -14
  107. iatoolkit/views/login_simulation_view.py +0 -93
  108. iatoolkit/views/tasks_api_view.py +0 -72
  109. iatoolkit/views/tasks_review_api_view.py +0 -55
  110. iatoolkit-0.71.4.dist-info/METADATA +0 -276
  111. iatoolkit-0.71.4.dist-info/RECORD +0 -122
  112. {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/WHEEL +0 -0
  113. {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/licenses/LICENSE +0 -0
  114. {iatoolkit-0.71.4.dist-info → iatoolkit-1.4.2.dist-info}/top_level.txt +0 -0
iatoolkit/services/history_manager_service.py
@@ -0,0 +1,208 @@
+ # Copyright (c) 2024 Fernando Libedinsky
+ # Product: IAToolkit
+ #
+ # IAToolkit is open source software.
+
+
+ import logging
+ import json
+ from typing import Dict, Any, Optional
+ from iatoolkit.services.user_session_context_service import UserSessionContextService
+ from iatoolkit.services.i18n_service import I18nService
+ from iatoolkit.services.llm_client_service import llmClient
+ from iatoolkit.repositories.models import Company
+ from iatoolkit.repositories.llm_query_repo import LLMQueryRepo
+ from iatoolkit.repositories.profile_repo import ProfileRepo
+ from injector import inject
+
+
+ class HistoryManagerService:
+     """
+     Manages conversation history for LLMs in a unified way.
+     Handles:
+     1. Server-side history (e.g., OpenAI response_ids).
+     2. Client-side history (e.g., Gemini message lists).
+     3. Database persistence retrieval (full chat history).
+     """
+     TYPE_SERVER_SIDE = 'server_side'  # For models like OpenAI
+     TYPE_CLIENT_SIDE = 'client_side'  # For models like Gemini and Deepseek
+
+     GEMINI_MAX_TOKENS_CONTEXT_HISTORY = 200000
+
+
+     @inject
+     def __init__(self,
+                  session_context: UserSessionContextService,
+                  i18n: I18nService,
+                  llm_query_repo: LLMQueryRepo,
+                  profile_repo: ProfileRepo,
+                  llm_client: Optional[llmClient] = None):
+         self.session_context = session_context
+         self.i18n = i18n
+         self.llm_query_repo = llm_query_repo
+         self.profile_repo = profile_repo
+         self.llm_client = llm_client
+
+     def initialize_context(self,
+                            company_short_name: str,
+                            user_identifier: str,
+                            history_type: str,
+                            prepared_context: str,
+                            company: Company, model: str) -> Dict[str, Any]:
+         """
+         Initializes a new conversation history.
+         """
+         # 1. Clear existing history
+         self.session_context.clear_llm_history(company_short_name, user_identifier, model=model)
+
+         if history_type == self.TYPE_SERVER_SIDE:
+             # OpenAI: Send system prompt to API and store the resulting ID
+             response_id = self.llm_client.set_company_context(
+                 company=company,
+                 company_base_context=prepared_context,
+                 model=model
+             )
+             self.session_context.save_last_response_id(company_short_name, user_identifier, response_id, model=model)
+             self.session_context.save_initial_response_id(company_short_name, user_identifier, response_id, model=model)
+             return {'response_id': response_id}
+
+         elif history_type == self.TYPE_CLIENT_SIDE:
+             # Gemini: Store system prompt as the first message in the list
+             context_history = [{"role": "user", "content": prepared_context}]
+             self.session_context.save_context_history(company_short_name, user_identifier, context_history, model=model)
+             return {}
+
+         return {}
+
+     def populate_request_params(self,
+                                 handle: Any,
+                                 user_turn_prompt: str,
+                                 ignore_history: bool = False) -> bool:
+         """
+         Populates the request_params within the HistoryHandle.
+         Returns True if a rebuild is needed, False otherwise.
+         """
+         model = getattr(handle, "model", None)
+
+         if handle.type == self.TYPE_SERVER_SIDE:
+             if ignore_history:
+                 previous_response_id = self.session_context.get_initial_response_id(
+                     handle.company_short_name, handle.user_identifier, model=model)
+             else:
+                 previous_response_id = self.session_context.get_last_response_id(
+                     handle.company_short_name, handle.user_identifier, model=model)
+
+
+             if not previous_response_id:
+                 handle.request_params = {}
+                 return True  # Needs rebuild
+
+             handle.request_params = {'previous_response_id': previous_response_id}
+             return False
+
+         elif handle.type == self.TYPE_CLIENT_SIDE:
+             context_history = self.session_context.get_context_history(
+                 handle.company_short_name, handle.user_identifier, model=model) or []
+
+             if not context_history:
+                 handle.request_params = {}
+                 return True  # Needs rebuild
+
+             if ignore_history and len(context_history) > 1:
+                 # Keep only system prompt
+                 context_history = [context_history[0]]
+
+             # Append the current user turn to the context sent to the API
+             context_history.append({"role": "user", "content": user_turn_prompt})
+
+             self._trim_context_history(context_history)
+
+             handle.request_params = {'context_history': context_history}
+             return False
+
+         handle.request_params = {}
+         return False
+
+     def update_history(self,
+                        history_handle: Any,
+                        user_turn_prompt: str,
+                        response: Dict[str, Any]):
+         """Saves or updates the history after a successful LLM call."""
+
+         # We access the type from the handle
+         history_type = history_handle.type
+         company_short_name = history_handle.company_short_name
+         user_identifier = history_handle.user_identifier
+         model = getattr(history_handle, "model", None)
+
+         if history_type == self.TYPE_SERVER_SIDE:
+             if "response_id" in response:
+                 self.session_context.save_last_response_id(
+                     company_short_name,
+                     user_identifier,
+                     response["response_id"],
+                     model=model)
+
+         elif history_type == self.TYPE_CLIENT_SIDE:
+             # get the history for this company/user/model
+             context_history = self.session_context.get_context_history(
+                 company_short_name,
+                 user_identifier,
+                 model=model)
+
+             # Ensure the user prompt is recorded if not already.
+             # We check content equality to handle the case where the previous message was
+             # also 'user' (e.g., System Prompt) but different content.
+             last_content = context_history[-1].get("content") if context_history else None
+
+             if last_content != user_turn_prompt:
+                 context_history.append({"role": "user", "content": user_turn_prompt})
+
+             if response.get('answer'):
+                 context_history.append({"role": "assistant", "content": response.get('answer', '')})
+
+             self.session_context.save_context_history(
+                 company_short_name,
+                 user_identifier,
+                 context_history,
+                 model=model)
+
+     def _trim_context_history(self, context_history: list):
+         """Internal helper to keep token usage within limits for client-side history."""
+         if not context_history or len(context_history) <= 1:
+             return
+         try:
+             total_tokens = sum(self.llm_client.count_tokens(json.dumps(message)) for message in context_history)
+         except Exception as e:
+             logging.error(f"Error counting tokens for history: {e}.")
+             return
+
+         while total_tokens > self.GEMINI_MAX_TOKENS_CONTEXT_HISTORY and len(context_history) > 1:
+             try:
+                 # Remove the oldest message after the system prompt
+                 removed_message = context_history.pop(1)
+                 removed_tokens = self.llm_client.count_tokens(json.dumps(removed_message))
+                 total_tokens -= removed_tokens
+                 logging.warning(
+                     f"History tokens exceed limit. Removed old message. New total: {total_tokens} tokens."
+                 )
+             except IndexError:
+                 break
+
+     # --- this is for the history popup in the chat page
+     def get_full_history(self, company_short_name: str, user_identifier: str) -> dict:
+         """Retrieves the full persisted history from the database."""
+         try:
+             company = self.profile_repo.get_company_by_short_name(company_short_name)
+             if not company:
+                 return {"error": self.i18n.t('errors.company_not_found', company_short_name=company_short_name)}
+
+             history = self.llm_query_repo.get_history(company, user_identifier)
+             if not history:
+                 return {'message': 'empty history', 'history': []}
+
+             history_list = [query.to_dict() for query in history]
+             return {'message': 'history loaded ok', 'history': history_list}
+
+         except Exception as e:
+             return {'error': str(e)}
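
The class docstring above describes two history strategies: server-side histories tracked by response ids and client-side histories kept as message lists. As a rough illustration of how a caller might drive the client-side path, the sketch below is not part of the diff; the llm.ask call, the model name, the user identifier, and the SimpleNamespace stand-in for the real HistoryHandle object are all assumptions for illustration only.

# Illustrative sketch, not part of the package diff.
from types import SimpleNamespace
from iatoolkit.services.history_manager_service import HistoryManagerService

def run_client_side_turn(history_mgr, llm, company, prepared_context, user_prompt):
    # Start a fresh client-side history (the system prompt becomes the first message)
    history_mgr.initialize_context(
        company_short_name=company.short_name,
        user_identifier="user@example.com",               # hypothetical identifier
        history_type=HistoryManagerService.TYPE_CLIENT_SIDE,
        prepared_context=prepared_context,
        company=company,
        model="gemini-1.5-pro")                           # hypothetical model name

    # Stand-in for the HistoryHandle whose attributes the service reads via getattr
    handle = SimpleNamespace(type=HistoryManagerService.TYPE_CLIENT_SIDE,
                             company_short_name=company.short_name,
                             user_identifier="user@example.com",
                             model="gemini-1.5-pro",
                             request_params={})

    needs_rebuild = history_mgr.populate_request_params(handle, user_prompt)
    if not needs_rebuild:
        # llm.ask(...) is a placeholder for the caller's own LLM invocation
        response = llm.ask(user_prompt, **handle.request_params)
        history_mgr.update_history(handle, user_prompt, response)
        return response.get('answer')
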
iatoolkit/services/jwt_service.py
@@ -17,7 +17,7 @@ class JWTService:
    def __init__(self, app: Flask):
        # Access the configuration directly from app.config
        try:
-             self.secret_key = app.config['JWT_SECRET_KEY']
+             self.secret_key = app.config['IATOOLKIT_SECRET_KEY']
            self.algorithm = app.config['JWT_ALGORITHM']
        except KeyError as e:
            logging.error(f"missing JWT configuration: {e}.")
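
This hunk renames the secret-key setting read from app.config from JWT_SECRET_KEY to IATOOLKIT_SECRET_KEY, so host applications upgrading from 0.71.x need to supply the new key. A minimal configuration sketch follows; the HS256 value is an assumption, since the actual algorithm value is not shown in this diff.

# Illustrative sketch, not part of the package diff.
from flask import Flask

app = Flask(__name__)
app.config['IATOOLKIT_SECRET_KEY'] = 'change-me'   # replaces JWT_SECRET_KEY from 0.71.x
app.config['JWT_ALGORITHM'] = 'HS256'              # key name unchanged; value assumed here
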
iatoolkit/services/knowledge_base_service.py
@@ -0,0 +1,412 @@
+ # Copyright (c) 2024 Fernando Libedinsky
+ # Product: IAToolkit
+ #
+ # IAToolkit is open source software.
+
+
+ from iatoolkit.repositories.models import Document, VSDoc, Company, DocumentStatus
+ from iatoolkit.repositories.document_repo import DocumentRepo
+ from iatoolkit.repositories.vs_repo import VSRepo
+ from iatoolkit.repositories.models import CollectionType
+ from iatoolkit.services.document_service import DocumentService
+ from iatoolkit.services.profile_service import ProfileService
+ from iatoolkit.services.i18n_service import I18nService
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from sqlalchemy import desc
+ from typing import Dict
+ from iatoolkit.common.exceptions import IAToolkitException
+ import base64
+ import logging
+ import hashlib
+ from typing import List, Optional, Union
+ from datetime import datetime
+ from injector import inject
+
+
+ class KnowledgeBaseService:
+     """
+     Central service for managing the RAG (Retrieval-Augmented Generation) Knowledge Base.
+     Orchestrates ingestion (OCR -> Split -> Embed -> Store), retrieval, and management.
+     """
+
+     @inject
+     def __init__(self,
+                  document_repo: DocumentRepo,
+                  vs_repo: VSRepo,
+                  document_service: DocumentService,
+                  profile_service: ProfileService,
+                  i18n_service: I18nService):
+         self.document_repo = document_repo
+         self.vs_repo = vs_repo
+         self.document_service = document_service
+         self.profile_service = profile_service
+         self.i18n_service = i18n_service
+
+         # Configure LangChain for intelligent text splitting
+         self.text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=1000,
+             chunk_overlap=100,
+             separators=["\n\n", "\n", ".", " ", ""]
+         )
+
+     def ingest_document_sync(self,
+                              company: Company,
+                              filename: str,
+                              content: bytes,
+                              user_identifier: str = None,
+                              metadata: dict = None,
+                              collection: str = None) -> Document:
+         """
+         Synchronously processes a document through the entire RAG pipeline:
+         1. Saves initial metadata and raw content (base64) to the SQL Document table.
+         2. Extracts text using DocumentService (handles OCR, PDF, DOCX).
+         3. Splits the text into semantic chunks using LangChain.
+         4. Vectorizes and saves chunks to the Vector Store (VSRepo).
+         5. Updates the document status to ACTIVE or FAILED.
+
+         Args:
+             company: The company owning the document.
+             filename: Original filename.
+             content: Raw bytes of the file.
+             metadata: Optional dictionary with additional info (e.g., document_type).
+
+         Returns:
+             The created Document object.
+         """
+         if not metadata:
+             metadata = {}
+
+         # --- Logic for Collection ---
+         # priority: 1. method parameter 2. metadata
+         collection_name = collection or metadata.get('collection')
+         collection_type_id = self._get_collection_type_id(company.id, collection_name)
+
+         # 1. Calculate SHA-256 hash of the content
+         file_hash = hashlib.sha256(content).hexdigest()
+
+         # 2. Check for duplicates by HASH (Content deduplication)
+         # If the same content exists (even with a different filename), we skip processing.
+         existing_doc = self.document_repo.get_by_hash(company.id, file_hash)
+         if existing_doc:
+             msg = self.i18n_service.t('rag.ingestion.duplicate', filename=filename, company_short_name=company.short_name)
+             logging.info(msg)
+             return existing_doc
+
+
+         # 3. Create initial record with PENDING status
+         try:
+             # Encode to b64 for safe storage in DB if needed later for download
+             content_b64 = base64.b64encode(content).decode('utf-8')
+
+             new_doc = Document(
+                 company_id=company.id,
+                 collection_type_id=collection_type_id,
+                 filename=filename,
+                 hash=file_hash,
+                 user_identifier=user_identifier,
+                 content="",  # Will be populated after text extraction
+                 content_b64=content_b64,
+                 meta=metadata,
+                 status=DocumentStatus.PENDING
+             )
+
+             self.document_repo.insert(new_doc)
+
+             # 4. Start processing (Extraction + Vectorization)
+             self._process_document_content(company.short_name, new_doc, content)
+
+             return new_doc
+
+         except Exception as e:
+             logging.exception(f"Error initializing document ingestion for {filename}: {e}")
+             error_msg = self.i18n_service.t('rag.ingestion.failed', error=str(e))
+
+             raise IAToolkitException(IAToolkitException.ErrorType.LOAD_DOCUMENT_ERROR, error_msg)
+
+
+     def _process_document_content(self, company_short_name: str, document: Document, raw_content: bytes):
+         """
+         Internal method to handle the heavy lifting of extraction and vectorization.
+         Updates the document status directly via the session.
+         """
+         session = self.document_repo.session
+
+         try:
+             # A. Update status to PROCESSING
+             document.status = DocumentStatus.PROCESSING
+             session.commit()
+
+             # B. Text Extraction (Uses existing service logic for OCR, etc.)
+             extracted_text = self.document_service.file_to_txt(document.filename, raw_content)
+
+             if not extracted_text:
+                 raise ValueError(self.i18n_service.t('rag.ingestion.empty_text'))
+
+             # Update the extracted content in the original document record
+             document.content = extracted_text
+
+             # C. Splitting (LangChain)
+             chunks = self.text_splitter.split_text(extracted_text)
+
+             # D. Create VSDocs (Chunks)
+             # Note: The embedding generation happens inside VSRepo or can be explicit here
+             vs_docs = []
+             for chunk_text in chunks:
+                 vs_doc = VSDoc(
+                     company_id=document.company_id,
+                     document_id=document.id,
+                     text=chunk_text
+                 )
+                 vs_docs.append(vs_doc)
+
+             # E. Vector Storage
+             # We need the short_name so VSRepo knows which API Key to use for embeddings
+             self.vs_repo.add_document(company_short_name, vs_docs)
+
+             # F. Finalize
+             document.status = DocumentStatus.ACTIVE
+             session.commit()
+             logging.info(f"Successfully ingested {document.description} with {len(chunks)} chunks.")
+
+         except Exception as e:
+             session.rollback()
+             logging.error(f"Failed to process document {document.id}: {e}")
+
+             # Attempt to save the error state
+             try:
+                 document.status = DocumentStatus.FAILED
+                 document.error_message = str(e)
+                 session.commit()
+             except:
+                 pass  # If error commit fails, we can't do much more
+
+             error_msg = self.i18n_service.t('rag.ingestion.processing_failed', error=str(e))
+             raise IAToolkitException(IAToolkitException.ErrorType.LOAD_DOCUMENT_ERROR, error_msg)
+
+
+     def search(self, company_short_name: str, query: str, n_results: int = 5, metadata_filter: dict = None) -> str:
+         """
+         Performs a semantic search against the vector store and formats the result as a context string for LLMs.
+         Replaces the legacy SearchService logic.
+
+         Args:
+             company_short_name: The target company.
+             query: The user's question or search term.
+             n_results: Max number of chunks to retrieve.
+             metadata_filter: Optional filter for document metadata.
+
+         Returns:
+             Formatted string with context.
+         """
+         company = self.profile_service.get_company_by_short_name(company_short_name)
+         if not company:
+             return f"error: {self.i18n_service.t('rag.search.company_not_found', company_short_name=company_short_name)}"
+
+         # Queries VSRepo (which typically uses pgvector/SQL underneath)
+         chunk_list = self.vs_repo.query(
+             company_short_name=company_short_name,
+             query_text=query,
+             n_results=n_results,
+             metadata_filter=metadata_filter
+         )
+
+         search_context = ''
+         for chunk in chunk_list:
+             # Each chunk is a dict with the chunk text and its source document metadata
+             search_context += f'document "{chunk["filename"]}"'
+
+             if chunk.get('meta') and 'document_type' in chunk.get('meta'):
+                 doc_type = chunk.get('meta').get('document_type', '')
+                 search_context += f' type: {doc_type}'
+
+             search_context += f': {chunk.get("text")}\n\n'
+
+         return search_context
+
+     def search_raw(self,
+                    company_short_name: str,
+                    query: str, n_results: int = 5,
+                    collection: str = None,
+                    metadata_filter: dict = None
+                    ) -> List[Dict]:
+         """
+         Performs a semantic search and returns the list of matching chunks as dictionaries.
+         Useful for UI displays where structured data is needed instead of a raw string context.
+
+         Args:
+             company_short_name: The target company.
+             query: The user's question or search term.
+             n_results: Max number of chunks to retrieve.
+             metadata_filter: Optional filter for document metadata.
+
+         Returns:
+             List of chunk dictionaries found.
+         """
+         company = self.profile_service.get_company_by_short_name(company_short_name)
+         if not company:
+             # We return an empty list instead of an error string for consistency
+             logging.warning(f"Company {company_short_name} not found during raw search.")
+             return []
+
+         # If a collection name is provided, resolve it to an ID or handle it in VSRepo
+         collection_id = None
+         if collection:
+             collection_id = self._get_collection_type_id(company.id, collection)
+             if not collection_id:
+                 logging.warning(f"Collection '{collection}' not found. Searching all.")
+
+
+         # Queries VSRepo directly
+         chunk_list = self.vs_repo.query(
+             company_short_name=company_short_name,
+             query_text=query,
+             n_results=n_results,
+             metadata_filter=metadata_filter,
+             collection_id=collection_id,
+         )
+
+         return chunk_list
+
+     def list_documents(self,
+                        company_short_name: str,
+                        status: Optional[Union[str, List[str]]] = None,
+                        user_identifier: Optional[str] = None,
+                        collection: str = None,
+                        filename_keyword: Optional[str] = None,
+                        from_date: Optional[datetime] = None,
+                        to_date: Optional[datetime] = None,
+                        limit: int = 100,
+                        offset: int = 0) -> List[Document]:
+         """
+         Retrieves a paginated list of documents based on various filters.
+         Used by the frontend to display the Knowledge Base grid.
+
+         Args:
+             company_short_name: Required. Filters by company.
+             status: Optional status enum value or list of values (e.g. 'active' or ['active', 'failed']).
+             user_identifier: Optional. Filters by the user who uploaded the document.
+             filename_keyword: Optional substring to search in filename.
+             from_date: Optional start date filter (created_at).
+             to_date: Optional end date filter (created_at).
+             limit: Pagination limit.
+             offset: Pagination offset.
+
+         Returns:
+             List of Document objects matching the criteria.
+         """
+         session = self.document_repo.session
+
+         # Start building the query
+         query = session.query(Document).join(Company).filter(Company.short_name == company_short_name)
+
+         # Filter by status (single string or list)
+         if status:
+             if isinstance(status, list):
+                 query = query.filter(Document.status.in_(status))
+             else:
+                 query = query.filter(Document.status == status)
+
+         # Filter by collection
+         if collection:
+             query = query.join(CollectionType).filter(CollectionType.name == collection)
+
+         # Filter by user identifier
+         if user_identifier:
+             query = query.filter(Document.user_identifier.ilike(f"%{user_identifier}%"))
+
+         if filename_keyword:
+             # Case-insensitive search
+             query = query.filter(Document.filename.ilike(f"%{filename_keyword}%"))
+
+         if from_date:
+             query = query.filter(Document.created_at >= from_date)
+
+         if to_date:
+             query = query.filter(Document.created_at <= to_date)
+
+         # Apply sorting (newest first) and pagination
+         query = query.order_by(desc(Document.created_at))
+         query = query.limit(limit).offset(offset)
+
+         return query.all()
+
+     def get_document_content(self, document_id: int) -> tuple[bytes, str]:
+         """
+         Retrieves the raw content of a document and its filename.
+
+         Args:
+             document_id: ID of the document.
+
+         Returns:
+             A tuple containing (file_bytes, filename).
+             Returns (None, None) if the document is not found.
+         """
+         doc = self.document_repo.get_by_id(document_id)
+         if not doc or not doc.content_b64:
+             return None, None
+
+         try:
+             file_bytes = base64.b64decode(doc.content_b64)
+             return file_bytes, doc.filename
+         except Exception as e:
+             logging.error(f"Error decoding content for document {document_id}: {e}")
+             raise IAToolkitException(IAToolkitException.ErrorType.FILE_FORMAT_ERROR,
+                                      f"Error reading file content: {e}")
+
+     def delete_document(self, document_id: int) -> bool:
+         """
+         Deletes a document and its associated vectors.
+         Since vectors are linked via FK with ON DELETE CASCADE, deleting the Document record is sufficient.
+
+         Args:
+             document_id: The ID of the document to delete.
+
+         Returns:
+             True if deleted, False if not found.
+         """
+         doc = self.document_repo.get_by_id(document_id)
+         if not doc:
+             return False
+
+         session = self.document_repo.session
+         try:
+             session.delete(doc)
+             session.commit()
+             return True
+         except Exception as e:
+             session.rollback()
+             logging.error(f"Error deleting document {document_id}: {e}")
+             raise IAToolkitException(IAToolkitException.ErrorType.DATABASE_ERROR,
+                                      f"Error deleting document: {e}")
+
+     def sync_collection_types(self, company_short_name: str, categories_config: list):
+         """
+         This should be called during company initialization or configuration reload.
+         """
+         company = self.profile_service.get_company_by_short_name(company_short_name)
+         if not company:
+             raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
+                                      f'Company {company_short_name} not found')
+
+
+         session = self.document_repo.session
+         existing_types = session.query(CollectionType).filter_by(company_id=company.id).all()
+         existing_names = {ct.name: ct for ct in existing_types}
+
+         for cat_name in categories_config:
+             if cat_name not in existing_names:
+                 new_type = CollectionType(company_id=company.id, name=cat_name)
+                 session.add(new_type)
+
+         # Optional: also remove the types that are no longer in the config?
+         # For data safety it is better not to delete automatically, or to mark them inactive instead.
+
+         session.commit()
+
+     def _get_collection_type_id(self, company_id: int, collection_name: str) -> Optional[int]:
+         """Helper to get ID by name"""
+         if not collection_name:
+             return None
+         session = self.document_repo.session
+         ct = session.query(CollectionType).filter_by(company_id=company_id, name=collection_name).first()
+         return ct.id if ct else None
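
The docstrings above spell out the ingestion pipeline (hash, dedupe, extract, split, embed, store) and the two retrieval flavors (formatted LLM context vs. raw chunks). The sketch below is not part of the diff and only suggests how the service might be exercised; the injector wiring, the "acme" company, the file name, the collection, and the metadata are all assumptions.

# Illustrative sketch, not part of the package diff.
from iatoolkit.services.knowledge_base_service import KnowledgeBaseService

def load_and_query(injector, profile_service):
    kb = injector.get(KnowledgeBaseService)
    company = profile_service.get_company_by_short_name("acme")   # hypothetical company

    with open("handbook.pdf", "rb") as f:                         # hypothetical file
        doc = kb.ingest_document_sync(
            company=company,
            filename="handbook.pdf",
            content=f.read(),
            metadata={"document_type": "policy"},
            collection="hr")                                      # optional collection name

    # Formatted context string, ready to splice into an LLM prompt
    context = kb.search("acme", "What is the vacation policy?", n_results=3)

    # Structured chunk dictionaries, e.g. for a documents grid in the UI
    chunks = kb.search_raw("acme", "vacation policy", n_results=3, collection="hr")
    return doc, context, chunks
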
iatoolkit/services/language_service.py
@@ -48,6 +48,7 @@ class LanguageService:
    def get_current_language(self) -> str:
        """
        Determines and caches the language for the current request using a priority order:
+         0. Query parameter '?lang=<code>' (highest priority; e.g., 'en', 'es').
        1. User's preference (from their profile).
        2. Company's default language.
        3. System-wide fallback language ('es').
@@ -56,6 +57,12 @@ class LanguageService:
            return g.lang

        try:
+             # Priority 0: Explicit query parameter (?lang=)
+             lang_arg = request.args.get('lang')
+             if lang_arg:
+                 g.lang = lang_arg
+                 return g.lang
+
            # Priority 1: User's preferred language
            user_identifier = SessionManager.get('user_identifier')
            if user_identifier:
@@ -74,10 +81,9 @@ class LanguageService:
                g.lang = company_language
                return g.lang
        except Exception as e:
-             logging.info(f"Could not determine language, falling back to default. Reason: {e}")
            pass

        # Priority 3: System-wide fallback
-         logging.info(f"Language determined by system fallback: {self.FALLBACK_LANGUAGE}")
+         logging.debug(f"Language determined by system fallback: {self.FALLBACK_LANGUAGE}")
        g.lang = self.FALLBACK_LANGUAGE
        return g.lang
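
These hunks add an explicit ?lang= query parameter as the highest-priority language source and demote the fallback log message from info to debug. A minimal sketch of the override via a Flask test client; the /acme/chat route is hypothetical and only stands in for any IAToolkit page.

# Illustrative sketch, not part of the package diff.
def demo_lang_override(app):
    with app.test_client() as client:
        client.get('/acme/chat?lang=en')   # Priority 0: explicit query parameter wins
        client.get('/acme/chat')           # falls back to user, company, then system default
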