iatoolkit 0.91.1__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. iatoolkit/__init__.py +6 -4
  2. iatoolkit/base_company.py +0 -16
  3. iatoolkit/cli_commands.py +3 -14
  4. iatoolkit/common/exceptions.py +1 -0
  5. iatoolkit/common/interfaces/__init__.py +0 -0
  6. iatoolkit/common/interfaces/asset_storage.py +34 -0
  7. iatoolkit/common/interfaces/database_provider.py +43 -0
  8. iatoolkit/common/model_registry.py +159 -0
  9. iatoolkit/common/routes.py +47 -5
  10. iatoolkit/common/util.py +32 -13
  11. iatoolkit/company_registry.py +5 -0
  12. iatoolkit/core.py +51 -20
  13. iatoolkit/infra/connectors/file_connector_factory.py +1 -0
  14. iatoolkit/infra/connectors/s3_connector.py +4 -2
  15. iatoolkit/infra/llm_providers/__init__.py +0 -0
  16. iatoolkit/infra/llm_providers/deepseek_adapter.py +278 -0
  17. iatoolkit/infra/{gemini_adapter.py → llm_providers/gemini_adapter.py} +11 -17
  18. iatoolkit/infra/{openai_adapter.py → llm_providers/openai_adapter.py} +41 -7
  19. iatoolkit/infra/llm_proxy.py +235 -134
  20. iatoolkit/infra/llm_response.py +5 -0
  21. iatoolkit/locales/en.yaml +158 -2
  22. iatoolkit/locales/es.yaml +158 -0
  23. iatoolkit/repositories/database_manager.py +52 -47
  24. iatoolkit/repositories/document_repo.py +7 -0
  25. iatoolkit/repositories/filesystem_asset_repository.py +36 -0
  26. iatoolkit/repositories/llm_query_repo.py +2 -0
  27. iatoolkit/repositories/models.py +72 -79
  28. iatoolkit/repositories/profile_repo.py +59 -3
  29. iatoolkit/repositories/vs_repo.py +22 -24
  30. iatoolkit/services/company_context_service.py +126 -53
  31. iatoolkit/services/configuration_service.py +299 -73
  32. iatoolkit/services/dispatcher_service.py +21 -3
  33. iatoolkit/services/file_processor_service.py +0 -5
  34. iatoolkit/services/history_manager_service.py +43 -24
  35. iatoolkit/services/knowledge_base_service.py +425 -0
  36. iatoolkit/{infra/llm_client.py → services/llm_client_service.py} +38 -29
  37. iatoolkit/services/load_documents_service.py +26 -48
  38. iatoolkit/services/profile_service.py +32 -4
  39. iatoolkit/services/prompt_service.py +32 -30
  40. iatoolkit/services/query_service.py +51 -26
  41. iatoolkit/services/sql_service.py +122 -74
  42. iatoolkit/services/tool_service.py +26 -11
  43. iatoolkit/services/user_session_context_service.py +115 -63
  44. iatoolkit/static/js/chat_main.js +44 -4
  45. iatoolkit/static/js/chat_model_selector.js +227 -0
  46. iatoolkit/static/js/chat_onboarding_button.js +1 -1
  47. iatoolkit/static/js/chat_reload_button.js +4 -1
  48. iatoolkit/static/styles/chat_iatoolkit.css +58 -2
  49. iatoolkit/static/styles/llm_output.css +34 -1
  50. iatoolkit/system_prompts/query_main.prompt +26 -2
  51. iatoolkit/templates/base.html +13 -0
  52. iatoolkit/templates/chat.html +45 -2
  53. iatoolkit/templates/onboarding_shell.html +0 -1
  54. iatoolkit/views/base_login_view.py +7 -2
  55. iatoolkit/views/chat_view.py +76 -0
  56. iatoolkit/views/configuration_api_view.py +163 -0
  57. iatoolkit/views/load_document_api_view.py +14 -10
  58. iatoolkit/views/login_view.py +8 -3
  59. iatoolkit/views/rag_api_view.py +216 -0
  60. iatoolkit/views/users_api_view.py +33 -0
  61. {iatoolkit-0.91.1.dist-info → iatoolkit-1.7.0.dist-info}/METADATA +4 -4
  62. {iatoolkit-0.91.1.dist-info → iatoolkit-1.7.0.dist-info}/RECORD +66 -58
  63. iatoolkit/repositories/tasks_repo.py +0 -52
  64. iatoolkit/services/search_service.py +0 -55
  65. iatoolkit/services/tasks_service.py +0 -188
  66. iatoolkit/views/tasks_api_view.py +0 -72
  67. iatoolkit/views/tasks_review_api_view.py +0 -55
  68. {iatoolkit-0.91.1.dist-info → iatoolkit-1.7.0.dist-info}/WHEEL +0 -0
  69. {iatoolkit-0.91.1.dist-info → iatoolkit-1.7.0.dist-info}/licenses/LICENSE +0 -0
  70. {iatoolkit-0.91.1.dist-info → iatoolkit-1.7.0.dist-info}/licenses/LICENSE_COMMUNITY.md +0 -0
  71. {iatoolkit-0.91.1.dist-info → iatoolkit-1.7.0.dist-info}/top_level.txt +0 -0
@@ -1,9 +1,15 @@
1
+ # Copyright (c) 2024 Fernando Libedinsky
2
+ # Product: IAToolkit
3
+ #
4
+ # IAToolkit is open source software.
5
+
6
+
1
7
  import logging
2
8
  import json
3
- from typing import Dict, Any, Tuple, Optional
9
+ from typing import Dict, Any, Optional
4
10
  from iatoolkit.services.user_session_context_service import UserSessionContextService
5
11
  from iatoolkit.services.i18n_service import I18nService
6
- from iatoolkit.infra.llm_client import llmClient
12
+ from iatoolkit.services.llm_client_service import llmClient
7
13
  from iatoolkit.repositories.models import Company
8
14
  from iatoolkit.repositories.llm_query_repo import LLMQueryRepo
9
15
  from iatoolkit.repositories.profile_repo import ProfileRepo
@@ -19,7 +25,7 @@ class HistoryManagerService:
19
25
  3. Database persistence retrieval (full chat history).
20
26
  """
21
27
  TYPE_SERVER_SIDE = 'server_side' # For models like OpenAI
22
- TYPE_CLIENT_SIDE = 'client_side' # For models like Gemini
28
+ TYPE_CLIENT_SIDE = 'client_side' # For models like Gemini and Deepseek
23
29
 
24
30
  GEMINI_MAX_TOKENS_CONTEXT_HISTORY = 200000
25
31
 
@@ -47,7 +53,7 @@ class HistoryManagerService:
47
53
  Initializes a new conversation history.
48
54
  """
49
55
  # 1. Clear existing history
50
- self.session_context.clear_llm_history(company_short_name, user_identifier)
56
+ self.session_context.clear_llm_history(company_short_name, user_identifier, model=model)
51
57
 
52
58
  if history_type == self.TYPE_SERVER_SIDE:
53
59
  # OpenAI: Send system prompt to API and store the resulting ID
@@ -56,14 +62,14 @@ class HistoryManagerService:
56
62
  company_base_context=prepared_context,
57
63
  model=model
58
64
  )
59
- self.session_context.save_last_response_id(company_short_name, user_identifier, response_id)
60
- self.session_context.save_initial_response_id(company_short_name, user_identifier, response_id)
65
+ self.session_context.save_last_response_id(company_short_name, user_identifier, response_id, model=model)
66
+ self.session_context.save_initial_response_id(company_short_name, user_identifier, response_id, model=model)
61
67
  return {'response_id': response_id}
62
68
 
63
69
  elif history_type == self.TYPE_CLIENT_SIDE:
64
70
  # Gemini: Store system prompt as the first message in the list
65
71
  context_history = [{"role": "user", "content": prepared_context}]
66
- self.session_context.save_context_history(company_short_name, user_identifier, context_history)
72
+ self.session_context.save_context_history(company_short_name, user_identifier, context_history, model=model)
67
73
  return {}
68
74
 
69
75
  return {}
@@ -76,14 +82,16 @@ class HistoryManagerService:
76
82
  Populates the request_params within the HistoryHandle.
77
83
  Returns True if a rebuild is needed, False otherwise.
78
84
  """
85
+ model = getattr(handle, "model", None)
86
+
79
87
  if handle.type == self.TYPE_SERVER_SIDE:
80
- previous_response_id = None
81
88
  if ignore_history:
82
- previous_response_id = self.session_context.get_initial_response_id(handle.company_short_name,
83
- handle.user_identifier)
89
+ previous_response_id = self.session_context.get_initial_response_id(
90
+ handle.company_short_name,handle.user_identifier,model=model)
84
91
  else:
85
- previous_response_id = self.session_context.get_last_response_id(handle.company_short_name,
86
- handle.user_identifier)
92
+ previous_response_id = self.session_context.get_last_response_id(
93
+ handle.company_short_name,handle.user_identifier,model=model)
94
+
87
95
 
88
96
  if not previous_response_id:
89
97
  handle.request_params = {}
@@ -93,8 +101,8 @@ class HistoryManagerService:
93
101
  return False
94
102
 
95
103
  elif handle.type == self.TYPE_CLIENT_SIDE:
96
- context_history = self.session_context.get_context_history(handle.company_short_name,
97
- handle.user_identifier) or []
104
+ context_history = self.session_context.get_context_history(
105
+ handle.company_short_name,handle.user_identifier,model=model) or []
98
106
 
99
107
  if not context_history:
100
108
  handle.request_params = {}
@@ -104,7 +112,7 @@ class HistoryManagerService:
104
112
  # Keep only system prompt
105
113
  context_history = [context_history[0]]
106
114
 
107
- # For Gemini, we append the current user turn to the context sent to the API
115
+ # Append the current user turn to the context sent to the API
108
116
  context_history.append({"role": "user", "content": user_turn_prompt})
109
117
 
110
118
  self._trim_context_history(context_history)
@@ -125,15 +133,23 @@ class HistoryManagerService:
125
133
  history_type = history_handle.type
126
134
  company_short_name = history_handle.company_short_name
127
135
  user_identifier = history_handle.user_identifier
136
+ model = getattr(history_handle, "model", None)
128
137
 
129
138
  if history_type == self.TYPE_SERVER_SIDE:
130
139
  if "response_id" in response:
131
- self.session_context.save_last_response_id(company_short_name, user_identifier,
132
- response["response_id"])
140
+ self.session_context.save_last_response_id(
141
+ company_short_name,
142
+ user_identifier,
143
+ response["response_id"],
144
+ model=model)
133
145
 
134
146
  elif history_type == self.TYPE_CLIENT_SIDE:
135
- context_history = self.session_context.get_context_history(company_short_name,
136
- user_identifier) or []
147
+ # get the history for this company/user/model
148
+ context_history = self.session_context.get_context_history(
149
+ company_short_name,
150
+ user_identifier,
151
+ model=model)
152
+
137
153
  # Ensure the user prompt is recorded if not already.
138
154
  # We check content equality to handle the case where the previous message was
139
155
  # also 'user' (e.g., System Prompt) but different content.
@@ -142,10 +158,14 @@ class HistoryManagerService:
142
158
  if last_content != user_turn_prompt:
143
159
  context_history.append({"role": "user", "content": user_turn_prompt})
144
160
 
145
- if response.get('output'):
146
- context_history.append({"role": "model", "content": response['output']})
161
+ if response.get('answer'):
162
+ context_history.append({"role": "assistant", "content": response.get('answer', '')})
147
163
 
148
- self.session_context.save_context_history(company_short_name, user_identifier, context_history)
164
+ self.session_context.save_context_history(
165
+ company_short_name,
166
+ user_identifier,
167
+ context_history,
168
+ model=model)
149
169
 
150
170
  def _trim_context_history(self, context_history: list):
151
171
  """Internal helper to keep token usage within limits for client-side history."""
@@ -169,8 +189,7 @@ class HistoryManagerService:
169
189
  except IndexError:
170
190
  break
171
191
 
172
- # --- Database History Management (Legacy HistoryService) ---
173
-
192
+ # --- this is for the history popup in the chat page
174
193
  def get_full_history(self, company_short_name: str, user_identifier: str) -> dict:
175
194
  """Retrieves the full persisted history from the database."""
176
195
  try:
@@ -0,0 +1,425 @@
1
+ # Copyright (c) 2024 Fernando Libedinsky
2
+ # Product: IAToolkit
3
+ #
4
+ # IAToolkit is open source software.
5
+
6
+
7
+ from iatoolkit.repositories.models import Document, VSDoc, Company, DocumentStatus
8
+ from iatoolkit.repositories.document_repo import DocumentRepo
9
+ from iatoolkit.repositories.vs_repo import VSRepo
10
+ from iatoolkit.repositories.models import CollectionType
11
+ from iatoolkit.services.document_service import DocumentService
12
+ from iatoolkit.services.profile_service import ProfileService
13
+ from iatoolkit.services.i18n_service import I18nService
14
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
15
+ from sqlalchemy import desc
16
+ from typing import Dict
17
+ from iatoolkit.common.exceptions import IAToolkitException
18
+ import base64
19
+ import logging
20
+ import hashlib
21
+ from typing import List, Optional, Union
22
+ from datetime import datetime
23
+ from injector import inject
24
+
25
+
26
+ class KnowledgeBaseService:
27
+ """
28
+ Central service for managing the RAG (Retrieval-Augmented Generation) Knowledge Base.
29
+ Orchestrates ingestion (OCR -> Split -> Embed -> Store), retrieval, and management.
30
+ """
31
+
32
+ @inject
33
+ def __init__(self,
34
+ document_repo: DocumentRepo,
35
+ vs_repo: VSRepo,
36
+ document_service: DocumentService,
37
+ profile_service: ProfileService,
38
+ i18n_service: I18nService):
39
+ self.document_repo = document_repo
40
+ self.vs_repo = vs_repo
41
+ self.document_service = document_service
42
+ self.profile_service = profile_service
43
+ self.i18n_service = i18n_service
44
+
45
+ # Configure LangChain for intelligent text splitting
46
+ self.text_splitter = RecursiveCharacterTextSplitter(
47
+ chunk_size=1000,
48
+ chunk_overlap=100,
49
+ separators=["\n\n", "\n", ".", " ", ""]
50
+ )
51
+
52
+ def ingest_document_sync(self,
53
+ company: Company,
54
+ filename: str,
55
+ content: bytes,
56
+ user_identifier: str = None,
57
+ metadata: dict = None,
58
+ collection: str = None) -> Document:
59
+ """
60
+ Synchronously processes a document through the entire RAG pipeline:
61
+ 1. Saves initial metadata and raw content (base64) to the SQL Document table.
62
+ 2. Extracts text using DocumentService (handles OCR, PDF, DOCX).
63
+ 3. Splits the text into semantic chunks using LangChain.
64
+ 4. Vectorizes and saves chunks to the Vector Store (VSRepo).
65
+ 5. Updates the document status to ACTIVE or FAILED.
66
+
67
+ Args:
68
+ company: The company owning the document.
69
+ filename: Original filename.
70
+ content: Raw bytes of the file.
71
+ metadata: Optional dictionary with additional info (e.g., document_type).
72
+
73
+ Returns:
74
+ The created Document object.
75
+ """
76
+ if not metadata:
77
+ metadata = {}
78
+
79
+ # --- Logic for Collection ---
80
+ # priority: 1. method parameter 2. metadata
81
+ collection_name = collection or metadata.get('collection')
82
+ collection_type_id = self._get_collection_type_id(company.id, collection_name)
83
+
84
+ # 1. Calculate SHA-256 hash of the content
85
+ file_hash = hashlib.sha256(content).hexdigest()
86
+
87
+ # 2. Check for duplicates by HASH (Content deduplication)
88
+ # If the same content exists (even with a different filename), we skip processing.
89
+ existing_doc = self.document_repo.get_by_hash(company.id, file_hash)
90
+ if existing_doc:
91
+ msg = self.i18n_service.t('rag.ingestion.duplicate', filename=filename, company_short_name=company.short_name)
92
+ logging.info(msg)
93
+ return existing_doc
94
+
95
+
96
+ # 3. Create initial record with PENDING status
97
+ try:
98
+ # Encode to b64 for safe storage in DB if needed later for download
99
+ content_b64 = base64.b64encode(content).decode('utf-8')
100
+
101
+ new_doc = Document(
102
+ company_id=company.id,
103
+ collection_type_id=collection_type_id,
104
+ filename=filename,
105
+ hash=file_hash,
106
+ user_identifier=user_identifier,
107
+ content="", # Will be populated after text extraction
108
+ content_b64=content_b64,
109
+ meta=metadata,
110
+ status=DocumentStatus.PENDING
111
+ )
112
+
113
+ self.document_repo.insert(new_doc)
114
+
115
+ # 3. Start processing (Extraction + Vectorization)
116
+ self._process_document_content(company.short_name, new_doc, content)
117
+
118
+ return new_doc
119
+
120
+ except Exception as e:
121
+ logging.exception(f"Error initializing document ingestion for '{filename}': {e}")
122
+ error_msg = self.i18n_service.t('rag.ingestion.failed', error=str(e))
123
+
124
+ raise IAToolkitException(IAToolkitException.ErrorType.LOAD_DOCUMENT_ERROR, error_msg)
125
+
126
+
127
+ def _process_document_content(self, company_short_name: str, document: Document, raw_content: bytes):
128
+ """
129
+ Internal method to handle the heavy lifting of extraction and vectorization.
130
+ Updates the document status directly via the session.
131
+ """
132
+ session = self.document_repo.session
133
+
134
+ try:
135
+ # A. Update status to PROCESSING
136
+ document.status = DocumentStatus.PROCESSING
137
+ session.commit()
138
+
139
+ # B. Text Extraction (Uses existing service logic for OCR, etc.)
140
+ extracted_text = self.document_service.file_to_txt(document.filename, raw_content)
141
+
142
+ if not extracted_text:
143
+ raise ValueError(self.i18n_service.t('rag.ingestion.empty_text'))
144
+
145
+ # Update the extracted content in the original document record
146
+ document.content = extracted_text
147
+
148
+ # C. Splitting (LangChain)
149
+ chunks = self.text_splitter.split_text(extracted_text)
150
+
151
+ # D. Create VSDocs (Chunks)
152
+ # Note: The embedding generation happens inside VSRepo or can be explicit here
153
+ vs_docs = []
154
+ for chunk_text in chunks:
155
+ vs_doc = VSDoc(
156
+ company_id=document.company_id,
157
+ document_id=document.id,
158
+ text=chunk_text
159
+ )
160
+ vs_docs.append(vs_doc)
161
+
162
+ # E. Vector Storage
163
+ # We need the short_name so VSRepo knows which API Key to use for embeddings
164
+ self.vs_repo.add_document(company_short_name, vs_docs)
165
+
166
+ # F. Finalize
167
+ document.status = DocumentStatus.ACTIVE
168
+ session.commit()
169
+ logging.info(f"Successfully ingested {document.description} with {len(chunks)} chunks.")
170
+
171
+ except Exception as e:
172
+ session.rollback()
173
+ logging.error(f"Failed to process document {document.id}: {e}")
174
+
175
+ # Attempt to save the error state
176
+ try:
177
+ document.status = DocumentStatus.FAILED
178
+ document.error_message = str(e)
179
+ session.commit()
180
+ except:
181
+ pass # If error commit fails, we can't do much more
182
+
183
+ error_msg = self.i18n_service.t('rag.ingestion.processing_failed', error=str(e))
184
+ raise IAToolkitException(IAToolkitException.ErrorType.LOAD_DOCUMENT_ERROR, error_msg)
185
+
186
+
187
+ def search(self, company_short_name: str, query: str, n_results: int = 5, metadata_filter: dict = None) -> str:
188
+ """
189
+ Performs a semantic search against the vector store and formats the result as a context string for LLMs.
190
+ Replaces the legacy SearchService logic.
191
+
192
+ Args:
193
+ company_short_name: The target company.
194
+ query: The user's question or search term.
195
+ n_results: Max number of chunks to retrieve.
196
+ metadata_filter: Optional filter for document metadata.
197
+
198
+ Returns:
199
+ Formatted string with context.
200
+ """
201
+ company = self.profile_service.get_company_by_short_name(company_short_name)
202
+ if not company:
203
+ return f"error: {self.i18n_service.t('rag.search.company_not_found', company_short_name=company_short_name)}"
204
+
205
+ # Queries VSRepo (which typically uses pgvector/SQL underneath)
206
+ chunk_list = self.vs_repo.query(
207
+ company_short_name=company_short_name,
208
+ query_text=query,
209
+ n_results=n_results,
210
+ metadata_filter=metadata_filter
211
+ )
212
+
213
+ search_context = ''
214
+ for chunk in chunk_list:
215
+ # 'doc' here is a reconstructed Document object containing the chunk text
216
+ search_context += f'document "{chunk["filename"]}"'
217
+
218
+ if chunk.get('meta') and 'document_type' in chunk.get('meta'):
219
+ doc_type = chunk.get('meta').get('document_type', '')
220
+ search_context += f' type: {doc_type}'
221
+
222
+ search_context += f': {chunk.get("text")}\n\n'
223
+
224
+ return search_context
225
+
226
+ def search_raw(self,
227
+ company_short_name: str,
228
+ query: str, n_results: int = 5,
229
+ collection: str = None,
230
+ metadata_filter: dict = None
231
+ ) -> List[Dict]:
232
+ """
233
+ Performs a semantic search and returns the list of Document objects (chunks).
234
+ Useful for UI displays where structured data is needed instead of a raw string context.
235
+
236
+ Args:
237
+ company_short_name: The target company.
238
+ query: The user's question or search term.
239
+ n_results: Max number of chunks to retrieve.
240
+ metadata_filter: Optional filter for document metadata.
241
+
242
+ Returns:
243
+ List of Document objects found.
244
+ """
245
+ company = self.profile_service.get_company_by_short_name(company_short_name)
246
+ if not company:
247
+ # We return empty list instead of error string for consistency
248
+ logging.warning(f"Company {company_short_name} not found during raw search.")
249
+ return []
250
+
251
+ # If collection name provided, resolve to ID or handle in VSRepo
252
+ collection_id = None
253
+ if collection:
254
+ collection_id = self._get_collection_type_id(company.id, collection)
255
+ if not collection_id:
256
+ logging.warning(f"Collection '{collection}' not found. Searching all.")
257
+
258
+
259
+ # Queries VSRepo directly
260
+ chunk_list = self.vs_repo.query(
261
+ company_short_name=company_short_name,
262
+ query_text=query,
263
+ n_results=n_results,
264
+ metadata_filter=metadata_filter,
265
+ collection_id=collection_id,
266
+ )
267
+
268
+ return chunk_list
269
+
270
+ def list_documents(self,
271
+ company_short_name: str,
272
+ status: Optional[Union[str, List[str]]] = None,
273
+ user_identifier: Optional[str] = None,
274
+ collection: str = None,
275
+ filename_keyword: Optional[str] = None,
276
+ from_date: Optional[datetime] = None,
277
+ to_date: Optional[datetime] = None,
278
+ limit: int = 100,
279
+ offset: int = 0) -> List[Document]:
280
+ """
281
+ Retrieves a paginated list of documents based on various filters.
282
+ Used by the frontend to display the Knowledge Base grid.
283
+
284
+ Args:
285
+ company_short_name: Required. Filters by company.
286
+ status: Optional status enum value or list of values (e.g. 'active' or ['active', 'failed']).
287
+ user_identifier: Optional. Filters by the user who uploaded the document.
288
+ filename_keyword: Optional substring to search in filename.
289
+ from_date: Optional start date filter (created_at).
290
+ to_date: Optional end date filter (created_at).
291
+ limit: Pagination limit.
292
+ offset: Pagination offset.
293
+
294
+ Returns:
295
+ List of Document objects matching the criteria.
296
+ """
297
+ session = self.document_repo.session
298
+
299
+ # Start building the query
300
+ query = session.query(Document).join(Company).filter(Company.short_name == company_short_name)
301
+
302
+ # Filter by status (single string or list)
303
+ if status:
304
+ if isinstance(status, list):
305
+ query = query.filter(Document.status.in_(status))
306
+ else:
307
+ query = query.filter(Document.status == status)
308
+
309
+ # filter by collection
310
+ if collection:
311
+ query = query.join(Document.collection_type).filter(CollectionType.name == collection)
312
+
313
+ # Filter by user identifier
314
+ if user_identifier:
315
+ query = query.filter(Document.user_identifier.ilike(f"%{user_identifier}%"))
316
+
317
+ if filename_keyword:
318
+ # Case-insensitive search
319
+ query = query.filter(Document.filename.ilike(f"%{filename_keyword}%"))
320
+
321
+ if from_date:
322
+ query = query.filter(Document.created_at >= from_date)
323
+
324
+ if to_date:
325
+ query = query.filter(Document.created_at <= to_date)
326
+
327
+ # Apply sorting (newest first) and pagination
328
+ query = query.order_by(desc(Document.created_at))
329
+ query = query.limit(limit).offset(offset)
330
+
331
+ return query.all()
332
+
333
+ def get_document_content(self, document_id: int) -> tuple[bytes, str]:
334
+ """
335
+ Retrieves the raw content of a document and its filename.
336
+
337
+ Args:
338
+ document_id: ID of the document.
339
+
340
+ Returns:
341
+ A tuple containing (file_bytes, filename).
342
+ Returns (None, None) if document not found.
343
+ """
344
+ doc = self.document_repo.get_by_id(document_id)
345
+ if not doc or not doc.content_b64:
346
+ return None, None
347
+
348
+ try:
349
+ file_bytes = base64.b64decode(doc.content_b64)
350
+ return file_bytes, doc.filename
351
+ except Exception as e:
352
+ logging.error(f"Error decoding content for document {document_id}: {e}")
353
+ raise IAToolkitException(IAToolkitException.ErrorType.FILE_FORMAT_ERROR,
354
+ f"Error reading file content: {e}")
355
+
356
+ def delete_document(self, document_id: int) -> bool:
357
+ """
358
+ Deletes a document and its associated vectors.
359
+ Since vectors are linked via FK with ON DELETE CASCADE, deleting the Document record is sufficient.
360
+
361
+ Args:
362
+ document_id: The ID of the document to delete.
363
+
364
+ Returns:
365
+ True if deleted, False if not found.
366
+ """
367
+ doc = self.document_repo.get_by_id(document_id)
368
+ if not doc:
369
+ return False
370
+
371
+ session = self.document_repo.session
372
+ try:
373
+ session.delete(doc)
374
+ session.commit()
375
+ return True
376
+ except Exception as e:
377
+ session.rollback()
378
+ logging.error(f"Error deleting document {document_id}: {e}")
379
+ raise IAToolkitException(IAToolkitException.ErrorType.DATABASE_ERROR,
380
+ f"Error deleting document: {e}")
381
+
382
+ def sync_collection_types(self, company_short_name: str, categories_config: list):
383
+ """
384
+ This should be called during company initialization or configuration reload.
385
+ """
386
+ company = self.profile_service.get_company_by_short_name(company_short_name)
387
+ if not company:
388
+ raise IAToolkitException(IAToolkitException.ErrorType.INVALID_NAME,
389
+ f'Company {company_short_name} not found')
390
+
391
+
392
+ session = self.document_repo.session
393
+ existing_types = session.query(CollectionType).filter_by(company_id=company.id).all()
394
+ existing_names = {ct.name: ct for ct in existing_types}
395
+
396
+ for cat_name in categories_config:
397
+ if cat_name not in existing_names:
398
+ new_type = CollectionType(company_id=company.id, name=cat_name)
399
+ session.add(new_type)
400
+
401
+ # Optional: remove the types that are no longer in the config?
402
+ # For data safety, better not to delete automatically, or mark them inactive.
403
+
404
+ session.commit()
405
+
406
+ def get_collection_names(self, company_short_name: str) -> List[str]:
407
+ """
408
+ Retrieves the names of all collections defined for a specific company.
409
+ """
410
+ company = self.profile_service.get_company_by_short_name(company_short_name)
411
+ if not company:
412
+ logging.warning(f"Company {company_short_name} not found when listing collections.")
413
+ return []
414
+
415
+ session = self.document_repo.session
416
+ collections = session.query(CollectionType).filter_by(company_id=company.id).all()
417
+ return [c.name for c in collections]
418
+
419
+ def _get_collection_type_id(self, company_id: int, collection_name: str) -> Optional[int]:
420
+ """Helper to get ID by name"""
421
+ if not collection_name:
422
+ return None
423
+ session = self.document_repo.session
424
+ ct = session.query(CollectionType).filter_by(company_id=company_id, name=collection_name).first()
425
+ return ct.id if ct else None