iatoolkit 1.7.0__py3-none-any.whl → 1.15.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iatoolkit/__init__.py +1 -1
- iatoolkit/common/routes.py +16 -3
- iatoolkit/common/util.py +8 -123
- iatoolkit/core.py +1 -0
- iatoolkit/infra/connectors/file_connector.py +10 -2
- iatoolkit/infra/connectors/google_drive_connector.py +3 -0
- iatoolkit/infra/connectors/local_file_connector.py +3 -0
- iatoolkit/infra/connectors/s3_connector.py +24 -1
- iatoolkit/infra/llm_providers/deepseek_adapter.py +17 -1
- iatoolkit/infra/llm_providers/gemini_adapter.py +117 -18
- iatoolkit/infra/llm_providers/openai_adapter.py +175 -18
- iatoolkit/infra/llm_response.py +13 -0
- iatoolkit/locales/en.yaml +82 -4
- iatoolkit/locales/es.yaml +79 -4
- iatoolkit/repositories/llm_query_repo.py +51 -18
- iatoolkit/repositories/models.py +16 -7
- iatoolkit/services/company_context_service.py +294 -133
- iatoolkit/services/configuration_service.py +140 -121
- iatoolkit/services/dispatcher_service.py +1 -4
- iatoolkit/services/knowledge_base_service.py +26 -4
- iatoolkit/services/llm_client_service.py +58 -2
- iatoolkit/services/prompt_service.py +251 -164
- iatoolkit/services/query_service.py +37 -18
- iatoolkit/services/storage_service.py +92 -0
- iatoolkit/static/js/chat_filepond.js +188 -63
- iatoolkit/static/js/chat_main.js +105 -52
- iatoolkit/static/styles/chat_iatoolkit.css +96 -0
- iatoolkit/system_prompts/query_main.prompt +24 -41
- iatoolkit/templates/chat.html +15 -6
- iatoolkit/views/base_login_view.py +1 -1
- iatoolkit/views/categories_api_view.py +111 -0
- iatoolkit/views/chat_view.py +1 -1
- iatoolkit/views/configuration_api_view.py +1 -1
- iatoolkit/views/login_view.py +1 -1
- iatoolkit/views/prompt_api_view.py +88 -7
- {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/METADATA +1 -1
- {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/RECORD +41 -39
- {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/WHEEL +0 -0
- {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/licenses/LICENSE +0 -0
- {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/licenses/LICENSE_COMMUNITY.md +0 -0
- {iatoolkit-1.7.0.dist-info → iatoolkit-1.15.3.dist-info}/top_level.txt +0 -0
iatoolkit/infra/llm_providers/openai_adapter.py
CHANGED

@@ -7,8 +7,10 @@ import logging
 from typing import Dict, List, Optional
 from iatoolkit.infra.llm_response import LLMResponse, ToolCall, Usage
 from iatoolkit.common.exceptions import IAToolkitException
-import html
 from typing import List
+import mimetypes
+import re
+
 
 class OpenAIAdapter:
     """Adapter for the OpenAI API"""
@@ -24,9 +26,14 @@ class OpenAIAdapter:
                    tools: Optional[List[Dict]] = None,
                    text: Optional[Dict] = None,
                    reasoning: Optional[Dict] = None,
-                   tool_choice: str = "auto"
+                   tool_choice: str = "auto",
+                   images: Optional[List[Dict]] = None) -> LLMResponse:
         """Call the OpenAI API and map the result to the common structure"""
         try:
+            # Handle multimodal input if images are present
+            if images:
+                input = self._prepare_multimodal_input(input, images)
+
             # Prepare the parameters for OpenAI
             params = {
                 'model': model,
@@ -56,28 +63,177 @@ class OpenAIAdapter:
 
             raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
 
+    def _prepare_multimodal_input(self, messages: List[Dict], images: List[Dict]) -> List[Dict]:
+        """
+        Transforms the user message from plain text into multimodal content (text + images)
+        using the Responses API format (input_text/input_image).
+        """
+        # Find the last user message
+        target_message = None
+        for msg in reversed(messages):
+            if msg.get('role') == 'user':
+                target_message = msg
+                break
+
+        if not target_message:
+            return messages
+
+        text_content = target_message.get('content', '')
+        content_parts = []
+
+        # Add the text part (Responses API)
+        if text_content:
+            content_parts.append({"type": "input_text", "text": text_content})
+
+        # Add the image parts (Responses API)
+        for img in images:
+            filename = img.get('name', '')
+            mime_type, _ = mimetypes.guess_type(filename)
+            if not mime_type:
+                mime_type = 'image/jpeg'
+
+            base64_data = img.get('base64', '')
+            url = f"data:{mime_type};base64,{base64_data}"
+
+            content_parts.append({
+                "type": "input_image",
+                "image_url": url
+            })
+
+        # Build a new message list with the updated content
+        final_messages = []
+        for msg in messages:
+            if msg is target_message:
+                new_msg = msg.copy()
+                new_msg['content'] = content_parts
+                final_messages.append(new_msg)
+            else:
+                final_messages.append(msg)
+
+        return final_messages
+
     def _map_openai_response(self, openai_response) -> LLMResponse:
         """Map the OpenAI response to the common structure"""
-
-
-
-
-
-
-
-
-
-
-
-
-
+        tool_calls: List[ToolCall] = []
+        content_parts: List[Dict] = []
+        output_text = ""
+
+        print(f'openai_response.output: {openai_response.output}')
+        output_items = getattr(openai_response, 'output', []) or []
+
+        def _extract_markdown_images(text: str) -> None:
+            # Pattern: ![alt](url)
+            markdown_images = re.findall(r'!\[([^\]]*)\]\((https?://[^)]+)\)', text or "")
+            for _alt_text, url in markdown_images:
+                content_parts.append({
+                    "type": "image",
+                    "source": {
+                        "type": "url",
+                        "media_type": "image/webp",
+                        "url": url
+                    }
+                })
+
+        for item in output_items:
+            item_type = getattr(item, 'type', '')
+
+            # 1) Tool calls (Responses API)
+            if item_type == "function_call":
+                tool_calls.append(ToolCall(
+                    call_id=getattr(item, 'call_id', ''),
+                    type=item_type,
+                    name=getattr(item, 'name', ''),
+                    arguments=getattr(item, 'arguments', '{}')
+                ))
+                continue
+
+            # 2) Messages (the most common case in the Responses API)
+            if item_type == "message":
+                msg_content = getattr(item, "content", None) or []
+                for part in msg_content:
+                    part_type = getattr(part, "type", "") or ""
+
+                    # 2.A) Text
+                    if part_type in ("output_text", "text"):
+                        text_content = getattr(part, "text", "") or ""
+                        if text_content:
+                            _extract_markdown_images(text_content)
+                            output_text += text_content
+                            content_parts.append({"type": "text", "text": text_content})
+
+                    # 2.B) Image (may arrive as a URL or as base64 depending on the SDK/endpoint)
+                    elif part_type in ("output_image", "image"):
+                        # Some common variants:
+                        # - part.image_url (string URL)
+                        # - part.url
+                        # - part.b64_json (base64)
+                        image_url = getattr(part, "image_url", None) or getattr(part, "url", None)
+                        b64 = getattr(part, "b64_json", None) or getattr(part, "image", None) or getattr(part, "data", None)
+
+                        # mime_type is sometimes present, sometimes not
+                        mime_type = getattr(part, "media_type", None) or getattr(part, "mime_type", None) or "image/png"
+
+                        if image_url:
+                            content_parts.append({
+                                "type": "image",
+                                "source": {
+                                    "type": "url",
+                                    "media_type": mime_type,
+                                    "url": image_url
+                                }
+                            })
+                            output_text += "\n[Imagen Generada]\n"
+                        elif b64:
+                            content_parts.append({
+                                "type": "image",
+                                "source": {
+                                    "type": "base64",
+                                    "media_type": mime_type,
+                                    "data": b64
+                                }
+                            })
+                            output_text += "\n[Imagen Generada]\n"
+
+                continue
+
+            # 3) Backward compatibility: flat "text" items
+            if item_type == "text":
+                text_content = getattr(item, 'text', '') or ""
+                if text_content:
+                    _extract_markdown_images(text_content)
+                    output_text += text_content
+                    content_parts.append({"type": "text", "text": text_content})
+                continue
+
+            # 4) Backward compatibility: flat "image" items
+            if item_type == "image":
+                base64_data = getattr(item, 'image', '') or getattr(item, 'data', '')
+                mime_type = getattr(item, 'media_type', 'image/png')
+                if base64_data:
+                    content_parts.append({
+                        "type": "image",
+                        "source": {
+                            "type": "base64",
+                            "media_type": mime_type,
+                            "data": base64_data
+                        }
+                    })
+                    output_text += "\n[Imagen Generada]\n"
+                continue
+
+        # Fallback: if no text was extracted, try output_text directly
+        if not output_text:
+            output_text = getattr(openai_response, 'output_text', '') or ""
+            if output_text and not content_parts:
+                _extract_markdown_images(output_text)
+                content_parts.append({"type": "text", "text": output_text})
+
         usage = Usage(
             input_tokens=openai_response.usage.input_tokens if openai_response.usage else 0,
             output_tokens=openai_response.usage.output_tokens if openai_response.usage else 0,
             total_tokens=openai_response.usage.total_tokens if openai_response.usage else 0
         )
 
-        # Reasoning content extracted from Responses output items (type="reasoning")
         reasoning_list = self._extract_reasoning_content(openai_response)
         reasoning_str = "\n".join(reasoning_list)
 
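For reference, a minimal standalone sketch of the markdown-image scan used while mapping the response above: any ![alt](http...) link found in the output text becomes a url-type image block alongside the text block. The helper name and sample text below are hypothetical illustrations, not taken from the package.

import re

def extract_blocks(text: str) -> list[dict]:
    # one text block, plus one url-type image block per markdown image link
    blocks = [{"type": "text", "text": text}]
    for _alt, url in re.findall(r'!\[([^\]]*)\]\((https?://[^)]+)\)', text):
        blocks.append({"type": "image",
                       "source": {"type": "url", "media_type": "image/webp", "url": url}})
    return blocks

print(extract_blocks("Here is the chart: ![chart](https://example.com/chart.webp)"))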
@@ -85,10 +241,11 @@ class OpenAIAdapter:
             id=openai_response.id,
             model=openai_response.model,
             status=openai_response.status,
-            output_text=
+            output_text=output_text,
             output=tool_calls,
             usage=usage,
-            reasoning_content=reasoning_str
+            reasoning_content=reasoning_str,
+            content_parts=content_parts
         )
 
     def _extract_reasoning_content(self, openai_response) -> List[str]:
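For reference, a minimal standalone sketch of the message shape that _prepare_multimodal_input builds for the Responses API: the last user message becomes a list of input_text/input_image parts, with each image inlined as a base64 data URL. The helper name, sample message, and image dict below are hypothetical.

import base64
import mimetypes

def build_multimodal_user_message(text: str, images: list[dict]) -> dict:
    # mirror the adapter's transformation for a single user message
    parts = [{"type": "input_text", "text": text}] if text else []
    for img in images:
        mime_type, _ = mimetypes.guess_type(img.get("name", ""))
        mime_type = mime_type or "image/jpeg"
        parts.append({
            "type": "input_image",
            "image_url": f"data:{mime_type};base64,{img.get('base64', '')}",
        })
    return {"role": "user", "content": parts}

fake_png = base64.b64encode(b"not a real image").decode()
print(build_multimodal_user_message("Describe this image", [{"name": "photo.png", "base64": fake_png}]))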
iatoolkit/infra/llm_response.py
CHANGED
@@ -34,6 +34,9 @@ class LLMResponse:
     usage: Usage
     reasoning_content: str = None  # optional field for Chain of Thought
 
+    # ordered list of content blocks (text and image mixed)
+    # Example: [{"type": "text", "text": "..."}, {"type": "image", "source": {"type": "base64", "data": "..."}}]
+    content_parts: List[Dict] = None
 
     def __post_init__(self):
         """Ensure that output is a list"""
@@ -43,3 +46,13 @@
         if self.reasoning_content is None:
             self.reasoning_content = ""
 
+        if self.content_parts is None:
+            self.content_parts = []
+
+        # if the response has legacy text and no content parts, create a default text part
+        if self.output_text:
+            self.content_parts.append({
+                "type": "text",
+                "text": self.output_text
+            })
+
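For reference, a minimal standalone sketch of the content_parts defaulting added in __post_init__: a legacy text-only response still ends up with one ordered text block. The trimmed-down dataclass below is hypothetical; the real LLMResponse carries more fields (id, model, usage, tool calls, ...).

from dataclasses import dataclass
from typing import Dict, List

@dataclass
class ResponseSketch:
    output_text: str = ""
    content_parts: List[Dict] = None

    def __post_init__(self):
        if self.content_parts is None:
            self.content_parts = []
        # legacy text with no explicit parts becomes a single text block
        if self.output_text:
            self.content_parts.append({"type": "text", "text": self.output_text})

print(ResponseSketch(output_text="hello").content_parts)
# expected: [{'type': 'text', 'text': 'hello'}]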
iatoolkit/locales/en.yaml
CHANGED
@@ -1,5 +1,14 @@
 # Language: English
 ui:
+  common:
+    actions: "Actions"
+    save: "Save"
+    cancel: "Cancel"
+    apply: "Apply Changes"
+    discard_changes: "Discard Changes"
+    order: "Order"
+
+
   login_widget:
     title: "Sign In"
     welcome_message: "Enter your credentials or register to access this platform."
@@ -52,8 +61,7 @@ ui:
     company_config: "Company Configuration (company.yaml)"
     prompts: "Prompts"
    prompts_description: "System Prompts"
-
-    knowledge_rag: "RAG (Vector)"
+    knowledge_rag: "Knowledge Base (RAG)"
     knowledge_static: "Static Context"
     schemas: "Schemas"
     schemas_description: "Data Definitions (YAML)"
@@ -86,6 +94,18 @@ ui:
     logout: "Close session"
     error_loading: "Error loading file content"
     loading: "Loading..."
+    add: "Add"
+    create: "Create"
+    delete: "Delete"
+    add_new: "Add new"
+    category_name_placeholder: "Category name"
+    items: "Items"
+    drag_drop_hint: "Use arrows to reorder items."
+    current_items: "Current items"
+    manage: "Manage"
+    unsaved_changes_title: "Unsaved Changes"
+    unsaved_changes_message: "You have unsaved changes. Do you want to save them before proceeding?"
+    discard_changes: "Discard Changes"
 
   db_explorer:
     data_explorer: "Data Explorer"
@@ -108,6 +128,36 @@ ui:
     meta_synonyms: "Synonyms"
     pii_sesitive: "PII Sensitive"
 
+  prompts:
+    title: "Prompts Manager"
+    subtitle: "Manage and test your AI personas and templates"
+    library: "Prompt Library"
+    new_btn: "New"
+    filter_placeholder: "Filter prompts..."
+    select_prompt: "Select a prompt"
+    toggle_settings: "Toggle Variables & Config"
+    settings_btn: "Settings"
+    save_btn: "Save"
+    tab_editor: "Editor"
+    tab_playground: "Playground"
+    config_title: "Config"
+    desc_label: "Description"
+    desc_placeholder: "Describe this prompt..."
+    vars_label: "Input Variables"
+    no_vars: "No inputs defined"
+    playground_inputs: "Variables & Inputs"
+    no_vars_detected: "No variables detected in prompt template."
+    model_override: "Model Override (Optional)"
+    default_model: "Default (from config)"
+    run_btn: "Run"
+    output_placeholder: "Output will appear here..."
+    new_modal_title: "Create New Prompt"
+    name_label: "Name (Slug)"
+    name_help: "Use lowercase, numbers, and underscores only."
+    category_label: "Category"
+    delete_confirmation: "Delete Prompt?"
+
+
   config:
     editor_description: "IAToolkit configuration file"
     title: "Configuration Editor"
@@ -146,12 +196,34 @@ ui:
     delete_message: "This action cannot be undone. The file will be permanently removed."
     delete_button: "Delete"
     delete_cancel: "Cancel"
-    target_collection: "
+    target_collection: "Collection"
     select_collection_placeholder: "Select a collection"
+    select_collection: "Select a collection"
     collection_required: "Collection is required"
     collection: "Collection"
+    manage_collections: "Manage collections"
     all_collections: "All collections"
 
+  json_editor:
+    title: "JSON Schema Editor"
+    subtitle: "Define the structure and metadata for this JSON field"
+    field_name: "Field Name"
+    type: "Data Type"
+    description: "Description"
+    empty_schema: "No fields defined yet."
+    add_root: "Add Root Field"
+    add_field: "Add Field"
+    types:
+      string: "String"
+      integer: "Integer"
+      number: "Number"
+      boolean: "Boolean"
+      object: "Object (Map)"
+      array: "Array (List)"
+      jsonb: "JSON Raw"
+
+
+
   tooltips:
     history: "History of my queries"
     reload_context: "Force Context Reload"
@@ -317,7 +389,6 @@ js_messages:
   db_created: "Created"
   db_last_access: "Last access"
   db_filename: "Filename"
-  db_user: "User"
   db_status: "Status"
   db_collection: "Collection"
   editor_no_file_selected: "No file selected"
@@ -325,6 +396,13 @@ js_messages:
   cant_load_company: "Could not load company.yaml"
   config_saved: "Configuration saved successfully."
   config_error: "Error saving configuration."
+  all_collections: "All collections"
+  no_variables_found: "No variables found in prompt template."
+  manage_collections: "Manage collections"
+  edit: "Edit"
+
+
+
 
 
 
iatoolkit/locales/es.yaml
CHANGED
@@ -1,5 +1,13 @@
 # locales/es.yaml
 ui:
+  common:
+    actions: "Acciones"
+    save: "Guardar"
+    cancel: "Cancelar"
+    apply: "Aplicar Cambios"
+    discard_changes: "Descartar Cambios"
+    order: "Orden"
+
   login_widget:
     title: "Iniciar Sesión"
     welcome_message: "Ingresa tus credenciales o registrate para acceder a la plataforma."
@@ -49,8 +57,7 @@ ui:
     workspace: "Recursos"
     configuration: "Configuración"
     company_config: "Configuración Empresa (company.yaml)"
-
-    knowledge_rag: "RAG (Vectorial)"
+    knowledge_rag: "Knowledge Base (RAG)"
     knowledge_static: "Contenido Estático"
     prompts: "Prompts"
     prompts_description: "Prompts de sistema"
@@ -83,6 +90,19 @@ ui:
     load_configuration: "Guardar configuración"
     goto_chat: "Ir al chat"
     logout: "Cerrar sesión"
+    add: "Agregar"
+    create: "Crear"
+    delete: "Borrar"
+    add_new: "Agregar nuevo"
+    category_name_placeholder: "nombre de categoria"
+    items: "Items"
+    drag_drop_hint: "Utiliza las flechas para ordenar items.."
+    current_items: "items actuales"
+    manage: "Administrar"
+    unsaved_changes_title: "Cambios sin guardar"
+    unsaved_changes_message: "Tienes modificaciones sin guardar. Quieres grabarlos antes de salir?"
+    discard_changes: "Descartar cambios"
+
 
   db_explorer:
     data_explorer: "Explorador de datos"
@@ -105,6 +125,36 @@ ui:
     meta_synonyms: "Sinonimos"
     pii_sesitive: "IP Sensible"
 
+  prompts:
+    title: "Gestor de Prompts"
+    subtitle: "Gestiona y prueba tus plantillas y personas de IA"
+    library: "Biblioteca de Prompts"
+    new_btn: "Nuevo"
+    filter_placeholder: "Filtrar prompts..."
+    select_prompt: "Selecciona un prompt"
+    toggle_settings: "Alternar Variables y Configuración"
+    settings_btn: "Ajustes"
+    save_btn: "Guardar"
+    tab_editor: "Editor"
+    tab_playground: "Playground"
+    config_title: "Configuración"
+    desc_label: "Descripción"
+    desc_placeholder: "Describe este prompt..."
+    vars_label: "Variables de Entrada"
+    no_vars: "Sin variables definidas"
+    playground_inputs: "Variables y Entradas"
+    no_vars_detected: "No se detectaron variables en la plantilla."
+    model_override: "Modelo Específico (Opcional)"
+    default_model: "Por defecto (según config)"
+    run_btn: "Ejecutar"
+    output_placeholder: "El resultado aparecerá aquí..."
+    new_modal_title: "Crear Nuevo Prompt"
+    name_label: "Nombre (Slug)"
+    name_help: "Solo minúsculas, números y guiones bajos."
+    category_label: "Categoría"
+    delete_confirmation: "Eliminar el prompt?"
+
+
   config:
     editor_description: "Editor de configuración"
     title: "Editor de configuraciones"
@@ -143,12 +193,31 @@ ui:
     delete_message: "Esta acción no se puede deshacer. El archivo se eliminará permanentemente."
     delete_button: "Eliminar"
     delete_cancel: "Cancelar"
-    target_collection: "Categoría
+    target_collection: "Categoría"
     select_collection_placeholder: "Selecciona una categoría"
     collection_required: "Debe seleccionar una categoría"
     all_collections: "Todas las categorías"
     collection: "Categoría"
+    manage_collections: "Administra collections"
+    select_collection: "Selecciona una categorìa"
 
+  json_editor:
+    title: "Editor de Esquema JSON"
+    subtitle: "Define la estructura y metadatos para este campo JSON"
+    field_name: "Nombre del Campo"
+    type: "Tipo de Dato"
+    description: "Descripción"
+    empty_schema: "Aún no hay campos definidos."
+    add_root: "Agregar Campo Raíz"
+    add_field: "Agregar Campo"
+    types:
+      string: "Texto (String)"
+      integer: "Entero"
+      number: "Número (Decimal)"
+      boolean: "Booleano"
+      object: "Objeto (Mapa)"
+      array: "Lista (Array)"
+      jsonb: "JSON Crudo"
 
   tooltips:
     history: "Historial con mis consultas"
@@ -309,7 +378,6 @@ js_messages:
   search_placeholder: "Buscar usuarios..."
   showing: "Mostrando"
   records: "Registros"
-  db_user: "Usuario"
  db_role: "Rol"
   db_verified: "Verificado"
   db_collection: "Colección"
@@ -323,6 +391,13 @@ js_messages:
   cant_load_company: "No puede cargarcompany.yaml"
   config_saved: "Configuración guardada correctamente."
   config_error: "Error guardando configuración"
+  all_collections: "Todas las colecciones"
+  no_variables_found: "El template no contiene variables."
+  manage_collections: "Administra collections"
+  edit: "Editar"
+
+
+
 
 
 
iatoolkit/repositories/llm_query_repo.py
CHANGED

@@ -3,10 +3,13 @@
 #
 # IAToolkit is open source software.
 
-from iatoolkit.repositories.models import LLMQuery, Tool,
+from iatoolkit.repositories.models import (LLMQuery, Tool,
+                                           Company, Prompt, PromptCategory, PromptType)
 from injector import inject
 from iatoolkit.repositories.database_manager import DatabaseManager
 from sqlalchemy import or_
+from typing import List
+
 
 class LLMQueryRepo:
     @inject
@@ -19,12 +22,20 @@ class LLMQueryRepo:
     def rollback(self):
         self.session.rollback()
 
+    # save new query result in the database
     def add_query(self, query: LLMQuery):
         self.session.add(query)
         self.session.commit()
         return query
 
+    # get user query history
+    def get_history(self, company: Company, user_identifier: str) -> list[LLMQuery]:
+        return self.session.query(LLMQuery).filter(
+            LLMQuery.user_identifier == user_identifier,
+        ).filter_by(company_id=company.id).order_by(LLMQuery.created_at.desc()).limit(100).all()
 
+
+    ## --- Tools related methods
     def get_company_tools(self, company: Company) -> list[Tool]:
         return (
             self.session.query(Tool)
@@ -54,12 +65,36 @@ class LLMQueryRepo:
             self.session.add(new_tool)
             tool = new_tool
 
-        self.session.
+        self.session.commit()
         return tool
 
     def delete_tool(self, tool: Tool):
         self.session.query(Tool).filter_by(id=tool.id).delete(synchronize_session=False)
 
+    # -- Prompt related methods
+
+    def get_prompt_by_name(self, company: Company, prompt_name: str):
+        return self.session.query(Prompt).filter_by(company_id=company.id, name=prompt_name).first()
+
+
+    def get_prompts(self, company: Company, include_all: bool = False) -> list[Prompt]:
+        if include_all:
+            # Include all prompts (for the prompt admin dashboard)
+            return self.session.query(Prompt).filter(
+                Prompt.company_id == company.id,
+            ).all()
+        else:
+            # Only active company prompts (default behavior for end users)
+            return self.session.query(Prompt).filter(
+                Prompt.company_id == company.id,
+                Prompt.prompt_type == PromptType.COMPANY.value,
+                Prompt.active == True
+            ).all()
+
+    def get_system_prompts(self) -> list[Prompt]:
+        return self.session.query(Prompt).filter_by(prompt_type=PromptType.SYSTEM.value, active=True).order_by(
+            Prompt.order).all()
+
     def create_or_update_prompt(self, new_prompt: Prompt):
         prompt = self.session.query(Prompt).filter_by(company_id=new_prompt.company_id,
                                                       name=new_prompt.name).first()
@@ -67,16 +102,28 @@ class LLMQueryRepo:
             prompt.category_id = new_prompt.category_id
             prompt.description = new_prompt.description
             prompt.order = new_prompt.order
-            prompt.
+            prompt.prompt_type = new_prompt.prompt_type
             prompt.filename = new_prompt.filename
             prompt.custom_fields = new_prompt.custom_fields
         else:
             self.session.add(new_prompt)
             prompt = new_prompt
 
-        self.session.
+        self.session.commit()
         return prompt
 
+    def delete_prompt(self, prompt: Prompt):
+        self.session.delete(prompt)
+        self.session.commit()
+
+    # -- Prompt category methods
+
+    def get_category_by_name(self, company_id: int, name: str) -> PromptCategory:
+        return self.session.query(PromptCategory).filter_by(company_id=company_id, name=name).first()
+
+    def get_all_categories(self, company_id: int) -> List[PromptCategory]:
+        return self.session.query(PromptCategory).filter_by(company_id=company_id).order_by(PromptCategory.order).all()
+
     def create_or_update_prompt_category(self, new_category: PromptCategory):
         category = self.session.query(PromptCategory).filter_by(company_id=new_category.company_id,
                                                                 name=new_category.name).first()
@@ -89,17 +136,3 @@ class LLMQueryRepo:
         self.session.flush()
         return category
 
-    def get_history(self, company: Company, user_identifier: str) -> list[LLMQuery]:
-        return self.session.query(LLMQuery).filter(
-            LLMQuery.user_identifier == user_identifier,
-        ).filter_by(company_id=company.id).order_by(LLMQuery.created_at.desc()).limit(100).all()
-
-    def get_prompts(self, company: Company) -> list[Prompt]:
-        return self.session.query(Prompt).filter_by(company_id=company.id, is_system_prompt=False).all()
-
-    def get_prompt_by_name(self, company: Company, prompt_name: str):
-        return self.session.query(Prompt).filter_by(company_id=company.id, name=prompt_name).first()
-
-    def get_system_prompts(self) -> list[Prompt]:
-        return self.session.query(Prompt).filter_by(is_system_prompt=True, active=True).order_by(Prompt.order).all()
-