iatoolkit 0.7.5__py3-none-any.whl → 0.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (49)
  1. {iatoolkit-0.7.5.dist-info → iatoolkit-0.7.6.dist-info}/METADATA +1 -1
  2. iatoolkit-0.7.6.dist-info/RECORD +80 -0
  3. {iatoolkit-0.7.5.dist-info → iatoolkit-0.7.6.dist-info}/top_level.txt +3 -0
  4. infra/__init__.py +5 -0
  5. infra/call_service.py +140 -0
  6. infra/connectors/__init__.py +5 -0
  7. infra/connectors/file_connector.py +17 -0
  8. infra/connectors/file_connector_factory.py +57 -0
  9. infra/connectors/google_cloud_storage_connector.py +53 -0
  10. infra/connectors/google_drive_connector.py +68 -0
  11. infra/connectors/local_file_connector.py +46 -0
  12. infra/connectors/s3_connector.py +33 -0
  13. infra/gemini_adapter.py +356 -0
  14. infra/google_chat_app.py +57 -0
  15. infra/llm_client.py +430 -0
  16. infra/llm_proxy.py +139 -0
  17. infra/llm_response.py +40 -0
  18. infra/mail_app.py +145 -0
  19. infra/openai_adapter.py +90 -0
  20. infra/redis_session_manager.py +76 -0
  21. repositories/__init__.py +5 -0
  22. repositories/database_manager.py +95 -0
  23. repositories/document_repo.py +33 -0
  24. repositories/llm_query_repo.py +91 -0
  25. repositories/models.py +309 -0
  26. repositories/profile_repo.py +118 -0
  27. repositories/tasks_repo.py +52 -0
  28. repositories/vs_repo.py +139 -0
  29. views/__init__.py +5 -0
  30. views/change_password_view.py +91 -0
  31. views/chat_token_request_view.py +98 -0
  32. views/chat_view.py +51 -0
  33. views/download_file_view.py +58 -0
  34. views/external_chat_login_view.py +88 -0
  35. views/external_login_view.py +40 -0
  36. views/file_store_view.py +58 -0
  37. views/forgot_password_view.py +64 -0
  38. views/history_view.py +57 -0
  39. views/home_view.py +34 -0
  40. views/llmquery_view.py +65 -0
  41. views/login_view.py +60 -0
  42. views/prompt_view.py +37 -0
  43. views/signup_view.py +87 -0
  44. views/tasks_review_view.py +83 -0
  45. views/tasks_view.py +98 -0
  46. views/user_feedback_view.py +74 -0
  47. views/verify_user_view.py +55 -0
  48. iatoolkit-0.7.5.dist-info/RECORD +0 -36
  49. {iatoolkit-0.7.5.dist-info → iatoolkit-0.7.6.dist-info}/WHEEL +0 -0
--- /dev/null
+++ b/infra/gemini_adapter.py
@@ -0,0 +1,356 @@
+ # Copyright (c) 2024 Fernando Libedinsky
+ # Product: IAToolkit
+ #
+ # IAToolkit is open source software.
+
+ from infra.llm_response import LLMResponse, ToolCall, Usage
+ from typing import Dict, List, Optional
+ from google.generativeai.types import HarmCategory, HarmBlockThreshold
+ from google.protobuf.json_format import MessageToDict
+ from common.exceptions import IAToolkitException
+ import logging
+ import json
+ import uuid
+
+ class GeminiAdapter:
+     """Adapter for the Gemini API"""
+
+     def __init__(self, gemini_client):
+         """Initialize with an already configured Gemini client"""
+         self.client = gemini_client
+
+         # Safety settings - allow content that might be blocked by default
+         self.safety_settings = {
+             HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
+             HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
+             HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+             HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+         }
+
+     def create_response(self,
+                         model: str,
+                         input: List[Dict],
+                         previous_response_id: Optional[str] = None,
+                         context_history: Optional[List[Dict]] = None,
+                         tools: Optional[List[Dict]] = None,
+                         text: Optional[Dict] = None,
+                         reasoning: Optional[Dict] = None,
+                         tool_choice: str = "auto",
+                         ) -> LLMResponse:
+         """Call the Gemini API and map the result to the common structure"""
+         try:
+             # Initialize the Gemini model using the configured client
+             gemini_model = self.client.GenerativeModel(
+                 model_name=self._map_model_name(model),
+                 safety_settings=self.safety_settings
+             )
+
+             # Prepare the content for Gemini
+             if context_history:
+                 # Concatenate the conversation history with the current input
+                 contents = self._prepare_gemini_contents(context_history + input)
+             else:
+                 # Use only the current input if there is no history
+                 contents = self._prepare_gemini_contents(input)
+
+             # Prepare tools if available
+             gemini_tools = self._prepare_gemini_tools(tools) if tools else None
+
+             # Configure generation
+             generation_config = self._prepare_generation_config(text, tool_choice)
+
+             # Call Gemini
+             if gemini_tools:
+                 # With tools
+                 response = gemini_model.generate_content(
+                     contents,
+                     tools=gemini_tools,
+                     generation_config=generation_config
+                 )
+             else:
+                 # Without tools
+                 response = gemini_model.generate_content(
+                     contents,
+                     generation_config=generation_config
+                 )
+
+             # map the answer to a common structure
+             llm_response = self._map_gemini_response(response, model)
+
+             # add the model answer to the history
+             if context_history and llm_response.output_text:
+                 context_history.append(
+                     {
+                         'role': 'assistant',
+                         'content': llm_response.output_text
+                     }
+                 )
+
+             return llm_response
+
+         except Exception as e:
+             error_message = f"Error calling Gemini API: {str(e)}"
+             logging.error(error_message)
+
+             # handle gemini specific errors
+             if "quota" in str(e).lower():
+                 error_message = "The Gemini API quota has been exceeded"
+             elif "blocked" in str(e).lower():
+                 error_message = "The content was blocked by Gemini's safety policies"
+             elif "token" in str(e).lower():
+                 error_message = "Your query exceeds Gemini's context limit"
+
+             raise IAToolkitException(IAToolkitException.ErrorType.LLM_ERROR, error_message)
+
+     def _map_model_name(self, model: str) -> str:
+         """Map a model name to Gemini's format"""
+         model_mapping = {
+             "gemini-pro": "gemini-2.5-pro",
+             "gemini": "gemini-2.5-pro",
+             "gemini-1.5": "gemini-2.5-pro",
+             "gemini-flash": "gemini-1.5-flash",
+             "gemini-2.0": "gemini-2.0-flash-exp"
+         }
+         return model_mapping.get(model.lower(), model)
+
+     def _prepare_gemini_contents(self, input: List[Dict]) -> List[Dict]:
+         """Convert messages from OpenAI format to Gemini format"""
+         gemini_contents = []
+
+         for message in input:
+             if message.get("role") == "system":
+                 gemini_contents.append({
+                     "role": "user",
+                     "parts": [{"text": f"[SYSTEM INSTRUCTIONS]\n{message.get('content', '')}"}]
+                 })
+             elif message.get("role") == "user":
+                 gemini_contents.append({
+                     "role": "user",
+                     "parts": [{"text": message.get("content", "")}]
+                 })
+             elif message.get("role") == "assistant":
+                 # Replay earlier assistant turns using Gemini's "model" role
+                 gemini_contents.append({
+                     "role": "model",
+                     "parts": [{"text": message.get("content", "")}]
+                 })
+             elif message.get("type") == "function_call_output":
+                 gemini_contents.append({
+                     "role": "function",
+                     "parts": [{
+                         "function_response": {
+                             "name": "tool_result",
+                             "response": {"output": message.get("output", "")}
+                         }
+                     }]
+                 })
+
+         return gemini_contents
+
+     def _prepare_gemini_tools(self, tools: List[Dict]) -> Optional[List[Dict]]:
+         """Convert tools from OpenAI format to Gemini format"""
+         if not tools:
+             return None
+
+         function_declarations = []
+         for i, tool in enumerate(tools):
+             # Check the basic structure
+             tool_type = tool.get("type")
+
+             if tool_type != "function":
+                 logging.warning(f"Tool {i} is not of type 'function': {tool_type}")
+                 continue
+
+             # Extract the tool data (flat structure)
+             function_name = tool.get("name")
+             function_description = tool.get("description", "")
+             function_parameters = tool.get("parameters", {})
+
+             # Check that the name exists and is not empty
+             if not function_name or not isinstance(function_name, str) or not function_name.strip():
+                 logging.error(f"PROBLEM: tool {i} has no valid name")
+                 continue
+
+             # Prepare the function declaration for Gemini
+             gemini_function = {
+                 "name": function_name,
+                 "description": function_description,
+             }
+
+             # Add parameters if present, stripping OpenAI-specific fields
+             if function_parameters:
+                 clean_parameters = self._clean_openai_specific_fields(function_parameters)
+                 gemini_function["parameters"] = clean_parameters
+
+             function_declarations.append(gemini_function)
+
+         if function_declarations:
+             final_tools = [{
+                 "function_declarations": function_declarations
+             }]
+
+             # Log the final structure for debugging
+             # logging.info("Final tool structure for Gemini:")
+             # logging.info(f"{json.dumps(final_tools, indent=2)}")
+
+             return final_tools
+
+         return None
+
+     def _clean_openai_specific_fields(self, parameters: Dict) -> Dict:
+         """Strip OpenAI-specific fields that Gemini does not understand"""
+         clean_params = {}
+
+         # Fields allowed by Gemini according to its protobuf Schema
+         # These are the only fields Gemini accepts in its schemas
+         allowed_fields = {
+             "type",         # Data type: string, number, object, array, boolean
+             "properties",   # For objects: defines the properties
+             "required",     # Array of required properties
+             "items",        # For arrays: defines the element type
+             "description",  # Description of the field
+             "enum",         # List of allowed values
+             # Gemini does NOT support these common JSON Schema fields:
+             # "pattern", "format", "minimum", "maximum", "minItems", "maxItems",
+             # "minLength", "maxLength", "additionalProperties", "strict"
+         }
+
+         for key, value in parameters.items():
+             if key in allowed_fields:
+                 if key == "properties" and isinstance(value, dict):
+                     # Recursively clean the properties
+                     clean_props = {}
+                     for prop_name, prop_def in value.items():
+                         if isinstance(prop_def, dict):
+                             clean_props[prop_name] = self._clean_openai_specific_fields(prop_def)
+                         else:
+                             clean_props[prop_name] = prop_def
+                     clean_params[key] = clean_props
+                 elif key == "items" and isinstance(value, dict):
+                     # Recursively clean the array items
+                     clean_params[key] = self._clean_openai_specific_fields(value)
+                 else:
+                     clean_params[key] = value
+             else:
+                 logging.debug(f"Field '{key}' removed (not supported by Gemini)")
+
+         return clean_params
+
+     def _prepare_generation_config(self, text: Optional[Dict], tool_choice: str) -> Dict:
+         """Prepare the generation configuration for Gemini"""
+         config = {"candidate_count": 1}
+
+         if text:
+             if "temperature" in text:
+                 config["temperature"] = float(text["temperature"])
+             if "max_tokens" in text:
+                 config["max_output_tokens"] = int(text["max_tokens"])
+             if "top_p" in text:
+                 config["top_p"] = float(text["top_p"])
+
+         return config
+
+     def _map_gemini_response(self, gemini_response, model: str) -> LLMResponse:
+         """Map a Gemini response to the common structure"""
+         response_id = str(uuid.uuid4())
+         output_text = ""
+         tool_calls = []
+
+         if gemini_response.candidates and len(gemini_response.candidates) > 0:
+             candidate = gemini_response.candidates[0]
+
+             for part in candidate.content.parts:
+                 if hasattr(part, 'text') and part.text:
+                     output_text += part.text
+                 elif hasattr(part, 'function_call') and part.function_call:
+                     func_call = part.function_call
+                     tool_calls.append(ToolCall(
+                         call_id=f"call_{uuid.uuid4().hex[:8]}",
+                         type="function_call",
+                         name=func_call.name,
+                         arguments=json.dumps(MessageToDict(func_call._pb).get('args', {}))
+                     ))
+
+         # Determine the status
+         status = "completed"
+         if gemini_response.candidates:
+             candidate = gemini_response.candidates[0]
+             if hasattr(candidate, 'finish_reason'):
+                 # Handle finish_reason both as an object with .name and as a raw integer/enum
+                 finish_reason = candidate.finish_reason
+
+                 # If finish_reason has a .name attribute, use it
+                 if hasattr(finish_reason, 'name'):
+                     finish_reason_name = finish_reason.name
+                 else:
+                     # If it is a raw integer or enum, convert it to a string
+                     finish_reason_name = str(finish_reason)
+
+                 if finish_reason_name in ["SAFETY", "RECITATION", "3", "4"]:  # include the numeric values too
+                     status = "blocked"
+                 elif finish_reason_name in ["MAX_TOKENS", "LENGTH", "2"]:  # include the numeric values too
+                     status = "length_exceeded"
+
+         # Compute token usage
+         usage = self._extract_usage_metadata(gemini_response)
+
+         # Basic estimate if there is no usage data
+         if usage.total_tokens == 0:
+             estimated_output_tokens = len(output_text) // 4
+             usage = Usage(
+                 input_tokens=0,
+                 output_tokens=estimated_output_tokens,
+                 total_tokens=estimated_output_tokens
+             )
+
+         return LLMResponse(
+             id=response_id,
+             model=model,
+             status=status,
+             output_text=output_text,
+             output=tool_calls,
+             usage=usage
+         )
+
+     def _extract_usage_metadata(self, gemini_response) -> Usage:
+         """Safely extract token usage information"""
+         input_tokens = 0
+         output_tokens = 0
+         total_tokens = 0
+
+         try:
+             # Check whether usage_metadata exists
+             if hasattr(gemini_response, 'usage_metadata') and gemini_response.usage_metadata:
+                 usage_metadata = gemini_response.usage_metadata
+
+                 # Access the attributes directly, not with .get()
+                 if hasattr(usage_metadata, 'prompt_token_count'):
+                     input_tokens = usage_metadata.prompt_token_count
+                 if hasattr(usage_metadata, 'candidates_token_count'):
+                     output_tokens = usage_metadata.candidates_token_count
+                 if hasattr(usage_metadata, 'total_token_count'):
+                     total_tokens = usage_metadata.total_token_count
+
+         except Exception as e:
+             logging.warning(f"Could not extract usage_metadata from Gemini: {e}")
+
+         # If there is no usage data or it is zero, make a basic estimate
+         if total_tokens == 0 and output_tokens == 0:
+             # Get the output text for the estimate
+             output_text = ""
+             if (hasattr(gemini_response, 'candidates') and
+                     gemini_response.candidates and
+                     len(gemini_response.candidates) > 0):
+
+                 candidate = gemini_response.candidates[0]
+                 if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'):
+                     for part in candidate.content.parts:
+                         if hasattr(part, 'text') and part.text:
+                             output_text += part.text
+
+             # Basic estimate (approximately 4 characters per token)
+             estimated_output_tokens = len(output_text) // 4 if output_text else 0
+             output_tokens = estimated_output_tokens
+             total_tokens = estimated_output_tokens
+
+         return Usage(
+             input_tokens=input_tokens,
+             output_tokens=output_tokens,
+             total_tokens=total_tokens
+         )
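
For orientation, a minimal usage sketch of the new adapter (not part of the packaged diff). It assumes the injected gemini_client is the configured google.generativeai module, which exposes the GenerativeModel constructor the adapter calls; the API key is a placeholder.

import google.generativeai as genai
from infra.gemini_adapter import GeminiAdapter

# Assumption: the adapter receives the already-configured
# google.generativeai module itself as its client.
genai.configure(api_key="YOUR_API_KEY")  # placeholder, not a real key
adapter = GeminiAdapter(gemini_client=genai)

# "gemini-flash" is remapped to "gemini-1.5-flash" by _map_model_name()
response = adapter.create_response(
    model="gemini-flash",
    input=[{"role": "user", "content": "Hello, Gemini"}],
)
print(response.status, response.output_text, response.usage.total_tokens)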
--- /dev/null
+++ b/infra/google_chat_app.py
@@ -0,0 +1,57 @@
+ # Copyright (c) 2024 Fernando Libedinsky
+ # Product: IAToolkit
+ #
+ # IAToolkit is open source software.
+
+ from injector import inject
+ from infra.call_service import CallServiceClient
+ import logging
+ import os
+ from typing import Dict, Any
+
+
+ class GoogleChatApp:
+     @inject
+     def __init__(self, call_service: CallServiceClient):
+         self.call_service = call_service
+
+     def send_message(self, message_data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Sends a message to Google Chat.
+
+         Args:
+             message_data: Complete message data structure with type, space, and message
+
+         Returns:
+             Dict with the service response
+         """
+         try:
+             # get the bot URL from environment variables
+             bot_url = os.getenv('GOOGLE_CHAT_BOT_URL')
+             if not bot_url:
+                 raise Exception('GOOGLE_CHAT_BOT_URL is not set in the environment variables')
+
+             # send the POST request with the complete message data
+             response, status_code = self.call_service.post(bot_url, message_data)
+
+             if status_code == 200:
+                 return {
+                     "success": True,
+                     "message": "Message sent successfully",
+                     "response": response
+                 }
+             else:
+                 logging.error(f"Error sending message to Google Chat. Status: {status_code}, Response: {response}")
+                 return {
+                     "success": False,
+                     "message": f"Error sending message. Status: {status_code}",
+                     "response": response
+                 }
+
+         except Exception as e:
+             logging.exception(f"Unexpected error sending message to Google Chat: {e}")
+             return {
+                 "success": False,
+                 "message": f"Internal server error: {str(e)}",
+                 "response": None
+             }
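
Likewise, a minimal usage sketch for the chat app (not part of the packaged diff), assuming CallServiceClient can be constructed directly and that its post() method returns the (response, status_code) tuple unpacked above; the URL and space name are hypothetical.

import os
from infra.call_service import CallServiceClient
from infra.google_chat_app import GoogleChatApp

# Hypothetical endpoint; send_message() reads this environment variable
os.environ["GOOGLE_CHAT_BOT_URL"] = "https://chat.example.com/bot"

app = GoogleChatApp(call_service=CallServiceClient())
result = app.send_message({
    "type": "MESSAGE",
    "space": {"name": "spaces/AAAA1234"},   # hypothetical space id
    "message": {"text": "Build finished"},
})
print(result["success"], result["message"])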