evolutia 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evolutia/__init__.py +9 -0
- evolutia/async_llm_providers.py +157 -0
- evolutia/cache/__init__.py +9 -0
- evolutia/cache/exercise_cache.py +226 -0
- evolutia/cache/llm_cache.py +487 -0
- evolutia/complexity_validator.py +33 -31
- evolutia/config_manager.py +53 -40
- evolutia/evolutia_engine.py +341 -66
- evolutia/exam_generator.py +44 -43
- evolutia/exceptions.py +38 -0
- evolutia/exercise_analyzer.py +54 -91
- evolutia/imports.py +175 -0
- evolutia/llm_providers.py +223 -61
- evolutia/material_extractor.py +166 -88
- evolutia/rag/rag_indexer.py +107 -90
- evolutia/rag/rag_retriever.py +130 -103
- evolutia/retry_utils.py +280 -0
- evolutia/utils/json_parser.py +29 -19
- evolutia/utils/markdown_parser.py +185 -159
- evolutia/utils/math_extractor.py +153 -144
- evolutia/validation/__init__.py +1 -0
- evolutia/validation/args_validator.py +253 -0
- evolutia/validation/config_validator.py +502 -0
- evolutia/variation_generator.py +82 -70
- evolutia-0.1.2.dist-info/METADATA +536 -0
- evolutia-0.1.2.dist-info/RECORD +37 -0
- evolutia_cli.py +22 -9
- evolutia-0.1.1.dist-info/METADATA +0 -221
- evolutia-0.1.1.dist-info/RECORD +0 -27
- {evolutia-0.1.1.dist-info → evolutia-0.1.2.dist-info}/WHEEL +0 -0
- {evolutia-0.1.1.dist-info → evolutia-0.1.2.dist-info}/entry_points.txt +0 -0
- {evolutia-0.1.1.dist-info → evolutia-0.1.2.dist-info}/licenses/LICENSE +0 -0
- {evolutia-0.1.1.dist-info → evolutia-0.1.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,487 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Caché de respuestas LLM para EvolutIA.
|
|
3
|
+
Reduce costos y tiempo de ejecución almacenando respuestas de LLMs.
|
|
4
|
+
"""
|
|
5
|
+
import hashlib
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
import sys
|
|
9
|
+
import time
|
|
10
|
+
import threading
|
|
11
|
+
import atexit
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Optional, Dict, List, Any
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class LLMCache:
    """
    Cache for LLM responses.

    Features:
    - In-memory cache with optional metadata persistence on disk
    - Configurable TTL to expire entries (0 disables expiration)
    - Configurable max size with LRU eviction
    - Filters out empty or error-looking responses
    - Keys are SHA-256 hashes of (provider, model, prompt)
    - Logs cache hits and misses
    - Write-behind persistence with debounce to reduce disk I/O

    NOTE(review): only cache *metadata* (counters) is persisted/loaded from
    disk — the responses themselves live in memory only (see
    `_persist_to_disk` / `_load_from_disk`).
    """

    def __init__(
        self,
        max_size: int = 1000,
        ttl_hours: int = 24,
        persist_to_disk: bool = True,
        cache_dir: Optional[Path] = None,
        debounce_seconds: float = 5.0,
        max_memory_mb: int = 500
    ):
        """
        Initialize the LLM cache.

        Args:
            max_size: Maximum number of cached entries.
            ttl_hours: Time-to-live in hours (0 = no expiration).
            persist_to_disk: If True, persist cache metadata to disk.
            cache_dir: Directory for persistent cache (default: ./storage/cache/llm).
            debounce_seconds: Debounce window for write-behind persistence.
            max_memory_mb: Maximum memory budget in MB (0 = unlimited).
        """
        self.max_size = max_size
        # TTL stored in seconds; 0 means "never expire".
        self.ttl = ttl_hours * 3600 if ttl_hours > 0 else 0
        self.persist_to_disk = persist_to_disk
        self.debounce_seconds = debounce_seconds
        self.max_memory_bytes = max_memory_mb * 1024 * 1024 if max_memory_mb > 0 else 0

        if cache_dir is None:
            cache_dir = Path('./storage/cache/llm')
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(parents=True, exist_ok=True)

        # key -> {'response', 'timestamp', 'metadata'}
        self.cache: Dict[str, Dict[str, Any]] = {}
        # key -> last-use timestamp (refreshed on hit, so eviction is true LRU)
        self.timestamps: Dict[str, float] = {}
        # key -> estimated size in bytes (kept in sync with total_memory_bytes)
        self.entry_sizes: Dict[str, int] = {}
        self.total_memory_bytes = 0
        self.hits = 0
        self.misses = 0

        # Write-behind with debounce
        self._pending_persist = False
        self._persist_lock = threading.Lock()
        self._persist_thread = None
        self._stop_event = threading.Event()

        # Load cache metadata from disk if persistence is enabled
        if self.persist_to_disk:
            self._load_from_disk()
            # Flush pending state when the interpreter exits
            atexit.register(self._force_persist_to_disk)

        logger.info(
            f"[LLMCache] Inicializado: max_size={max_size}, "
            f"ttl={ttl_hours}h, persist={persist_to_disk}, "
            f"debounce={debounce_seconds}s, max_memory={max_memory_mb}MB"
        )

    def _get_cache_key(self, prompt: str, provider: str, model: str) -> str:
        """
        Build a cache key from prompt, provider and model.

        Args:
            prompt: The prompt sent to the LLM.
            provider: Provider name (e.g. 'openai', 'anthropic').
            model: Model name (e.g. 'gpt-4', 'claude-3-opus').

        Returns:
            SHA-256 hex digest used as the cache key.
        """
        key_data = f"{provider}:{model}:{prompt}"
        return hashlib.sha256(key_data.encode()).hexdigest()

    def get(self, prompt: str, provider: str, model: str) -> Optional[str]:
        """
        Look up a cached response.

        Args:
            prompt: The prompt sent to the LLM.
            provider: Provider name.
            model: Model name.

        Returns:
            The cached response if present and not expired, otherwise None.
        """
        key = self._get_cache_key(prompt, provider, model)

        if key not in self.cache:
            self.misses += 1
            logger.debug(f"[LLMCache] Cache miss para {provider}:{model}")
            return None

        # TTL check: age is measured from the last recorded timestamp
        if self.ttl > 0:
            age = time.time() - self.timestamps[key]
            if age > self.ttl:
                logger.debug(f"[LLMCache] Entrada expirada para {provider}:{model} (age={age:.0f}s)")
                self._remove_entry(key)
                self.misses += 1
                return None

        self.hits += 1
        # Bug fix: refresh recency on hit so eviction is genuinely LRU
        # (previously eviction order was insertion order, i.e. FIFO, despite
        # the documented "LRU eviction").
        self.timestamps[key] = time.time()
        logger.info(
            f"[LLMCache] Cache HIT para {provider}:{model} "
            f"(hit_rate={self.hit_rate:.1%})"
        )
        return self.cache[key]['response']

    def put(
        self,
        prompt: str,
        provider: str,
        model: str,
        response: str,
        metadata: Optional[Dict] = None
    ) -> bool:
        """
        Store a response in the cache.

        Args:
            prompt: The prompt sent to the LLM.
            provider: Provider name.
            model: Model name.
            response: The LLM response text.
            metadata: Extra metadata (tokens, cost, etc.).

        Returns:
            True if stored, False if rejected (empty / error-like / too short).
        """
        # Never cache empty responses
        if not response or not response.strip():
            logger.debug(f"[LLMCache] Rechazando respuesta vacía para {provider}:{model}")
            return False

        # Never cache responses that look like errors
        error_indicators = [
            'error', 'lo siento', 'sorry', 'cannot',
            'unable', 'failed', 'unknown error'
        ]
        response_lower = response.lower()
        if any(indicator in response_lower for indicator in error_indicators):
            logger.debug(
                f"[LLMCache] Rechazando respuesta de error para {provider}:{model}"
            )
            return False

        # Very short responses are likely errors too
        if len(response) < 20:
            logger.debug(
                f"[LLMCache] Rechazando respuesta muy corta ({len(response)} chars) "
                f"para {provider}:{model}"
            )
            return False

        key = self._get_cache_key(prompt, provider, model)
        timestamp = time.time()

        # Estimate the footprint of the new entry
        entry_size = self._estimate_entry_size(key, response, metadata)

        # Bug fix: when overwriting an existing key, remove the old entry
        # first. Previously the old entry's size was never subtracted from
        # total_memory_bytes (the counter drifted upward on every overwrite),
        # and the max_size check below could needlessly evict an unrelated
        # entry even though no net entry was being added.
        if key in self.cache:
            self._remove_entry(key)

        # Enforce entry-count limit
        if len(self.cache) >= self.max_size:
            self._evict_oldest_entries(count=1)

        # Enforce memory limit (evicts LRU entries if needed)
        self._check_memory_limit(entry_size)
        self.cache[key] = {
            'response': response,
            'timestamp': timestamp,
            'metadata': metadata or {}
        }
        self.timestamps[key] = timestamp
        self.entry_sizes[key] = entry_size
        self.total_memory_bytes += entry_size

        logger.debug(f"[LLMCache] Cache guardado para {provider}:{model} ({entry_size} bytes)")

        # Schedule disk persistence (write-behind with debounce)
        if self.persist_to_disk:
            self._schedule_persist()

        return True

    def _evict_oldest_entries(self, count: int = 1):
        """
        Evict the least-recently-used entries.

        Args:
            count: Number of entries to evict.
        """
        if count <= 0 or not self.timestamps:
            return

        # Pick the 'count' entries with the oldest timestamps
        oldest_keys = sorted(
            self.timestamps.items(),
            key=lambda x: x[1]
        )[:count]

        logger.debug(f"[LLMCache] Evicting {count} entradas más viejas: {oldest_keys}")

        for key, timestamp in oldest_keys:
            logger.debug(f"[LLMCache] Removing key={key[:16]}..., timestamp={timestamp}")
            self._remove_entry(key)

        logger.debug(f"[LLMCache] Evicted {count} entradas más viejas")

    def _remove_entry(self, key: str):
        """
        Remove a single cache entry and update the memory accounting.

        Args:
            key: Cache key to remove.
        """
        if key in self.cache:
            del self.cache[key]
        if key in self.timestamps:
            del self.timestamps[key]
        if key in self.entry_sizes:
            entry_size = self.entry_sizes.pop(key)
            self.total_memory_bytes -= entry_size

    def clear(self):
        """Clear the whole cache, counters included."""
        initial_size = len(self.cache)
        self.cache.clear()
        self.timestamps.clear()
        self.entry_sizes.clear()
        self.total_memory_bytes = 0
        self.hits = 0
        self.misses = 0

        logger.info(f"[LLMCache] Caché limpiado (eliminadas {initial_size} entradas)")

        # Remove on-disk cache files too, when persistence is enabled
        if self.persist_to_disk and self.cache_dir.exists():
            for cache_file in self.cache_dir.glob('*.json'):
                try:
                    cache_file.unlink()
                except Exception as e:
                    logger.warning(f"[LLMCache] Error eliminando {cache_file}: {e}")

    def _persist_to_disk(self):
        """Persist cache *metadata* (counters only, not responses) to disk."""
        if not self.persist_to_disk:
            return

        try:
            metadata_file = self.cache_dir / 'llm_cache_metadata.json'
            metadata = {
                'version': '1.0',
                'last_persisted': time.time(),
                'entries_count': len(self.cache),
                'hits': self.hits,
                'misses': self.misses
            }

            with open(metadata_file, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2)

            logger.debug(f"[LLMCache] Metadatos persistidos en {metadata_file}")
        except Exception as e:
            logger.warning(f"[LLMCache] Error persistiendo metadatos: {e}")

    def _load_from_disk(self):
        """Load cache metadata (counters) from disk, if present."""
        if not self.persist_to_disk:
            return

        try:
            metadata_file = self.cache_dir / 'llm_cache_metadata.json'
            if not metadata_file.exists():
                return

            with open(metadata_file, 'r', encoding='utf-8') as f:
                metadata = json.load(f)

            self.hits = metadata.get('hits', 0)
            self.misses = metadata.get('misses', 0)
            entries_count = metadata.get('entries_count', 0)

            logger.info(
                f"[LLMCache] Metadatos cargados: {entries_count} entradas, "
                f"hits={self.hits}, misses={self.misses}"
            )
        except Exception as e:
            logger.warning(f"[LLMCache] Error cargando metadatos: {e}")

    def _estimate_entry_size(self, key: str, value: str, metadata: Optional[Dict] = None) -> int:
        """
        Estimate the in-memory size of a cache entry in bytes.

        Args:
            key: Entry key.
            value: Entry value (the response string).
            metadata: Optional metadata dict.

        Returns:
            Estimated size in bytes. NOTE: sys.getsizeof is shallow; this is a
            rough lower bound, not an exact measurement.
        """
        size = sys.getsizeof(key) + sys.getsizeof(value)
        if metadata:
            size += sys.getsizeof(json.dumps(metadata))
        # Fixed overhead for the wrapping dicts and bookkeeping structures
        size += 100
        return size

    def _check_memory_limit(self, new_entry_size: int) -> bool:
        """
        Ensure there is room for a new entry, evicting LRU entries if needed.

        Args:
            new_entry_size: Size of the incoming entry in bytes.

        Returns:
            Always True: either there was room already, or enough entries
            were evicted to make room.
        """
        if self.max_memory_bytes == 0:
            return True  # No memory limit configured

        projected_size = self.total_memory_bytes + new_entry_size

        if projected_size <= self.max_memory_bytes:
            return True

        # Over budget: free the excess
        self._evict_until_within_limit(projected_size - self.max_memory_bytes)
        return True

    def _evict_until_within_limit(self, bytes_to_free: int):
        """
        Evict LRU entries until at least `bytes_to_free` bytes are released.

        Args:
            bytes_to_free: Number of bytes to release.
        """
        bytes_freed = 0
        while bytes_freed < bytes_to_free and self.cache:
            # Least-recently-used entry
            oldest_key = min(self.timestamps.items(), key=lambda x: x[1])[0]
            oldest_size = self.entry_sizes.get(oldest_key, 0)

            self._remove_entry(oldest_key)
            bytes_freed += oldest_size

        if bytes_freed > 0:
            logger.debug(
                f"[LLMCache] Memoria excedida - evictadas entradas hasta liberar "
                f"{bytes_freed / 1024:.1f} KB"
            )

    def _schedule_persist(self):
        """
        Schedule a disk persist with debounce (write-behind).

        Avoids writing to disk on every put(): the write happens only after a
        quiet period of `debounce_seconds`.
        """
        with self._persist_lock:
            self._pending_persist = True

            # Start a worker thread if none is currently running
            if self._persist_thread is None or not self._persist_thread.is_alive():
                self._persist_thread = threading.Thread(
                    target=self._debounced_persist_worker,
                    daemon=True
                )
                self._persist_thread.start()

    def _debounced_persist_worker(self):
        """
        Worker thread: waits out the debounce window, then persists.
        """
        try:
            # Wait for the debounce window
            time.sleep(self.debounce_seconds)

            with self._persist_lock:
                # The persist may have been cancelled (shutdown) meanwhile
                if not self._pending_persist or self._stop_event.is_set():
                    return

                self._pending_persist = False

            # Persist outside the lock so cache access is not blocked
            self._persist_to_disk()
        except Exception as e:
            logger.warning(f"[LLMCache] Error en worker de persistencia: {e}")

    def _force_persist_to_disk(self):
        """
        Force an immediate persist to disk.

        Used on interpreter shutdown (atexit). Also stops the debounce worker;
        after this call no further background persists will run.
        """
        if self._stop_event.is_set():
            return

        # Signal the debounce worker to stand down
        self._stop_event.set()

        with self._persist_lock:
            self._pending_persist = False

        # Persist right away
        self._persist_to_disk()

    def get_stats(self) -> Dict[str, Any]:
        """
        Collect cache statistics.

        Returns:
            Dict with entry counts, hit/miss counters, hit rate, configured
            limits and current memory usage.
        """
        total_requests = self.hits + self.misses
        hit_rate = self.hits / total_requests if total_requests > 0 else 0.0
        memory_mb = self.total_memory_bytes / (1024 * 1024)

        return {
            'entries': len(self.cache),
            'hits': self.hits,
            'misses': self.misses,
            'hit_rate': hit_rate,
            'max_size': self.max_size,
            'ttl_hours': self.ttl / 3600,
            'persist_to_disk': self.persist_to_disk,
            'cache_dir': str(self.cache_dir),
            'memory_mb': round(memory_mb, 2),
            'memory_limit_mb': round(self.max_memory_bytes / (1024 * 1024), 2) if self.max_memory_bytes > 0 else 0,
            'memory_usage_percent': round((self.total_memory_bytes / self.max_memory_bytes) * 100, 1) if self.max_memory_bytes > 0 else 0
        }

    @property
    def hit_rate(self) -> float:
        """
        Cache hit rate.

        Returns:
            Fraction of lookups that were hits (0.0 to 1.0).
        """
        total_requests = self.hits + self.misses
        return self.hits / total_requests if total_requests > 0 else 0.0

    def __len__(self) -> int:
        """Return the number of cached entries."""
        return len(self.cache)

    def __repr__(self) -> str:
        """Debug representation of the cache."""
        return (
            f"LLMCache(entries={len(self.cache)}, hits={self.hits}, "
            f"misses={self.misses}, hit_rate={self.hit_rate:.1%})"
        )
|
evolutia/complexity_validator.py
CHANGED
|
@@ -1,12 +1,12 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Validador de complejidad.
|
|
3
|
-
Verifica que las variaciones generadas sean más complejas que los originales.
|
|
4
|
-
"""
|
|
5
|
-
import logging
|
|
6
|
-
from typing import Dict
|
|
7
|
-
|
|
8
|
-
from .exercise_analyzer import ExerciseAnalyzer
|
|
9
|
-
from .utils.math_extractor import extract_math_expressions, estimate_complexity
|
|
1
|
+
"""
|
|
2
|
+
Validador de complejidad.
|
|
3
|
+
Verifica que las variaciones generadas sean más complejas que los originales.
|
|
4
|
+
"""
|
|
5
|
+
import logging
|
|
6
|
+
from typing import Dict, List, Tuple
|
|
7
|
+
|
|
8
|
+
from .exercise_analyzer import ExerciseAnalyzer
|
|
9
|
+
from .utils.math_extractor import extract_math_expressions, estimate_complexity
|
|
10
10
|
|
|
11
11
|
|
|
12
12
|
logger = logging.getLogger(__name__)
|
|
@@ -154,26 +154,28 @@ class ComplexityValidator:
|
|
|
154
154
|
)
|
|
155
155
|
}
|
|
156
156
|
|
|
157
|
-
def validate_batch(self, exercises_and_variations:
|
|
158
|
-
"""
|
|
159
|
-
Valida un lote de variaciones.
|
|
160
|
-
|
|
161
|
-
Args:
|
|
162
|
-
exercises_and_variations: Lista de tuplas (ejercicio_original, análisis_original, variación)
|
|
163
|
-
|
|
164
|
-
Returns:
|
|
165
|
-
Lista de resultados de validación
|
|
166
|
-
"""
|
|
167
|
-
results = []
|
|
168
|
-
|
|
169
|
-
for original_exercise, original_analysis, variation in exercises_and_variations:
|
|
170
|
-
result = self.validate(original_exercise, original_analysis, variation)
|
|
171
|
-
results.append(result)
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
157
|
+
def validate_batch(self, exercises_and_variations: List[Tuple[Dict, Dict, Dict]]) -> List[Dict]:
    """
    Validate a batch of variations.

    Args:
        exercises_and_variations: List of tuples
            (original_exercise, original_analysis, variation).

    Returns:
        One validation-result dict per input tuple, in order.
    """
    outcomes: List[Dict] = []

    for idx, triple in enumerate(exercises_and_variations, 1):
        exercise, analysis, variation = triple
        outcome = self.validate(exercise, analysis, variation)
        outcomes.append(outcome)

        # Fall back to a positional label when the variation has none
        label = variation.get('label') or f"variación_{idx}"
        if not outcome['is_valid']:
            logger.warning(f"[ComplexityValidator] Variación inválida '{label}': {outcome.get('reason', 'Complejidad insuficiente')}")
        else:
            logger.info(f"[ComplexityValidator] Variación válida '{label}': {len(outcome['improvements'])} mejoras detectadas")

    logger.info(f"[ComplexityValidator] Validación de lote completada: {len(outcomes)} variaciones procesadas")
    return outcomes
|
|
179
181
|
|
evolutia/config_manager.py
CHANGED
|
@@ -1,14 +1,14 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Gestor de configuración automática.
|
|
4
|
-
Genera config.yaml basado en la estructura del proyecto y metadatos de archivos.
|
|
5
|
-
"""
|
|
6
|
-
import yaml
|
|
7
|
-
import logging
|
|
8
|
-
from pathlib import Path
|
|
9
|
-
from typing import Dict, List, Set, Any
|
|
10
|
-
import sys
|
|
11
|
-
import json
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Gestor de configuración automática.
|
|
4
|
+
Genera config.yaml basado en la estructura del proyecto y metadatos de archivos.
|
|
5
|
+
"""
|
|
6
|
+
import yaml
|
|
7
|
+
import logging
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Dict, List, Set, Any, Union, Optional
|
|
10
|
+
import sys
|
|
11
|
+
import json
|
|
12
12
|
try:
|
|
13
13
|
import jsonschema
|
|
14
14
|
JSONSCHEMA_AVAILABLE = True
|
|
@@ -39,21 +39,21 @@ EXCLUDED_DIRS = {
|
|
|
39
39
|
'config'
|
|
40
40
|
}
|
|
41
41
|
|
|
42
|
-
class ConfigManager:
|
|
43
|
-
def __init__(self, base_path: Path, config_path: Path = None):
|
|
44
|
-
self.base_path = Path(base_path)
|
|
45
|
-
|
|
46
|
-
if config_path:
|
|
47
|
-
self.config_path = Path(config_path)
|
|
48
|
-
else:
|
|
49
|
-
# Intentar encontrar evolutia_config.yaml en la raíz
|
|
50
|
-
root_config = self.base_path / 'evolutia_config.yaml'
|
|
51
|
-
if root_config.exists():
|
|
52
|
-
self.config_path = root_config
|
|
53
|
-
else:
|
|
54
|
-
# Default interno: ubicado en el paquete instalado
|
|
55
|
-
# Obtenemos la ruta de este archivo (config_manager.py) -> parent (evolutia/) -> config -> config.yaml
|
|
56
|
-
self.config_path = Path(__file__).parent / 'config' / 'config.yaml'
|
|
42
|
+
class ConfigManager:
|
|
43
|
+
def __init__(self, base_path: Union[Path, str], config_path: Optional[Union[Path, str]] = None):
|
|
44
|
+
self.base_path = Path(base_path)
|
|
45
|
+
|
|
46
|
+
if config_path:
|
|
47
|
+
self.config_path = Path(config_path)
|
|
48
|
+
else:
|
|
49
|
+
# Intentar encontrar evolutia_config.yaml en la raíz
|
|
50
|
+
root_config = self.base_path / 'evolutia_config.yaml'
|
|
51
|
+
if root_config.exists():
|
|
52
|
+
self.config_path = root_config
|
|
53
|
+
else:
|
|
54
|
+
# Default interno: ubicado en el paquete instalado
|
|
55
|
+
# Obtenemos la ruta de este archivo (config_manager.py) -> parent (evolutia/) -> config -> config.yaml
|
|
56
|
+
self.config_path = Path(__file__).parent / 'config' / 'config.yaml'
|
|
57
57
|
|
|
58
58
|
if self.config_path.exists():
|
|
59
59
|
logger.info(f"Usando archivo de configuración: {self.config_path}")
|
|
@@ -89,20 +89,33 @@ class ConfigManager:
|
|
|
89
89
|
logger.error(f"Error inesperado validando esquema: {e}")
|
|
90
90
|
return False
|
|
91
91
|
|
|
92
|
-
def load_current_config(self) -> Dict[str, Any]:
|
|
93
|
-
"""Carga la configuración actual si existe."""
|
|
94
|
-
if self.config_path.exists():
|
|
95
|
-
try:
|
|
96
|
-
with open(self.config_path, 'r', encoding='utf-8') as f:
|
|
97
|
-
config = yaml.safe_load(f) or {}
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
92
|
+
def load_current_config(self) -> Dict[str, Any]:
    """Load the current configuration file if it exists.

    Returns the parsed YAML as a dict; an empty dict when the file is
    missing or unreadable. Schema/value validation failures are logged
    but do not prevent the config from being returned.
    """
    if not self.config_path.exists():
        return {}

    try:
        raw = self.config_path.read_text(encoding='utf-8')
        config = yaml.safe_load(raw) or {}

        # JSON-schema validation (non-fatal)
        if config and not self.validate_config(config):
            logger.warning("Configuración inválida según esquema JSON, pero continuando...")

        # Value-level validation via ConfigValidator (also non-fatal)
        from .validation import ConfigValidator
        validator = ConfigValidator()
        is_valid, errors = validator.validate_config(config)

        if not is_valid:
            logger.error("Errores de validación en configuración:")
            for error in errors:
                logger.error(f" - {error}")
            logger.warning("Continuando con configuración inválida (puede causar errores)")

        return config
    except Exception as e:
        logger.error(f"Error leyendo config actual: {e}")
        return {}
|
|
106
119
|
|
|
107
120
|
def discover_topics(self) -> List[str]:
|
|
108
121
|
"""Descubre directorios de temas basados en la existencia de archivos .md."""
|