evolutia 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evolutia/__init__.py +5 -0
- evolutia/complexity_validator.py +179 -0
- evolutia/config_manager.py +208 -0
- evolutia/evolutia_engine.py +284 -0
- evolutia/exam_generator.py +328 -0
- evolutia/exercise_analyzer.py +256 -0
- evolutia/llm_providers.py +217 -0
- evolutia/material_extractor.py +237 -0
- evolutia/rag/__init__.py +6 -0
- evolutia/rag/consistency_validator.py +200 -0
- evolutia/rag/context_enricher.py +285 -0
- evolutia/rag/enhanced_variation_generator.py +349 -0
- evolutia/rag/rag_indexer.py +424 -0
- evolutia/rag/rag_manager.py +221 -0
- evolutia/rag/rag_retriever.py +366 -0
- evolutia/utils/__init__.py +4 -0
- evolutia/utils/json_parser.py +69 -0
- evolutia/utils/markdown_parser.py +160 -0
- evolutia/utils/math_extractor.py +144 -0
- evolutia/variation_generator.py +97 -0
- evolutia-0.1.0.dist-info/METADATA +723 -0
- evolutia-0.1.0.dist-info/RECORD +27 -0
- evolutia-0.1.0.dist-info/WHEEL +5 -0
- evolutia-0.1.0.dist-info/entry_points.txt +2 -0
- evolutia-0.1.0.dist-info/licenses/LICENSE +201 -0
- evolutia-0.1.0.dist-info/top_level.txt +2 -0
- evolutia_cli.py +160 -0
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Context Enricher: Enriquece prompts con contexto recuperado del RAG.
|
|
3
|
+
"""
|
|
4
|
+
import logging
|
|
5
|
+
from typing import Dict, List, Optional
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger(__name__)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ContextEnricher:
    """Enriches LLM prompts with context retrieved from the RAG store.

    All strings emitted into prompts are in Spanish, matching the rest of
    the generation pipeline; they are part of the runtime contract and must
    not be translated.
    """

    def __init__(self, max_context_length: int = 3000):
        """
        Initialize the enricher.

        Args:
            max_context_length: Maximum length (in characters) allowed for
                the final enriched prompt; extra context is truncated to fit.
        """
        self.max_context_length = max_context_length

    @staticmethod
    def _statement_only(content: str) -> str:
        """Return only the statement of an indexed exercise, dropping its solution."""
        if 'EJERCICIO:' in content and 'SOLUCIÓN:' in content:
            return content.split('SOLUCIÓN:')[0]
        return content

    def _insert_context(self, original_prompt: str, context_section: str) -> str:
        """Insert *context_section* right before the instruction marker.

        Falls back to appending at the end when the marker is absent.
        Uses ``>= 0`` so a marker at position 0 is honored; the previous
        code also used ``find()``'s -1 as a slice index in the truncation
        path, silently corrupting the prompt — this helper fixes both.
        """
        insertion_point = original_prompt.find("INSTRUCCIONES PARA LA VARIACIÓN:")
        if insertion_point >= 0:
            return (
                original_prompt[:insertion_point]
                + context_section
                + original_prompt[insertion_point:]
            )
        return original_prompt + "\n\n" + context_section

    def enrich_with_similar_exercises(self, similar_exercises: List[Dict],
                                      max_examples: int = 3) -> str:
        """Format similar exercises for inclusion in the prompt.

        Args:
            similar_exercises: Retrieved similar exercises; each dict may
                carry 'content', 'metadata' and 'similarity' keys.
            max_examples: Maximum number of examples to include.

        Returns:
            Formatted text block, or "" when there is nothing to show.
        """
        if not similar_exercises:
            return ""

        # Highest-similarity examples first.
        best = sorted(
            similar_exercises,
            key=lambda x: x.get('similarity', 0),
            reverse=True,
        )[:max_examples]

        context = "EJERCICIOS SIMILARES DEL CURSO (para referencia de estilo y nivel):\n\n"

        for i, exercise in enumerate(best, 1):
            content = exercise.get('content', '')
            similarity = exercise.get('similarity', 0)

            # Long documents: keep only the first paragraph (the statement).
            if len(content) > 500:
                parts = content.split('\n\n')
                if len(parts) > 1:
                    content = parts[0]

            context += f"Ejemplo {i} (similitud: {similarity:.2f}):\n"
            context += f"{content[:400]}\n\n"

        return context.strip()

    def enrich_with_related_concepts(self, related_docs: List[Dict],
                                     concepts: List[str]) -> str:
        """Format documents related to the given concepts.

        Args:
            related_docs: Retrieved related documents.
            concepts: Concepts that were searched for.

        Returns:
            Formatted text with related concepts, or "" when empty.
        """
        if not related_docs:
            return ""

        context = f"CONTEXTO TEÓRICO RELACIONADO CON LOS CONCEPTOS: {', '.join(concepts)}\n\n"

        # Group retrieved documents by their indexed type.
        exercises = [d for d in related_docs if d.get('metadata', {}).get('type') == 'exercise']
        readings = [d for d in related_docs if d.get('metadata', {}).get('type') == 'reading']

        if readings:
            context += "Información de lecturas:\n"
            for reading in readings[:2]:  # at most 2 reading chunks
                content = reading.get('content', '')
                context += f"- {content[:300]}...\n\n"

        if exercises:
            context += "Ejercicios relacionados:\n"
            for exercise in exercises[:2]:  # at most 2 exercises
                content = self._statement_only(exercise.get('content', ''))
                context += f"- {content[:300]}...\n\n"

        return context.strip()

    def enrich_with_complexity_examples(self, complexity_examples: List[Dict]) -> str:
        """Format examples of exercises with similar complexity.

        Args:
            complexity_examples: Exercises of similar complexity.

        Returns:
            Formatted text, or "" when empty.
        """
        if not complexity_examples:
            return ""

        context = "EJERCICIOS CON COMPLEJIDAD SIMILAR (para referencia de nivel):\n\n"

        for i, example in enumerate(complexity_examples[:2], 1):  # at most 2 examples
            content = self._statement_only(example.get('content', ''))
            complexity = example.get('metadata', {}).get('complexity', 'N/A')

            context += f"Ejemplo {i} (complejidad: {complexity}):\n"
            context += f"{content[:300]}...\n\n"

        return context.strip()

    def create_enriched_prompt(self, original_prompt: str, exercise: Dict,
                               analysis: Dict, retriever_results: Dict) -> str:
        """Create a prompt enriched with every piece of retrieved context.

        Args:
            original_prompt: Base prompt.
            exercise: Original exercise (currently unused; kept for API stability).
            analysis: Exercise analysis; 'concepts' is read from it.
            retriever_results: Retriever output with keys:
                - similar_exercises: list of similar exercises
                - related_concepts: list of related documents
                - reading_context: list of reading chunks
                - complexity_examples: list of similar-complexity exercises

        Returns:
            The enriched prompt; falls back to *original_prompt* when there
            is no context or no room for it.
        """
        enriched_parts = []

        # Similar exercises.
        similar = retriever_results.get('similar_exercises', [])
        if similar:
            similar_context = self.enrich_with_similar_exercises(similar)
            if similar_context:
                enriched_parts.append(similar_context)

        # Related concepts.
        concepts = analysis.get('concepts', [])
        related = retriever_results.get('related_concepts', [])
        if related and concepts:
            concepts_context = self.enrich_with_related_concepts(related, concepts)
            if concepts_context:
                enriched_parts.append(concepts_context)

        # Reading context.
        readings = retriever_results.get('reading_context', [])
        if readings:
            reading_context = "CONTEXTO DE LECTURAS RELACIONADAS:\n\n"
            for reading in readings[:2]:
                content = reading.get('content', '')
                reading_context += f"- {content[:400]}...\n\n"
            enriched_parts.append(reading_context.strip())

        # Complexity examples.
        complexity_examples = retriever_results.get('complexity_examples', [])
        if complexity_examples:
            complexity_context = self.enrich_with_complexity_examples(complexity_examples)
            if complexity_context:
                enriched_parts.append(complexity_context)

        if not enriched_parts:
            return original_prompt

        # Assemble a clearly-delimited context section.
        context_section = "\n\n" + "=" * 80 + "\n"
        context_section += "CONTEXTO ADICIONAL DEL CURSO:\n"
        context_section += "=" * 80 + "\n\n"
        context_section += "\n\n---\n\n".join(enriched_parts)
        context_section += "\n\n" + "=" * 80 + "\n"

        # Insert context before the instruction marker (or append).
        enriched_prompt = self._insert_context(original_prompt, context_section)

        # Enforce the total length budget: keep the original prompt intact
        # and truncate only the injected context.
        if len(enriched_prompt) > self.max_context_length:
            logger.warning(f"Prompt enriquecido muy largo ({len(enriched_prompt)} chars), truncando...")
            max_context = self.max_context_length - len(original_prompt) - 100
            if max_context > 0:
                context_section = context_section[:max_context] + "\n\n[Contexto truncado...]"
                enriched_prompt = self._insert_context(original_prompt, context_section)
            else:
                # No room for any context at all: fall back to the base prompt.
                enriched_prompt = original_prompt

        return enriched_prompt

    def format_for_consistency_check(self, similar_exercises: List[Dict]) -> str:
        """Format similar exercises for consistency validation.

        Args:
            similar_exercises: Similar exercises from the course.

        Returns:
            Text formatted for comparison.
        """
        if not similar_exercises:
            return "No hay ejercicios similares para comparar."

        formatted = "EJERCICIOS SIMILARES DEL CURSO PARA COMPARACIÓN:\n\n"

        for i, exercise in enumerate(similar_exercises[:5], 1):  # top 5
            content = self._statement_only(exercise.get('content', ''))
            metadata = exercise.get('metadata', {})
            similarity = exercise.get('similarity', 0)

            formatted += f"{i}. Similitud: {similarity:.2f}\n"
            formatted += f" Complejidad: {metadata.get('complexity', 'N/A')}\n"
            formatted += f" Conceptos: {metadata.get('concepts', 'N/A')}\n"
            formatted += f" Enunciado: {content[:200]}...\n\n"

        return formatted

    def format_context_dict(self, context: Dict) -> str:
        """Format a full context dictionary into one string.

        Args:
            context: Dictionary with keys such as 'reading_context' and
                'related_exercises'.

        Returns:
            Concatenation of all available, formatted sections ("" if none).
        """
        parts = []

        # 1. Reading context.
        readings = context.get('reading_context', [])
        if readings:
            reading_text = "MATERIAL DE LECTURA Y TEORÍA:\n\n"
            for reading in readings[:3]:
                content = reading.get('content', '')
                reading_text += f"- {content[:500]}...\n\n"
            parts.append(reading_text)

        # 2. Related exercises — reuse the similar-exercise formatting.
        related = context.get('related_exercises', [])
        if related:
            exercises_text = self.enrich_with_similar_exercises(related, max_examples=3)
            if exercises_text:
                parts.append(exercises_text)

        return "\n\n".join(parts)
|
|
285
|
+
|
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Enhanced Variation Generator: Genera variaciones usando RAG.
|
|
3
|
+
"""
|
|
4
|
+
import logging
|
|
5
|
+
import os
|
|
6
|
+
from typing import Dict, Optional
|
|
7
|
+
import google.generativeai as genai
|
|
8
|
+
|
|
9
|
+
try:
|
|
10
|
+
from ..variation_generator import VariationGenerator
|
|
11
|
+
except ImportError:
|
|
12
|
+
# Fallback for standalone execution tests (though discouraged in package)
|
|
13
|
+
import sys
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
sys.path.append(str(Path(__file__).parent.parent))
|
|
16
|
+
from variation_generator import VariationGenerator
|
|
17
|
+
|
|
18
|
+
try:
|
|
19
|
+
from .rag_retriever import RAGRetriever
|
|
20
|
+
from .context_enricher import ContextEnricher
|
|
21
|
+
except ImportError:
|
|
22
|
+
from rag_retriever import RAGRetriever
|
|
23
|
+
from context_enricher import ContextEnricher
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class EnhancedVariationGenerator(VariationGenerator):
    """Generates exercise variations, enriching prompts with RAG context."""

    def __init__(self, api_provider: str = "openai", retriever: Optional[RAGRetriever] = None,
                 context_enricher: Optional[ContextEnricher] = None):
        """
        Initialize the enhanced generator.

        Args:
            api_provider: API provider ('openai', 'anthropic' or 'gemini').
            retriever: RAGRetriever instance; when None, generation falls
                back to plain (non-RAG) behavior.
            context_enricher: ContextEnricher instance (a default one is
                created when omitted).
        """
        super().__init__(api_provider)
        self.retriever = retriever
        self.context_enricher = context_enricher or ContextEnricher()

        # Gemini requires an explicit SDK-level API key configuration.
        if self.api_provider == 'gemini':
            api_key = os.getenv("GOOGLE_API_KEY")
            if not api_key:
                logger.warning("GOOGLE_API_KEY no encontrada en variables de entorno")
            else:
                genai.configure(api_key=api_key)

    @staticmethod
    def _parse_quiz_json(content: str) -> Optional[Dict]:
        """Parse the model's JSON quiz payload.

        Fix: ``extract_and_parse_json`` was used but never imported in this
        module, raising NameError for every multiple-choice generation.
        Imported lazily here, mirroring the module's package/standalone
        import fallback convention.
        """
        try:
            from ..utils.json_parser import extract_and_parse_json
        except ImportError:
            from utils.json_parser import extract_and_parse_json
        return extract_and_parse_json(content)

    def _retrieve_context(self, exercise: Dict, analysis: Dict) -> Dict:
        """
        Retrieve relevant context via RAG.

        Args:
            exercise: Original exercise info.
            analysis: Complexity analysis.

        Returns:
            Dictionary with retrieved context ({} when no retriever is set
            or retrieval fails).
        """
        if not self.retriever:
            return {}

        context = {}

        try:
            # Similar exercises (excluding the original itself).
            exercise_content = exercise.get('content', '')
            context['similar_exercises'] = self.retriever.retrieve_similar_exercises(
                exercise_content,
                exclude_label=exercise.get('label'),
                top_k=5,
            )

            # Related concepts.
            concepts = analysis.get('concepts', [])
            if concepts:
                context['related_concepts'] = self.retriever.retrieve_related_concepts(concepts, top_k=3)

            # Reading context, keyed by the source file name when available.
            source_file = exercise.get('source_file')
            topic = source_file.name if hasattr(source_file, 'name') else ''
            if topic:
                context['reading_context'] = self.retriever.retrieve_reading_context(topic, top_k=2)

            # Exercises with similar complexity (for level reference).
            target_complexity = analysis.get('total_complexity', 0)
            if target_complexity > 0:
                context['complexity_examples'] = self.retriever.retrieve_by_complexity(
                    target_complexity,
                    tolerance=0.3,
                    top_k=3,
                )

        except Exception as e:
            # Best-effort: RAG failures must not break generation.
            logger.warning(f"Error recuperando contexto RAG: {e}")
            context = {}

        return context

    def _create_prompt(self, exercise: Dict, analysis: Dict, context: Dict = None) -> str:
        """
        Build the variation prompt, enriched with RAG context.

        Args:
            exercise: Original exercise info.
            analysis: Complexity analysis of the exercise.
            context: Already-retrieved RAG context (avoids a second lookup).

        Returns:
            The enriched prompt, or the plain base prompt without a retriever.
        """
        # Base prompt from the parent implementation.
        base_prompt = super()._create_prompt(exercise, analysis)

        if not self.retriever:
            return base_prompt

        if context is None:
            context = self._retrieve_context(exercise, analysis)

        return self.context_enricher.create_enriched_prompt(
            base_prompt,
            exercise,
            analysis,
            context,
        )

    def generate_variation(self, exercise: Dict, analysis: Dict, exercise_type: str = "development") -> Optional[Dict]:
        """
        Generate a variation of an existing exercise.

        Supports development variations and conversion to a conceptual quiz.

        Args:
            exercise: Original exercise info ('content', 'solution', 'label', ...).
            analysis: Complexity analysis of the exercise.
            exercise_type: 'development' or 'multiple_choice'.

        Returns:
            Variation dict, or None when generation failed.
        """
        # 1. Retrieve RAG context ({} when no retriever is configured).
        context = self._retrieve_context(exercise, analysis)

        # 2. Build the prompt for the requested exercise type.
        if exercise_type == 'multiple_choice':
            context_str = self.context_enricher.format_context_dict(context)

            # For a quiz, the exercise content (plus a solution excerpt) is the base.
            context_info = {
                'content': f"Ejercicio Base:\n{exercise.get('content')}\n\nSolución Base:\n{(exercise.get('solution') or '')[:500]}...\n\nContexto Adicional:\n{context_str}"
            }
            prompt = self._create_quiz_prompt(context_info)
        else:
            # Development flow: pass the already-retrieved context to _create_prompt
            # so it is not fetched twice.
            prompt = self._create_prompt(exercise, analysis, context=context)

        # 3. Resolve the LLM provider.
        provider = self._get_provider()
        if not provider:
            return None

        # 4. Generate.
        content = provider.generate_content(prompt, system_prompt="Eres un experto en métodos matemáticos para física e ingeniería.")

        if not content:
            return None

        # 5. Parse the response.
        variation_content = ""
        variation_solution = ""

        if exercise_type == 'multiple_choice':
            data = self._parse_quiz_json(content)

            if data and 'question' in data and 'options' in data:
                variation_content = f"{data['question']}\n\n"
                for opt, text in data['options'].items():
                    variation_content += f"- **{opt})** {text}\n"

                variation_solution = f"**Respuesta Correcta: {data.get('correct_option', '?')}**\n\n{data.get('explanation', '')}"
            else:
                logger.warning("No se pudo parsear el JSON del quiz (enhanced), usando contenido raw")
                variation_content = content
        else:
            # If the model honored the separator, split statement and solution
            # (the previous code computed this split but never used it);
            # otherwise keep the raw content with a placeholder solution.
            parts = content.split("SOLUCIÓN REQUERIDA:")
            if len(parts) == 2:
                variation_content = parts[0].strip()
                variation_solution = parts[1].strip()
            else:
                variation_content = content
                variation_solution = "Solución pendiente..."

        variation = {
            'variation_content': variation_content,
            'variation_solution': variation_solution,
            'original_frontmatter': exercise.get('frontmatter', {}),
            'original_label': exercise.get('label'),
            'type': exercise_type
        }

        if self.retriever and context:
            variation['rag_context'] = {
                'similar_exercises_count': len(context.get('similar_exercises', [])),
                'related_concepts_count': len(context.get('related_concepts', [])),
                'reading_context_count': len(context.get('reading_context', []))
            }
            # Collect human-readable references from the retrieved documents.
            refs = []
            for ex in context.get('similar_exercises', []):
                # Prefer the label from metadata, falling back to the chunk id.
                ref_label = ex.get('metadata', {}).get('label') or ex.get('id')
                if ref_label:
                    refs.append(ref_label)

            for reading in context.get('reading_context', []):
                # Readings may lack a label; use the source path or the chunk id.
                ref_src = reading.get('metadata', {}).get('source') or reading.get('id')
                if ref_src:
                    refs.append(ref_src)

            if refs:
                variation['rag_references'] = refs

        return variation

    def generate_variation_with_solution(self, exercise: Dict, analysis: Dict) -> Optional[Dict]:
        """
        Generate a variation together with its worked solution, using RAG.

        Returns:
            The variation dict with 'variation_solution' filled in, or None.
        """
        # The variation itself already uses RAG context.
        variation = self.generate_variation(exercise, analysis)

        if not variation:
            return None

        provider = self._get_provider()
        if not provider:
            return None

        # Ask the model to fully solve the generated variation.
        solution_prompt = f"""Eres un experto en métodos matemáticos para física e ingeniería. Resuelve el siguiente ejercicio paso a paso, mostrando todos los cálculos y procedimientos.

EJERCICIO:
{variation['variation_content']}

INSTRUCCIONES:
1. Resuelve el ejercicio de forma completa y detallada
2. Muestra todos los pasos intermedios
3. Usa notación matemática LaTeX correcta
4. Explica el razonamiento cuando sea necesario
5. Usa bloques :::{{math}} para ecuaciones display y $...$ para inline
6. Escribe en español

GENERA LA SOLUCIÓN COMPLETA:"""

        solution_content = provider.generate_content(solution_prompt)

        if solution_content:
            variation['variation_solution'] = solution_content

        return variation

    def generate_new_exercise_from_topic(self, topic: str, tags: Optional[list] = None, difficulty: str = "alta", exercise_type: str = "development") -> Optional[Dict]:
        """
        Generate a brand-new exercise from a topic and tags.

        Args:
            topic: Topic name, or a list of topic names.
            tags: Tags guiding retrieval and generation.
            difficulty: Target difficulty label.
            exercise_type: 'development' or 'multiple_choice'.

        Returns:
            Exercise dict, or None when generation failed.
        """
        if not self.retriever:
            logger.info("Generando sin contexto RAG (retriever no disponible)")
            # Proceed without context.

        tags = tags or []
        context = {}

        # Normalize topic: accept either a list or a single string.
        if isinstance(topic, list):
            topic_str = ", ".join(topic)
            topic_list = topic
        else:
            topic_str = topic
            topic_list = [topic]

        if self.retriever:
            # 1. Theoretical context from course readings.
            context['reading_context'] = self.retriever.retrieve_reading_context(topic_str, top_k=3)

            # 2. Related exercises, for style reference.
            search_terms = tags + topic_list
            context['related_exercises'] = self.retriever.retrieve_related_concepts(search_terms, top_k=3)

        # 3. Build the prompt.
        if exercise_type == 'multiple_choice':
            context_info = {
                'content': f"Tema: {topic_str}\nTags: {', '.join(tags)}\nDificultad: {difficulty}\nContexto: {str(context)}"
            }
            prompt = self._create_quiz_prompt(context_info)
        else:
            prompt = self._create_new_exercise_prompt(topic_str, tags, context, difficulty)

        # 4. Resolve provider and generate.
        provider = self._get_provider()
        if not provider:
            return None

        content = provider.generate_content(prompt)

        if not content:
            return None

        # 5. Parse the response.
        exercise_text = ""
        solution_text = ""

        if exercise_type == 'multiple_choice':
            data = self._parse_quiz_json(content)

            if data and 'question' in data:
                exercise_text = f"{data['question']}\n\n"
                for opt, text in data.get('options', {}).items():
                    exercise_text += f"- **{opt})** {text}\n"
                solution_text = f"**Respuesta Correcta: {data.get('correct_option', '?')}**\n\n{data.get('explanation', '')}"
            else:
                logger.error("No se pudo parsear JSON de quiz nuevo")
                exercise_text = content
                solution_text = "Verificar formato generado."
        else:
            # Development exercises come back with an explicit separator.
            parts = content.split("SOLUCIÓN REQUERIDA:")
            if len(parts) == 2:
                exercise_text = parts[0].replace("EJERCICIO NUEVO:", "").strip()
                solution_text = parts[1].strip()
            else:
                exercise_text = content
                solution_text = ""

        return {
            'variation_content': exercise_text,
            'variation_solution': solution_text,
            'original_frontmatter': {
                'subject': topic_str,
                'tags': tags,
                'complexity': difficulty,
                'type': exercise_type
            }
        }
|