evolutia-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evolutia/__init__.py +5 -0
- evolutia/complexity_validator.py +179 -0
- evolutia/config_manager.py +208 -0
- evolutia/evolutia_engine.py +284 -0
- evolutia/exam_generator.py +328 -0
- evolutia/exercise_analyzer.py +256 -0
- evolutia/llm_providers.py +217 -0
- evolutia/material_extractor.py +237 -0
- evolutia/rag/__init__.py +6 -0
- evolutia/rag/consistency_validator.py +200 -0
- evolutia/rag/context_enricher.py +285 -0
- evolutia/rag/enhanced_variation_generator.py +349 -0
- evolutia/rag/rag_indexer.py +424 -0
- evolutia/rag/rag_manager.py +221 -0
- evolutia/rag/rag_retriever.py +366 -0
- evolutia/utils/__init__.py +4 -0
- evolutia/utils/json_parser.py +69 -0
- evolutia/utils/markdown_parser.py +160 -0
- evolutia/utils/math_extractor.py +144 -0
- evolutia/variation_generator.py +97 -0
- evolutia-0.1.0.dist-info/METADATA +723 -0
- evolutia-0.1.0.dist-info/RECORD +27 -0
- evolutia-0.1.0.dist-info/WHEEL +5 -0
- evolutia-0.1.0.dist-info/entry_points.txt +2 -0
- evolutia-0.1.0.dist-info/licenses/LICENSE +201 -0
- evolutia-0.1.0.dist-info/top_level.txt +2 -0
- evolutia_cli.py +160 -0
evolutia/rag/rag_retriever.py
@@ -0,0 +1,366 @@
"""
RAG Retriever: searches the vector store for relevant information.
"""
import logging
from typing import Dict, List, Optional, Any
from pathlib import Path

try:
    import chromadb
    from chromadb.config import Settings
    CHROMADB_AVAILABLE = True
except ImportError:
    CHROMADB_AVAILABLE = False

try:
    from openai import OpenAI
    OPENAI_AVAILABLE = True
except ImportError:
    OPENAI_AVAILABLE = False

try:
    from sentence_transformers import SentenceTransformer
    SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
    SENTENCE_TRANSFORMERS_AVAILABLE = False

import os
from dotenv import load_dotenv

load_dotenv()

logger = logging.getLogger(__name__)


class RAGRetriever:
    """Retrieves relevant information from the vector store."""

    def __init__(self, config: Dict[str, Any], base_path: Path, chroma_client=None):
        """
        Initializes the retriever.

        Args:
            config: RAG configuration from config.yaml
            base_path: Project base path
            chroma_client: Shared ChromaDB client (optional)
        """
        self.config = config
        self.base_path = Path(base_path)
        self.embedding_provider = config.get('embeddings', {}).get('provider', 'openai')
        self.chroma_client = chroma_client
        self._setup_embeddings()
        self._setup_vector_store()

    def _setup_embeddings(self):
        """Configures the embedding model (must match the indexer)."""
        embeddings_config = self.config.get('embeddings', {})
        provider = embeddings_config.get('provider', 'openai')
        model_name = embeddings_config.get('model', 'text-embedding-3-small')

        if provider == 'openai':
            if not OPENAI_AVAILABLE:
                raise ImportError("openai is not installed")

            api_key = os.getenv("OPENAI_API_KEY")
            if not api_key:
                raise ValueError("OPENAI_API_KEY not found")

            self.embedding_client = OpenAI(api_key=api_key)
            self.embedding_model_name = model_name

        elif provider == 'sentence-transformers':
            if not SENTENCE_TRANSFORMERS_AVAILABLE:
                raise ImportError("sentence-transformers is not installed")

            self.embedding_model = SentenceTransformer(model_name)

    def _setup_vector_store(self):
        """Configures the connection to the vector store."""
        if not CHROMADB_AVAILABLE:
            raise ImportError("chromadb is not installed")

        vs_config = self.config.get('vector_store', {})
        persist_dir = Path(vs_config.get('persist_directory', './storage/vector_store'))
        collection_name = vs_config.get('collection_name', 'ejercicios_mmfi')

        # Use the shared client if available, otherwise create a new one
        if self.chroma_client is not None:
            self.client = self.chroma_client
        else:
            self.client = chromadb.PersistentClient(
                path=str(persist_dir.resolve()),
                settings=Settings(anonymized_telemetry=False)
            )

        try:
            self.collection = self.client.get_collection(name=collection_name)
        except Exception as e:
            raise ValueError(f"Could not load collection {collection_name}. Has it been indexed? Error: {e}")

    def _generate_query_embedding(self, query: str) -> List[float]:
        """
        Generates an embedding for a query.

        Args:
            query: Query text

        Returns:
            Query embedding
        """
        if self.embedding_provider == 'openai':
            response = self.embedding_client.embeddings.create(
                model=self.embedding_model_name,
                input=query
            )
            return response.data[0].embedding

        elif self.embedding_provider == 'sentence-transformers':
            return self.embedding_model.encode(query, show_progress_bar=False).tolist()

    def retrieve_similar_exercises(self, exercise_content: str, top_k: int = 5,
                                   exclude_label: Optional[str] = None,
                                   min_complexity: Optional[float] = None,
                                   max_complexity: Optional[float] = None) -> List[Dict]:
        """
        Retrieves exercises similar to the given content.

        Args:
            exercise_content: Content of the reference exercise
            top_k: Number of results to retrieve
            exclude_label: Label of the exercise to exclude (the original)
            min_complexity: Minimum complexity
            max_complexity: Maximum complexity

        Returns:
            List of similar exercises with their metadata
        """
        retrieval_config = self.config.get('retrieval', {})
        top_k = retrieval_config.get('top_k', top_k)
        similarity_threshold = retrieval_config.get('similarity_threshold', 0.7)

        # Generate the query embedding
        query_embedding = self._generate_query_embedding(exercise_content)

        # Build metadata filters using ChromaDB's filter syntax
        conditions = [{'type': 'exercise'}]

        if exclude_label:
            conditions.append({'label': {'$ne': exclude_label}})

        if min_complexity is not None and max_complexity is not None:
            conditions.append({'complexity': {'$gte': float(min_complexity)}})
            conditions.append({'complexity': {'$lte': float(max_complexity)}})
        elif min_complexity is not None:
            conditions.append({'complexity': {'$gte': float(min_complexity)}})
        elif max_complexity is not None:
            conditions.append({'complexity': {'$lte': float(max_complexity)}})

        # If there are multiple conditions, combine them with $and
        if len(conditions) > 1:
            where = {'$and': conditions}
        elif len(conditions) == 1:
            where = conditions[0]
        else:
            where = None

        # Search the vector store
        results = self.collection.query(
            query_embeddings=[query_embedding],
            n_results=top_k * 2,  # fetch extra results to filter afterwards
            where=where
        )

        # Process results
        similar_exercises = []

        if results['ids'] and len(results['ids'][0]) > 0:
            for i, (doc_id, doc, metadata, distance) in enumerate(zip(
                results['ids'][0],
                results['documents'][0],
                results['metadatas'][0],
                results['distances'][0]
            )):
                # Filter by similarity threshold (smaller distance = more similar)
                similarity = 1 - distance  # convert distance to similarity

                if similarity >= similarity_threshold:
                    similar_exercises.append({
                        'id': doc_id,
                        'content': doc,
                        'metadata': metadata,
                        'similarity': similarity,
                        'distance': distance
                    })

                if len(similar_exercises) >= top_k:
                    break

        logger.info(f"Retrieved {len(similar_exercises)} similar exercises")
        return similar_exercises

    def retrieve_related_concepts(self, concepts: List[str], top_k: int = 3) -> List[Dict]:
        """
        Retrieves exercises or readings related to specific concepts.

        Args:
            concepts: List of concepts to search for
            top_k: Number of results per concept

        Returns:
            List of related documents
        """
        query = f"Concepts: {', '.join(concepts)}"
        query_embedding = self._generate_query_embedding(query)

        retrieval_config = self.config.get('retrieval', {})
        top_k_total = retrieval_config.get('top_k', top_k * len(concepts))

        results = self.collection.query(
            query_embeddings=[query_embedding],
            n_results=top_k_total
        )

        related_docs = []

        if results['ids'] and len(results['ids'][0]) > 0:
            for doc_id, doc, metadata, distance in zip(
                results['ids'][0],
                results['documents'][0],
                results['metadatas'][0],
                results['distances'][0]
            ):
                similarity = 1 - distance
                related_docs.append({
                    'id': doc_id,
                    'content': doc,
                    'metadata': metadata,
                    'similarity': similarity
                })

        logger.info(f"Retrieved {len(related_docs)} documents related to the concepts")
        return related_docs

    def retrieve_reading_context(self, topic: str, top_k: int = 2) -> List[Dict]:
        """
        Retrieves reading context related to a topic.

        Args:
            topic: Topic or concept
            top_k: Number of reading chunks to retrieve

        Returns:
            List of reading chunks
        """
        query_embedding = self._generate_query_embedding(topic)

        results = self.collection.query(
            query_embeddings=[query_embedding],
            n_results=top_k,
            where={'type': 'reading'}
        )

        reading_chunks = []

        if results['ids'] and len(results['ids'][0]) > 0:
            for doc_id, doc, metadata, distance in zip(
                results['ids'][0],
                results['documents'][0],
                results['metadatas'][0],
                results['distances'][0]
            ):
                reading_chunks.append({
                    'id': doc_id,
                    'content': doc,
                    'metadata': metadata,
                    'similarity': 1 - distance
                })

        logger.info(f"Retrieved {len(reading_chunks)} reading chunks")
        return reading_chunks

    def retrieve_by_complexity(self, target_complexity: float, tolerance: float = 0.2,
                               top_k: int = 5) -> List[Dict]:
        """
        Retrieves exercises with complexity close to the target.

        Args:
            target_complexity: Target complexity
            tolerance: Complexity tolerance
            top_k: Number of results

        Returns:
            List of exercises with similar complexity
        """
        min_complexity = target_complexity * (1 - tolerance)
        max_complexity = target_complexity * (1 + tolerance)

        # Metadata-only lookup using ChromaDB's filter syntax
        where = {
            '$and': [
                {'type': 'exercise'},
                {'complexity': {'$gte': float(min_complexity)}},
                {'complexity': {'$lte': float(max_complexity)}}
            ]
        }

        results = self.collection.get(
            where=where,
            limit=top_k
        )

        exercises = []
        for i, (doc_id, doc, metadata) in enumerate(zip(
            results['ids'],
            results['documents'],
            results['metadatas']
        )):
            exercises.append({
                'id': doc_id,
                'content': doc,
                'metadata': metadata
            })

        logger.info(f"Retrieved {len(exercises)} exercises by complexity")
        return exercises

    def hybrid_search(self, query: str, metadata_filters: Dict = None,
                      top_k: int = 5) -> List[Dict]:
        """
        Hybrid search: semantic similarity plus metadata filters.

        Args:
            query: Text query
            metadata_filters: Metadata filters (e.g. {'type': 'exercise'})
            top_k: Number of results

        Returns:
            List of results
        """
        query_embedding = self._generate_query_embedding(query)

        where = metadata_filters or {}

        results = self.collection.query(
            query_embeddings=[query_embedding],
            n_results=top_k,
            where=where if where else None
        )

        hybrid_results = []

        if results['ids'] and len(results['ids'][0]) > 0:
            for doc_id, doc, metadata, distance in zip(
                results['ids'][0],
                results['documents'][0],
                results['metadatas'][0],
                results['distances'][0]
            ):
                hybrid_results.append({
                    'id': doc_id,
                    'content': doc,
                    'metadata': metadata,
                    'similarity': 1 - distance
                })

        return hybrid_results
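For orientation, a minimal usage sketch of RAGRetriever (not part of the wheel): the config values mirror the defaults read above, and it assumes the 'ejercicios_mmfi' collection was already built by rag_indexer.py and that OPENAI_API_KEY is set in the environment; the exercise label is invented for illustration.

from pathlib import Path
from evolutia.rag.rag_retriever import RAGRetriever

# Hypothetical config dict; keys match those the class reads
# ('embeddings', 'vector_store', 'retrieval').
config = {
    'embeddings': {'provider': 'openai', 'model': 'text-embedding-3-small'},
    'vector_store': {'persist_directory': './storage/vector_store',
                     'collection_name': 'ejercicios_mmfi'},
    'retrieval': {'top_k': 5, 'similarity_threshold': 0.7},
}

retriever = RAGRetriever(config, base_path=Path('.'))
hits = retriever.retrieve_similar_exercises(
    "Compute the Fourier series of f(x) = x on [-pi, pi]",
    exclude_label='ex1-01',   # invented label, for illustration only
)
for hit in hits:
    print(hit['id'], round(hit['similarity'], 3))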
evolutia/utils/json_parser.py
@@ -0,0 +1,69 @@
"""
Utilities for robust JSON parsing, especially useful for LLM responses
that may contain LaTeX or malformed markdown.
"""
import json
import re
import logging
from typing import Dict, Any, Optional

logger = logging.getLogger(__name__)

def extract_and_parse_json(text: str) -> Optional[Dict[str, Any]]:
    """
    Attempts to extract and parse a JSON object from arbitrary text.
    Handles markdown code blocks and common LaTeX escaping errors.
    """
    if not text:
        return None

    # 1. Basic cleanup and code-block extraction
    clean_text = text.strip()

    code_block_pattern = re.compile(r'```(?:json)?\s*(.*?)```', re.DOTALL)
    match = code_block_pattern.search(clean_text)

    if match:
        clean_text = match.group(1).strip()

    # AGGRESSIVE LATEX HEURISTIC
    # In mathematical contexts, sequences such as \frac, \textbf, \theta are very common.
    # json.loads interprets \f, \b, \t as control characters (form feed, backspace, tab).
    # That corrupts the LaTeX (e.g. \theta -> tab + heta).
    # Therefore this cleanup is applied BEFORE the standard parsing attempt for these sequences.

    # Whitelist of escapes we REALLY want to preserve as standard JSON controls:
    # " -> \" (quote inside a string)
    # \ -> \\ (already-escaped literal backslash)
    # / -> \/ (escaped forward slash, optional)
    # n -> \n (newline - very common and necessary)
    # r -> \r (carriage return)
    # u -> \uXXXX (unicode - although \usepackage could be problematic, \u requires 4 hex digits, so \usepackage makes json.loads fail and is caught afterwards)

    # REMOVED from the whitelist (they get escaped to a double backslash):
    # t -> to protect \theta, \textbf, \text, etc.
    # f -> to protect \frac, \forall, etc.
    # b -> to protect \begin, \beta, etc.

    # Regex: negative lookbehind to ensure the backslash is not already escaped (?<!\\)
    # Negative lookahead to allow only the whitelisted escapes (?!["\\/nru])
    # So \t becomes \\t (the literal string \t), while \n stays \n (control char).

    regex_latex_fix = r'(?<!\\)\\(?!["\\/nru])'

    try:
        # Apply the aggressive fix
        fixed_text = re.sub(regex_latex_fix, r'\\\\', clean_text)
        return json.loads(fixed_text, strict=False)
    except json.JSONDecodeError:
        # If the aggressive fix fails (perhaps it broke something subtle, or the error lies elsewhere),
        # try the original text with strict=False in case it was just a newline issue
        pass

    try:
        return json.loads(clean_text, strict=False)
    except json.JSONDecodeError as e:
        logger.debug(f"JSON parsing failed after all attempts: {e}")

    logger.error(f"Could not parse JSON. Original text (start): {text[:100]}...")
    return None
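A quick illustration of the escaping heuristic described in the comments above (a sketch, not shipped with the package; the sample LLM reply and its field names are invented):

from evolutia.utils.json_parser import extract_and_parse_json

# Invented LLM reply: without the pre-escaping, json.loads would read \f and \t
# inside the LaTeX as form-feed / tab control characters and corrupt the math.
llm_reply = r'''Here is the variation:
```json
{"statement": "Compute \frac{\theta}{2}", "difficulty": 0.6}
```'''

parsed = extract_and_parse_json(llm_reply)
print(parsed['statement'])   # -> Compute \frac{\theta}{2}
print(parsed['difficulty'])  # -> 0.6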
evolutia/utils/markdown_parser.py
@@ -0,0 +1,160 @@
"""
Utilities for parsing Markdown/MyST files and extracting exercises and solutions.
"""
import re
import yaml
from pathlib import Path
from typing import Dict, List, Optional, Tuple


def extract_frontmatter(content: str) -> Tuple[Dict, str]:
    """
    Extracts the YAML frontmatter from Markdown content.

    Args:
        content: Full file content

    Returns:
        Tuple (frontmatter_dict, content_without_frontmatter)
    """
    frontmatter_pattern = r'^---\s*\n(.*?)\n---\s*\n'
    match = re.match(frontmatter_pattern, content, re.DOTALL)

    if match:
        frontmatter_str = match.group(1)
        try:
            frontmatter = yaml.safe_load(frontmatter_str) or {}
            content_without_frontmatter = content[match.end():]
            return frontmatter, content_without_frontmatter
        except yaml.YAMLError:
            return {}, content
    return {}, content


def extract_exercise_blocks(content: str) -> List[Dict]:
    """
    Extracts exercise blocks in MyST format.

    Looks for blocks of the form:
    ```{exercise} N
    :label: exN-XX
    ...
    ```

    Args:
        content: Markdown content

    Returns:
        List of dictionaries with information about each exercise
    """
    exercises = []

    # Pattern for MyST exercise blocks
    # Captures the delimiter (group 1), the label (group 2) and the content (group 3)
    # Uses backreference \1 to match the exact length of the closing delimiter
    exercise_pattern = r'(`{3,4})\{exercise\}(?:\s+\d+)?\s*\n:label:\s+(\S+)\s*\n(.*?)(?=\1)'

    matches = re.finditer(exercise_pattern, content, re.DOTALL)

    for match in matches:
        # group(1) is the delimiter
        label = match.group(2)
        exercise_content = match.group(3).strip()

        # Check whether there is an include inside
        include_match = re.search(r'```\{include\}\s+(.+?)\s*```', exercise_content, re.DOTALL)
        if include_match:
            include_path = include_match.group(1).strip()
            exercises.append({
                'label': label,
                'content': exercise_content,
                'include_path': include_path,
                'type': 'include'
            })
        else:
            exercises.append({
                'label': label,
                'content': exercise_content,
                'include_path': None,
                'type': 'inline'
            })

    return exercises


def extract_solution_blocks(content: str) -> List[Dict]:
    """
    Extracts solution blocks in MyST format.

    Looks for blocks of the form:
    ````{solution} exN-XX
    :label: solution-exN-XX
    ...
    ````

    Args:
        content: Markdown content

    Returns:
        List of dictionaries with information about each solution
    """
    solutions = []

    # Pattern for MyST solution blocks
    # Captures the delimiter (group 1), exercise_label (group 2), solution_label (group 3) and the content (group 4)
    solution_pattern = r'(`{3,4})\{solution\}\s+(\S+)\s*\n:label:\s+(\S+)\s*\n(.*?)(?=\1)'

    matches = re.finditer(solution_pattern, content, re.DOTALL)

    for match in matches:
        # group(1) is the delimiter
        exercise_label = match.group(2)
        solution_label = match.group(3)
        solution_content = match.group(4).strip()

        # Look for includes inside the solution
        include_matches = re.finditer(r'```\{include\}\s+(.+?)\s*```', solution_content, re.DOTALL)
        include_paths = [m.group(1).strip() for m in include_matches]

        solutions.append({
            'exercise_label': exercise_label,
            'label': solution_label,
            'content': solution_content,
            'include_paths': include_paths
        })

    return solutions


def read_markdown_file(file_path: Path) -> str:
    """
    Reads a Markdown file and returns its content.

    Args:
        file_path: Path to the file

    Returns:
        File content
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()
    except Exception as e:
        raise IOError(f"Error reading file {file_path}: {e}")


def resolve_include_path(include_path: str, base_dir: Path) -> Path:
    """
    Resolves an include path relative to a base directory.

    Args:
        include_path: Relative include path
        base_dir: Base directory

    Returns:
        Resolved absolute path
    """
    # Clean up the path (it may contain ./ or whitespace)
    clean_path = include_path.strip().lstrip('./')
    return (base_dir / clean_path).resolve()
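To show how these helpers fit together, a small sketch (illustrative only; the MyST snippet is made up):

from evolutia.utils.markdown_parser import extract_frontmatter, extract_exercise_blocks

# Invented MyST document with YAML frontmatter and one inline exercise block.
sample = """---
title: Guía 1
---
```{exercise} 1
:label: ex1-01
Compute the limit of (1 + 1/n)**n as n grows.
```
"""

meta, body = extract_frontmatter(sample)
print(meta['title'])                  # -> Guía 1
for ex in extract_exercise_blocks(body):
    print(ex['label'], ex['type'])    # -> ex1-01 inline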