django-lucy-assist 0.1.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- django_lucy_assist-0.1.0.dist-info/METADATA +206 -0
- django_lucy_assist-0.1.0.dist-info/RECORD +44 -0
- django_lucy_assist-0.1.0.dist-info/WHEEL +5 -0
- django_lucy_assist-0.1.0.dist-info/top_level.txt +1 -0
- lucy_assist/__init__.py +11 -0
- lucy_assist/admin.py +22 -0
- lucy_assist/apps.py +10 -0
- lucy_assist/conf.py +103 -0
- lucy_assist/constantes.py +120 -0
- lucy_assist/context_processors.py +65 -0
- lucy_assist/migrations/0001_initial.py +92 -0
- lucy_assist/migrations/__init__.py +0 -0
- lucy_assist/models/__init__.py +14 -0
- lucy_assist/models/base.py +54 -0
- lucy_assist/models/configuration.py +175 -0
- lucy_assist/models/conversation.py +54 -0
- lucy_assist/models/message.py +45 -0
- lucy_assist/models/project_context_cache.py +213 -0
- lucy_assist/services/__init__.py +21 -0
- lucy_assist/services/bug_notification_service.py +183 -0
- lucy_assist/services/claude_service.py +417 -0
- lucy_assist/services/context_service.py +350 -0
- lucy_assist/services/crud_service.py +364 -0
- lucy_assist/services/gitlab_service.py +248 -0
- lucy_assist/services/project_context_service.py +412 -0
- lucy_assist/services/tool_executor_service.py +343 -0
- lucy_assist/services/tools_definition.py +229 -0
- lucy_assist/signals.py +25 -0
- lucy_assist/static/lucy_assist/css/lucy-assist.css +160 -0
- lucy_assist/static/lucy_assist/image/icon-lucy.png +0 -0
- lucy_assist/static/lucy_assist/js/lucy-assist.js +824 -0
- lucy_assist/templates/lucy_assist/chatbot_sidebar.html +419 -0
- lucy_assist/templates/lucy_assist/partials/documentation_content.html +107 -0
- lucy_assist/tests/__init__.py +0 -0
- lucy_assist/tests/factories/__init__.py +15 -0
- lucy_assist/tests/factories/lucy_assist_factories.py +109 -0
- lucy_assist/tests/test_lucy_assist.py +186 -0
- lucy_assist/urls.py +36 -0
- lucy_assist/utils/__init__.py +7 -0
- lucy_assist/utils/log_utils.py +59 -0
- lucy_assist/utils/message_utils.py +130 -0
- lucy_assist/utils/token_utils.py +87 -0
- lucy_assist/views/__init__.py +13 -0
- lucy_assist/views/api_views.py +595 -0
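
For orientation, the listing above is a standard reusable Django app: lucy_assist is the importable package (per top_level.txt) and it ships models, migrations, templates, static assets, and a urls module. The sketch below shows how such an app would typically be wired into a host project; it assumes ordinary Django conventions, and the URL prefix and any extra settings expected by lucy_assist/conf.py are assumptions, not confirmed by this diff.

# settings.py of the host project -- illustrative sketch only.
# "lucy_assist" matches top_level.txt; anything else here is an assumption.
INSTALLED_APPS = [
    # ... existing apps ...
    "lucy_assist",
]

# urls.py of the host project -- the prefix is hypothetical.
from django.urls import include, path

urlpatterns = [
    path("lucy-assist/", include("lucy_assist.urls")),
]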
@@ -0,0 +1,412 @@
"""
Project context management service with intelligent caching.

Optimizes Claude token usage by caching:
- The project structure
- App summaries
- Common code patterns
- Model/view information
"""
import json
from typing import Dict, List, Optional, Any
from dataclasses import dataclass

from django.db import transaction

from lucy_assist.utils.log_utils import LogUtils
from lucy_assist.models import ProjectContextCache
from lucy_assist.services.gitlab_service import GitLabService


@dataclass
class CachedContext:
    """Represents a cached context."""
    key: str
    data: Dict
    from_cache: bool
    tokens_estimated: int


class ProjectContextService:
    """
    Service for managing the project context in an optimized way.

    Optimization strategies:
    1. Hierarchical cache (Redis/Django cache -> database)
    2. Pre-computed structure summaries
    3. Indexing of common patterns
    4. Context compression to reduce token usage
    """

    # Token estimate per type of content
    TOKENS_PER_CHAR = 0.25  # ~4 characters per token on average

    # TTL per cache type (in hours)
    CACHE_TTL = {
        'project_structure': 168,  # 7 days - rarely changes
        'app_summary': 72,         # 3 days
        'model_info': 48,          # 2 days
        'view_info': 24,           # 1 day
        'recent_changes': 1,       # 1 hour - changes often
    }

    def __init__(self):
        self.gitlab_service = GitLabService()

    def _estimate_tokens(self, content: Any) -> int:
        """Estimates the number of tokens for a piece of content."""
        if isinstance(content, dict) or isinstance(content, list):
            content = json.dumps(content, ensure_ascii=False)
        return int(len(str(content)) * self.TOKENS_PER_CHAR)

    def get_project_structure(self, force_refresh: bool = False) -> CachedContext:
        """
        Retrieves the project structure (apps, main models).

        This information rarely changes and can be cached for a long time.
        """
        cache_key = "project_structure"

        if not force_refresh:
            cached = ProjectContextCache.get_cached_content(cache_key)
            if cached:
                tokens_saved = self._estimate_tokens(cached)
                # Increment the hit counter
                try:
                    cache_obj = ProjectContextCache.objects.get(cache_key=cache_key)
                    cache_obj.increment_hit(tokens_saved)
                except ProjectContextCache.DoesNotExist:
                    pass

                return CachedContext(
                    key=cache_key,
                    data=cached,
                    from_cache=True,
                    tokens_estimated=tokens_saved
                )

        # Build the structure from GitLab
        structure = self._build_project_structure()

        # Save to cache
        content_hash = ProjectContextCache.compute_hash(structure)
        ProjectContextCache.set_cached_content(
            cache_key=cache_key,
            contenu=structure,
            content_hash=content_hash,
            ttl_hours=self.CACHE_TTL['project_structure']
        )

        return CachedContext(
            key=cache_key,
            data=structure,
            from_cache=False,
            tokens_estimated=self._estimate_tokens(structure)
        )

    def _build_project_structure(self) -> Dict:
        """Builds a summary of the project structure."""
        # Look up the Django apps
        apps_search = self.gitlab_service.search_code("class.*Config.*AppConfig")

        apps = []
        for result in apps_search[:15]:  # Limit to 15 apps
            filename = result.get('filename', '')
            if 'apps.py' in filename:
                app_name = filename.split('/')[1] if '/' in filename else filename.replace('/apps.py', '')
                apps.append({
                    'name': app_name,
                    'path': f"apps/{app_name}/"
                })

        # Compact summary
        return {
            'type': 'django_project',
            'apps': apps,
            'summary': f"Projet Django avec {len(apps)} applications",
            'main_apps': [a['name'] for a in apps[:5]]  # The first 5
        }

    def get_app_summary(self, app_name: str, force_refresh: bool = False) -> CachedContext:
        """
        Retrieves the summary of an application.

        Includes: models, main views, patterns used.
        """
        cache_key = f"app_summary_{app_name}"

        if not force_refresh:
            cached = ProjectContextCache.get_cached_content(cache_key)
            if cached:
                return CachedContext(
                    key=cache_key,
                    data=cached,
                    from_cache=True,
                    tokens_estimated=self._estimate_tokens(cached)
                )

        # Build the app summary
        summary = self._build_app_summary(app_name)

        ProjectContextCache.set_cached_content(
            cache_key=cache_key,
            contenu=summary,
            ttl_hours=self.CACHE_TTL['app_summary']
        )

        return CachedContext(
            key=cache_key,
            data=summary,
            from_cache=False,
            tokens_estimated=self._estimate_tokens(summary)
        )

    def _build_app_summary(self, app_name: str) -> Dict:
        """Builds a summary of a Django application."""
        summary = {
            'name': app_name,
            'models': [],
            'views': [],
            'urls_patterns': []
        }

        # Look up the models
        model_search = self.gitlab_service.search_code(f"class.*Model.*apps/{app_name}/models")
        for result in model_search[:10]:
            # Extract the model name from the result data
            data = result.get('data', '')
            if 'class ' in data:
                model_name = data.split('class ')[1].split('(')[0].strip()
                summary['models'].append(model_name)

        # Look up the main views
        view_search = self.gitlab_service.search_code(f"class.*View.*apps/{app_name}/views")
        for result in view_search[:10]:
            data = result.get('data', '')
            if 'class ' in data:
                view_name = data.split('class ')[1].split('(')[0].strip()
                summary['views'].append(view_name)

        return summary

    def get_model_info(
        self,
        model_name: str,
        include_code: bool = False,
        force_refresh: bool = False
    ) -> CachedContext:
        """
        Retrieves information about a model.

        Args:
            model_name: Name of the model
            include_code: Include the source code (more tokens)
            force_refresh: Force a refresh
        """
        cache_key = f"model_info_{model_name}_{include_code}"

        if not force_refresh:
            cached = ProjectContextCache.get_cached_content(cache_key)
            if cached:
                return CachedContext(
                    key=cache_key,
                    data=cached,
                    from_cache=True,
                    tokens_estimated=self._estimate_tokens(cached)
                )

        # Retrieve the model information
        model_data = self.gitlab_service.find_model_and_form(model_name)

        info = {
            'name': model_name,
            'model_file': model_data.get('model_file'),
            'form_file': model_data.get('form_file'),
            'has_form': model_data.get('form_code') is not None
        }

        if include_code:
            # Compress the code by keeping only the essential parts
            model_code = model_data.get('model_code', '')
            if model_code:
                info['model_fields'] = self._extract_model_fields(model_code, model_name)

        ProjectContextCache.set_cached_content(
            cache_key=cache_key,
            contenu=info,
            ttl_hours=self.CACHE_TTL['model_info']
        )

        return CachedContext(
            key=cache_key,
            data=info,
            from_cache=False,
            tokens_estimated=self._estimate_tokens(info)
        )

    def _extract_model_fields(self, code: str, model_name: str) -> List[str]:
        """Extracts the fields of a model (compressed version)."""
        import re

        fields = []
        # Pattern for finding Django fields
        field_pattern = r'(\w+)\s*=\s*models\.(\w+)'
        matches = re.findall(field_pattern, code)

        for field_name, field_type in matches[:20]:  # Limit to 20 fields
            fields.append(f"{field_name}: {field_type}")

        return fields

    def get_optimized_context(
        self,
        page_url: str,
        user_question: str,
        conversation_history: List[Dict] = None
    ) -> Dict:
        """
        Builds an optimized context for Claude.

        Intelligently selects the relevant information
        based on the question and the current page.

        Returns:
            Dict with the optimized context and the stats
        """
        context = {
            'page': page_url,
            'relevant_info': {},
            'stats': {
                'tokens_from_cache': 0,
                'tokens_fresh': 0,
                'cache_hits': 0
            }
        }

        # 1. Always include a lightweight project summary
        project = self.get_project_structure()
        if project.from_cache:
            context['stats']['tokens_from_cache'] += project.tokens_estimated
            context['stats']['cache_hits'] += 1
        else:
            context['stats']['tokens_fresh'] += project.tokens_estimated

        context['relevant_info']['project'] = {
            'apps': project.data.get('main_apps', []),
            'summary': project.data.get('summary', '')
        }

        # 2. Detect which app the current page belongs to
        app_name = self._detect_app_from_url(page_url)
        if app_name:
            app_summary = self.get_app_summary(app_name)
            if app_summary.from_cache:
                context['stats']['tokens_from_cache'] += app_summary.tokens_estimated
                context['stats']['cache_hits'] += 1
            else:
                context['stats']['tokens_fresh'] += app_summary.tokens_estimated

            context['relevant_info']['current_app'] = {
                'name': app_name,
                'models': app_summary.data.get('models', [])[:5],
                'views': app_summary.data.get('views', [])[:5]
            }

        # 3. If the question mentions a specific model
        model_name = self._detect_model_from_question(user_question)
        if model_name:
            model_info = self.get_model_info(model_name)
            if model_info.from_cache:
                context['stats']['tokens_from_cache'] += model_info.tokens_estimated
                context['stats']['cache_hits'] += 1

            context['relevant_info']['model'] = model_info.data

        return context

    def _detect_app_from_url(self, url: str) -> Optional[str]:
        """Detects the Django application from the URL."""
        parts = url.strip('/').split('/')
        if parts:
            return parts[0]
        return None

    def _detect_model_from_question(self, question: str) -> Optional[str]:
        """Detects whether the question mentions a model."""
        # Common keywords and their associated models
        model_keywords = {
            'membre': 'Membre',
            'adhésion': 'Adhesion',
            'cotisation': 'Cotisation',
            'utilisateur': 'Utilisateur',
            'user': 'Utilisateur',
            'paiement': 'Paiement',
            'facture': 'Facture',
            'structure': 'Structure',
        }

        question_lower = question.lower()
        for keyword, model in model_keywords.items():
            if keyword in question_lower:
                return model

        return None

    def summarize_conversation(
        self,
        messages: List[Dict],
        max_tokens: int = 500
    ) -> Optional[Dict]:
        """
        Summarizes a long conversation to save tokens.

        Instead of sending the whole history, builds a summary
        of the key points of the conversation.
        """
        if not messages or len(messages) <= 4:
            # Short conversation, no need to summarize
            return None

        # Keep the first 2 and last 2 messages;
        # summarize the middle
        first_messages = messages[:2]
        last_messages = messages[-2:]
        middle_messages = messages[2:-2]

        if not middle_messages:
            return None

        # Build a simple textual summary
        summary_parts = []

        for msg in middle_messages:
            role = "Utilisateur" if msg.get('role') == 'user' else "Assistant"
            content = msg.get('content', '')[:100]  # Truncate
            summary_parts.append(f"- {role}: {content}...")

        summary = "Résumé des échanges précédents:\n" + "\n".join(summary_parts)

        # Return the optimized format
        return {
            'type': 'conversation_summary',
            'first_messages': first_messages,
            'summary': summary,
            'last_messages': last_messages,
            'original_count': len(messages),
            'tokens_saved_estimate': self._estimate_tokens(middle_messages) - self._estimate_tokens(summary)
        }

    @transaction.atomic
    def cleanup_expired_caches(self) -> int:
        """Deletes expired cache entries."""
        from django.utils import timezone

        deleted_count, _ = ProjectContextCache.objects.filter(
            expire_at__lt=timezone.now()
        ).delete()

        return deleted_count

    def get_cache_stats(self) -> Dict:
        """Returns cache statistics."""
        return ProjectContextCache.get_stats()
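
As an illustration of how the pieces above fit together, here is a usage sketch based only on the code shown in this diff. It assumes a configured Django project with lucy_assist installed and its migrations applied, plus whatever GitLab access GitLabService requires; the page URL and question values are hypothetical.

from lucy_assist.services.project_context_service import ProjectContextService

service = ProjectContextService()

# Build a token-optimized context for a chatbot request. Cached pieces
# (project structure, app summary, model info) are reused when present;
# otherwise they are rebuilt via GitLabService and stored with their TTL.
context = service.get_optimized_context(
    page_url="/membres/42/",                       # hypothetical URL
    user_question="Comment modifier un membre ?",  # hypothetical question
)
print(context["stats"])  # tokens_from_cache / tokens_fresh / cache_hits

# Housekeeping, e.g. from a management command or a periodic task.
deleted = service.cleanup_expired_caches()
print(f"{deleted} expired cache entries removed")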