django-lucy-assist 1.1.1__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {django_lucy_assist-1.1.1.dist-info → django_lucy_assist-1.2.0.dist-info}/METADATA +39 -26
- {django_lucy_assist-1.1.1.dist-info → django_lucy_assist-1.2.0.dist-info}/RECORD +13 -13
- lucy_assist/__init__.py +2 -2
- lucy_assist/conf.py +18 -18
- lucy_assist/constantes.py +1 -1
- lucy_assist/context_processors.py +2 -2
- lucy_assist/services/__init__.py +3 -2
- lucy_assist/services/mistral_service.py +489 -0
- lucy_assist/services/tool_executor_service.py +10 -10
- lucy_assist/services/tools_definition.py +238 -208
- lucy_assist/views/api_views.py +7 -7
- lucy_assist/services/claude_service.py +0 -423
- {django_lucy_assist-1.1.1.dist-info → django_lucy_assist-1.2.0.dist-info}/WHEEL +0 -0
- {django_lucy_assist-1.1.1.dist-info → django_lucy_assist-1.2.0.dist-info}/top_level.txt +0 -0
lucy_assist/services/mistral_service.py (new file)

````diff
@@ -0,0 +1,489 @@
+"""
+Service d'integration avec Mistral AI API.
+
+Optimisations tokens:
+- Cache du contexte projet via ProjectContextService
+- Resume des conversations longues
+- Compression intelligente du contexte
+
+Tools CRUD:
+- Mistral peut executer des actions CRUD via les tools
+- Les tools sont executes cote serveur avec les permissions de l'utilisateur
+"""
+import json
+from typing import Generator, List, Dict, Any
+
+from mistralai import Mistral
+
+from lucy_assist.utils.log_utils import LogUtils
+from lucy_assist.constantes import LucyAssistConstantes
+from lucy_assist.services.tools_definition import LUCY_ASSIST_TOOLS
+from lucy_assist.conf import lucy_assist_settings
+
+
+class MistralService:
+    """Service pour interagir avec l'API Mistral AI."""
+
+    MAX_TOKENS = 4096
+
+    # Seuils pour l'optimisation
+    MAX_MESSAGES_BEFORE_SUMMARY = 10  # Resumer apres 10 messages
+    MAX_CONTEXT_TOKENS = 2000  # Limiter le contexte a 2000 tokens estimes
+
+    def __init__(self):
+        self.api_key = lucy_assist_settings.MISTRAL_LUCY_API_KEY
+        if not self.api_key:
+            raise ValueError("MISTRAL_LUCY_API_KEY non configuree dans les settings")
+
+        self.client = Mistral(api_key=self.api_key)
+        self._project_context_service = None
+        self._tools = LUCY_ASSIST_TOOLS
+        self._model = lucy_assist_settings.MISTRAL_MODEL
+
+    @property
+    def project_context_service(self):
+        """Lazy loading du service de contexte projet."""
+        if self._project_context_service is None:
+            from lucy_assist.services.project_context_service import ProjectContextService
+            self._project_context_service = ProjectContextService()
+        return self._project_context_service
+
+    def _build_system_prompt(
+        self,
+        page_context: Dict,
+        user,
+        user_question: str = ""
+    ) -> str:
+        """
+        Construit le prompt systeme avec le contexte optimise.
+
+        Utilise le cache pour reduire la redondance des informations
+        sur le projet.
+        """
+        # Recuperer les permissions utilisateur (compressees)
+        user_permissions = []
+        if hasattr(user, 'get_all_permissions'):
+            # Ne garder que les permissions pertinentes (sans prefixe d'app commun)
+            all_perms = list(user.get_all_permissions())
+            user_permissions = [p.split('.')[-1] for p in all_perms[:15]]
+
+        # Recuperer le contexte projet optimise depuis le cache
+        page_url = page_context.get('page_url', page_context.get('url', ''))
+        optimized_context = self.project_context_service.get_optimized_context(
+            page_url=page_url,
+            user_question=user_question
+        )
+
+        # Fusionner le contexte de page avec le contexte projet cache
+        enriched_context = {
+            'page': page_context,
+            'projet': optimized_context.get('relevant_info', {}),
+            'cache_stats': optimized_context.get('stats', {})
+        }
+
+        # Construire le prompt avec contexte compact
+        prompt = LucyAssistConstantes.SYSTEM_PROMPTS['default'].format(
+            page_context=json.dumps(enriched_context, ensure_ascii=False, indent=2),
+            user_permissions=', '.join(user_permissions)
+        )
+
+        # Ajouter les instructions complementaires si configurees
+        from lucy_assist.models import ConfigurationLucyAssist
+        config = ConfigurationLucyAssist.get_config()
+        if config.prompt_complementaire:
+            prompt += f"\n\n## Instructions complementaires\n{config.prompt_complementaire}"
+
+        return prompt
+
+    def _optimize_messages(self, messages: List) -> List[Dict]:
+        """
+        Optimise l'historique des messages pour reduire les tokens.
+
+        Pour les conversations longues, resume les anciens messages
+        au lieu de les envoyer en entier.
+        """
+        formatted = self._format_messages(messages)
+
+        if len(formatted) <= self.MAX_MESSAGES_BEFORE_SUMMARY:
+            return formatted
+
+        # Resumer la conversation
+        summary_data = self.project_context_service.summarize_conversation(
+            formatted,
+            max_tokens=500
+        )
+
+        if not summary_data:
+            return formatted
+
+        # Reconstruire les messages avec le resume
+        optimized = []
+
+        # Ajouter les premiers messages
+        optimized.extend(summary_data['first_messages'])
+
+        # Ajouter le resume comme message systeme
+        optimized.append({
+            'role': 'user',
+            'content': f"[Note: {summary_data['original_count'] - 4} messages resumes]\n{summary_data['summary']}"
+        })
+
+        # Ajouter les derniers messages
+        optimized.extend(summary_data['last_messages'])
+
+        LogUtils.info(
+            f"Conversation optimisee: {len(formatted)} -> {len(optimized)} messages, "
+            f"~{summary_data.get('tokens_saved_estimate', 0)} tokens economises"
+        )
+
+        return optimized
+
+    def _format_messages(self, messages: List) -> List[Dict]:
+        """Formate les messages pour l'API Mistral."""
+        formatted = []
+
+        for msg in messages:
+            role = "user" if msg.repondant == LucyAssistConstantes.Repondant.UTILISATEUR else "assistant"
+            formatted.append({
+                "role": role,
+                "content": msg.contenu
+            })
+
+        return formatted
+
+    def _convert_tool_results_for_mistral(self, tool_results: List[Dict]) -> List[Dict]:
+        """Convertit les resultats de tools au format Mistral."""
+        mistral_results = []
+        for result in tool_results:
+            mistral_results.append({
+                "role": "tool",
+                "tool_call_id": result.get('tool_use_id', result.get('tool_call_id')),
+                "content": result.get('content', '{}')
+            })
+        return mistral_results
+
+    def chat_completion_stream(
+        self,
+        messages: List,
+        page_context: Dict,
+        user,
+        tool_executor=None
+    ) -> Generator[Dict[str, Any], None, None]:
+        """
+        Genere une reponse en streaming avec support des tools.
+
+        Args:
+            messages: Liste des messages de la conversation
+            page_context: Contexte de la page courante
+            user: Utilisateur Django
+            tool_executor: Callable pour executer les tools (optionnel)
+
+        Yields:
+            Dict avec 'type' (content/tool_use/tool_result/usage/error) et les donnees associees
+        """
+        try:
+            # Extraire la question utilisateur du dernier message
+            user_question = ""
+            if messages:
+                last_msg = messages[-1] if hasattr(messages[-1], 'contenu') else messages[-1]
+                user_question = getattr(last_msg, 'contenu', '') if hasattr(last_msg, 'contenu') else str(last_msg)
+
+            system_prompt = self._build_system_prompt(page_context, user, user_question)
+
+            # Utiliser l'optimisation des messages pour les longues conversations
+            formatted_messages = self._optimize_messages(messages)
+
+            if not formatted_messages:
+                yield {'type': 'error', 'error': 'Aucun message a traiter'}
+                return
+
+            # Ajouter le system prompt comme premier message
+            messages_with_system = [
+                {"role": "system", "content": system_prompt}
+            ] + formatted_messages
+
+            # Boucle pour gerer les appels de tools
+            current_messages = messages_with_system.copy()
+            max_tool_iterations = 5  # Limite de securite
+
+            for iteration in range(max_tool_iterations):
+                response_text = ""
+                tool_calls = []
+                total_input_tokens = 0
+                total_output_tokens = 0
+
+                # Streaming avec Mistral
+                stream_response = self.client.chat.stream(
+                    model=self._model,
+                    max_tokens=self.MAX_TOKENS,
+                    messages=current_messages,
+                    tools=self._tools,
+                    tool_choice="auto"
+                )
+
+                # Collecter le contenu et les tool calls
+                for chunk in stream_response:
+                    if chunk.data.choices:
+                        choice = chunk.data.choices[0]
+                        delta = choice.delta
+
+                        # Contenu textuel
+                        if delta.content:
+                            response_text += delta.content
+                            yield {'type': 'content', 'content': delta.content}
+
+                        # Tool calls
+                        if delta.tool_calls:
+                            for tc in delta.tool_calls:
+                                # Accumuler les tool calls
+                                if tc.id:
+                                    tool_calls.append({
+                                        'id': tc.id,
+                                        'name': tc.function.name if tc.function else '',
+                                        'arguments': tc.function.arguments if tc.function else ''
+                                    })
+                                elif tool_calls and tc.function:
+                                    # Continuation du dernier tool call
+                                    tool_calls[-1]['arguments'] += tc.function.arguments or ''
+
+                    # Usage tokens
+                    if chunk.data.usage:
+                        total_input_tokens = chunk.data.usage.prompt_tokens
+                        total_output_tokens = chunk.data.usage.completion_tokens
+
+                # Verifier le stop reason
+                finish_reason = None
+                if stream_response:
+                    # Le dernier chunk contient le finish_reason
+                    pass  # finish_reason est dans le dernier choice
+
+                # Si pas de tool calls, on a fini
+                if not tool_calls:
+                    if total_input_tokens or total_output_tokens:
+                        yield {
+                            'type': 'usage',
+                            'input_tokens': total_input_tokens,
+                            'output_tokens': total_output_tokens,
+                            'total_tokens': total_input_tokens + total_output_tokens
+                        }
+                    return
+
+                # Executer les tools
+                tool_results = []
+                assistant_tool_calls = []
+
+                for tool_call in tool_calls:
+                    tool_name = tool_call['name']
+                    try:
+                        tool_input = json.loads(tool_call['arguments'])
+                    except json.JSONDecodeError:
+                        tool_input = {}
+
+                    yield {
+                        'type': 'tool_use',
+                        'tool_name': tool_name,
+                        'tool_input': tool_input
+                    }
+
+                    # Preparer le format pour l'assistant message
+                    assistant_tool_calls.append({
+                        "id": tool_call['id'],
+                        "type": "function",
+                        "function": {
+                            "name": tool_name,
+                            "arguments": tool_call['arguments']
+                        }
+                    })
+
+                    # Executer le tool si un executor est fourni
+                    if tool_executor:
+                        try:
+                            result = tool_executor(
+                                tool_name,
+                                tool_input,
+                                user
+                            )
+                            tool_results.append({
+                                'role': 'tool',
+                                'tool_call_id': tool_call['id'],
+                                'content': json.dumps(result, ensure_ascii=False)
+                            })
+                            yield {
+                                'type': 'tool_result',
+                                'tool_name': tool_name,
+                                'result': result
+                            }
+                        except Exception as e:
+                            error_result = {'error': str(e)}
+                            tool_results.append({
+                                'role': 'tool',
+                                'tool_call_id': tool_call['id'],
+                                'content': json.dumps(error_result)
+                            })
+                            yield {
+                                'type': 'tool_error',
+                                'tool_name': tool_name,
+                                'error': str(e)
+                            }
+                    else:
+                        # Pas d'executor, on ne peut pas executer le tool
+                        tool_results.append({
+                            'role': 'tool',
+                            'tool_call_id': tool_call['id'],
+                            'content': json.dumps({'error': 'Tool executor not available'})
+                        })
+
+                # Ajouter les messages pour continuer la conversation
+                # Message assistant avec tool_calls
+                current_messages.append({
+                    'role': 'assistant',
+                    'content': response_text or None,
+                    'tool_calls': assistant_tool_calls
+                })
+
+                # Messages tool results
+                current_messages.extend(tool_results)
+
+        except Exception as e:
+            LogUtils.error(f"Erreur lors de l'appel Mistral: {e}")
+            error_message = str(e)
+            if "rate" in error_message.lower() or "limit" in error_message.lower():
+                yield {'type': 'error', 'error': 'Service temporairement surcharge, veuillez reessayer'}
+            elif "connection" in error_message.lower() or "connect" in error_message.lower():
+                yield {'type': 'error', 'error': 'Impossible de se connecter au service IA'}
+            else:
+                yield {'type': 'error', 'error': error_message}
+
+    def chat_completion(
+        self,
+        messages: List,
+        page_context: Dict,
+        user
+    ) -> Dict[str, Any]:
+        """
+        Genere une reponse complete (non-streaming).
+
+        Returns:
+            Dict avec 'content', 'tokens_utilises', ou 'error'
+        """
+        try:
+            # Extraire la question utilisateur
+            user_question = ""
+            if messages:
+                last_msg = messages[-1] if hasattr(messages[-1], 'contenu') else messages[-1]
+                user_question = getattr(last_msg, 'contenu', '') if hasattr(last_msg, 'contenu') else str(last_msg)
+
+            system_prompt = self._build_system_prompt(page_context, user, user_question)
+
+            # Utiliser l'optimisation des messages
+            formatted_messages = self._optimize_messages(messages)
+
+            if not formatted_messages:
+                return {'error': 'Aucun message a traiter'}
+
+            # Ajouter le system prompt comme premier message
+            messages_with_system = [
+                {"role": "system", "content": system_prompt}
+            ] + formatted_messages
+
+            response = self.client.chat.complete(
+                model=self._model,
+                max_tokens=self.MAX_TOKENS,
+                messages=messages_with_system
+            )
+
+            content = ""
+            if response.choices:
+                content = response.choices[0].message.content or ""
+
+            total_tokens = 0
+            input_tokens = 0
+            output_tokens = 0
+            if response.usage:
+                input_tokens = response.usage.prompt_tokens
+                output_tokens = response.usage.completion_tokens
+                total_tokens = input_tokens + output_tokens
+
+            return {
+                'content': content,
+                'tokens_utilises': total_tokens,
+                'input_tokens': input_tokens,
+                'output_tokens': output_tokens
+            }
+
+        except Exception as e:
+            LogUtils.error(f"Erreur lors de l'appel Mistral: {e}")
+            return {'error': str(e)}
+
+    def analyze_code_for_bug(
+        self,
+        error_message: str,
+        code_context: str,
+        user_description: str
+    ) -> Dict[str, Any]:
+        """
+        Analyse du code pour detecter un bug potentiel.
+
+        Returns:
+            Dict avec 'is_bug', 'analysis', 'recommendation'
+        """
+        prompt = f"""Analyse le probleme suivant signale par un utilisateur:
+
+Description de l'utilisateur: {user_description}
+
+Message d'erreur (si disponible): {error_message}
+
+Code source pertinent:
+```
+{code_context}
+```
+
+Reponds au format JSON avec les cles suivantes:
+- is_bug: boolean (true si c'est un bug dans le code, false si c'est une erreur utilisateur)
+- analysis: string (explication du probleme)
+- recommendation: string (recommandation pour resoudre le probleme)
+- severity: string (low/medium/high si c'est un bug)
+"""
+
+        try:
+            response = self.client.chat.complete(
+                model=self._model,
+                max_tokens=1024,
+                messages=[
+                    {"role": "system", "content": "Tu es un expert en analyse de bugs. Reponds uniquement en JSON valide."},
+                    {"role": "user", "content": prompt}
+                ]
+            )
+
+            content = ""
+            if response.choices:
+                content = response.choices[0].message.content or "{}"
+
+            # Essayer de parser le JSON
+            try:
+                # Extraire le JSON de la reponse
+                import re
+                json_match = re.search(r'\{[^{}]*\}', content, re.DOTALL)
+                if json_match:
+                    return json.loads(json_match.group())
+            except json.JSONDecodeError:
+                pass
+
+            return {
+                'is_bug': False,
+                'analysis': content,
+                'recommendation': 'Contactez le support si le probleme persiste.'
+            }
+
+        except Exception as e:
+            LogUtils.error(f"Erreur lors de l'analyse de bug: {e}")
+            return {
+                'error': str(e),
+                'is_bug': False,
+                'analysis': 'Impossible d\'analyser le probleme',
+                'recommendation': 'Veuillez contacter le support technique.'
+            }
+
+
+# Alias pour compatibilite avec l'ancien code
+ClaudeService = MistralService
````
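For readers who want to see how this new service is meant to be driven, here is a minimal sketch of a Django view consuming `chat_completion_stream()` and relaying its events as server-sent events. This is illustration only, not code from the package: the view name, the SSE transport and the `run_tool` stub are assumptions (the real wiring lives in `lucy_assist/views/api_views.py`, which this release also modifies but whose diff is not shown here).

```python
# Hypothetical consumer of MistralService.chat_completion_stream (sketch, not package code).
import json

from django.http import StreamingHttpResponse

from lucy_assist.services.mistral_service import MistralService


def lucy_assist_stream_view(request):
    """Relay Mistral events to the browser as server-sent events (illustration)."""
    service = MistralService()  # requires MISTRAL_LUCY_API_KEY / MISTRAL_MODEL in settings

    def run_tool(tool_name, tool_input, user):
        # The callback signature (tool_name, tool_input, user) matches what
        # chat_completion_stream() calls above. In the package this would delegate
        # to ToolExecutorService; this stub just echoes the request.
        return {"tool": tool_name, "input": tool_input, "status": "not_executed"}

    conversation_messages = []  # in practice: the stored conversation messages exposing
                                # .repondant and .contenu (see _format_messages above)

    def event_stream():
        events = service.chat_completion_stream(
            messages=conversation_messages,
            page_context={"page_url": request.GET.get("page_url", "")},
            user=request.user,
            tool_executor=run_tool,
        )
        for event in events:
            # Each event is a dict whose 'type' is one of:
            # content / tool_use / tool_result / tool_error / usage / error.
            yield f"data: {json.dumps(event, ensure_ascii=False)}\n\n"

    return StreamingHttpResponse(event_stream(), content_type="text/event-stream")
```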
lucy_assist/services/tool_executor_service.py

```diff
@@ -1,7 +1,7 @@
 """
-Service d'
+Service d'execution des tools pour Lucy Assist.

-Ce service fait le lien entre les appels de tools de
+Ce service fait le lien entre les appels de tools de Mistral
 et les services CRUD/Context de l'application.
 """
 from typing import Dict, Any
@@ -17,7 +17,7 @@ from lucy_assist.services.bug_notification_service import BugNotificationService


 class ToolExecutorService:
-    """Service pour
+    """Service pour executer les tools appeles par Mistral."""

     def __init__(self, user):
         self.user = user
@@ -270,10 +270,10 @@ class ToolExecutorService:

     def _handle_analyze_bug(self, params: Dict) -> Dict:
         """
-        Analyse un bug potentiel en utilisant GitLab et
-        Si un bug est
+        Analyse un bug potentiel en utilisant GitLab et Mistral.
+        Si un bug est detecte, envoie automatiquement une notification a Revolucy.
         """
-        from lucy_assist.services.
+        from lucy_assist.services.mistral_service import MistralService

         user_description = params.get('user_description', '')
         error_message = params.get('error_message', '')
@@ -331,16 +331,16 @@ class ToolExecutorService:
             LogUtils.error(f"Erreur lors de la récupération du code GitLab: {e}")
             code_context = f"Impossible de récupérer le code source: {str(e)}"

-        # Analyser le bug avec
+        # Analyser le bug avec Mistral
         try:
-
-            bug_analysis =
+            mistral_service = MistralService()
+            bug_analysis = mistral_service.analyze_code_for_bug(
                 error_message=error_message,
                 code_context=code_context,
                 user_description=user_description
             )
         except Exception as e:
-            LogUtils.error(f"Erreur lors de l'analyse
+            LogUtils.error(f"Erreur lors de l'analyse Mistral: {e}")
             bug_analysis = {
                 'is_bug': False,
                 'analysis': f'Impossible d\'analyser: {str(e)}',
```