mindroot 10.5.0__py3-none-any.whl → 10.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindroot might be problematic. Click here for more details.

@@ -0,0 +1,625 @@
1
+ from lib.providers.services import service, service_manager
2
+ from lib.providers.commands import command_manager, command
3
+ from lib.providers.hooks import hook
4
+ from lib.pipelines.pipe import pipeline_manager, pipe
5
+ from lib.chatcontext import ChatContext
6
+ from lib.chatlog import ChatLog
7
+ from typing import List
8
+ from lib.utils.dataurl import dataurl_to_pil
9
+ from .models import MessageParts
10
+ from coreplugins.agent import agent
11
+ from lib.utils.debug import debug_box
12
+ import os
13
+ import sys
14
+ import colored
15
+ import time
16
+ import traceback
17
+ import asyncio
18
+ import json
19
+ import termcolor
20
+ from PIL import Image
21
+ from io import BytesIO
22
+ import base64
23
+ import nanoid
24
+ sse_clients = {}
25
+ from lib.chatcontext import get_context
26
+
27
+ # Track active processing tasks per session
28
+ active_tasks = {}
29
+
30
@service()
async def prompt(model: str, instructions: str, temperature=0, max_tokens=400, json=False, context=None):
    """Send a single instruction prompt to the model and return the streamed text.

    Args:
        model: Model identifier forwarded to context.stream_chat.
        instructions: The user prompt text.
        temperature: Sampling temperature (0 by default for determinism).
        max_tokens: Generation limit.
        json: Request JSON-mode output from the model.
        context: Chat context providing stream_chat(); required.

    Returns:
        The concatenation of all non-empty streamed chunks.
    """
    messages = [
        { "role": "system",
          "content": "Respond to prompt with no extraneous commentary."
        },
        { "role": "user",
          "content": instructions
        }]

    # BUGFIX: the `json` parameter was previously ignored (the call hard-coded
    # json=False); forward the caller's value so JSON-mode actually works.
    stream = await context.stream_chat(model, temperature=temperature,
                                       max_tokens=max_tokens,
                                       messages=messages,
                                       json=json,
                                       context=context)
    text = ""
    if os.environ.get("AH_DEBUG") == "True":
        print("Prompting, instructions ", instructions)
    async for chunk in stream:
        # Skip empty keep-alive chunks.
        if chunk is None or chunk == "":
            continue
        text += chunk
        if os.environ.get("AH_DEBUG") == "True":
            print(chunk, end='', flush=True)

    return text
58
+
59
+
60
def results_text(results):
    """Join the 'text' or 'markdown' arg of each result with newlines.

    Results lacking both keys are skipped; trailing whitespace is stripped.
    """
    parts = []
    for entry in results:
        args = entry['args']
        if 'text' in args:
            parts.append(args['text'])
        elif 'markdown' in args:
            parts.append(args['markdown'])
    return "\n".join(parts).rstrip()
69
+
70
def results_output(results):
    """Return the string form of the first result carrying an 'output' arg.

    BUGFIX: previously fell off the end and implicitly returned None when no
    result had an 'output', which broke callers comparing against "".
    Now returns "" in that case. The unused `text` accumulator was removed.
    """
    for result in results:
        if 'output' in result['args']:
            return str(result['args']['output'])
    return ""
75
+
76
def results_text_output(results):
    """Return the raw 'output' value of the first result that has one, or "".

    Unlike results_output(), the value is returned unconverted (any type).
    BUGFIX: the original accumulated into `text` *after* an unconditional
    `return` inside the loop — unreachable dead code, removed here. The
    observable behavior (first output, else "") is unchanged.
    """
    for result in results:
        if 'output' in result['args']:
            return result['args']['output']
    return ""
84
+
85
+
86
@service()
async def run_task(instructions: str, agent_name:str = None, user:str = None, log_id=None,
                   parent_log_id=None, llm=None, retries=3, context=None):
    """
    Run a task with the given instructions and return the agent's output.

    Args:
        instructions: Task prompt for the agent.
        agent_name: Agent to run as (required when context is None).
        user: Username (required when context is None).
        log_id: Chat log id; generated via nanoid when None.
        parent_log_id: Optional parent session id for delegated tasks.
        llm: Optional model override.
        retries: Number of re-prompts when the agent fails to call task_result().
        context: Existing ChatContext to reuse; a fresh one is built if None.

    Returns:
        (text, full_results, log_id); text is None/"" if no output was produced.

    IMPORTANT NOTE: agent must have the task_result() command enabled.
    """

    if context is None:
        debug_box("Context is none")
        # BUGFIX: str() guards the concat — agent_name may be None here, and
        # the proper "agent_name required" exception is raised just below.
        debug_box("agent_name: " + str(agent_name))
        if log_id is None:
            log_id = nanoid.generate()
        if user is None:
            raise Exception("chat: run_task: user required")
        if agent_name is None:
            raise Exception("chat: run_task: agent_name required")
        context = ChatContext(command_manager, service_manager, user)
        context.agent_name = agent_name
        context.username = user
        context.name = agent_name
        context.log_id = log_id
        context.parent_log_id = parent_log_id
        context.agent = await service_manager.get_agent_data(agent_name)
        context.data['llm'] = llm
        context.current_model = llm
        context.chat_log = ChatLog(log_id=log_id, agent=agent_name, user=user, parent_log_id=parent_log_id)
        await context.save_context()
    else:
        debug_box("Context is not none")
        print(context)

    print("run_task: ", instructions, "log_id: ", context.log_id)

    await init_chat_session(context.username, context.agent_name, context.log_id, context)

    retried = 0

    msg = """
# SYSTEM NOTE

This task is being run via API and requires a textual or structured output.
If your instructions indicate multiple steps with multiple function calls,
wait for the system results as you process each step in turn, then
call task_result() with the final output after all steps are truly complete.
You MUST call task_result() with the final output if you are completing the task.
For multi-stage tasks, do not call task_result until the final step is complete.

"""

    instructions = instructions + msg

    # BUGFIX: `text`/`full_results` were unbound when retries <= 0, and
    # results_output() may return None (not ""), so the old `text == ""`
    # retry check never fired. Initialize both and use a truthiness check.
    text = None
    full_results = []
    while retried < retries:
        [results, full_results] = await send_message_to_agent(context.log_id, instructions, context=context)
        print('#####################################################33')
        print("Full results: ", full_results)
        print("Results: ", results)
        text = results_output(full_results)
        if not text:
            retried += 1
            debug_box(f"No output found, retrying task: {retried}")
            instructions += f"\n\nNo output found (call task_result()!), retrying task: {retried}"
        else:
            debug_box(f"Task output found: {text}")
            break

    return (text, full_results, context.log_id)
154
+
155
+
156
@service()
async def init_chat_session(user:str, agent_name: str, log_id: str, context=None):
    """Ensure a chat context exists for (user, agent_name, log_id), persist it,
    and return the log_id. Raises when agent_name or log_id is missing/empty.
    """
    if agent_name in (None, "") or log_id in (None, ""):
        print("Invalid agent_name or log_id")
        print("agent_name: ", agent_name)
        print("log_id: ", log_id)
        raise Exception("Invalid agent_name or log_id")

    if context is None:
        # Build a fresh context bound to this agent and session.
        context = ChatContext(command_manager, service_manager, user)
        context.agent_name = agent_name
        context.name = agent_name
        context.log_id = log_id
        context.agent = await service_manager.get_agent_data(agent_name)
        context.chat_log = ChatLog(log_id=log_id, agent=agent_name, user=user)
        print("context.agent_name: ", context.agent_name)

    await context.save_context()
    print("initiated_chat_session: ", log_id, agent_name, context.agent_name, context.agent)
    return log_id
175
+
176
@service()
async def get_chat_history(agent_name: str, session_id: str, user:str):
    """Return recent messages for a session, tagging each with a persona:
    'user' for user-role messages, the agent's persona name otherwise.
    """
    print("-----------------")
    print("get_chat_history: ", agent_name, session_id)
    agent_data = await service_manager.get_agent_data(agent_name)

    persona_name = agent_data['persona']['name']
    log = ChatLog(log_id=session_id, agent=agent_name, user=user)
    print("Got chat chat log")
    recent = log.get_recent()
    print("messages length: ", len(recent))
    for entry in recent:
        entry['persona'] = 'user' if entry['role'] == 'user' else persona_name
    return recent
195
+
196
def process_result(result, formatted_results):
    """Flatten one command result into display parts, appending to formatted_results.

    Image payloads are detached into their own entries (replaced by a '...'
    placeholder in the JSON text blob); list payloads that contain an image
    are processed recursively; everything else is serialized as a text part.

    Returns formatted_results (which is also mutated in place).
    """
    print("type of result is ", type(result))
    payload = result['result'] if 'result' in result else None
    # Idiom fix: isinstance() instead of `type(...) is ...` (also accepts
    # dict/list subclasses); behavior otherwise unchanged.
    if isinstance(payload, dict) and 'type' in payload and 'image' in payload['type']:
        print("A")
        # Detach the image payload; keep a placeholder in the serialized text.
        img_data = payload
        result['result'] = '...'
        new_result = { "type": "text", "text": json.dumps(result) }
        formatted_results.append(new_result)
        formatted_results.append(img_data)
    elif isinstance(payload, list):
        print("B")
        # Cheap substring probe for an embedded image anywhere in the list.
        found_image = json.dumps(payload).find('"image"') > -1
        if found_image:
            print("Found image")
            for item in payload:
                process_result({ "result": item}, formatted_results)
        else:
            new_result = { "type": "text", "text": json.dumps(payload) }
            formatted_results.append(new_result)
    else:
        print("C")
        new_result = { "type": "text", "text": json.dumps(result) }
        formatted_results.append(new_result)

    print("length of results is ", len(formatted_results))
    return formatted_results
225
+
226
+ # Deprecated - use active_tasks instead
227
+ in_progress = {}
228
+
229
+
230
+ # seems like sometimes it's too late to cancel
231
+ # so I tried just aborting
232
+ # but then the other one got cancelled by the interruption
233
+ # and since this was cancelled, we never responded
234
+ #
235
+ # but if we try to continue, we end up with both running
236
+
237
@service()
async def send_message_to_agent(session_id: str, message: str | List[MessageParts], max_iterations=35, context=None, user=None):
    """Deliver a user message to the agent for session_id and drive the
    command/response loop until the agent stops, errors, or max_iterations
    is reached.

    Args:
        session_id: Chat log / session identifier.
        message: Plain text, or a list of MessageParts (text/image parts).
        max_iterations: Upper bound on agent round-trips (overridable via the
            MR_MAX_ITERATIONS environment variable).
        context: Optional pre-loaded ChatContext; loaded from session_id if None.
        user: Optional user info (plain dict or object with .dict()); falls
            back to context.username when absent.

    Returns:
        [results, full_results] on success, [] on invalid input or error.

    Side effects: cancels any in-flight task for the same session, appends to
    the chat log, and registers itself in the module-level active_tasks map.
    """
    global in_progress, active_tasks

    # Check if there's an active task for this session
    existing_task = active_tasks.get(session_id)

    if not user:
        # Fall back to the username carried on the supplied context.
        if not context.username:
            raise Exception("User required")
        else:
            user = {"user": context.username }
    else:
        # Normalize pydantic-style user objects to a plain dict.
        if hasattr(user, "dict"):
            user = user.dict()

    # If there's an existing task, cancel it and wait for it to finish
    if existing_task and not existing_task.done():
        print(f"SEND_MESSAGE: Cancelling existing task for session {session_id}")

        # Load the context to set cancellation flags
        try:
            existing_context = await get_context(session_id, user)
            existing_context.data['cancel_current_turn'] = True

            # Cancel any active command task
            if 'active_command_task' in existing_context.data:
                cmd_task = existing_context.data['active_command_task']
                if cmd_task and not cmd_task.done():
                    cmd_task.cancel()

            await existing_context.save_context()
        except Exception as e:
            print(f"Error setting cancellation flags: {e}")

        # Cancel the main task
        existing_task.cancel()

        # Wait for it to actually finish (with timeout)
        try:
            await asyncio.wait_for(existing_task, timeout=2.0)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            pass # Expected

        print(f"Previous task cancelled for session {session_id}")

    in_progress[session_id] = True

    print('b')
    if os.environ.get("MR_MAX_ITERATIONS") is not None:
        max_iterations = int(os.environ.get("MR_MAX_ITERATIONS"))
    try:
        if type(message) is list:
            message = [m.dict() for m in message]

        if session_id is None or session_id == "" or message is None or message == "":
            print("Invalid session_id or message")
            return []

        # Create the main processing task and store it
        processing_task = asyncio.current_task()
        active_tasks[session_id] = processing_task

        print("send_message_to_agent: ", session_id, message, max_iterations)
        if context is None:
            context = ChatContext(command_manager, service_manager, user)
            await context.load_context(session_id)

        agent_ = agent.Agent(agent=context.agent)
        # Copy user fields (e.g. auth info) onto the context data.
        if user is not None and hasattr(user, "keys"):
            for key in user.keys():
                context.data[key] = user[key]

        context.data['finished_conversation'] = False

        # Let pipeline plugins pre-process / rewrite the incoming message.
        tmp_data = { "message": message }
        tmp_data = await pipeline_manager.pre_process_msg(tmp_data, context=context)
        message = tmp_data['message']

        termcolor.cprint("Final message: " + str(message), "yellow")
        if type(message) is str:
            #context.chat_log.add_message({"role": "user", "content": [{"type": "text", "text": message}]})
            context.chat_log.add_message({"role": "user", "content": message })
        else:
            # Multi-part message: convert image parts and de-duplicate uploads.
            new_parts = []
            has_image = False
            for part in message:
                if part['type'] == 'image':
                    has_image = True
                    img = dataurl_to_pil(part['data'])
                    img_msg = await context.format_image_message(img)
                    new_parts.append(img_msg)
                elif part['type'] == 'text' and '[UPLOADED FILE]' in part['text']:
                    # Ensure we don't duplicate file entries
                    if not any('[UPLOADED FILE]' in p.get('text', '') for p in new_parts):
                        new_parts.append(part)
                else:
                    new_parts.append(part)
            msg_to_add= {"role": "user", "content": new_parts }
            has_image = has_image or str(msg_to_add).find("image") > -1
            context.chat_log.add_message(msg_to_add)

        await context.save_context()

        continue_processing = True
        iterations = 0
        results = []
        full_results = []

        invalid = "ERROR, invalid response format."

        consecutive_parse_errors = 0

        # Main agent loop: each iteration sends the log to the model, runs the
        # returned commands, and feeds results back until the agent stops.
        while continue_processing and iterations < max_iterations:
            iterations += 1
            continue_processing = False
            try:
                if context.current_model is None:
                    if 'llm' in context.data:
                        context.current_model = context.data['llm']

                parse_error = False
                # NOTE(review): when MR_MAX_TOKENS is set this is a str, but the
                # default is int 4000 — confirm chat_commands accepts both.
                max_tokens = os.environ.get("MR_MAX_TOKENS", 4000)
                results, full_cmds = await agent_.chat_commands(context.current_model, context, messages=context.chat_log.get_recent(), max_tokens=max_tokens)
                if results is not None:
                    try:
                        for result in results:
                            if result['cmd'] == 'UNKNOWN':
                                consecutive_parse_errors += 1
                                parse_error = True
                    except Exception as e:
                        pass

                    if not parse_error:
                        consecutive_parse_errors = 0
                    else:
                        await asyncio.sleep(1)

                    if consecutive_parse_errors > 6:
                        raise Exception("Too many consecutive parse errors, stopping processing.")

                    elif consecutive_parse_errors > 3:
                        # NOTE(review): the "args" value below is a SET literal,
                        # not a dict — downstream code expecting result['args']
                        # to be a mapping will likely fail on this entry.
                        results.append({"cmd": "UNKNOWN", "args": { "SYSTEM WARNING: Issue valid command list or task; processing will be halted. Simplify output."}})

                    try:
                        tmp_data3 = { "results": full_cmds }
                        tmp_data3 = await pipeline_manager.process_results(tmp_data3, context=context)
                        out_results = tmp_data3['results']
                    except Exception as e:
                        print("Error processing results: ", e)
                        print(traceback.format_exc())

                    for cmd in full_cmds:
                        full_results.append(cmd)
                    # NOTE(review): out_results from the pipeline call above is
                    # discarded here and rebuilt from `results` — confirm the
                    # first process_results call is intentionally fire-and-forget.
                    out_results = []
                    stop_requested= False
                    actual_results = False
                    await asyncio.sleep(0.001)
                    for result in results:
                        if 'result' in result and result['result'] is not None:
                            if result['result'] == 'continue':
                                out_results.append(result)
                                continue_processing = True
                            elif result['result'] == 'stop':
                                continue_processing = False
                                stop_requested = True
                            else:
                                out_results.append(result)
                                # only print up to 200 characters
                                truncated_result = str(result)[:200] + '...'
                                termcolor.cprint("Found result: " + truncated_result, "magenta")
                                actual_results = True
                                continue_processing = True
                        else:
                            continue_processing = False

                    if actual_results and not stop_requested:
                        continue_processing = True

                    if len(out_results) > 0:
                        try:
                            tmp_data2 = { "results": out_results }
                            tmp_data2 = await pipeline_manager.process_results(tmp_data2, context=context)
                            out_results = tmp_data2['results']
                        except Exception as e:
                            print("Error processing results: ", e)
                            print(traceback.format_exc())

                        formatted_results = []
                        st_process = time.time()
                        for result in out_results:
                            process_result(result, formatted_results)
                        print("Time to process results: ", time.time() - st_process)

                        # Feed command results back to the agent as a user turn.
                        context.chat_log.add_message({"role": "user", "content": formatted_results})
                        results.append(out_results)
                    else:
                        print("Processing iteration: ", iterations, "no message added")
                if context.data.get('finished_conversation') is True:
                    termcolor.cprint("Finished conversation, exiting send_message_to_agent", "red")
                    if context.data.get('task_result') is not None:
                        task_result = context.data.get('task_result')
                        full_results.append({ "cmd": "task_result", "args": { "result": task_result } })
                    continue_processing = False
            except Exception as e:
                # Per-iteration failure: surface the error into the chat and stop.
                continue_processing = False
                await asyncio.sleep(1)
                trace = traceback.format_exc()
                msg = str(e)
                descr = msg + "\n\n" + trace
                print(descr)

                print('------')
                print(msg)
                try:
                    # NOTE(review): agent_ is an Agent instance; subscripting it
                    # like a dict may itself raise (caught just below), leaving
                    # `persona` as "System error" — and persona is unused after.
                    persona = agent_['persona']['name']
                except Exception as e:
                    persona = "System error"
                context.chat_log.add_message({"role": "user", "content": msg })
                await context.agent_output("system_error", { "error": msg })


        await asyncio.sleep(0.001)
        print("Exiting send_message_to_agent: ", session_id, message, max_iterations)

        await context.finished_chat()
        in_progress.pop(session_id, None)
        active_tasks.pop(session_id, None)
        if len(results) == 0:
            if context.data.get('task_result') is not None:
                task_result = context.data.get('task_result')
                results.append(task_result)
        return [results, full_results]
    except asyncio.CancelledError:
        print(f"Task cancelled for session {session_id}")
        in_progress.pop(session_id, None)
        active_tasks.pop(session_id, None)
        raise # Re-raise to properly handle cancellation
    except Exception as e:
        print("Error in send_message_to_agent: ", e)
        print(traceback.format_exc())
        in_progress.pop(session_id, None)
        return []
481
+
482
@pipe(name='process_results', priority=5)
def add_current_time(data: dict, context=None) -> dict:
    """Pass-through results pipe (placeholder).

    NOTE(review): despite its name, this pipe does not add any timestamp — the
    original body only self-assigned data['results'] (a no-op, removed here).
    Kept registered so pipeline ordering and the public name are unchanged.
    """
    return data
486
+
487
+
488
@service()
async def finished_chat(context=None):
    """Emit a 'finished_chat' event so the frontend knows the turn is over."""
    persona_name = context.agent['persona']['name']
    await context.agent_output("finished_chat", { "persona": persona_name })
491
+
492
@hook()
async def quit(context=None):
    """Shut down the chat service: notify every SSE client, then clear state.

    Returns a status dict after giving clients a moment to receive the
    close event.
    """
    print("Chat service is quitting..")
    # Close all existing SSE connections
    for session_id, queues in sse_clients.items():
        for queue in queues.copy(): # Use copy to avoid modification during iteration
            try:
                await queue.put({'event': 'close', 'data': 'Server shutting down'})
            except Exception:
                # Best-effort notification; a dead queue must not block shutdown.
                # BUGFIX: was a bare `except:` that would also swallow
                # KeyboardInterrupt/SystemExit.
                pass

    # Clear the global sse_clients
    sse_clients.clear()

    # Give clients a moment to receive the close message
    await asyncio.sleep(1)
    print("Chat service finished.")
    return {"status": "shutdown_complete"}
510
+
511
@service()
async def subscribe_to_agent_messages(session_id: str, context=None):
    """Return an async generator that yields SSE events queued for session_id.

    Each subscriber gets its own asyncio.Queue registered under the session;
    the queue is unregistered when the generator is cancelled.
    """
    async def event_generator():
        queue = asyncio.Queue()
        # Register this subscriber's queue under the session.
        sse_clients.setdefault(session_id, set()).add(queue)
        try:
            while True:
                data = await queue.get()
                await asyncio.sleep(0.001)
                print('.', end='', flush=True)
                yield data
        except asyncio.CancelledError:
            # BUGFIX: use get()/discard() so cleanup cannot raise KeyError if
            # quit() or close_chat_session() already cleared this session.
            queues = sse_clients.get(session_id)
            if queues is not None:
                queues.discard(queue)
                if not queues:
                    del sse_clients[session_id]
    return event_generator()
529
+
530
@service()
async def close_chat_session(session_id: str, context=None):
    """Drop all SSE subscriber queues registered for a session."""
    # pop() with a default is a no-op when the session has no subscribers.
    sse_clients.pop(session_id, None)
    # Any additional cleanup needed
535
+
536
@service()
async def agent_output(event: str, data: dict, context=None):
    """Broadcast an event to every SSE subscriber of the current session.

    The session is taken from context.log_id; missing sessions are a no-op.
    """
    for queue in sse_clients.get(context.log_id, ()):
        await queue.put({"event": event, "data": json.dumps(data)})
542
+
543
@service()
async def append_message(role: str, content, context=None):
    """Append a message to the session's chat log.

    BUGFIX/consistency: ChatLog.add_message is called synchronously everywhere
    else in this module; awaiting its non-awaitable return value raised a
    TypeError. NOTE(review): confirm ChatLog.add_message is indeed synchronous.
    """
    context.chat_log.add_message({"role": role, "content": content})
546
+
547
@service()
async def partial_command(command: str, chunk: str, params, context=None):
    """Stream a partially-parsed command chunk to the frontend."""
    persona_name = context.agent['persona']['name']
    await context.agent_output("partial_command", {
        "command": command,
        "chunk": chunk,
        "params": params,
        "persona": persona_name,
    })
552
+
553
@service()
async def running_command(command: str, args, context=None):
    """Notify the frontend that a command has started executing."""
    persona_name = context.agent['persona']['name']
    await context.agent_output("running_command", {
        "command": command,
        "args": args,
        "persona": persona_name,
    })
557
+
558
@service()
async def command_result(command: str, result, context=None):
    """Send a completed command's result to the frontend."""
    persona_name = context.agent['persona']['name']
    await context.agent_output("command_result", {
        "command": command,
        "result": result,
        "persona": persona_name,
    })
562
+
563
@service()
async def backend_user_message(message: str, context=None):
    """
    Signal the frontend to display a user message.

    The message is always attributed to the 'user' persona; the unused
    `agent_ = context.agent` local from the original was removed.
    """
    await context.agent_output("backend_user_message", {
        "content": message,
        "sender": "user",
        "persona": "user"
    })
575
+
576
@service()
async def cancel_active_response(log_id: str, context=None):
    """
    Cancel active AI response for eager end of turn processing.

    Sets the cancel_current_turn flag (stopping only this turn, not the whole
    conversation), stops any active TTS streams, and cancels any in-flight
    command task (like speak()).

    Returns a status dict: {"status": "cancelled"|"error", ...}.
    """
    # BUGFIX: `logger` was referenced below but never defined, raising a
    # NameError on the TTS-cancellation path. Local import keeps the module's
    # top-level imports untouched.
    import logging
    logger = logging.getLogger(__name__)

    if context is None:
        # Get context from log_id - we need the username, but for SIP calls it might be 'system'
        # Try to load context, fallback to system user if needed
        try:
            context = await get_context(log_id, 'system')
        except Exception as e:
            print(f"Error getting context for cancellation: {e}")
            return {"status": "error", "message": f"Could not load context: {e}"}

    # Set flag to stop current processing loop iteration
    # But don't permanently mark conversation as finished - just this turn
    context.data['cancel_current_turn'] = True

    # DEBUG TRACE
    print("\033[91;107m[DEBUG TRACE 5/6] Core cancel_active_response service executed.\033[0m")

    # Cancel any active TTS streams (ElevenLabs)
    try:
        # Import here to avoid circular dependency
        from mr_eleven_stream.mod import _active_tts_streams
        for stream_id, stop_event in list(_active_tts_streams.items()):
            stop_event.set()
            logger.info(f"Cancelled TTS stream {stream_id}")
        print("\033[91;107m[DEBUG TRACE 5.5/6] Cancelled active TTS stream.\033[0m")
    except ImportError:
        logger.debug("ElevenLabs TTS plugin not available for cancellation")

    # Also, cancel any active command task (like speak())
    if 'active_command_task' in context.data:
        active_task = context.data['active_command_task']
        if active_task and not active_task.done():
            try:
                active_task.cancel()
                # DEBUG TRACE
                print("\033[91;107m[DEBUG TRACE 6/6] Active command task found and cancelled.\033[0m")
                print(f"Cancelled active command task for session {log_id}")
            except Exception as e:
                print(f"Error cancelling active command task: {e}")

    await context.save_context()

    print(f"Cancelled active response for session {log_id}")
    return {"status": "cancelled", "log_id": log_id}
625
+
@@ -1,6 +1,7 @@
1
1
  from lib.providers.commands import command, command_manager
2
2
  from lib.providers.services import service_manager
3
3
  from lib.chatcontext import ChatContext
4
+ from lib.chatlog import ChatLog
4
5
  from .services import init_chat_session, send_message_to_agent, subscribe_to_agent_messages
5
6
  import asyncio
6
7
  import json
@@ -150,6 +151,9 @@ async def delegate_task(instructions: str, agent_name, log_id=None, retries=3, c
150
151
  (text, full_results, xx) = await service_manager.run_task(instructions, user=context.username, log_id=log_id,
151
152
  parent_log_id = context.log_id,
152
153
  llm=llm, agent_name=agent_name, retries=retries, context=None)
154
+ if text is None or text == "" or text == [] or text == '[]':
155
+ chatlog = ChatLog(log_id=context.log_id, user=context.username, agent=agent_name)
156
+ text = json.dumps(chatlog.messages)
153
157
  return f"""<a href="/session/{agent_name}/{log_id}" target="_blank">Task completed with log ID: {log_id}</a>\nResults:\n\n{text}"""
154
158
 
155
159