mindroot 10.6.0__py3-none-any.whl → 10.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindroot might be problematic. Click here for more details.

@@ -143,9 +143,14 @@ class Agent:
143
143
 
144
144
  async def handle_cmds(self, cmd_name, cmd_args, json_cmd=None, context=None):
145
145
  # Check both permanent finish and temporary cancellation
146
- if context.data.get('finished_conversation') or context.data.get('cancel_current_turn'):
147
- logger.warning("Conversation is finished, not executing command")
148
- print("\033[91mConversation is finished, not executing command\033[0m")
146
+ if context.data.get('cancel_current_turn'):
147
+ logger.warning("Turn cancelled, not executing command")
148
+ print("\033[91mTurn cancelled, not executing command\033[0m")
149
+ raise asyncio.CancelledError("Turn cancelled")
150
+
151
+ if context.data.get('finished_conversation'):
152
+ logger.warning("Conversation finished, not executing command")
153
+ print("\033[91mConversation finished, not executing command\033[0m")
149
154
  return None
150
155
 
151
156
  logger.info("Command execution: {command}", command=cmd_name)
@@ -298,19 +303,23 @@ class Agent:
298
303
 
299
304
  # Check for cancellation (either permanent or current turn)
300
305
  if context.data.get('finished_conversation') or context.data.get('cancel_current_turn'):
301
- # Clear the temporary cancel flag so next turn can proceed
302
- if 'cancel_current_turn' in context.data:
303
- del context.data['cancel_current_turn']
304
306
  logger.warning("Conversation is finished or halted, exiting stream parsing")
305
307
  debug_box(f"""Conversation is finished or halted, exiting stream""")
306
308
  debug_box(str(context))
307
- # stream is actually a generator
309
+
310
+ # Add partial command to chat log if present
308
311
  if partial_cmd is not None:
309
312
  cmd_name = next(iter(partial_cmd))
310
313
  if cmd_name in ["say", "json_encoded_md", "think"]:
311
314
  context.chat_log.add_message({"role": "assistant", "content": str(partial_cmd[cmd_name])})
312
315
  else:
313
316
  context.chat_log.add_message({"role": "assistant", "content": str(partial_cmd) + "(Interrupted)"})
317
+
318
+ # Clear the temporary cancel flag so next turn can proceed
319
+ if 'cancel_current_turn' in context.data:
320
+ del context.data['cancel_current_turn']
321
+ await context.save_context()
322
+
314
323
  try:
315
324
  stream.close()
316
325
  except Exception as e:
@@ -339,8 +348,6 @@ class Agent:
339
348
  cmd_args = cmd[cmd_name]
340
349
  logger.debug(f"Processing command: {cmd}")
341
350
  await context.partial_command(cmd_name, json.dumps(cmd_args), cmd_args)
342
-
343
- self.handle_cmds(cmd_name, cmd_args, json_cmd=json.dumps(cmd), context=context)
344
351
 
345
352
  cmd_task = asyncio.create_task(
346
353
  self.handle_cmds(cmd_name, cmd_args, json_cmd=json.dumps(cmd), context=context)
@@ -348,6 +355,8 @@ class Agent:
348
355
  context.data['active_command_task'] = cmd_task
349
356
  try:
350
357
  result = await cmd_task
358
+ except asyncio.CancelledError:
359
+ raise # Propagate cancellation up
351
360
  finally:
352
361
  # Clear the task from context once it's done or cancelled
353
362
  if context.data.get('active_command_task') == cmd_task:
@@ -358,10 +367,10 @@ class Agent:
358
367
  sys_header = ""
359
368
 
360
369
  if result == "SYSTEM: WARNING - Command interrupted!\n\n":
361
- logger.warning("Command was interrupted. Skipping any extra commands in list.")
370
+ logger.warning("Command was interrupted. Stopping processing.")
362
371
  await context.chat_log.drop_last('assistant')
363
- return results, full_cmds
364
372
  break
373
+ return results, full_cmds
365
374
 
366
375
 
367
376
  full_cmds.append({ "SYSTEM": sys_header, "cmd": cmd_name, "args": cmd_args, "result": result})
@@ -507,14 +516,20 @@ class Agent:
507
516
  if 'service_models' in context.agent and context.agent['service_models'] is not None:
508
517
  if context.agent['service_models'].get('stream_chat', None) is None:
509
518
  model = os.environ.get("DEFAULT_LLM_MODEL")
510
-
519
+
520
+ # we need to be able to abort this task if necessary
511
521
  stream = await context.stream_chat(model,
512
522
  temperature=temperature,
513
523
  max_tokens=max_tokens,
514
524
  messages=new_messages,
515
525
  context=context)
516
-
517
- ret, full_cmds = await self.parse_cmd_stream(stream, context)
526
+
527
+ try:
528
+ ret, full_cmds = await self.parse_cmd_stream(stream, context)
529
+ except asyncio.CancelledError:
530
+ logger.info("Command stream parsing cancelled")
531
+ raise # Propagate cancellation
532
+
518
533
  logger.debug("System message was:")
519
534
  logger.debug(await self.render_system_msg())
520
535
 
@@ -0,0 +1,540 @@
1
+ import asyncio
2
+ import json
3
+ import os
4
+ import re
5
+ import json
6
+ from json import JSONDecodeError
7
+ from jinja2 import Template
8
+ from lib.providers.commands import command_manager, command
9
+ from lib.providers.hooks import hook_manager
10
+ from lib.pipelines.pipe import pipeline_manager
11
+ from lib.providers.services import service
12
+ from lib.providers.services import service_manager
13
+ from lib.json_str_block import replace_raw_blocks
14
+ import sys
15
+ from lib.utils.check_args import *
16
+ from .command_parser import parse_streaming_commands, invalid_start_format
17
+ from datetime import datetime
18
+ import pytz
19
+ import traceback
20
+ from lib.logging.logfiles import logger
21
+ from lib.utils.debug import debug_box
22
+ from .init_models import *
23
+ from lib.chatcontext import ChatContext
24
+ from .cmd_start_example import *
25
+ from lib.templates import render
26
+
27
+
28
# Canned system feedback returned to the model whenever its streamed response
# cannot be parsed as a JSON command list. This text is sent back verbatim as
# a command result at runtime, so its exact wording (including quirks) is part
# of program behavior — do not edit casually.
error_result = """
[SYSTEM]: ERROR, invalid response format.

Your response does not appear to adhere to the command list format.

Common causes:

- replied with JSON inside of fenced code blocks instead of JSON or RAW string format as below

- ONLY if your model supports this, for complex multiline string arguments, use the RAW format described in system instructions, e.g.:

...

{ "json_encoded_md": { "markdown": START_RAW
The moon, so bright
It's shining light
Like a pizza pie
In the sky
END_RAW
} }

...

- iF your model does not support RAW format or it is not a complex multiline string like code, you MUST properly escape JSON strings!
- remember newlines, double quotes, etc. must be escaped (but not double escaped)!

- plain text response before JSON.

- some JSON args with unescaped newlines, etc.

- multiple command lists. Only one command list response is allowed!
- This is a frequent cause of parse errors.

- some characters escaped that did not need to be/invalid

Please adhere to the system JSON command list response format carefully.
"""
65
+
66
@service()
async def get_agent_data(agent_name, context=None):
    """Load an agent definition from disk and resolve its persona.

    Looks in ``data/agents/local/<agent_name>`` first, then falls back to
    ``data/agents/shared/<agent_name>``.

    Args:
        agent_name: directory name of the agent.
        context: unused; accepted for service-call compatibility.

    Returns:
        dict: parsed agent data with ``required_plugins`` guaranteed present,
        ``persona`` replaced by resolved persona data, and ``flags``
        de-duplicated with order preserved. ``{}`` when the agent directory
        or its ``agent.json`` does not exist.

    Raises:
        Exception: re-raised from persona resolution failures.
    """
    logger.info("Agent name: {agent_name}", agent_name=agent_name)

    agent_path = os.path.join('data/agents', 'local', agent_name)

    if not os.path.exists(agent_path):
        agent_path = os.path.join('data/agents', 'shared', agent_name)
        if not os.path.exists(agent_path):
            return {}
    agent_file = os.path.join(agent_path, 'agent.json')
    if not os.path.exists(agent_file):
        return {}
    with open(agent_file, 'r') as f:
        agent_data = json.load(f)

    # Ensure required_plugins is present
    if 'required_plugins' not in agent_data:
        agent_data['required_plugins'] = []

    try:
        agent_data["persona"] = await service_manager.get_persona_data(agent_data["persona"])
    except Exception as e:
        logger.error("Error getting persona data", extra={"error": str(e)})
        raise e

    # FIX: removed the redundant self-assignment `agent_data["flags"] =
    # agent_data["flags"]` and made the de-duplication tolerate a missing
    # 'flags' key instead of raising KeyError. dict.fromkeys preserves order.
    agent_data["flags"] = list(dict.fromkeys(agent_data.get("flags", [])))
    return agent_data
95
+
96
+
97
+
98
def find_new_substring(s1, s2):
    """Return *s2* with the first occurrence of *s1* removed.

    When *s1* does not occur in *s2*, *s2* is returned unchanged.
    """
    return s2.replace(s1, '', 1) if s1 in s2 else s2
102
+
103
+ class Agent:
104
+
105
+ def __init__(self, model=None, sys_core_template=None, agent=None, clear_model=False, commands=[], context=None):
106
+ if model is None:
107
+ if os.environ.get('AH_DEFAULT_LLM'):
108
+ self.model = os.environ.get('AH_DEFAULT_LLM')
109
+ else:
110
+ self.model = 'llama3'
111
+ else:
112
+ self.model = model
113
+
114
+ self.agent = agent
115
+
116
+ #if sys_core_template is None:
117
+ # system_template_path = os.path.join(os.path.dirname(__file__), "system.j2")
118
+ # with open(system_template_path, "r") as f:
119
+ # self.sys_core_template = f.read()
120
+ #else:
121
+ # self.sys_core_template = sys_core_template
122
+
123
+ #self.sys_template = Template(self.sys_core_template)
124
+
125
+ self.cmd_handler = {}
126
+ self.context = context
127
+
128
+ #if clear_model:
129
+ # logger.debug("Unloading model")
130
+ # asyncio.create_task(use_ollama.unload(self.model))
131
+
132
    def use_model(self, model_id, local=True):
        # Record the model id for subsequent use. `local` is currently unused
        # — presumably distinguishes local vs hosted models; TODO confirm.
        self.current_model = model_id

    async def set_cmd_handler(self, cmd_name, callback):
        # Register a callback invoked for the named command.
        self.cmd_handler[cmd_name] = callback
        logger.info("Recorded handler for command: {command}", command=cmd_name)

    async def unload_llm_if_needed(self):
        # Intentionally a no-op: model unloading is disabled (see commented
        # ollama calls below, kept for reference).
        logger.info("Not unloading LLM")
        #await use_ollama.unload(self.model)
        #await asyncio.sleep(1)
143
+
144
    async def handle_cmds(self, cmd_name, cmd_args, json_cmd=None, context=None):
        """Execute one parsed command through the command_manager.

        Args:
            cmd_name: name of the command to run.
            cmd_args: scalar, list, or dict of arguments (shape decides how
                the command is invoked).
            json_cmd: original JSON text of the command; logged into the chat
                log verbatim.
            context: chat context; its data flags gate execution.

        Returns:
            The command's result, None when execution is skipped (finished /
            cancelled conversation, or a pure 'reasoning' command), or
            {"error": str} on failure.
        """
        # Check both permanent finish and temporary cancellation
        if context.data.get('finished_conversation') or context.data.get('cancel_current_turn'):
            logger.warning("Conversation is finished, not executing command")
            print("\033[91mConversation is finished, not executing command\033[0m")
            return None

        logger.info("Command execution: {command}", command=cmd_name)
        logger.debug("Command details: {details}", details={
            "command": cmd_name,
            "arguments": cmd_args,
            "context": str(context)
        })
        # Record the raw command JSON as an assistant turn before running it.
        context.chat_log.add_message({"role": "assistant", "content": [{"type": "text",
            "text": '['+json_cmd+']' }]})
        command_manager.context = context

        # 'reasoning' is logged above but never executed.
        if cmd_name == "reasoning":
            return None

        # cmd_args might be a single arg like integer or string, or it may be an array, or an object/dict with named args
        try:
            if isinstance(cmd_args, list):
                #filter out empty strings
                cmd_args = [x for x in cmd_args if x != '']
                logger.debug("Executing command with list arguments", extra={"step": 1})
                await context.running_command(cmd_name, cmd_args)
                logger.debug("Executing command with list arguments", extra={"step": 2})
                return await command_manager.execute(cmd_name, *cmd_args)
            elif isinstance(cmd_args, dict):
                logger.debug("Executing command with dict arguments", extra={"step": 1})
                await context.running_command(cmd_name, cmd_args)
                logger.debug("Executing command with dict arguments", extra={"step": 2})
                return await command_manager.execute(cmd_name, **cmd_args)
            else:
                logger.debug("Executing command with single argument", extra={"step": 1})
                await context.running_command(cmd_name, cmd_args)
                logger.debug("Executing command with single argument", extra={"step": 2})
                return await command_manager.execute(cmd_name, cmd_args)

        except Exception as e:
            # Errors are reported back as a result dict rather than raised, so
            # one failing command does not abort the whole turn.
            trace = traceback.format_exc()
            print("\033[96mError in handle_cmds: " + str(e) + "\033[0m")
            print("\033[96m" + trace + "\033[0m")
            logger.error("Error in handle_cmds", extra={
                "error": str(e),
                "command": cmd_name,
                "arguments": cmd_args,
                "traceback": trace
            })

            return {"error": str(e)}
196
+
197
+ def remove_braces(self, buffer):
198
+ if buffer.endswith("\n"):
199
+ buffer = buffer[:-1]
200
+ if buffer.startswith('[ '):
201
+ buffer = buffer[2:]
202
+ if buffer.startswith(' ['):
203
+ buffer = buffer[2:]
204
+ if buffer.endswith(','):
205
+ buffer = buffer[:-1]
206
+ if buffer.endswith(']'):
207
+ buffer = buffer[:-1]
208
+ if buffer.startswith('['):
209
+ buffer = buffer[1:]
210
+ if buffer.endswith('},'):
211
+ buffer = buffer[:-1]
212
+ return buffer
213
+
214
+ async def parse_single_cmd(self, json_str, context, buffer, match=None):
215
+ cmd_name = '?'
216
+ try:
217
+ cmd_obj = json.loads(json_str)
218
+ cmd_name = next(iter(cmd_obj))
219
+ if isinstance(cmd_obj, list):
220
+ cmd_obj = cmd_obj[0]
221
+ cmd_name = next(iter(cmd_obj))
222
+
223
+ cmd_args = cmd_obj[cmd_name]
224
+ # make sure that cmd_name is in self.agent["commands"]
225
+ if cmd_name not in self.agent["commands"]:
226
+ logger.warning("Command not found in agent commands", extra={"command": cmd_name})
227
+ return None, buffer
228
+ if check_empty_args(cmd_args):
229
+ logger.info("Empty arguments for command", extra={"command": cmd_name})
230
+ return None, buffer
231
+ else:
232
+ logger.info("Non-empty arguments for command", extra={"command": cmd_name, "arguments": cmd_args})
233
+ # Handle the full command
234
+ result = await self.handle_cmds(cmd_name, cmd_args, json_cmd=json_str, context=context)
235
+ await context.command_result(cmd_name, result)
236
+
237
+ cmd = {"cmd": cmd_name, "result": result}
238
+ # Remove the processed JSON object from the buffer
239
+ if match is not None:
240
+ buffer = buffer[match.end():]
241
+ buffer = buffer.lstrip(',').rstrip(',')
242
+ return [cmd], buffer
243
+ except Exception as e:
244
+ trace = traceback.format_exc()
245
+ logger.error("Error processing command", extra={"error": str(e) + "\n\n" + trace})
246
+
247
+ json_str = '[' + json_str + ']'
248
+
249
+ return None, buffer
250
+
251
+
252
+ async def parse_cmd_stream(self, stream, context):
253
+ buffer = ""
254
+ results = []
255
+ full_cmds = []
256
+
257
+ num_processed = 0
258
+ parse_failed = False
259
+ debug_box("Parsing command stream")
260
+ debug_box(str(context))
261
+ original_buffer = ""
262
+
263
+ async for part in stream:
264
+ buffer += part
265
+ original_buffer += part
266
+
267
+ logger.debug(f"Current buffer: ||{buffer}||")
268
+
269
+ if invalid_start_format(buffer):
270
+ print("Found invalid start to buffer", buffer)
271
+ context.chat_log.add_message({"role": "assistant", "content": buffer})
272
+ started_with = f"Your invalid command started with: {buffer[0:20]}"
273
+ results.append({"cmd": "UNKNOWN", "args": { "invalid": "(" }, "result": error_result + "\n\n" + started_with})
274
+ return results, full_cmds
275
+
276
+ if len(buffer) > 0 and buffer[0] == '{':
277
+ buffer = "[" + buffer
278
+
279
+ # happened with Qwen 3 for some reason
280
+ buffer = buffer.replace('}] <>\n\n[{','}, {')
281
+ buffer = buffer.replace('}] <>\n[{','}, {')
282
+
283
+ commands, partial_cmd = parse_streaming_commands(buffer)
284
+
285
+ if isinstance(commands, int):
286
+ continue
287
+
288
+ if not isinstance(commands, list):
289
+ commands = [commands]
290
+
291
+ try:
292
+ if len(commands) == 1 and 'commands' in commands[0]:
293
+ commands = commands[0]['commands']
294
+ except Exception as e:
295
+ continue
296
+
297
+ logger.debug(f"commands: {commands}, partial_cmd: {partial_cmd}")
298
+
299
+ # Check for cancellation (either permanent or current turn)
300
+ if context.data.get('finished_conversation') or context.data.get('cancel_current_turn'):
301
+ # Clear the temporary cancel flag so next turn can proceed
302
+ if 'cancel_current_turn' in context.data:
303
+ del context.data['cancel_current_turn']
304
+ logger.warning("Conversation is finished or halted, exiting stream parsing")
305
+ debug_box(f"""Conversation is finished or halted, exiting stream""")
306
+ debug_box(str(context))
307
+ # stream is actually a generator
308
+ if partial_cmd is not None:
309
+ cmd_name = next(iter(partial_cmd))
310
+ if cmd_name in ["say", "json_encoded_md", "think"]:
311
+ context.chat_log.add_message({"role": "assistant", "content": str(partial_cmd[cmd_name])})
312
+ else:
313
+ context.chat_log.add_message({"role": "assistant", "content": str(partial_cmd) + "(Interrupted)"})
314
+ try:
315
+ stream.close()
316
+ except Exception as e:
317
+ print("\033[91mError closing stream\033[0m")
318
+
319
+ return results, full_cmds
320
+
321
+
322
+ if len(commands) > num_processed:
323
+ logger.debug("New command(s) found")
324
+ logger.debug(f"Commands: {commands}")
325
+ for i in range(num_processed, len(commands)):
326
+ try:
327
+ cmd = commands[i]
328
+ try:
329
+ cmd_name = next(iter(cmd))
330
+ except Exception as e:
331
+ print("next iter failed. cmd is")
332
+ print(cmd)
333
+ break
334
+ if isinstance(cmd, str):
335
+ print("\033[91m" + "Invalid command format, expected object, trying to parse anyway" + "\033[0m")
336
+ print("\033[91m" + str(cmd) + "\033[0m")
337
+ cmd = json.loads(cmd)
338
+ cmd_name = next(iter(cmd))
339
+ cmd_args = cmd[cmd_name]
340
+ logger.debug(f"Processing command: {cmd}")
341
+ await context.partial_command(cmd_name, json.dumps(cmd_args), cmd_args)
342
+
343
+ self.handle_cmds(cmd_name, cmd_args, json_cmd=json.dumps(cmd), context=context)
344
+
345
+ cmd_task = asyncio.create_task(
346
+ self.handle_cmds(cmd_name, cmd_args, json_cmd=json.dumps(cmd), context=context)
347
+ )
348
+ context.data['active_command_task'] = cmd_task
349
+ try:
350
+ result = await cmd_task
351
+ finally:
352
+ # Clear the task from context once it's done or cancelled
353
+ if context.data.get('active_command_task') == cmd_task:
354
+ del context.data['active_command_task']
355
+
356
+ await context.command_result(cmd_name, result)
357
+ sys_header = "Note: tool command results follow, not user replies"
358
+ sys_header = ""
359
+
360
+ if result == "SYSTEM: WARNING - Command interrupted!\n\n":
361
+ logger.warning("Command was interrupted. Skipping any extra commands in list.")
362
+ await context.chat_log.drop_last('assistant')
363
+ return results, full_cmds
364
+ break
365
+
366
+
367
+ full_cmds.append({ "SYSTEM": sys_header, "cmd": cmd_name, "args": cmd_args, "result": result})
368
+ if result is not None:
369
+ results.append({"SYSTEM": sys_header, "cmd": cmd_name, "args": { "omitted": "(see command msg.)"}, "result": result})
370
+
371
+ num_processed = len(commands)
372
+ except Exception as e:
373
+ trace = traceback.format_exc()
374
+ logger.error(f"Error processing command: {e} \n{trace}")
375
+ logger.error(str(e))
376
+ pass
377
+ else:
378
+ logger.debug("No new commands found")
379
+ # sometimes partial_cmd is actually a string for some reason
380
+ # definitely skip that
381
+ # check if partial_cmd is a string
382
+ is_string = isinstance(partial_cmd, str)
383
+ if partial_cmd is not None and partial_cmd != {} and not is_string:
384
+ logger.debug(f"Partial command {partial_cmd}")
385
+ try:
386
+ cmd_name = next(iter(partial_cmd))
387
+ cmd_args = partial_cmd[cmd_name]
388
+ logger.debug(f"Partial command detected: {partial_cmd}")
389
+ await context.partial_command(cmd_name, json.dumps(cmd_args), cmd_args)
390
+ except Exception as de:
391
+ logger.error("Failed to parse partial command")
392
+ logger.error(str(de))
393
+ pass
394
+
395
+ #print("\033[92m" + str(full_cmds) + "\033[0m")
396
+ # getting false positive on this check
397
+ reasonOnly = False
398
+ try:
399
+ cmd_name = next(iter(full_cmds[0]))
400
+ if cmd_name == 'reasoning':
401
+ reasonOnly = True
402
+ for cmd in full_cmds:
403
+ if cmd_name != 'reasoning':
404
+ reasonOnly = False
405
+ break
406
+ except Exception as e:
407
+ pass
408
+ if len(full_cmds) == 0 or reasonOnly:
409
+ print("\033[91m" + "No results and parse failed" + "\033[0m")
410
+ try:
411
+ buffer = replace_raw_blocks(buffer)
412
+ parse_ok = json.loads(buffer)
413
+ parse_fail_reason = ""
414
+ tried_to_parse = ""
415
+ except JSONDecodeError as e:
416
+ print("final parse fail")
417
+ print(buffer)
418
+ parse_fail_reason = str(e)
419
+ context.chat_log.add_message({"role": "assistant", "content": buffer})
420
+ print(parse_fail_reason)
421
+ await asyncio.sleep(1)
422
+ tried_to_parse = f"\n\nTried to parse the following input: {original_buffer}"
423
+ results.append({"cmd": "UNKNOWN", "args": { "invalid": "("}, "result": error_result + '\n\nJSON parse error was: ' + parse_fail_reason +
424
+ tried_to_parse })
425
+
426
+ return results, full_cmds
427
+
428
+ async def render_system_msg(self):
429
+ logger.debug("Docstrings:")
430
+ logger.debug(command_manager.get_some_docstrings(self.agent["commands"]))
431
+ now = datetime.now()
432
+
433
+ formatted_time = now.strftime("~ %Y-%m-%d %I %p %Z%z")
434
+
435
+ data = {
436
+ "command_docs": command_manager.get_some_docstrings(self.agent["commands"]),
437
+ "agent": self.agent,
438
+ "persona": self.agent['persona'],
439
+ "formatted_datetime": formatted_time,
440
+ "context_data": self.context.data
441
+ }
442
+ # is say in the command_manager
443
+ if 'say' in command_manager.functions.keys():
444
+ print("I found say! in the functions!")
445
+ else:
446
+ print("Say is not in the functions!")
447
+ if 'say' in data['command_docs'].keys():
448
+ print("I found say in the command docs!")
449
+
450
+ # we need to be doubly sure to remove anything from command_docs that is not in command_manager.functions.keys()
451
+ for cmd in data['command_docs']:
452
+ if cmd not in command_manager.functions.keys():
453
+ print("Removing " + cmd + " from command_docs")
454
+ del data['command_docs'][cmd]
455
+
456
+ #self.system_message = self.sys_template.render(data)
457
+ self.system_message = await render('system', data)
458
+
459
+ additional_instructions = await hook_manager.add_instructions(self.context)
460
+
461
+ for instruction in additional_instructions:
462
+ self.system_message += instruction + "\n\n"
463
+
464
+ return self.system_message
465
+
466
+
467
    async def chat_commands(self, model, context,
                            temperature=0, max_tokens=4000, messages=[]):
        """Run one LLM turn: build the system prompt, stream the reply, and
        execute any commands found in the stream.

        Args:
            model: model id; when None, may be resolved from the agent's
                service_models or the DEFAULT_LLM_MODEL env var.
            context: chat context supplying stream_chat, chat_log, and agent.
            temperature: sampling temperature passed to stream_chat.
            max_tokens: token cap; overridden by AH_DEFAULT_MAX_TOKENS env var
                and then by the agent's own max_tokens setting, in that order.
            messages: prior conversation messages (list reassigned, not
                mutated, so the shared-default-[] is harmless here).

        Returns:
            (results, full_cmds) from parse_cmd_stream, or (None, None) when
            the filtered message list no longer starts with a system message.
        """
        self.context = context
        content = [ { "type": "text", "text": await self.render_system_msg() } ]
        messages = [{"role": "system", "content": content }] + demo_boot_msgs() + messages

        #logger.info("Messages for chat", extra={"messages": messages})

        # JSON round-trip produces a deep copy so filtering below cannot
        # mutate the caller's message objects.
        json_messages = json.dumps(messages)
        new_messages = json.loads(json_messages)

        if os.environ.get("AH_DEFAULT_MAX_TOKENS"):
            max_tokens = int(os.environ.get("AH_DEFAULT_MAX_TOKENS"))
        try:
            tmp_data = { "messages": new_messages }
            debug_box("Filtering messages")
            #debug_box(tmp_data)

            tmp_data = await pipeline_manager.filter_messages(tmp_data, context=context)
            new_messages = tmp_data['messages']
        except Exception as e:
            # Filtering is best-effort: on failure the unfiltered copy is used.
            logger.error("Error filtering messages")
            logger.error(str(e))

        if new_messages[0]['role'] != 'system':
            logger.error("First message is not a system message")
            print("\033[91mFirst message is not a system message\033[0m")
            return None, None

        # context.agent may still be a name string; resolve it to a dict.
        if not isinstance(context.agent, dict):
            context.agent = await get_agent_data(context.agent, context=context)

        if 'max_tokens' in context.agent and context.agent['max_tokens'] is not None and context.agent['max_tokens'] != '':
            logger.info(f"Using agent max tokens {max_tokens}")
            max_tokens = context.agent['max_tokens']
        else:
            logger.info(f"Using default max tokens {max_tokens}")

        if model is None:
            if 'service_models' in context.agent and context.agent['service_models'] is not None:
                if context.agent['service_models'].get('stream_chat', None) is None:
                    model = os.environ.get("DEFAULT_LLM_MODEL")

        # we need to be able to abort this task if necessary
        stream = await context.stream_chat(model,
                                           temperature=temperature,
                                           max_tokens=max_tokens,
                                           messages=new_messages,
                                           context=context)

        ret, full_cmds = await self.parse_cmd_stream(stream, context)
        logger.debug("System message was:")
        logger.debug(await self.render_system_msg())

        # use green text
        print("\033[92m" + "Just after stream chat, last two messages in chat log:")
        print("------------------------------------")
        print(context.chat_log.messages[-1])
        print(context.chat_log.messages[-2])
        # switch back to normal text
        print("\033[0m")

        return ret, full_cmds
531
+
532
# Strong references to fire-and-forget command tasks. The event loop keeps
# only a weak reference to tasks, so without this set a task created by
# run_command could be garbage-collected before it finishes (per the
# asyncio.create_task documentation).
_background_tasks = set()


@service()
async def run_command(cmd_name, cmd_args, context=None):
    """Fire-and-forget execution of a single command on a fresh Agent.

    Args:
        cmd_name: name of the command to execute.
        cmd_args: command arguments (scalar, list, or dict).
        context: chat context; required.

    Raises:
        Exception: when no context is provided.
    """
    if context is None:
        raise Exception("run_command: No context provided")

    agent = Agent(agent=context.agent)
    json_cmd = json.dumps({cmd_name: cmd_args})
    # FIX: keep a strong reference to the task so it cannot be
    # garbage-collected mid-flight; drop it automatically when done.
    task = asyncio.create_task(agent.handle_cmds(cmd_name, cmd_args, json_cmd, context=context))
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)
540
+