mindroot 10.2.0__py3-none-any.whl → 10.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindroot might be problematic. Click here for more details.

@@ -142,7 +142,8 @@ class Agent:
142
142
  #await asyncio.sleep(1)
143
143
 
144
144
  async def handle_cmds(self, cmd_name, cmd_args, json_cmd=None, context=None):
145
- if 'finished_conversation' in context.data and context.data['finished_conversation']:
145
+ # Check both permanent finish and temporary cancellation
146
+ if context.data.get('finished_conversation') or context.data.get('cancel_current_turn'):
146
147
  logger.warning("Conversation is finished, not executing command")
147
148
  print("\033[91mConversation is finished, not executing command\033[0m")
148
149
  return None
@@ -295,7 +296,11 @@ class Agent:
295
296
 
296
297
  logger.debug(f"commands: {commands}, partial_cmd: {partial_cmd}")
297
298
 
298
- if 'finished_conversation' in context.data and context.data['finished_conversation']:
299
+ # Check for cancellation (either permanent or current turn)
300
+ if context.data.get('finished_conversation') or context.data.get('cancel_current_turn'):
301
+ # Clear the temporary cancel flag so next turn can proceed
302
+ if 'cancel_current_turn' in context.data:
303
+ del context.data['cancel_current_turn']
299
304
  logger.warning("Conversation is finished or halted, exiting stream parsing")
300
305
  debug_box(f"""Conversation is finished or halted, exiting stream""")
301
306
  debug_box(str(context))
@@ -335,7 +340,18 @@ class Agent:
335
340
  logger.debug(f"Processing command: {cmd}")
336
341
  await context.partial_command(cmd_name, json.dumps(cmd_args), cmd_args)
337
342
 
338
- result = await self.handle_cmds(cmd_name, cmd_args, json_cmd=json.dumps(cmd), context=context)
343
+ # Create a task for the command so it can be cancelled
344
+ cmd_task = asyncio.create_task(
345
+ self.handle_cmds(cmd_name, cmd_args, json_cmd=json.dumps(cmd), context=context)
346
+ )
347
+ context.data['active_command_task'] = cmd_task
348
+ try:
349
+ result = await cmd_task
350
+ finally:
351
+ # Clear the task from context once it's done or cancelled
352
+ if context.data.get('active_command_task') == cmd_task:
353
+ del context.data['active_command_task']
354
+
339
355
  await context.command_result(cmd_name, result)
340
356
  sys_header = "Note: tool command results follow, not user replies"
341
357
  sys_header = ""
@@ -0,0 +1,507 @@
1
+ import asyncio
2
+ import json
3
+ import os
4
+ import re
5
+ import json
6
+ from json import JSONDecodeError
7
+ from jinja2 import Template
8
+ from lib.providers.commands import command_manager, command
9
+ from lib.providers.hooks import hook_manager
10
+ from lib.pipelines.pipe import pipeline_manager
11
+ from lib.providers.services import service
12
+ from lib.providers.services import service_manager
13
+ from lib.json_str_block import replace_raw_blocks
14
+ import sys
15
+ from lib.utils.check_args import *
16
+ from .command_parser import parse_streaming_commands, invalid_start_format
17
+ from datetime import datetime
18
+ import pytz
19
+ import traceback
20
+ from lib.logging.logfiles import logger
21
+ from lib.utils.debug import debug_box
22
+ from .init_models import *
23
+ from lib.chatcontext import ChatContext
24
+ from .cmd_start_example import *
25
+ from lib.templates import render
26
+
27
+
28
# Canonical error text returned to the model when its reply cannot be parsed
# as a JSON command list. Enumerates the most common formatting mistakes.
# Fix: corrected the "iF" typo in the RAW-format escape instruction.
error_result = """
[SYSTEM]: ERROR, invalid response format.

Your response does not appear to adhere to the command list format.

Common causes:

- replied with JSON inside of fenced code blocks instead of JSON or RAW string format as below

- ONLY if your model supports this, for complex multiline string arguments, use the RAW format described in system instructions, e.g.:

...

{ "json_encoded_md": { "markdown": START_RAW
The moon, so bright
It's shining light
Like a pizza pie
In the sky
END_RAW
} }

...

- If your model does not support RAW format or it is not a complex multiline string like code, you MUST properly escape JSON strings!
- remember newlines, double quotes, etc. must be escaped (but not double escaped)!

- plain text response before JSON.

- some JSON args with unescaped newlines, etc.

- multiple command lists. Only one command list response is allowed!
- This is a frequent cause of parse errors.

- some characters escaped that did not need to be/invalid

Please adhere to the system JSON command list response format carefully.
"""
65
+
66
@service()
async def get_agent_data(agent_name, context=None):
    """Load an agent definition from disk.

    Looks under data/agents/local/<agent_name> first and falls back to
    data/agents/shared/<agent_name>. Returns {} when the agent directory or
    its agent.json is missing. The persona name in agent.json is resolved to
    full persona data via the persona service; the flags list is
    de-duplicated with order preserved.
    """
    logger.info("Agent name: {agent_name}", agent_name=agent_name)

    agent_path = os.path.join('data/agents', 'local', agent_name)

    if not os.path.exists(agent_path):
        agent_path = os.path.join('data/agents', 'shared', agent_name)
        if not os.path.exists(agent_path):
            return {}
    agent_file = os.path.join(agent_path, 'agent.json')
    if not os.path.exists(agent_file):
        return {}
    with open(agent_file, 'r') as f:
        agent_data = json.load(f)

    # Ensure required_plugins is present
    if 'required_plugins' not in agent_data:
        agent_data['required_plugins'] = []

    try:
        agent_data["persona"] = await service_manager.get_persona_data(agent_data["persona"])
    except Exception as e:
        logger.error("Error getting persona data", extra={"error": str(e)})
        raise e

    # Fix: tolerate a missing "flags" key (previously a KeyError) and drop
    # the redundant no-op self-assignment. dict.fromkeys de-duplicates while
    # preserving the original order.
    agent_data["flags"] = list(dict.fromkeys(agent_data.get("flags", [])))
    return agent_data
95
+
96
+
97
+
98
def find_new_substring(s1, s2):
    """Return s2 with the first occurrence of s1 removed.

    When s1 does not occur in s2, s2 is returned unchanged.
    """
    if s1 not in s2:
        return s2
    return s2.replace(s1, '', 1)
102
+
103
+ class Agent:
104
+
105
def __init__(self, model=None, sys_core_template=None, agent=None, clear_model=False, commands=None, context=None):
    """Create an Agent.

    model defaults to the AH_DEFAULT_LLM environment variable, falling back
    to 'llama3'. sys_core_template, clear_model and commands are currently
    unused here but retained for call-site compatibility.
    """
    # Fix: mutable default argument ([]) replaced with a None sentinel so the
    # default list is never shared between calls.
    if commands is None:
        commands = []
    if model is None:
        # An empty env value falls back to the default, same as unset.
        self.model = os.environ.get('AH_DEFAULT_LLM') or 'llama3'
    else:
        self.model = model

    self.agent = agent

    # Per-command handler callbacks registered via set_cmd_handler().
    self.cmd_handler = {}
    self.context = context
131
+
132
def use_model(self, model_id, local=True):
    """Select `model_id` as the active model.

    The `local` flag is accepted for API compatibility but not used here.
    """
    self.current_model = model_id
134
+
135
async def set_cmd_handler(self, cmd_name, callback):
    """Register `callback` as the handler invoked for `cmd_name`."""
    self.cmd_handler[cmd_name] = callback
    logger.info("Recorded handler for command: {command}", command=cmd_name)
138
+
139
async def unload_llm_if_needed(self):
    """Compatibility no-op: model unloading is currently disabled, so this only logs."""
    logger.info("Not unloading LLM")
143
+
144
async def handle_cmds(self, cmd_name, cmd_args, json_cmd=None, context=None):
    """Execute one parsed command against the command manager.

    Records the raw command JSON in the chat log, then dispatches cmd_args
    as positional args (list), keyword args (dict), or a single argument.

    Returns the command result; None when the conversation is finished or
    the command is "reasoning"; {"error": str(e)} when execution raises.
    """
    # Idiom fix: dict.get() truthiness check instead of membership test plus
    # a second lookup. Behavior is identical for the truthy/absent cases.
    if context.data.get('finished_conversation'):
        logger.warning("Conversation is finished, not executing command")
        print("\033[91mConversation is finished, not executing command\033[0m")
        return None

    logger.info("Command execution: {command}", command=cmd_name)
    logger.debug("Command details: {details}", details={
        "command": cmd_name,
        "arguments": cmd_args,
        "context": str(context)
    })
    # The assistant's command is logged as a one-element JSON array.
    context.chat_log.add_message({"role": "assistant", "content": [{"type": "text",
        "text": '[' + json_cmd + ']'}]})
    command_manager.context = context

    # "reasoning" is recorded in the log above but never executed.
    if cmd_name == "reasoning":
        return None

    # cmd_args might be a single arg like integer or string, or it may be an
    # array, or an object/dict with named args.
    try:
        if isinstance(cmd_args, list):
            # filter out empty strings
            cmd_args = [x for x in cmd_args if x != '']
            logger.debug("Executing command with list arguments", extra={"step": 1})
            await context.running_command(cmd_name, cmd_args)
            logger.debug("Executing command with list arguments", extra={"step": 2})
            return await command_manager.execute(cmd_name, *cmd_args)
        elif isinstance(cmd_args, dict):
            logger.debug("Executing command with dict arguments", extra={"step": 1})
            await context.running_command(cmd_name, cmd_args)
            logger.debug("Executing command with dict arguments", extra={"step": 2})
            return await command_manager.execute(cmd_name, **cmd_args)
        else:
            logger.debug("Executing command with single argument", extra={"step": 1})
            await context.running_command(cmd_name, cmd_args)
            logger.debug("Executing command with single argument", extra={"step": 2})
            return await command_manager.execute(cmd_name, cmd_args)

    except Exception as e:
        trace = traceback.format_exc()
        print("\033[96mError in handle_cmds: " + str(e) + "\033[0m")
        print("\033[96m" + trace + "\033[0m")
        logger.error("Error in handle_cmds", extra={
            "error": str(e),
            "command": cmd_name,
            "arguments": cmd_args,
            "traceback": trace
        })

        return {"error": str(e)}
195
+
196
def remove_braces(self, buffer):
    """Peel stream decoration off a JSON command buffer.

    Applies a fixed sequence of prefix/suffix trims exactly once each, in
    order: trailing newline, leading '[ ' / ' [', trailing ',' and ']',
    leading '[', and finally one char when the buffer still ends in '},'.
    The order matters and mirrors the original rule sequence.
    """
    # (side, token, chars_to_drop) — each rule fires at most once.
    rules = (
        ('end', '\n', 1),
        ('start', '[ ', 2),
        ('start', ' [', 2),
        ('end', ',', 1),
        ('end', ']', 1),
        ('start', '[', 1),
        ('end', '},', 1),
    )
    for side, token, drop in rules:
        if side == 'start':
            if buffer.startswith(token):
                buffer = buffer[drop:]
        elif buffer.endswith(token):
            buffer = buffer[:-drop]
    return buffer
212
+
213
async def parse_single_cmd(self, json_str, context, buffer, match=None):
    """Parse and execute a single JSON command object.

    Returns ([{"cmd": name, "result": result}], remaining_buffer) on
    success, or (None, buffer) when the command is unknown to this agent,
    has empty arguments, or fails to parse/execute.
    """
    cmd_name = '?'
    try:
        cmd_obj = json.loads(json_str)
        # A single command may arrive wrapped in a one-element list.
        # Fix: unwrap before extracting the key (the old code computed the
        # name once before the list check and threw the value away).
        if isinstance(cmd_obj, list):
            cmd_obj = cmd_obj[0]
        # The command name is the object's first (and only) key.
        cmd_name = next(iter(cmd_obj))
        cmd_args = cmd_obj[cmd_name]

        # Only execute commands this agent is allowed to use.
        if cmd_name not in self.agent["commands"]:
            logger.warning("Command not found in agent commands", extra={"command": cmd_name})
            return None, buffer
        if check_empty_args(cmd_args):
            logger.info("Empty arguments for command", extra={"command": cmd_name})
            return None, buffer

        logger.info("Non-empty arguments for command", extra={"command": cmd_name, "arguments": cmd_args})
        # Handle the full command
        result = await self.handle_cmds(cmd_name, cmd_args, json_cmd=json_str, context=context)
        await context.command_result(cmd_name, result)

        cmd = {"cmd": cmd_name, "result": result}
        # Remove the processed JSON object (and stray commas) from the buffer
        if match is not None:
            buffer = buffer[match.end():]
        buffer = buffer.lstrip(',').rstrip(',')
        return [cmd], buffer
    except Exception as e:
        trace = traceback.format_exc()
        logger.error("Error processing command", extra={"error": str(e) + "\n\n" + trace})
        # Fix: removed dead re-wrapping of json_str in brackets — the value
        # was assigned but never used.
        return None, buffer
249
+
250
+
251
async def parse_cmd_stream(self, stream, context):
    """Incrementally parse an LLM token stream into JSON commands and run them.

    Accumulates streamed text, re-parses it on every chunk with
    parse_streaming_commands, executes each newly completed command via
    handle_cmds, and forwards partial commands to the context for live UI
    updates. Aborts early on an invalid response prefix or when the
    conversation has been finished.

    Returns (results, full_cmds): `results` feeds back to the model (large
    args omitted), `full_cmds` is the complete executed-command record.
    """
    buffer = ""
    results = []
    full_cmds = []

    num_processed = 0
    debug_box("Parsing command stream")
    debug_box(str(context))
    # Untouched copy of the stream for error reporting.
    original_buffer = ""

    async for part in stream:
        buffer += part
        original_buffer += part

        logger.debug(f"Current buffer: ||{buffer}||")

        if invalid_start_format(buffer):
            print("Found invalid start to buffer", buffer)
            context.chat_log.add_message({"role": "assistant", "content": buffer})
            started_with = f"Your invalid command started with: {buffer[0:20]}"
            results.append({"cmd": "UNKNOWN", "args": { "invalid": "(" }, "result": error_result + "\n\n" + started_with})
            return results, full_cmds

        # A bare object is normalized into a one-element array.
        if len(buffer) > 0 and buffer[0] == '{':
            buffer = "[" + buffer

        # happened with Qwen 3 for some reason
        buffer = buffer.replace('}] <>\n\n[{','}, {')
        buffer = buffer.replace('}] <>\n[{','}, {')

        commands, partial_cmd = parse_streaming_commands(buffer)

        # An int return means "not enough data yet" — keep streaming.
        if isinstance(commands, int):
            continue

        if not isinstance(commands, list):
            commands = [commands]

        try:
            if len(commands) == 1 and 'commands' in commands[0]:
                commands = commands[0]['commands']
        except Exception as e:
            continue

        logger.debug(f"commands: {commands}, partial_cmd: {partial_cmd}")

        # Idiom fix: dict.get() truthiness instead of membership + lookup.
        if context.data.get('finished_conversation'):
            logger.warning("Conversation is finished or halted, exiting stream parsing")
            debug_box(f"""Conversation is finished or halted, exiting stream""")
            debug_box(str(context))
            # stream is actually a generator
            if partial_cmd is not None:
                cmd_name = next(iter(partial_cmd))
                if cmd_name in ["say", "json_encoded_md", "think"]:
                    context.chat_log.add_message({"role": "assistant", "content": str(partial_cmd[cmd_name])})
                else:
                    context.chat_log.add_message({"role": "assistant", "content": str(partial_cmd) + "(Interrupted)"})
            try:
                stream.close()
            except Exception as e:
                print("\033[91mError closing stream\033[0m")

            return results, full_cmds

        if len(commands) > num_processed:
            logger.debug("New command(s) found")
            logger.debug(f"Commands: {commands}")
            for i in range(num_processed, len(commands)):
                try:
                    cmd = commands[i]
                    try:
                        cmd_name = next(iter(cmd))
                    except Exception as e:
                        print("next iter failed. cmd is")
                        print(cmd)
                        break
                    if isinstance(cmd, str):
                        print("\033[91m" + "Invalid command format, expected object, trying to parse anyway" + "\033[0m")
                        print("\033[91m" + str(cmd) + "\033[0m")
                        cmd = json.loads(cmd)
                        cmd_name = next(iter(cmd))
                    cmd_args = cmd[cmd_name]
                    logger.debug(f"Processing command: {cmd}")
                    await context.partial_command(cmd_name, json.dumps(cmd_args), cmd_args)

                    result = await self.handle_cmds(cmd_name, cmd_args, json_cmd=json.dumps(cmd), context=context)
                    await context.command_result(cmd_name, result)
                    # Fix: removed dead first assignment of sys_header; it was
                    # immediately overwritten with "".
                    sys_header = ""
                    full_cmds.append({ "SYSTEM": sys_header, "cmd": cmd_name, "args": cmd_args, "result": result})
                    if result is not None:
                        results.append({"SYSTEM": sys_header, "cmd": cmd_name, "args": { "omitted": "(see command msg.)"}, "result": result})

                    num_processed = len(commands)
                except Exception as e:
                    trace = traceback.format_exc()
                    logger.error(f"Error processing command: {e} \n{trace}")
                    logger.error(str(e))
        else:
            logger.debug("No new commands found")
            # partial_cmd is occasionally a plain string for some reason;
            # definitely skip that.
            is_string = isinstance(partial_cmd, str)
            if partial_cmd is not None and partial_cmd != {} and not is_string:
                logger.debug(f"Partial command {partial_cmd}")
                try:
                    cmd_name = next(iter(partial_cmd))
                    cmd_args = partial_cmd[cmd_name]
                    logger.debug(f"Partial command detected: {partial_cmd}")
                    await context.partial_command(cmd_name, json.dumps(cmd_args), cmd_args)
                except Exception as de:
                    logger.error("Failed to parse partial command")
                    logger.error(str(de))

    # Fix: the old reasonOnly check read the first key of full_cmds[0]
    # (always "SYSTEM", never the command name) and never updated cmd_name
    # inside its loop, so it could not detect reasoning-only responses —
    # the code itself noted it misbehaved. Check the "cmd" field of every
    # executed command instead.
    reasonOnly = False
    try:
        if full_cmds:
            reasonOnly = all(c.get("cmd") == "reasoning" for c in full_cmds)
    except Exception as e:
        pass
    if len(full_cmds) == 0 or reasonOnly:
        print("\033[91m" + "No results and parse failed" + "\033[0m")
        try:
            buffer = replace_raw_blocks(buffer)
            parse_ok = json.loads(buffer)
            parse_fail_reason = ""
            tried_to_parse = ""
        except JSONDecodeError as e:
            print("final parse fail")
            print(buffer)
            parse_fail_reason = str(e)
            context.chat_log.add_message({"role": "assistant", "content": buffer})
            print(parse_fail_reason)
            await asyncio.sleep(1)
            tried_to_parse = f"\n\nTried to parse the following input: {original_buffer}"
        results.append({"cmd": "UNKNOWN", "args": { "invalid": "("}, "result": error_result + '\n\nJSON parse error was: ' + parse_fail_reason +
            tried_to_parse })

    return results, full_cmds
402
+
403
async def render_system_msg(self):
    """Render the agent's system message from the 'system' template.

    Collects command docstrings (restricted to commands that are actually
    registered), agent/persona data and the current local time, renders the
    template, then appends any extra instructions contributed by hooks.
    Stores the result on self.system_message and returns it.
    """
    logger.debug("Docstrings:")
    logger.debug(command_manager.get_some_docstrings(self.agent["commands"]))
    now = datetime.now()

    formatted_time = now.strftime("~ %Y-%m-%d %I %p %Z%z")

    data = {
        "command_docs": command_manager.get_some_docstrings(self.agent["commands"]),
        "agent": self.agent,
        "persona": self.agent['persona'],
        "formatted_datetime": formatted_time,
        "context_data": self.context.data
    }
    # is say in the command_manager
    if 'say' in command_manager.functions.keys():
        print("I found say! in the functions!")
    else:
        print("Say is not in the functions!")
    if 'say' in data['command_docs'].keys():
        print("I found say in the command docs!")

    # we need to be doubly sure to remove anything from command_docs that is
    # not in command_manager.functions.keys().
    # Fix: iterate over a snapshot — deleting keys while iterating the dict
    # directly raises RuntimeError ("dictionary changed size during iteration").
    for cmd in list(data['command_docs']):
        if cmd not in command_manager.functions.keys():
            print("Removing " + cmd + " from command_docs")
            del data['command_docs'][cmd]

    self.system_message = await render('system', data)

    additional_instructions = await hook_manager.add_instructions(self.context)

    for instruction in additional_instructions:
        self.system_message += instruction + "\n\n"

    return self.system_message
440
+
441
+
442
async def chat_commands(self, model, context,
                        temperature=0, max_tokens=4000, messages=None):
    """Run one full chat turn: build messages, stream the LLM, execute commands.

    Prepends the rendered system message and boot demo messages to
    `messages`, runs the message-filter pipeline, streams the model's reply
    through parse_cmd_stream, and returns its (results, full_cmds) pair.
    Returns (None, None) when the filtered message list no longer begins
    with a system message.
    """
    # Fix: mutable default argument ([]) replaced with a None sentinel.
    if messages is None:
        messages = []

    self.context = context
    content = [ { "type": "text", "text": await self.render_system_msg() } ]
    messages = [{"role": "system", "content": content }] + demo_boot_msgs() + messages

    # Deep-copy the messages via a JSON round trip so the pipeline cannot
    # mutate the caller's structures.
    json_messages = json.dumps(messages)
    new_messages = json.loads(json_messages)

    if os.environ.get("AH_DEFAULT_MAX_TOKENS"):
        max_tokens = int(os.environ.get("AH_DEFAULT_MAX_TOKENS"))
    try:
        tmp_data = { "messages": new_messages }
        debug_box("Filtering messages")
        tmp_data = await pipeline_manager.filter_messages(tmp_data, context=context)
        new_messages = tmp_data['messages']
    except Exception as e:
        logger.error("Error filtering messages")
        logger.error(str(e))

    if new_messages[0]['role'] != 'system':
        logger.error("First message is not a system message")
        print("\033[91mFirst message is not a system message\033[0m")
        return None, None

    # context.agent may arrive as a name string; resolve it to full data.
    if not isinstance(context.agent, dict):
        context.agent = await get_agent_data(context.agent, context=context)

    if model is None:
        if 'service_models' in context.agent and context.agent['service_models'] is not None:
            if context.agent['service_models'].get('stream_chat', None) is None:
                model = os.environ.get("DEFAULT_LLM_MODEL")

    stream = await context.stream_chat(model,
                                       temperature=temperature,
                                       max_tokens=max_tokens,
                                       messages=new_messages,
                                       context=context)

    ret, full_cmds = await self.parse_cmd_stream(stream, context)
    logger.debug("System message was:")
    # NOTE(review): this re-renders the system message (and re-runs the
    # instruction hooks) purely for logging; kept for log-output parity.
    logger.debug(await self.render_system_msg())

    # use green text
    print("\033[92m" + "Just after stream chat, last two messages in chat log:")
    print("------------------------------------")
    # Fix: guard the debug prints so short chat logs cannot raise IndexError.
    if len(context.chat_log.messages) >= 1:
        print(context.chat_log.messages[-1])
    if len(context.chat_log.messages) >= 2:
        print(context.chat_log.messages[-2])
    # switch back to normal text
    print("\033[0m")

    return ret, full_cmds
498
+
499
@service()
async def run_command(cmd_name, cmd_args, context=None):
    """Fire-and-forget execution of a single command via a fresh Agent.

    Raises when no context is supplied; otherwise schedules the command on
    the event loop without awaiting its result.
    """
    if context is None:
        raise Exception("run_command: No context provided")

    worker = Agent(agent=context.agent)
    payload = json.dumps({cmd_name: cmd_args})
    asyncio.create_task(worker.handle_cmds(cmd_name, cmd_args, payload, context=context))
507
+