npcpy 1.2.36__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
npcpy/npc_compiler.py CHANGED
@@ -1,4 +1,5 @@
  import os
+ import shutil
  from pyexpat.errors import messages
  import yaml
  import json
@@ -18,7 +19,6 @@ from typing import Any, Dict, List, Optional, Union, Callable, Tuple
  from jinja2 import Environment, FileSystemLoader, Template, Undefined, DictLoader
  from sqlalchemy import create_engine, text
  import npcpy as npy
- from npcpy.llm_funcs import DEFAULT_ACTION_SPACE
  from npcpy.tools import auto_tools
  import math
  import random
@@ -183,6 +183,7 @@ def initialize_npc_project(
  """Initialize an NPC project"""
  if directory is None:
  directory = os.getcwd()
+ directory = os.path.expanduser(os.fspath(directory))
 
  npc_team_dir = os.path.join(directory, "npc_team")
  os.makedirs(npc_team_dir, exist_ok=True)
@@ -191,7 +192,8 @@ def initialize_npc_project(
  "assembly_lines",
  "sql_models",
  "jobs",
- "triggers"]:
+ "triggers",
+ "tools"]:
  os.makedirs(os.path.join(npc_team_dir, subdir), exist_ok=True)
 
  forenpc_path = os.path.join(npc_team_dir, "forenpc.npc")
@@ -206,20 +208,166 @@ def initialize_npc_project(
  }
  with open(forenpc_path, "w") as f:
  yaml.dump(default_npc, f)
- ctx_path = os.path.join(npc_team_dir, "team.ctx")
- if not os.path.exists(ctx_path):
+ parsed_templates: List[str] = []
+ if templates:
+ if isinstance(templates, str):
+ parsed_templates = [
+ t.strip() for t in re.split(r"[,\s]+", templates) if t.strip()
+ ]
+ elif isinstance(templates, (list, tuple, set)):
+ parsed_templates = [str(t).strip() for t in templates if str(t).strip()]
+ else:
+ parsed_templates = [str(templates).strip()]
+
+ ctx_destination: Optional[str] = None
+ preexisting_ctx = [
+ os.path.join(npc_team_dir, f)
+ for f in os.listdir(npc_team_dir)
+ if f.endswith(".ctx")
+ ]
+ if preexisting_ctx:
+ ctx_destination = preexisting_ctx[0]
+ if len(preexisting_ctx) > 1:
+ print(
+ "Warning: Multiple .ctx files already present; using first and ignoring the rest."
+ )
+
+ def _resolve_template_path(template_name: str) -> Optional[str]:
+ expanded = os.path.expanduser(template_name)
+ if os.path.exists(expanded):
+ return expanded
+
+ embedded_templates = {
+ "slean": """name: slean
+ primary_directive: You are slean, the marketing innovator AI. Your responsibility is to create marketing campaigns and manage them effectively, while also thinking creatively to solve marketing challenges. Guide the strategy that drives customer engagement and brand awareness.
+ """,
+ "turnic": """name: turnic
+ primary_directive: Assist with sales challenges and questions. Opt for straightforward solutions that help sales professionals achieve quick results.
+ """,
+ "budgeto": """name: budgeto
+ primary_directive: You manage marketing budgets, ensuring resources are allocated efficiently and spend is optimized.
+ """,
+ "relatio": """name: relatio
+ primary_directive: You manage customer relationships and ensure satisfaction throughout the sales process. Focus on nurturing clients and maintaining long-term connections.
+ """,
+ "funnel": """name: funnel
+ primary_directive: You oversee the sales pipeline, track progress, and optimize conversion rates to move leads efficiently.
+ """,
+ }
+
+ base_dirs = [
+ os.path.expanduser("~/.npcsh/npc_team/templates"),
+ os.path.expanduser("~/.npcpy/npc_team/templates"),
+ os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "tests", "template_tests", "npc_team")),
+ os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "examples", "npc_team")),
+ os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "example_npc_project", "npc_team")),
+ ]
+ base_dirs = [d for d in base_dirs if os.path.isdir(d)]
+
+ for base in base_dirs:
+ direct = os.path.join(base, template_name)
+ if os.path.exists(direct):
+ return direct
+ if not direct.endswith(".npc") and os.path.exists(direct + ".npc"):
+ return direct + ".npc"
+ for root, _, files in os.walk(base):
+ for fname in files:
+ stem, ext = os.path.splitext(fname)
+ if ext == ".npc" and stem == template_name:
+ return os.path.join(root, fname)
+
+ # If no on-disk template found, fall back to embedded definitions
+ if template_name in embedded_templates:
+ embedded_dir = os.path.join(npc_team_dir, "_embedded_templates", template_name)
+ os.makedirs(embedded_dir, exist_ok=True)
+ npc_file = os.path.join(embedded_dir, f"{template_name}.npc")
+ if not os.path.exists(npc_file):
+ with open(npc_file, "w") as f:
+ f.write(embedded_templates[template_name])
+ return embedded_dir
+ return None
+
+ def _copy_template(src_path: str) -> List[str]:
+ nonlocal ctx_destination
+ copied: List[str] = []
+ src_path = os.path.expanduser(src_path)
+
+ allowed_exts = {".npc", ".tool", ".pipe", ".sql", ".job", ".ctx", ".yaml", ".yml"}
+
+ if os.path.isfile(src_path):
+ if os.path.splitext(src_path)[1] in allowed_exts:
+ if os.path.splitext(src_path)[1] == ".ctx":
+ if ctx_destination:
+ print(
+ f"Warning: Skipping extra context file '{src_path}' because one already exists."
+ )
+ return copied
+ dest_path = os.path.join(npc_team_dir, os.path.basename(src_path))
+ ctx_destination = dest_path
+ else:
+ dest_path = os.path.join(npc_team_dir, os.path.basename(src_path))
+ if not os.path.exists(dest_path):
+ shutil.copy2(src_path, dest_path)
+ copied.append(dest_path)
+ return copied
+
+ for root, _, files in os.walk(src_path):
+ rel_dir = os.path.relpath(root, src_path)
+ dest_dir = npc_team_dir if rel_dir == "." else os.path.join(npc_team_dir, rel_dir)
+ os.makedirs(dest_dir, exist_ok=True)
+ for fname in files:
+ if os.path.splitext(fname)[1] not in allowed_exts:
+ continue
+ if os.path.splitext(fname)[1] == ".ctx":
+ if ctx_destination:
+ print(
+ f"Warning: Skipping extra context file '{os.path.join(root, fname)}' because one already exists."
+ )
+ continue
+ dest_path = os.path.join(npc_team_dir, fname)
+ ctx_destination = dest_path
+ else:
+ dest_path = os.path.join(dest_dir, fname)
+ if not os.path.exists(dest_path):
+ shutil.copy2(os.path.join(root, fname), dest_path)
+ copied.append(dest_path)
+ return copied
+
+ applied_templates: List[str] = []
+ if parsed_templates:
+ for template_name in parsed_templates:
+ template_path = _resolve_template_path(template_name)
+ if not template_path:
+ print(f"Warning: Template '{template_name}' not found in known template directories.")
+ continue
+ copied = _copy_template(template_path)
+ if copied:
+ applied_templates.append(template_name)
+
+ if applied_templates:
+ applied_templates = sorted(set(applied_templates))
+ if not ctx_destination:
+ default_ctx_path = os.path.join(npc_team_dir, "team.ctx")
  default_ctx = {
  'name': '',
- 'context' : '',
+ 'context' : context or '',
  'preferences': '',
  'mcp_servers': '',
  'databases':'',
  'use_global_jinxs': True,
  'forenpc': 'forenpc'
  }
- with open(ctx_path, "w") as f:
+ if parsed_templates:
+ default_ctx['templates'] = parsed_templates
+ with open(default_ctx_path, "w") as f:
  yaml.dump(default_ctx, f)
-
+ ctx_destination = default_ctx_path
+
+ if applied_templates:
+ return (
+ f"NPC project initialized in {npc_team_dir} "
+ f"using templates: {', '.join(applied_templates)}"
+ )
  return f"NPC project initialized in {npc_team_dir}"
 
 
@@ -236,7 +384,6 @@ def write_yaml_file(file_path, data):
  print(f"Error writing YAML file {file_path}: {e}")
  return False
 
-
  class Jinx:
  '''
  Jinx represents a workflow template with Jinja-rendered steps.
@@ -246,7 +393,8 @@ class Jinx:
  - inputs: list of input parameters
  - description: what the jinx does
  - npc: optional NPC to execute with
- - steps: list of step definitions with code
+ - steps: list of step definitions with code. This section can now be a Jinja template itself.
+ - file_context: optional list of file patterns to include as context
 
  Execution:
  - Renders Jinja templates in step code with input values
@@ -261,9 +409,13 @@ class Jinx:
  else:
  raise ValueError("Either jinx_data or jinx_path must be provided")
 
- # Keep a copy for macro expansion, but retain the executable steps by default
+ # _raw_steps will now hold the original, potentially templated, steps definition
  self._raw_steps = list(self.steps)
- self.steps = list(self._raw_steps)
+ self.steps = [] # Will be populated after first-pass rendering
+ self.parsed_files = {}
+ if self.file_context:
+ self.parsed_files = self._parse_file_patterns(self.file_context)
+
  def _load_from_file(self, path):
  jinx_data = load_yaml_file(path)
  if not jinx_data:
@@ -283,9 +435,38 @@ class Jinx:
  self.inputs = jinx_data.get("inputs", [])
  self.description = jinx_data.get("description", "")
  self.npc = jinx_data.get("npc")
- self.steps = jinx_data.get("steps", [])
+ self.steps = jinx_data.get("steps", []) # This can now be a Jinja templated list
+ self.file_context = jinx_data.get("file_context", [])
  self._source_path = jinx_data.get("_source_path", None)
 
+ def to_tool_def(self) -> Dict[str, Any]:
+ """Convert this Jinx to an OpenAI-style tool definition."""
+ properties = {}
+ required = []
+ for inp in self.inputs:
+ if isinstance(inp, str):
+ properties[inp] = {"type": "string", "description": f"Parameter: {inp}"}
+ required.append(inp)
+ elif isinstance(inp, dict):
+ name = list(inp.keys())[0]
+ default_val = inp.get(name, "")
+ desc = f"Parameter: {name}"
+ if default_val != "":
+ desc += f" (default: {default_val})"
+ properties[name] = {"type": "string", "description": desc}
+ return {
+ "type": "function",
+ "function": {
+ "name": self.jinx_name,
+ "description": self.description or f"Jinx: {self.jinx_name}",
+ "parameters": {
+ "type": "object",
+ "properties": properties,
+ "required": required
+ }
+ }
+ }
+
  def render_first_pass(
  self,
  jinja_env_for_macros: Environment,
@@ -293,21 +474,52 @@ class Jinx:
  ):
  """
  Performs the first-pass Jinja rendering on the Jinx's raw steps.
- This expands nested Jinx calls (e.g., {{ sh(...) }} or
- engine: jinx_name) but preserves runtime variables
- (e.g., {{ command_var }}).
-
- Args:
- jinja_env_for_macros: The Jinja Environment configured with
- Jinx callables in its globals.
- all_jinx_callables: A dictionary of Jinx names to their
- callable functions (from create_jinx_callable).
+ This expands Jinja control flow (for, if) to generate step structures,
+ then expands nested Jinx calls (e.g., {{ sh(...) }} or engine: jinx_name)
+ and inline macros.
  """
- rendered_steps_output = []
+ # 1. Join the list of raw steps (which are individual YAML lines) into a single string.
+ # This single string is the complete Jinja template for the 'steps' section.
+ raw_steps_template_string = "\n".join(self._raw_steps)
+
+ # 2. Render this single string as a Jinja template.
+ # Jinja will now process the {% for %} and {% if %} directives,
+ # dynamically generating the YAML structure.
+ try:
+ steps_template = jinja_env_for_macros.from_string(raw_steps_template_string)
+ # Pass globals (like num_tasks, include_greeting from Jinx inputs)
+ # to the Jinja rendering context for structural templating.
+ rendered_steps_yaml_string = steps_template.render(**jinja_env_for_macros.globals)
+ except Exception as e:
+ # In a real Jinx, this would go to a proper logger.
+ # For this context, we handle the error gracefully.
+ # self._log_debug(f"Warning: Error during first-pass templating of Jinx '{self.jinx_name}' steps YAML: {e}")
+ self.steps = list(self._raw_steps) # Fallback to original raw steps
+ return
+
+ # 3. Parse the rendered YAML string back into a list of step dictionaries.
+ # This step will now correctly interpret the YAML structure generated by Jinja.
+ try:
+ structurally_expanded_steps = yaml.safe_load(rendered_steps_yaml_string)
+ if not isinstance(structurally_expanded_steps, list):
+ # Handle cases where the rendered YAML might be empty or not a list
+ if structurally_expanded_steps is None:
+ structurally_expanded_steps = []
+ else:
+ raise ValueError(f"Rendered steps YAML did not result in a list: {type(structurally_expanded_steps)}")
+ self.steps = structurally_expanded_steps
+ except Exception as e:
+ # self._log_debug(f"Warning: Error re-parsing structurally expanded steps YAML for Jinx '{self.jinx_name}': {e}")
+ self.steps = list(self._raw_steps) # Fallback
+ return
 
- for raw_step in self._raw_steps:
+ # 4. Now, iterate through these `structurally_expanded_steps` to expand
+ # declarative Jinx calls (engine: jinx_name) and inline macros.
+ # This is the second phase of the first-pass rendering.
+ final_rendered_steps = []
+ for raw_step in structurally_expanded_steps:
  if not isinstance(raw_step, dict):
- rendered_steps_output.append(raw_step)
+ final_rendered_steps.append(raw_step)
  continue
 
  engine_name = raw_step.get('engine')
@@ -326,51 +538,58 @@ class Jinx:
  expanded_steps = yaml.safe_load(expanded_yaml_string)
 
  if isinstance(expanded_steps, list):
- rendered_steps_output.extend(expanded_steps)
-
+ final_rendered_steps.extend(expanded_steps)
  elif expanded_steps is not None:
- rendered_steps_output.append(expanded_steps)
-
+ final_rendered_steps.append(expanded_steps)
  except Exception as e:
- print(
- f"Warning: Error expanding Jinx '{engine_name}' "
- f"within Jinx '{self.jinx_name}' "
- f"(declarative): {e}"
- )
- rendered_steps_output.append(raw_step)
- # Skip rendering for python/bash engine steps - preserve runtime variables
+ # self._log_debug(
+ # f"Warning: Error expanding Jinx '{engine_name}' "
+ # f"within Jinx '{self.jinx_name}' "
+ # f"(declarative): {e}"
+ # )
+ final_rendered_steps.append(raw_step)
+ # For python/bash engine steps, only inline macro expansion happens in the next block.
+ # The code content itself is preserved for runtime Jinja rendering.
  elif raw_step.get('engine') in ['python', 'bash']:
- rendered_steps_output.append(raw_step)
+ processed_step = {}
+ for key, value in raw_step.items():
+ if isinstance(value, str):
+ try:
+ template = jinja_env_for_macros.from_string(value)
+ # Render with empty context for inline macros/static values
+ rendered_value = template.render({})
+ try:
+ loaded_value = yaml.safe_load(rendered_value)
+ processed_step[key] = loaded_value
+ except yaml.YAMLError:
+ processed_step[key] = rendered_value
+ except Exception as e:
+ # self._log_debug(f"Warning: Error during first-pass rendering of Jinx '{self.jinx_name}' step field '{key}' (inline macro): {e}")
+ processed_step[key] = value
+ else:
+ processed_step[key] = value
+ final_rendered_steps.append(processed_step)
  else:
- # For other steps, do first-pass rendering (inline macro expansion)
+ # For other steps (e.g., custom engines, or just data), perform inline macro expansion
  processed_step = {}
  for key, value in raw_step.items():
  if isinstance(value, str):
  try:
- template = jinja_env_for_macros.from_string(
- value
- )
+ template = jinja_env_for_macros.from_string(value)
  rendered_value = template.render({})
-
  try:
- loaded_value = yaml.safe_load(
- rendered_value
- )
+ loaded_value = yaml.safe_load(rendered_value)
  processed_step[key] = loaded_value
  except yaml.YAMLError:
  processed_step[key] = rendered_value
  except Exception as e:
- print(
- f"Warning: Error during first-pass "
- f"rendering of Jinx '{self.jinx_name}' "
- f"step field '{key}' (inline macro): {e}"
- )
+ # self._log_debug(f"Warning: Error during first-pass rendering of Jinx '{self.jinx_name}' step field '{key}' (inline macro): {e}")
  processed_step[key] = value
  else:
  processed_step[key] = value
- rendered_steps_output.append(processed_step)
+ final_rendered_steps.append(processed_step)
 
- self.steps = rendered_steps_output
+ self.steps = final_rendered_steps
 
  def execute(self,
  input_values: Dict[str, Any],
@@ -399,6 +618,11 @@ class Jinx:
  "messages": messages,
  "npc": active_npc
  })
+
+ # Add parsed file content to the context
+ if self.parsed_files:
+ context['file_context'] = self._format_parsed_files_context(self.parsed_files)
+ context['files'] = self.parsed_files # Also make raw dict available
 
  for i, step in enumerate(self.steps):
  context = self._execute_step(
@@ -409,9 +633,19 @@ class Jinx:
  self,
  messages=messages,
  extra_globals=extra_globals
  )
+ # If an error occurred in a step, propagate it and stop execution
+ if "error" in context.get("output", ""):
+ self._log_debug(f"DEBUG: Jinx '{self.jinx_name}' execution stopped due to error in step '{step.get('name', 'unnamed_step')}': {context['output']}")
+ break
 
  return context
 
+ def _log_debug(self, msg: str):
+ """Helper for logging debug messages to a file."""
+ log_file_path = os.path.expanduser("~/jinx_debug_log.txt")
+ with open(log_file_path, "a") as f:
+ f.write(f"[{datetime.now().isoformat()}] {msg}\n")
+
  def _execute_step(self,
  step: Dict[str, Any],
  context: Dict[str, Any],
@@ -420,40 +654,31 @@ class Jinx:
  messages: Optional[List[Dict[str, str]]] = None,
  extra_globals: Optional[Dict[str, Any]] = None):
 
- def _log_debug(msg):
- log_file_path = os.path.expanduser("~/jinx_debug_log.txt")
- with open(log_file_path, "a") as f:
- f.write(f"[{datetime.now().isoformat()}] {msg}\n")
-
  code_content = step.get("code", "")
  step_name = step.get("name", "unnamed_step")
  step_npc = step.get("npc")
 
  active_npc = step_npc if step_npc else npc
 
+ # Second pass Jinja rendering: render the step's code with the current runtime context
  try:
  template = jinja_env.from_string(code_content)
  rendered_code = template.render(**context)
  except Exception as e:
- _log_debug(
- f"Error rendering template for step {step_name} "
- f"(second pass): {e}"
+ error_msg = (
+ f"Error rendering template for step '{step_name}' "
+ f"(second pass): {type(e).__name__}: {e}"
  )
- rendered_code = code_content
+ context['output'] = error_msg
+ self._log_debug(error_msg)
+ return context
 
- _log_debug(f"rendered jinx code: {rendered_code}")
- _log_debug(
- f"DEBUG: Before exec - rendered_code: {rendered_code}"
- )
- _log_debug(
- f"DEBUG: Before exec - context['output'] before step: "
- f"{context.get('output')}"
- )
+ self._log_debug(f"DEBUG: Executing step '{step_name}' with rendered code: {rendered_code}")
 
  exec_globals = {
  "__builtins__": __builtins__,
  "npc": active_npc,
- "context": context,
+ "context": context, # Pass context by reference
  "math": math,
  "random": random,
  "datetime": datetime,
@@ -477,43 +702,27 @@ class Jinx:
  if extra_globals:
  exec_globals.update(extra_globals)
 
- exec_locals = {}
-
+ exec_locals = {} # Locals for this specific exec call
+
  try:
  exec(rendered_code, exec_globals, exec_locals)
  except Exception as e:
  error_msg = (
- f"Error executing step {step_name}: "
+ f"Error executing step '{step_name}': "
  f"{type(e).__name__}: {e}"
  )
  context['output'] = error_msg
- _log_debug(error_msg)
+ self._log_debug(error_msg)
  return context
 
- _log_debug(f"DEBUG: After exec - exec_locals: {exec_locals}")
- _log_debug(
- f"DEBUG: After exec - 'output' in exec_locals: "
- f"{'output' in exec_locals}"
- )
-
+ # Update the main context with any variables set in exec_locals
  context.update(exec_locals)
 
- _log_debug(
- f"DEBUG: After context.update(exec_locals) - "
- f"context['output']: {context.get('output')}"
- )
- _log_debug(f"context after jinx ex: {context}")
-
  if "output" in exec_locals:
  outp = exec_locals["output"]
  context["output"] = outp
  context[step_name] = outp
 
- _log_debug(
- f"DEBUG: Inside 'output' in exec_locals block - "
- f"context['output']: {context.get('output')}"
- )
-
  if messages is not None:
  messages.append({
  'role':'assistant',
@@ -526,13 +735,95 @@ class Jinx:
 
  return context
 
+ def _parse_file_patterns(self, patterns_config):
+ """Parse file patterns configuration and load matching files into KV cache"""
+ if not patterns_config:
+ return {}
+
+ file_cache = {}
+
+ for pattern_entry in patterns_config:
+ if isinstance(pattern_entry, str):
+ pattern_entry = {"pattern": pattern_entry}
+
+ pattern = pattern_entry.get("pattern", "")
+ recursive = pattern_entry.get("recursive", False)
+ base_path = pattern_entry.get("base_path", ".")
+
+ if not pattern:
+ continue
+
+ # Resolve base_path relative to jinx's source path or current working directory
+ if self._source_path:
+ base_path = os.path.join(os.path.dirname(self._source_path), base_path)
+ base_path = os.path.expanduser(base_path)
+
+ if not os.path.isabs(base_path):
+ base_path = os.path.join(os.getcwd(), base_path)
+
+ matching_files = self._find_matching_files(pattern, base_path, recursive)
+
+ for file_path in matching_files:
+ file_content = self._load_file_content(file_path)
+ if file_content:
+ relative_path = os.path.relpath(file_path, base_path)
+ file_cache[relative_path] = file_content
+
+ return file_cache
+
+ def _find_matching_files(self, pattern, base_path, recursive=False):
+ """Find files matching the given pattern"""
+ matching_files = []
+
+ if not os.path.exists(base_path):
+ return matching_files
+
+ if recursive:
+ for root, dirs, files in os.walk(base_path):
+ for filename in files:
+ if fnmatch.fnmatch(filename, pattern):
+ matching_files.append(os.path.join(root, filename))
+ else:
+ try:
+ for item in os.listdir(base_path):
+ item_path = os.path.join(base_path, item)
+ if os.path.isfile(item_path) and fnmatch.fnmatch(item, pattern):
+ matching_files.append(item_path)
+ except PermissionError:
+ print(f"Permission denied accessing {base_path}")
+
+ return matching_files
+
+ def _load_file_content(self, file_path):
+ """Load content from a file with error handling"""
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ return f.read()
+ except Exception as e:
+ print(f"Error reading {file_path}: {e}")
+ return None
+
+ def _format_parsed_files_context(self, parsed_files):
+ """Format parsed files into context string"""
+ if not parsed_files:
+ return ""
+
+ context_parts = ["Additional context from files:"]
+
+ for file_path, content in parsed_files.items():
+ context_parts.append(f"\n--- {file_path} ---")
+ context_parts.append(content)
+ context_parts.append("")
+
+ return "\n".join(context_parts)
 
  def to_dict(self):
  result = {
  "jinx_name": self.jinx_name,
  "description": self.description,
  "inputs": self.inputs,
- "steps": self._raw_steps
+ "steps": self._raw_steps, # Save the original raw steps, which might be templated
+ "file_context": self.file_context
  }
 
  if self.npc:
@@ -578,6 +869,7 @@ class Jinx:
  "jinx_name": name,
  "description": doc.strip(),
  "inputs": inputs,
+ "file_context": [],
  "steps": [
  {
  "name": "mcp_function_call",
@@ -599,6 +891,8 @@ output = {mcp_tool.__module__}.{name}(
  except:
  pass
 
+
+
  def load_jinxs_from_directory(directory):
  """Load all jinxs from a directory recursively"""
  jinxs = []
@@ -621,100 +915,12 @@ def load_jinxs_from_directory(directory):
 
  def jinx_to_tool_def(jinx_obj: 'Jinx') -> Dict[str, Any]:
  """Convert a Jinx instance into an MCP/LLM-compatible tool schema definition."""
- properties: Dict[str, Any] = {}
- required: List[str] = []
- for inp in jinx_obj.inputs:
- if isinstance(inp, str):
- properties[inp] = {"type": "string"}
- required.append(inp)
- elif isinstance(inp, dict):
- name = list(inp.keys())[0]
- properties[name] = {"type": "string", "default": inp.get(name, "")}
- required.append(name)
- return {
- "type": "function",
- "function": {
- "name": jinx_obj.jinx_name,
- "description": jinx_obj.description or f"Jinx: {jinx_obj.jinx_name}",
- "parameters": {
- "type": "object",
- "properties": properties,
- "required": required
- }
- }
- }
+ return jinx_obj.to_tool_def()
 
  def build_jinx_tool_catalog(jinxs: Dict[str, 'Jinx']) -> Dict[str, Dict[str, Any]]:
  """Helper to build a name->tool_def catalog from a dict of Jinx objects."""
  return {name: jinx_to_tool_def(jinx_obj) for name, jinx_obj in jinxs.items()}
 
- def get_npc_action_space(npc=None, team=None):
- """Get action space for NPC including memory CRUD and core capabilities"""
- actions = DEFAULT_ACTION_SPACE.copy()
-
- if npc:
- core_tools = [
- npc.think_step_by_step,
- ]
- if hasattr(npc, "write_code"):
- core_tools.append(npc.write_code)
-
- if npc.command_history:
- core_tools.extend([
- npc.search_my_conversations,
- npc.search_my_memories,
- npc.create_memory,
- npc.read_memory,
- npc.update_memory,
- npc.delete_memory,
- npc.search_memories,
- npc.get_all_memories,
- npc.archive_old_memories,
- npc.get_memory_stats
- ])
-
- if npc.db_conn:
- core_tools.append(npc.query_database)
-
- if hasattr(npc, 'tools') and npc.tools:
- core_tools.extend([func for func in npc.tool_map.values() if callable(func)])
-
- if core_tools:
- tools_schema, tool_map = auto_tools(core_tools)
- actions.update({
- f"use_{tool.__name__}": {
- "description": f"Use {tool.__name__} capability",
- "handler": tool,
- "context": lambda **_: f"Available as automated capability",
- "output_keys": {"result": {"description": "Tool execution result", "type": "string"}}
- }
- for tool in core_tools
- })
-
- if team and hasattr(team, 'npcs') and len(team.npcs) > 1:
- available_npcs = [name for name in team.npcs.keys() if name != (npc.name if npc else None)]
-
- def team_aware_handler(command, extracted_data, **kwargs):
- if 'team' not in kwargs or kwargs['team'] is None:
- kwargs['team'] = team
- return agent_pass_handler(command, extracted_data, **kwargs)
-
- actions["pass_to_npc"] = {
- "description": "Pass request to another NPC - only when task requires their specific expertise",
- "handler": team_aware_handler,
- "context": lambda npc=npc, team=team, **_: (
- f"Available NPCs: {', '.join(available_npcs)}. "
- f"Only pass when you genuinely cannot complete the task."
- ),
- "output_keys": {
- "target_npc": {
- "description": "Name of the NPC to pass the request to",
- "type": "string"
- }
- }
- }
-
- return actions
  def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
  print(f"DEBUG extract_jinx_inputs called with args: {args}")
  print(f"DEBUG jinx.inputs: {jinx.inputs}")
@@ -867,7 +1073,9 @@ class NPC:
  self.npc_directory = None
 
  self.team = team # Store the team reference (can be None)
- self.jinxs_spec = jinxs or "*" # Store the jinx specification for later loading
+ # Only set jinxs_spec from parameter if it wasn't already set by _load_from_file
+ if not hasattr(self, 'jinxs_spec') or jinxs is not None:
+ self.jinxs_spec = jinxs or "*" # Store the jinx specification for later loading
 
  if tools is not None:
  tools_schema, tool_map = auto_tools(tools)
@@ -933,10 +1141,29 @@ class NPC:
  print(f"Warning: Jinx '{jinx_item}' not found for NPC '{self.name}' during initial load.")
 
  self.shared_context = {
+ # Data analysis (guac)
  "dataframes": {},
  "current_data": None,
  "computation_results": [],
- "memories":{}
+ "locals": {}, # Python exec locals for guac mode
+
+ # Memory
+ "memories": {},
+
+ # MCP tools (corca)
+ "mcp_client": None,
+ "mcp_tools": [],
+ "mcp_tool_map": {},
+
+ # Session tracking
+ "session_input_tokens": 0,
+ "session_output_tokens": 0,
+ "session_cost_usd": 0.0,
+ "turn_count": 0,
+
+ # Mode state
+ "current_mode": "agent",
+ "attachments": [],
  }
 
  for key, value in kwargs.items():
@@ -1715,35 +1942,41 @@ class NPC:
  jinja_env=self.jinja_env # Pass the NPC's second-pass Jinja env
  )
 
- if self.db_conn is not None:
- self.db_conn.add_jinx_call(
- triggering_message_id=message_id,
- conversation_id=conversation_id,
- jinx_name=jinx_name,
- jinx_inputs=inputs,
- jinx_output=result,
- status="success",
- error_message=None,
- duration_ms=None,
- npc_name=self.name,
- team_name=team_name,
- )
+ # Log jinx call if we have a command_history with add_jinx_call method
+ if self.command_history is not None and hasattr(self.command_history, 'add_jinx_call'):
+ try:
+ self.command_history.add_jinx_call(
+ triggering_message_id=message_id,
+ conversation_id=conversation_id,
+ jinx_name=jinx_name,
+ jinx_inputs=inputs,
+ jinx_output=result,
+ status="success",
+ error_message=None,
+ duration_ms=None,
+ npc_name=self.name,
+ team_name=team_name,
+ )
+ except Exception:
+ pass # Don't fail jinx execution due to logging error
  return result
  def check_llm_command(self,
- command,
+ command,
  messages=None,
  context=None,
  team=None,
- stream=False):
+ stream=False,
+ jinxs=None):
  """Check if a command is for the LLM"""
  if context is None:
  context = self.shared_context
-
+
  if team:
  self._current_team = team
-
- actions = get_npc_action_space(npc=self, team=team)
-
+
+ # Use provided jinxs or fall back to NPC's own jinxs
+ jinxs_to_use = jinxs if jinxs is not None else self.jinxs_dict
+
  return npy.llm_funcs.check_llm_command(
  command,
  model=self.model,
@@ -1753,7 +1986,7 @@ class NPC:
  messages=self.memory if messages is None else messages,
  context=context,
  stream=stream,
- actions=actions
+ jinxs=jinxs_to_use,
  )
 
  def handle_agent_pass(self,
@@ -2170,13 +2403,9 @@ class Team:
  if 'file_patterns' in ctx_data:
  file_cache = self._parse_file_patterns(ctx_data['file_patterns'])
  self.shared_context['files'] = file_cache
- if 'preferences' in ctx_data:
- self.preferences = ctx_data['preferences']
- else:
- self.preferences = []
-
+ # All other keys (including preferences) are treated as generic context
  for key, item in ctx_data.items():
- if key not in ['name', 'mcp_servers', 'databases', 'context', 'file_patterns', 'forenpc', 'model', 'provider', 'api_url', 'env', 'preferences']:
+ if key not in ['name', 'mcp_servers', 'databases', 'context', 'file_patterns', 'forenpc', 'model', 'provider', 'api_url', 'env']:
  self.shared_context[key] = item
  return # Only load the first .ctx file found
 
@@ -2356,146 +2585,98 @@ class Team:
  else:
  return None
 
- def orchestrate(self, request):
+ def orchestrate(self, request, max_iterations=3):
  """Orchestrate a request through the team"""
- forenpc = self.get_forenpc() # Now guaranteed to be an NPC object
+ import re
+ from termcolor import colored
+
+ forenpc = self.get_forenpc()
  if not forenpc:
  return {"error": "No forenpc available to coordinate the team"}
-
- log_entry(
- self.name,
- "orchestration_start",
- {"request": request}
- )
-
- result = forenpc.check_llm_command(request,
- context=getattr(self, 'context', {}),
- team = self,
- )
-
- while True:
- completion_prompt= ""
- if isinstance(result, dict):
- self.shared_context["execution_history"].append(result)
-
- if result.get("messages") and result.get("npc_name"):
- if result["npc_name"] not in self.shared_context["npc_messages"]:
- self.shared_context["npc_messages"][result["npc_name"]] = []
- self.shared_context["npc_messages"][result["npc_name"]].extend(
- result["messages"]
- )
-
- completion_prompt += f"""Context:
- User request '{request}', previous agent
-
- previous agent returned:
- {result.get('output')}
 
-
- Instructions:
+ print(colored(f"[orchestrate] Starting with forenpc={forenpc.name}, team={self.name}", "cyan"))
+ print(colored(f"[orchestrate] Request: {request[:100]}...", "cyan"))
 
- Check whether the response is relevant to the user's request.
+ # Filter out 'orchestrate' jinx to prevent infinite recursion
+ jinxs_for_orchestration = {k: v for k, v in forenpc.jinxs_dict.items() if k != 'orchestrate'}
 
- """
- if self.npcs is None or len(self.npcs) == 0:
- completion_prompt += f"""
- The team has no members, so the forenpc must handle the request alone.
- """
- else:
- completion_prompt += f"""
-
- These are all the members of the team: {', '.join(self.npcs.keys())}
-
- Therefore, if you are trying to evaluate whether a request was fulfilled relevantly,
- consider that requests are made to the forenpc: {forenpc.name}
- and that the forenpc must pass those along to the other npcs.
- """
- completion_prompt += f"""
-
- Mainly concern yourself with ensuring there are no
- glaring errors nor fundamental mishaps in the response.
- Do not consider stylistic hiccups as the answers being
- irrelevant. By providing responses back to for the user to
- comment on, they can can more efficiently iterate and resolve any issues by
- prompting more clearly.
- natural language itself is very fuzzy so there will always be some level
- of misunderstanding, but as long as the response is clearly relevant
- to the input request and along the user's intended direction,
- it is considered relevant.
-
-
- If there is enough information to begin a fruitful conversation with the user,
- please consider the request relevant so that we do not
- arbritarily stall business logic which is more efficiently
- determined by iterations than through unnecessary pedantry.
-
- It is more important to get a response to the user
- than to account for all edge cases, so as long as the response more or less tackles the
- initial problem to first order, consider it relevant.
-
- Return a JSON object with:
- -'relevant' with boolean value
- -'explanation' for irrelevance with quoted citations in your explanation noting why it is irrelevant to user input must be a single string.
- Return only the JSON object."""
-
- completion_check = npy.llm_funcs.get_llm_response(
- completion_prompt,
- model=forenpc.model,
- provider=forenpc.provider,
- api_key=forenpc.api_key,
- api_url=forenpc.api_url,
- npc=forenpc,
- format="json"
+ try:
+ result = forenpc.check_llm_command(
+ request,
+ context=getattr(self, 'context', {}),
+ team=self,
+ jinxs=jinxs_for_orchestration,
  )
-
- if isinstance(completion_check.get("response"), dict):
- complete = completion_check["response"].get("relevant", False)
- explanation = completion_check["response"].get("explanation", "")
- else:
- complete = False
- explanation = "Could not determine completion status"
-
- if complete:
- debrief = npy.llm_funcs.get_llm_response(
- f"""Context:
- Original request: {request}
- Execution history: {self.shared_context['execution_history']}
-
- Instructions:
- Provide summary of actions taken and recommendations.
- Return a JSON object with:
- - 'summary': Overview of what was accomplished
- - 'recommendations': Suggested next steps
- Return only the JSON object.""",
- model=forenpc.model,
- provider=forenpc.provider,
- api_key=forenpc.api_key,
- api_url=forenpc.api_url,
- npc=forenpc,
- format="json"
- )
-
- return {
- "debrief": debrief.get("response"),
- "output": result.get("output"),
- "execution_history": self.shared_context["execution_history"],
- }
- else:
- updated_request = (
- request
- + "\n\nThe request has not yet been fully completed. "
- + explanation
- + "\nPlease address only the remaining parts of the request."
- )
- print('updating request', updated_request)
-
- result = forenpc.check_llm_command(
- updated_request,
- context=getattr(self, 'context', {}),
- stream = False,
- team = self
-
- )
+ print(colored(f"[orchestrate] Initial result type={type(result)}", "cyan"))
+ if isinstance(result, dict):
+ print(colored(f"[orchestrate] Result keys={list(result.keys())}", "cyan"))
+ if 'error' in result:
+ print(colored(f"[orchestrate] Error in result: {result['error']}", "red"))
+ return result
+ except Exception as e:
+ print(colored(f"[orchestrate] Exception in check_llm_command: {e}", "red"))
+ return {"error": str(e), "output": f"Orchestration failed: {e}"}
+
+ # Check if forenpc mentioned other team members - if so, delegate to them
+ output = ""
+ if isinstance(result, dict):
+ output = result.get('output') or result.get('response') or ""
+
+ print(colored(f"[orchestrate] Output preview: {output[:200] if output else 'EMPTY'}...", "cyan"))
+
+ if output and self.npcs:
+ # Look for @npc_name mentions OR just npc names
+ at_pattern = r'@(\w+)'
+ mentions = re.findall(at_pattern, output)
+
+ # Also check for NPC names mentioned without @ (case insensitive)
+ if not mentions:
+ for npc_name in self.npcs.keys():
+ if npc_name.lower() != forenpc.name.lower():
+ if npc_name.lower() in output.lower():
+ mentions.append(npc_name)
+ break
+
+ print(colored(f"[orchestrate] Found mentions: {mentions}", "cyan"))
+
+ for mentioned in mentions:
+ mentioned_lower = mentioned.lower()
+ if mentioned_lower in self.npcs and mentioned_lower != forenpc.name:
+ target_npc = self.npcs[mentioned_lower]
+ print(colored(f"[orchestrate] Delegating to @{mentioned_lower}", "yellow"))
+
+ try:
+ # Execute the request with the target NPC (exclude orchestrate to prevent loops)
+ target_jinxs = {k: v for k, v in target_npc.jinxs_dict.items() if k != 'orchestrate'}
+ delegate_result = target_npc.check_llm_command(
+ request,
+ context=getattr(self, 'context', {}),
+ team=self,
+ jinxs=target_jinxs,
+ )
+
+ if isinstance(delegate_result, dict):
+ delegate_output = delegate_result.get('output') or delegate_result.get('response') or ""
+ if delegate_output:
+ output = f"[{mentioned_lower}]: {delegate_output}"
+ result = delegate_result
+ print(colored(f"[orchestrate] Got response from {mentioned_lower}", "green"))
+ except Exception as e:
+ print(colored(f"[orchestrate] Delegation to {mentioned_lower} failed: {e}", "red"))
+
+ break # Only delegate to first mentioned NPC
+
+ if isinstance(result, dict):
+ final_output = output if output else str(result)
+ return {
+ "output": final_output,
+ "result": result,
+ }
+ else:
+ return {
+ "output": str(result),
+ "result": result,
+ }
 
  def to_dict(self):
  """Convert team to dictionary representation"""