npcpy 1.2.37__tar.gz → 1.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {npcpy-1.2.37/npcpy.egg-info → npcpy-1.3.2}/PKG-INFO +3 -4
  2. {npcpy-1.2.37 → npcpy-1.3.2}/README.md +2 -3
  3. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/response.py +6 -5
  4. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/npc_compiler.py +208 -86
  5. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/serve.py +28 -13
  6. {npcpy-1.2.37 → npcpy-1.3.2/npcpy.egg-info}/PKG-INFO +3 -4
  7. {npcpy-1.2.37 → npcpy-1.3.2}/setup.py +1 -1
  8. {npcpy-1.2.37 → npcpy-1.3.2}/LICENSE +0 -0
  9. {npcpy-1.2.37 → npcpy-1.3.2}/MANIFEST.in +0 -0
  10. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/__init__.py +0 -0
  11. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/__init__.py +0 -0
  12. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/audio.py +0 -0
  13. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/data_models.py +0 -0
  14. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/image.py +0 -0
  15. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/load.py +0 -0
  16. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/text.py +0 -0
  17. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/video.py +0 -0
  18. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/data/web.py +0 -0
  19. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/__init__.py +0 -0
  20. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/diff.py +0 -0
  21. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/ge.py +0 -0
  22. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/memory_trainer.py +0 -0
  23. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/model_ensembler.py +0 -0
  24. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/rl.py +0 -0
  25. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/sft.py +0 -0
  26. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ft/usft.py +0 -0
  27. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/__init__.py +0 -0
  28. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/audio_gen.py +0 -0
  29. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/embeddings.py +0 -0
  30. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/image_gen.py +0 -0
  31. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/ocr.py +0 -0
  32. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/video_gen.py +0 -0
  33. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/llm_funcs.py +0 -0
  34. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/main.py +0 -0
  35. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/memory/__init__.py +0 -0
  36. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/memory/command_history.py +0 -0
  37. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/memory/kg_vis.py +0 -0
  38. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/memory/knowledge_graph.py +0 -0
  39. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/memory/memory_processor.py +0 -0
  40. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/memory/search.py +0 -0
  41. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/mix/__init__.py +0 -0
  42. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/mix/debate.py +0 -0
  43. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/ml_funcs.py +0 -0
  44. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/npc_array.py +0 -0
  45. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/npc_sysenv.py +0 -0
  46. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/npcs.py +0 -0
  47. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/sql/__init__.py +0 -0
  48. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/sql/ai_function_tools.py +0 -0
  49. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/sql/database_ai_adapters.py +0 -0
  50. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/sql/database_ai_functions.py +0 -0
  51. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/sql/model_runner.py +0 -0
  52. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/sql/npcsql.py +0 -0
  53. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/sql/sql_model_compiler.py +0 -0
  54. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/tools.py +0 -0
  55. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/work/__init__.py +0 -0
  56. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/work/desktop.py +0 -0
  57. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/work/plan.py +0 -0
  58. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy/work/trigger.py +0 -0
  59. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy.egg-info/SOURCES.txt +0 -0
  60. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy.egg-info/dependency_links.txt +0 -0
  61. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy.egg-info/requires.txt +0 -0
  62. {npcpy-1.2.37 → npcpy-1.3.2}/npcpy.egg-info/top_level.txt +0 -0
  63. {npcpy-1.2.37 → npcpy-1.3.2}/setup.cfg +0 -0
  64. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_audio.py +0 -0
  65. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_command_history.py +0 -0
  66. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_image.py +0 -0
  67. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_llm_funcs.py +0 -0
  68. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_load.py +0 -0
  69. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_npc_array.py +0 -0
  70. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_npc_compiler.py +0 -0
  71. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_npcsql.py +0 -0
  72. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_response.py +0 -0
  73. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_serve.py +0 -0
  74. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_text.py +0 -0
  75. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_tools.py +0 -0
  76. {npcpy-1.2.37 → npcpy-1.3.2}/tests/test_web.py +0 -0
{npcpy-1.2.37/npcpy.egg-info → npcpy-1.3.2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.2.37
+ Version: 1.3.2
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
@@ -275,9 +275,8 @@ output = context['research_summary']
  "code": '''
  # Access outputs from previous steps.
  research_summary = context['initial_llm_research']
- # The output of a declarative jinx call (like 'file_reader') is stored under its step name.
- # The actual content we want is the 'output' of the *last step* within that sub-jinx.
- file_summary = context['read_and_process_source_file'].get('output', 'No file summary available.')
+ # The file_reader jinx returns its output directly; also keep a fallback of file_raw_content.
+ file_summary = context.get('read_and_process_source_file', '') or context.get('file_raw_content', 'No file summary available.')

  prompt = f"""Based on the following information:
  1. Comprehensive Research Summary:
{npcpy-1.2.37 → npcpy-1.3.2}/README.md
@@ -179,9 +179,8 @@ output = context['research_summary']
  "code": '''
  # Access outputs from previous steps.
  research_summary = context['initial_llm_research']
- # The output of a declarative jinx call (like 'file_reader') is stored under its step name.
- # The actual content we want is the 'output' of the *last step* within that sub-jinx.
- file_summary = context['read_and_process_source_file'].get('output', 'No file summary available.')
+ # The file_reader jinx returns its output directly; also keep a fallback of file_raw_content.
+ file_summary = context.get('read_and_process_source_file', '') or context.get('file_raw_content', 'No file summary available.')

  prompt = f"""Based on the following information:
  1. Comprehensive Research Summary:
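Note: the replacement line in this hunk relies on Python's falsy chaining with dict.get. If the first key is missing or empty, evaluation falls through to the second key and finally to the literal default. A minimal, self-contained sketch of that behavior; the context contents below are illustrative, not the real jinx context:

context = {"file_raw_content": "raw text from disk"}

# Missing or empty first key falls through to the second lookup, then to the default.
file_summary = (
    context.get("read_and_process_source_file", "")
    or context.get("file_raw_content", "No file summary available.")
)
print(file_summary)  # -> "raw text from disk"

# Caveat of the pattern: a legitimately falsy first value ("" or None) is also skipped.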
{npcpy-1.2.37 → npcpy-1.3.2}/npcpy/gen/response.py
@@ -346,7 +346,12 @@ def get_ollama_response(
  res = ollama.chat(**api_params, options=options)
  result["raw_response"] = res

- # Extract usage from ollama response
+ if stream:
+ # Return immediately for streaming - don't check 'in' on generator as it consumes it
+ result["response"] = res
+ return result
+
+ # Extract usage from ollama response (only for non-streaming)
  if hasattr(res, 'prompt_eval_count') or 'prompt_eval_count' in res:
  input_tokens = getattr(res, 'prompt_eval_count', None) or res.get('prompt_eval_count', 0) or 0
  output_tokens = getattr(res, 'eval_count', None) or res.get('eval_count', 0) or 0
@@ -354,10 +359,6 @@ def get_ollama_response(
  "input_tokens": input_tokens,
  "output_tokens": output_tokens,
  }
-
- if stream:
- result["response"] = res
- return result
  else:

  message = res.get("message", {})
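Note: the reordering above matters because a membership test such as 'prompt_eval_count' in res iterates a streaming generator before the caller ever sees it. A minimal sketch of the hazard and the early-return fix, using a stand-in generator rather than the actual ollama client:

def fake_stream():
    for token in ("Hel", "lo"):
        yield {"message": {"content": token}}

def get_response(stream=False):
    res = fake_stream()
    result = {"raw_response": res}
    if stream:
        # Hand the untouched generator back before any inspection;
        # "something" in res would consume chunks the caller never receives.
        result["response"] = res
        return result
    # Non-streaming: the response is a finished object, safe to inspect here.
    result["response"] = res
    return result

chunks = [c["message"]["content"] for c in get_response(stream=True)["response"]]
print("".join(chunks))  # -> "Hello"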
{npcpy-1.2.37 → npcpy-1.3.2}/npcpy/npc_compiler.py
@@ -384,7 +384,6 @@ def write_yaml_file(file_path, data):
  print(f"Error writing YAML file {file_path}: {e}")
  return False

-
  class Jinx:
  '''
  Jinx represents a workflow template with Jinja-rendered steps.
@@ -394,7 +393,8 @@ class Jinx:
  - inputs: list of input parameters
  - description: what the jinx does
  - npc: optional NPC to execute with
- - steps: list of step definitions with code
+ - steps: list of step definitions with code. This section can now be a Jinja template itself.
+ - file_context: optional list of file patterns to include as context

  Execution:
  - Renders Jinja templates in step code with input values
@@ -409,9 +409,13 @@ class Jinx:
  else:
  raise ValueError("Either jinx_data or jinx_path must be provided")

- # Keep a copy for macro expansion, but retain the executable steps by default
+ # _raw_steps will now hold the original, potentially templated, steps definition
  self._raw_steps = list(self.steps)
- self.steps = list(self._raw_steps)
+ self.steps = [] # Will be populated after first-pass rendering
+ self.parsed_files = {}
+ if self.file_context:
+ self.parsed_files = self._parse_file_patterns(self.file_context)
+
  def _load_from_file(self, path):
  jinx_data = load_yaml_file(path)
  if not jinx_data:
@@ -431,7 +435,8 @@ class Jinx:
  self.inputs = jinx_data.get("inputs", [])
  self.description = jinx_data.get("description", "")
  self.npc = jinx_data.get("npc")
- self.steps = jinx_data.get("steps", [])
+ self.steps = jinx_data.get("steps", []) # This can now be a Jinja templated list
+ self.file_context = jinx_data.get("file_context", [])
  self._source_path = jinx_data.get("_source_path", None)

  def to_tool_def(self) -> Dict[str, Any]:
@@ -463,27 +468,62 @@ class Jinx:
  }

  def render_first_pass(
- self,
- jinja_env_for_macros: Environment,
+ self,
+ jinja_env_for_macros: Environment,
  all_jinx_callables: Dict[str, Callable]
  ):
  """
  Performs the first-pass Jinja rendering on the Jinx's raw steps.
- This expands nested Jinx calls (e.g., {{ sh(...) }} or
- engine: jinx_name) but preserves runtime variables
- (e.g., {{ command_var }}).
-
- Args:
- jinja_env_for_macros: The Jinja Environment configured with
- Jinx callables in its globals.
- all_jinx_callables: A dictionary of Jinx names to their
- callable functions (from create_jinx_callable).
+ This expands Jinja control flow (for, if) to generate step structures,
+ then expands nested Jinx calls (e.g., {{ sh(...) }} or engine: jinx_name)
+ and inline macros.
  """
- rendered_steps_output = []
+ # Check if steps are already parsed dicts (common case when loaded from YAML)
+ # If so, skip the YAML string join/parse cycle and use them directly
+ if self._raw_steps and isinstance(self._raw_steps[0], dict):
+ structurally_expanded_steps = list(self._raw_steps)
+ else:
+ # 1. Join the list of raw steps (which are individual YAML lines) into a single string.
+ # This single string is the complete Jinja template for the 'steps' section.
+ raw_steps_template_string = "\n".join(self._raw_steps)

- for raw_step in self._raw_steps:
+ # 2. Render this single string as a Jinja template.
+ # Jinja will now process the {% for %} and {% if %} directives,
+ # dynamically generating the YAML structure.
+ try:
+ steps_template = jinja_env_for_macros.from_string(raw_steps_template_string)
+ # Pass globals (like num_tasks, include_greeting from Jinx inputs)
+ # to the Jinja rendering context for structural templating.
+ rendered_steps_yaml_string = steps_template.render(**jinja_env_for_macros.globals)
+ except Exception as e:
+ # In a real Jinx, this would go to a proper logger.
+ # For this context, we handle the error gracefully.
+ # self._log_debug(f"Warning: Error during first-pass templating of Jinx '{self.jinx_name}' steps YAML: {e}")
+ self.steps = list(self._raw_steps) # Fallback to original raw steps
+ return
+
+ # 3. Parse the rendered YAML string back into a list of step dictionaries.
+ # This step will now correctly interpret the YAML structure generated by Jinja.
+ try:
+ structurally_expanded_steps = yaml.safe_load(rendered_steps_yaml_string)
+ if not isinstance(structurally_expanded_steps, list):
+ # Handle cases where the rendered YAML might be empty or not a list
+ if structurally_expanded_steps is None:
+ structurally_expanded_steps = []
+ else:
+ raise ValueError(f"Rendered steps YAML did not result in a list: {type(structurally_expanded_steps)}")
+ except Exception as e:
+ # self._log_debug(f"Warning: Error re-parsing structurally expanded steps YAML for Jinx '{self.jinx_name}': {e}")
+ self.steps = list(self._raw_steps) # Fallback
+ return
+
+ # 4. Now, iterate through these `structurally_expanded_steps` to expand
+ # declarative Jinx calls (engine: jinx_name) and inline macros.
+ # This is the second phase of the first-pass rendering.
+ final_rendered_steps = []
+ for raw_step in structurally_expanded_steps:
  if not isinstance(raw_step, dict):
- rendered_steps_output.append(raw_step)
+ final_rendered_steps.append(raw_step)
  continue

  engine_name = raw_step.get('engine')
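Note: to see the shape of this first pass in isolation, the pattern is a Jinja template that emits YAML, rendered with the jinx inputs, then parsed back into step dictionaries. A minimal sketch assuming jinja2 and PyYAML are installed; the step names and the num_tasks variable are illustrative, not taken from an actual jinx:

import yaml
from jinja2 import Environment

steps_template_string = """
{% for i in range(num_tasks) %}
- name: task_{{ i }}
  engine: python
  code: "output = {{ i }} * 2"
{% endfor %}
"""

env = Environment()
env.globals["num_tasks"] = 3

# Pass 1: Jinja expands the control flow into concrete YAML text...
rendered = env.from_string(steps_template_string).render(**env.globals)

# ...which is then parsed back into a list of step dictionaries.
steps = yaml.safe_load(rendered)
print([s["name"] for s in steps])  # -> ['task_0', 'task_1', 'task_2']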
@@ -502,51 +542,58 @@ class Jinx:
  expanded_steps = yaml.safe_load(expanded_yaml_string)

  if isinstance(expanded_steps, list):
- rendered_steps_output.extend(expanded_steps)
-
+ final_rendered_steps.extend(expanded_steps)
  elif expanded_steps is not None:
- rendered_steps_output.append(expanded_steps)
-
+ final_rendered_steps.append(expanded_steps)
  except Exception as e:
- print(
- f"Warning: Error expanding Jinx '{engine_name}' "
- f"within Jinx '{self.jinx_name}' "
- f"(declarative): {e}"
- )
- rendered_steps_output.append(raw_step)
- # Skip rendering for python/bash engine steps - preserve runtime variables
+ # self._log_debug(
+ # f"Warning: Error expanding Jinx '{engine_name}' "
+ # f"within Jinx '{self.jinx_name}' "
+ # f"(declarative): {e}"
+ # )
+ final_rendered_steps.append(raw_step)
+ # For python/bash engine steps, only inline macro expansion happens in the next block.
+ # The code content itself is preserved for runtime Jinja rendering.
  elif raw_step.get('engine') in ['python', 'bash']:
- rendered_steps_output.append(raw_step)
+ processed_step = {}
+ for key, value in raw_step.items():
+ if isinstance(value, str):
+ try:
+ template = jinja_env_for_macros.from_string(value)
+ # Render with empty context for inline macros/static values
+ rendered_value = template.render({})
+ try:
+ loaded_value = yaml.safe_load(rendered_value)
+ processed_step[key] = loaded_value
+ except yaml.YAMLError:
+ processed_step[key] = rendered_value
+ except Exception as e:
+ # self._log_debug(f"Warning: Error during first-pass rendering of Jinx '{self.jinx_name}' step field '{key}' (inline macro): {e}")
+ processed_step[key] = value
+ else:
+ processed_step[key] = value
+ final_rendered_steps.append(processed_step)
  else:
- # For other steps, do first-pass rendering (inline macro expansion)
+ # For other steps (e.g., custom engines, or just data), perform inline macro expansion
  processed_step = {}
  for key, value in raw_step.items():
  if isinstance(value, str):
  try:
- template = jinja_env_for_macros.from_string(
- value
- )
+ template = jinja_env_for_macros.from_string(value)
  rendered_value = template.render({})
-
  try:
- loaded_value = yaml.safe_load(
- rendered_value
- )
+ loaded_value = yaml.safe_load(rendered_value)
  processed_step[key] = loaded_value
  except yaml.YAMLError:
  processed_step[key] = rendered_value
  except Exception as e:
- print(
- f"Warning: Error during first-pass "
- f"rendering of Jinx '{self.jinx_name}' "
- f"step field '{key}' (inline macro): {e}"
- )
+ # self._log_debug(f"Warning: Error during first-pass rendering of Jinx '{self.jinx_name}' step field '{key}' (inline macro): {e}")
  processed_step[key] = value
  else:
  processed_step[key] = value
- rendered_steps_output.append(processed_step)
+ final_rendered_steps.append(processed_step)

- self.steps = rendered_steps_output
+ self.steps = final_rendered_steps

  def execute(self,
  input_values: Dict[str, Any],
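Note: the per-field handling in this hunk renders each string field once with an empty context and then tries yaml.safe_load on the result, so static macro output is coerced back into native YAML types while plain strings pass through unchanged. A small sketch of that coercion, with made-up field values:

import yaml
from jinja2 import Environment

env = Environment()
env.globals["default_timeout"] = 30  # stand-in for an inline macro value

for raw in ("{{ default_timeout }}", "true", "just a plain sentence"):
    rendered = env.from_string(raw).render({})
    try:
        value = yaml.safe_load(rendered)  # "30" -> 30, "true" -> True
    except yaml.YAMLError:
        value = rendered  # keep strings that are not valid YAML verbatim
    print(repr(value))
# -> 30, True, 'just a plain sentence'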
@@ -575,6 +622,11 @@ class Jinx:
  "messages": messages,
  "npc": active_npc
  })
+
+ # Add parsed file content to the context
+ if self.parsed_files:
+ context['file_context'] = self._format_parsed_files_context(self.parsed_files)
+ context['files'] = self.parsed_files # Also make raw dict available

  for i, step in enumerate(self.steps):
  context = self._execute_step(
@@ -585,9 +637,19 @@ class Jinx:
  messages=messages,
  extra_globals=extra_globals
  )
+ # If an error occurred in a step, propagate it and stop execution
+ if "error" in context.get("output", ""):
+ self._log_debug(f"DEBUG: Jinx '{self.jinx_name}' execution stopped due to error in step '{step.get('name', 'unnamed_step')}': {context['output']}")
+ break

  return context

+ def _log_debug(self, msg: str):
+ """Helper for logging debug messages to a file."""
+ log_file_path = os.path.expanduser("~/jinx_debug_log.txt")
+ with open(log_file_path, "a") as f:
+ f.write(f"[{datetime.now().isoformat()}] {msg}\n")
+
  def _execute_step(self,
  step: Dict[str, Any],
  context: Dict[str, Any],
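Note: the new break above amounts to a fail-fast step loop: every step writes into a shared context, and the driver stops as soon as a step reports an error in its output. A toy sketch of that control flow; the step functions are placeholders, not jinx engines:

def step_ok(context):
    context["output"] = "fine"
    return context

def step_fails(context):
    context["output"] = "error executing step 'step_fails': ValueError: boom"
    return context

def step_never_runs(context):
    context["output"] = "should not appear"
    return context

context = {}
for step in (step_ok, step_fails, step_never_runs):
    context = step(context)
    # Same substring check as the hunk above: stop on the first reported error.
    if "error" in context.get("output", ""):
        break

print(context["output"])  # -> the error message from step_fails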
@@ -596,40 +658,31 @@ class Jinx:
  messages: Optional[List[Dict[str, str]]] = None,
  extra_globals: Optional[Dict[str, Any]] = None):

- def _log_debug(msg):
- log_file_path = os.path.expanduser("~/jinx_debug_log.txt")
- with open(log_file_path, "a") as f:
- f.write(f"[{datetime.now().isoformat()}] {msg}\n")
-
  code_content = step.get("code", "")
  step_name = step.get("name", "unnamed_step")
  step_npc = step.get("npc")

  active_npc = step_npc if step_npc else npc

+ # Second pass Jinja rendering: render the step's code with the current runtime context
  try:
  template = jinja_env.from_string(code_content)
  rendered_code = template.render(**context)
  except Exception as e:
- _log_debug(
- f"Error rendering template for step {step_name} "
- f"(second pass): {e}"
+ error_msg = (
+ f"Error rendering template for step '{step_name}' "
+ f"(second pass): {type(e).__name__}: {e}"
  )
- rendered_code = code_content
+ context['output'] = error_msg
+ self._log_debug(error_msg)
+ return context

- _log_debug(f"rendered jinx code: {rendered_code}")
- _log_debug(
- f"DEBUG: Before exec - rendered_code: {rendered_code}"
- )
- _log_debug(
- f"DEBUG: Before exec - context['output'] before step: "
- f"{context.get('output')}"
- )
+ self._log_debug(f"DEBUG: Executing step '{step_name}' with rendered code: {rendered_code}")

  exec_globals = {
  "__builtins__": __builtins__,
  "npc": active_npc,
- "context": context,
+ "context": context, # Pass context by reference
  "math": math,
  "random": random,
  "datetime": datetime,
@@ -653,43 +706,27 @@ class Jinx:
  if extra_globals:
  exec_globals.update(extra_globals)

- exec_locals = {}
-
+ exec_locals = {} # Locals for this specific exec call
+
  try:
  exec(rendered_code, exec_globals, exec_locals)
  except Exception as e:
  error_msg = (
- f"Error executing step {step_name}: "
+ f"Error executing step '{step_name}': "
  f"{type(e).__name__}: {e}"
  )
  context['output'] = error_msg
- _log_debug(error_msg)
+ self._log_debug(error_msg)
  return context

- _log_debug(f"DEBUG: After exec - exec_locals: {exec_locals}")
- _log_debug(
- f"DEBUG: After exec - 'output' in exec_locals: "
- f"{'output' in exec_locals}"
- )
-
+ # Update the main context with any variables set in exec_locals
  context.update(exec_locals)

- _log_debug(
- f"DEBUG: After context.update(exec_locals) - "
- f"context['output']: {context.get('output')}"
- )
- _log_debug(f"context after jinx ex: {context}")
-
  if "output" in exec_locals:
  outp = exec_locals["output"]
  context["output"] = outp
  context[step_name] = outp

- _log_debug(
- f"DEBUG: Inside 'output' in exec_locals block - "
- f"context['output']: {context.get('output')}"
- )
-
  if messages is not None:
  messages.append({
  'role':'assistant',
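Note: taken together, _execute_step is a render-then-exec pipeline: the step's code is Jinja-rendered against the live context, executed with exec(), and whatever the code assigns to output is copied back into the context under both 'output' and the step name. A compact, self-contained sketch of that pipeline; the step definition and context keys are illustrative:

from jinja2 import Environment

env = Environment()
context = {"topic": "owls", "output": None}

step = {
    "name": "summarize",
    "code": 'output = "3 facts about {{ topic }}"',
}

# Second pass: resolve runtime variables like {{ topic }} against the context.
rendered_code = env.from_string(step["code"]).render(**context)

exec_globals = {"__builtins__": __builtins__, "context": context}
exec_locals = {}
exec(rendered_code, exec_globals, exec_locals)

# Promote the step's result into the shared context.
context.update(exec_locals)
if "output" in exec_locals:
    context["output"] = exec_locals["output"]
    context[step["name"]] = exec_locals["output"]

print(context["summarize"])  # -> "3 facts about owls"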
@@ -702,13 +739,95 @@ class Jinx:

  return context

+ def _parse_file_patterns(self, patterns_config):
+ """Parse file patterns configuration and load matching files into KV cache"""
+ if not patterns_config:
+ return {}
+
+ file_cache = {}
+
+ for pattern_entry in patterns_config:
+ if isinstance(pattern_entry, str):
+ pattern_entry = {"pattern": pattern_entry}
+
+ pattern = pattern_entry.get("pattern", "")
+ recursive = pattern_entry.get("recursive", False)
+ base_path = pattern_entry.get("base_path", ".")
+
+ if not pattern:
+ continue
+
+ # Resolve base_path relative to jinx's source path or current working directory
+ if self._source_path:
+ base_path = os.path.join(os.path.dirname(self._source_path), base_path)
+ base_path = os.path.expanduser(base_path)
+
+ if not os.path.isabs(base_path):
+ base_path = os.path.join(os.getcwd(), base_path)
+
+ matching_files = self._find_matching_files(pattern, base_path, recursive)
+
+ for file_path in matching_files:
+ file_content = self._load_file_content(file_path)
+ if file_content:
+ relative_path = os.path.relpath(file_path, base_path)
+ file_cache[relative_path] = file_content
+
+ return file_cache
+
+ def _find_matching_files(self, pattern, base_path, recursive=False):
+ """Find files matching the given pattern"""
+ matching_files = []
+
+ if not os.path.exists(base_path):
+ return matching_files
+
+ if recursive:
+ for root, dirs, files in os.walk(base_path):
+ for filename in files:
+ if fnmatch.fnmatch(filename, pattern):
+ matching_files.append(os.path.join(root, filename))
+ else:
+ try:
+ for item in os.listdir(base_path):
+ item_path = os.path.join(base_path, item)
+ if os.path.isfile(item_path) and fnmatch.fnmatch(item, pattern):
+ matching_files.append(item_path)
+ except PermissionError:
+ print(f"Permission denied accessing {base_path}")
+
+ return matching_files
+
+ def _load_file_content(self, file_path):
+ """Load content from a file with error handling"""
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ return f.read()
+ except Exception as e:
+ print(f"Error reading {file_path}: {e}")
+ return None
+
+ def _format_parsed_files_context(self, parsed_files):
+ """Format parsed files into context string"""
+ if not parsed_files:
+ return ""
+
+ context_parts = ["Additional context from files:"]
+
+ for file_path, content in parsed_files.items():
+ context_parts.append(f"\n--- {file_path} ---")
+ context_parts.append(content)
+ context_parts.append("")
+
+ return "\n".join(context_parts)

  def to_dict(self):
  result = {
  "jinx_name": self.jinx_name,
  "description": self.description,
  "inputs": self.inputs,
- "steps": self._raw_steps
+ "steps": self._raw_steps, # Save the original raw steps, which might be templated
+ "file_context": self.file_context
  }

  if self.npc:
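Note: the new file_context machinery above boils down to glob-style matching with fnmatch plus reading every match into a dict keyed by relative path. A minimal sketch of that flow under a hypothetical base_path and pattern; it mirrors the helper structure rather than reproducing the class code:

import fnmatch
import os

def collect_files(pattern, base_path=".", recursive=False):
    """Return {relative_path: content} for files matching a glob-style pattern."""
    matches = []
    if recursive:
        for root, _dirs, files in os.walk(base_path):
            matches += [os.path.join(root, f) for f in files if fnmatch.fnmatch(f, pattern)]
    else:
        matches = [
            os.path.join(base_path, f)
            for f in os.listdir(base_path)
            if os.path.isfile(os.path.join(base_path, f)) and fnmatch.fnmatch(f, pattern)
        ]
    cache = {}
    for path in matches:
        try:
            with open(path, "r", encoding="utf-8") as fh:
                cache[os.path.relpath(path, base_path)] = fh.read()
        except OSError as e:
            print(f"Error reading {path}: {e}")
    return cache

# e.g. every Markdown file directly under the current directory:
print(list(collect_files("*.md")))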
@@ -754,6 +873,7 @@ class Jinx:
  "jinx_name": name,
  "description": doc.strip(),
  "inputs": inputs,
+ "file_context": [],
  "steps": [
  {
  "name": "mcp_function_call",
@@ -775,6 +895,8 @@ output = {mcp_tool.__module__}.{name}(
  except:
  pass

+
+
  def load_jinxs_from_directory(directory):
  """Load all jinxs from a directory recursively"""
  jinxs = []
{npcpy-1.2.37 → npcpy-1.3.2}/npcpy/serve.py
@@ -3901,26 +3901,41 @@ def stream():
  print('.', end="", flush=True)
  dot_count += 1
  if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
- chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
- if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
- for tool_call in response_chunk["message"]["tool_calls"]:
- if "id" in tool_call:
- tool_call_data["id"] = tool_call["id"]
- if "function" in tool_call:
- if "name" in tool_call["function"]:
- tool_call_data["function_name"] = tool_call["function"]["name"]
- if "arguments" in tool_call["function"]:
- arg_val = tool_call["function"]["arguments"]
+ # Ollama returns ChatResponse objects - support both attribute and dict access
+ msg = getattr(response_chunk, "message", None) or response_chunk.get("message", {}) if hasattr(response_chunk, "get") else {}
+ chunk_content = getattr(msg, "content", None) or (msg.get("content") if hasattr(msg, "get") else "") or ""
+ # Extract Ollama thinking/reasoning tokens
+ thinking_content = getattr(msg, "thinking", None) or (msg.get("thinking") if hasattr(msg, "get") else None)
+ # Handle tool calls with robust attribute/dict access
+ tool_calls = getattr(msg, "tool_calls", None) or (msg.get("tool_calls") if hasattr(msg, "get") else None)
+ if tool_calls:
+ for tool_call in tool_calls:
+ tc_id = getattr(tool_call, "id", None) or (tool_call.get("id") if hasattr(tool_call, "get") else None)
+ if tc_id:
+ tool_call_data["id"] = tc_id
+ tc_func = getattr(tool_call, "function", None) or (tool_call.get("function") if hasattr(tool_call, "get") else None)
+ if tc_func:
+ tc_name = getattr(tc_func, "name", None) or (tc_func.get("name") if hasattr(tc_func, "get") else None)
+ if tc_name:
+ tool_call_data["function_name"] = tc_name
+ tc_args = getattr(tc_func, "arguments", None) or (tc_func.get("arguments") if hasattr(tc_func, "get") else None)
+ if tc_args:
+ arg_val = tc_args
  if isinstance(arg_val, dict):
  arg_val = json.dumps(arg_val)
  tool_call_data["arguments"] += arg_val
  if chunk_content:
  complete_response.append(chunk_content)
+ # Extract other fields with robust access
+ created_at = getattr(response_chunk, "created_at", None) or (response_chunk.get("created_at") if hasattr(response_chunk, "get") else None)
+ model_name = getattr(response_chunk, "model", None) or (response_chunk.get("model") if hasattr(response_chunk, "get") else model)
+ msg_role = getattr(msg, "role", None) or (msg.get("role") if hasattr(msg, "get") else "assistant")
+ done_reason = getattr(response_chunk, "done_reason", None) or (response_chunk.get("done_reason") if hasattr(response_chunk, "get") else None)
  chunk_data = {
  "id": None, "object": None,
- "created": response_chunk["created_at"] or datetime.datetime.now(),
- "model": response_chunk["model"],
- "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
+ "created": created_at or datetime.datetime.now(),
+ "model": model_name,
+ "choices": [{"index": 0, "delta": {"content": chunk_content, "role": msg_role, "reasoning_content": thinking_content}, "finish_reason": done_reason}]
  }
  yield f"data: {json.dumps(chunk_data)}\n\n"
  else:
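Note: the repeated getattr(...) or (obj.get(...) if hasattr(obj, "get") else default) idiom above exists because streamed chunks may arrive either as typed response objects or as plain dicts. A distilled sketch of that dual-access pattern; the _field helper and the sample chunk types are illustrative, not part of npcpy:

from types import SimpleNamespace

def _field(obj, name, default=None):
    """Read a field from either an attribute-style object or a dict."""
    value = getattr(obj, name, None)
    if value is None and hasattr(obj, "get"):
        value = obj.get(name, default)
    return value if value is not None else default

dict_chunk = {"message": {"content": "hi", "role": "assistant"}}
obj_chunk = SimpleNamespace(message=SimpleNamespace(content="hi", role="assistant"))

for chunk in (dict_chunk, obj_chunk):
    msg = _field(chunk, "message", {})
    print(_field(msg, "content", ""), _field(msg, "role", "assistant"))
# Both chunks print: hi assistant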
{npcpy-1.2.37 → npcpy-1.3.2/npcpy.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.2.37
+ Version: 1.3.2
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
@@ -275,9 +275,8 @@ output = context['research_summary']
  "code": '''
  # Access outputs from previous steps.
  research_summary = context['initial_llm_research']
- # The output of a declarative jinx call (like 'file_reader') is stored under its step name.
- # The actual content we want is the 'output' of the *last step* within that sub-jinx.
- file_summary = context['read_and_process_source_file'].get('output', 'No file summary available.')
+ # The file_reader jinx returns its output directly; also keep a fallback of file_raw_content.
+ file_summary = context.get('read_and_process_source_file', '') or context.get('file_raw_content', 'No file summary available.')

  prompt = f"""Based on the following information:
  1. Comprehensive Research Summary:
{npcpy-1.2.37 → npcpy-1.3.2}/setup.py
@@ -83,7 +83,7 @@ extra_files = package_files("npcpy/npc_team/")

  setup(
  name="npcpy",
- version="1.2.37",
+ version="1.3.2",
  packages=find_packages(exclude=["tests*"]),
  install_requires=base_requirements,
  extras_require={