npcpy 1.2.23__tar.gz → 1.2.25__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. {npcpy-1.2.23/npcpy.egg-info → npcpy-1.2.25}/PKG-INFO +1 -1
  2. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/npc_compiler.py +38 -23
  3. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/npc_sysenv.py +152 -24
  4. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/serve.py +411 -2
  5. {npcpy-1.2.23 → npcpy-1.2.25/npcpy.egg-info}/PKG-INFO +1 -1
  6. {npcpy-1.2.23 → npcpy-1.2.25}/setup.py +1 -1
  7. {npcpy-1.2.23 → npcpy-1.2.25}/LICENSE +0 -0
  8. {npcpy-1.2.23 → npcpy-1.2.25}/MANIFEST.in +0 -0
  9. {npcpy-1.2.23 → npcpy-1.2.25}/README.md +0 -0
  10. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/__init__.py +0 -0
  11. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/__init__.py +0 -0
  12. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/audio.py +0 -0
  13. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/data_models.py +0 -0
  14. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/image.py +0 -0
  15. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/load.py +0 -0
  16. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/text.py +0 -0
  17. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/video.py +0 -0
  18. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/data/web.py +0 -0
  19. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/__init__.py +0 -0
  20. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/diff.py +0 -0
  21. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/ge.py +0 -0
  22. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/memory_trainer.py +0 -0
  23. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/model_ensembler.py +0 -0
  24. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/rl.py +0 -0
  25. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/sft.py +0 -0
  26. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/ft/usft.py +0 -0
  27. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/gen/__init__.py +0 -0
  28. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/gen/audio_gen.py +0 -0
  29. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/gen/embeddings.py +0 -0
  30. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/gen/image_gen.py +0 -0
  31. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/gen/response.py +0 -0
  32. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/gen/video_gen.py +0 -0
  33. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/llm_funcs.py +0 -0
  34. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/main.py +0 -0
  35. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/memory/__init__.py +0 -0
  36. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/memory/command_history.py +0 -0
  37. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/memory/kg_vis.py +0 -0
  38. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/memory/knowledge_graph.py +0 -0
  39. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/memory/memory_processor.py +0 -0
  40. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/memory/search.py +0 -0
  41. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/mix/__init__.py +0 -0
  42. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/mix/debate.py +0 -0
  43. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/npcs.py +0 -0
  44. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/sql/__init__.py +0 -0
  45. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/sql/ai_function_tools.py +0 -0
  46. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/sql/database_ai_adapters.py +0 -0
  47. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/sql/database_ai_functions.py +0 -0
  48. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/sql/model_runner.py +0 -0
  49. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/sql/npcsql.py +0 -0
  50. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/sql/sql_model_compiler.py +0 -0
  51. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/tools.py +0 -0
  52. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/work/__init__.py +0 -0
  53. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/work/desktop.py +0 -0
  54. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/work/plan.py +0 -0
  55. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy/work/trigger.py +0 -0
  56. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy.egg-info/SOURCES.txt +0 -0
  57. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy.egg-info/dependency_links.txt +0 -0
  58. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy.egg-info/requires.txt +0 -0
  59. {npcpy-1.2.23 → npcpy-1.2.25}/npcpy.egg-info/top_level.txt +0 -0
  60. {npcpy-1.2.23 → npcpy-1.2.25}/setup.cfg +0 -0
  61. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_audio.py +0 -0
  62. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_command_history.py +0 -0
  63. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_image.py +0 -0
  64. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_llm_funcs.py +0 -0
  65. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_load.py +0 -0
  66. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_npc_compiler.py +0 -0
  67. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_npcsql.py +0 -0
  68. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_response.py +0 -0
  69. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_serve.py +0 -0
  70. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_text.py +0 -0
  71. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_tools.py +0 -0
  72. {npcpy-1.2.23 → npcpy-1.2.25}/tests/test_web.py +0 -0
{npcpy-1.2.23/npcpy.egg-info → npcpy-1.2.25}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.2.23
+ Version: 1.2.25
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
{npcpy-1.2.23 → npcpy-1.2.25}/npcpy/npc_compiler.py
@@ -563,12 +563,10 @@ def get_npc_action_space(npc=None, team=None):
      }
  
      return actions
- 
- 
  def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
      inputs = {}
  
- 
+     # Create flag mapping for inputs
      flag_mapping = {}
      for input_ in jinx.inputs:
          if isinstance(input_, str):
@@ -579,46 +577,63 @@ def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
              flag_mapping[f"-{key[0]}"] = key
              flag_mapping[f"--{key}"] = key
  
- 
+     # Parse key=value pairs first
      used_args = set()
      for i, arg in enumerate(args):
-         if arg in flag_mapping:
+         if '=' in arg and not arg.startswith('-'):
+             key, value = arg.split('=', 1)
+             key = key.strip().strip("'\"")
+             value = value.strip().strip("'\"")
+             inputs[key] = value
+             used_args.add(i)
+ 
+     # Parse flags
+     for i, arg in enumerate(args):
+         if i in used_args:
+             continue
  
-             if i + 1 < len(args):
+         if arg in flag_mapping:
+             if i + 1 < len(args) and not args[i + 1].startswith('-'):
                  input_name = flag_mapping[arg]
                  inputs[input_name] = args[i + 1]
                  used_args.add(i)
                  used_args.add(i + 1)
              else:
-                 print(f"Warning: {arg} flag is missing a value.")
+                 # Boolean flag
+                 input_name = flag_mapping[arg]
+                 inputs[input_name] = True
+                 used_args.add(i)
  
- 
+     # Handle remaining positional arguments
      unused_args = [arg for i, arg in enumerate(args) if i not in used_args]
-     if unused_args and jinx.inputs:
-         first_input = jinx.inputs[0]
-         if isinstance(first_input, str):
-             inputs[first_input] = " ".join(unused_args)
-         elif isinstance(first_input, dict):
-             key = list(first_input.keys())[0]
-             inputs[key] = " ".join(unused_args)
- 
  
+     # Map positional args to jinx inputs in order
+     jinx_input_names = []
+     for input_ in jinx.inputs:
+         if isinstance(input_, str):
+             jinx_input_names.append(input_)
+         elif isinstance(input_, dict):
+             jinx_input_names.append(list(input_.keys())[0])
+ 
+     for i, arg in enumerate(unused_args):
+         if i < len(jinx_input_names):
+             input_name = jinx_input_names[i]
+             if input_name not in inputs:  # Don't overwrite existing values
+                 inputs[input_name] = arg
+ 
+     # Set default values for missing inputs
      for input_ in jinx.inputs:
          if isinstance(input_, str):
              if input_ not in inputs:
-                 if any(args):
-                     raise ValueError(f"Missing required input: {input_}")
-                 else:
-                     inputs[input_] = None
+                 raise ValueError(f"Missing required input: {input_}")
          elif isinstance(input_, dict):
              key = list(input_.keys())[0]
+             default_value = input_[key]
              if key not in inputs:
-                 inputs[key] = input_[key]
+                 inputs[key] = default_value
  
      return inputs
  
- 
- 
  from npcpy.memory.command_history import load_kg_from_db, save_kg_to_db
  from npcpy.memory.knowledge_graph import kg_initial, kg_evolve_incremental, kg_sleep_process, kg_dream_process
  from npcpy.llm_funcs import get_llm_response, breathe
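
Taken together, the rewrite means a jinx call can now mix key=value pairs, long/short flags, boolean flags, and positional arguments, with dict-style inputs supplying defaults. A minimal sketch of the expected behavior, assuming a hypothetical jinx declared with inputs ['query', {'limit': '10'}]:

    # hypothetical jinx with inputs: ['query', {'limit': '10'}]
    extract_jinx_inputs(["query=weather today"], jinx)
    # -> {'query': 'weather today', 'limit': '10'}   (default fills the gap)
    extract_jinx_inputs(["--query", "weather", "-l", "5"], jinx)
    # -> {'query': 'weather', 'limit': '5'}          (long and short flags)
    extract_jinx_inputs(["weather", "5"], jinx)
    # -> {'query': 'weather', 'limit': '5'}          (positional mapping)
    # a missing required string input now always raises ValueError; the old
    # code only raised when args were non-empty and otherwise passed None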
{npcpy-1.2.23 → npcpy-1.2.25}/npcpy/npc_sysenv.py
@@ -63,7 +63,7 @@ warnings.filterwarnings("ignore", module="torch.serialization")
  os.environ["PYTHONWARNINGS"] = "ignore"
  os.environ["SDL_AUDIODRIVER"] = "dummy"
  
- def check_internet_connection(timeout=0.5):
+ def check_internet_connection(timeout=5):
      """
      Checks for internet connectivity by trying to connect to a well-known host.
      """
@@ -87,16 +87,78 @@ def get_locally_available_models(project_directory, airplane_mode=False):
                  key, value = line.split("=", 1)
                  env_vars[key.strip()] = value.strip().strip("\"'")
  
- 
      internet_available = check_internet_connection()
      if not internet_available:
-         logging.info("No internet connection detected. External API calls will be skipped (effective airplane_mode).")
- 
+         logging.info(
+             "No internet connection detected. "
+             "External API calls will be skipped."
+         )
          airplane_mode = True
      else:
-         logging.info("Internet connection detected. Proceeding based on 'airplane_mode' parameter.")
+         logging.info(
+             "Internet connection detected. "
+             "Proceeding based on 'airplane_mode' parameter."
+         )
+ 
+     custom_providers = load_custom_providers()
+ 
+     for provider_name, config in custom_providers.items():
+         api_key_var = config.get('api_key_var')
+         if not api_key_var:
+             api_key_var = f"{provider_name.upper()}_API_KEY"
+ 
+         if api_key_var in env_vars or os.environ.get(api_key_var):
+             try:
+                 import requests
+ 
+                 def fetch_custom_models():
+                     base_url = config.get('base_url', '')
+                     headers = config.get('headers', {})
+ 
+                     api_key = env_vars.get(api_key_var) or \
+                         os.environ.get(api_key_var)
+                     if api_key:
+                         headers['Authorization'] = f'Bearer {api_key}'
+ 
+                     models_endpoint = f"{base_url.rstrip('/')}/models"
+                     response = requests.get(
+                         models_endpoint,
+                         headers=headers,
+                         timeout=3.5
+                     )
+ 
+                     if response.status_code == 200:
+                         data = response.json()
+ 
+                         if isinstance(data, dict) and 'data' in data:
+                             return [
+                                 m['id'] for m in data['data']
+                                 if 'id' in m
+                             ]
+                         elif isinstance(data, list):
+                             return [
+                                 m['id'] for m in data
+                                 if isinstance(m, dict) and 'id' in m
+                             ]
+                     return []
+ 
+                 models = fetch_custom_models()
+                 for model in models:
+                     available_models[model] = provider_name
+ 
+                 logging.info(
+                     f"Loaded {len(models)} models "
+                     f"from custom provider '{provider_name}'"
+                 )
+ 
+             except Exception as e:
+                 logging.warning(
+                     f"Failed to load models from "
+                     f"custom provider '{provider_name}': {e}"
+                 )
  
  
+     airplane_mode = False
      if not airplane_mode:
          timeout_seconds = 3.5
  
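
Two things stand out in this hunk: fetch_custom_models accepts either an OpenAI-style envelope or a bare list from a custom provider's /models endpoint, and the newly added `airplane_mode = False` line unconditionally re-enables the remote lookups that follow, overriding both the parameter and the connectivity check above it. A sketch of the two payload shapes the parser handles (model ids are hypothetical):

    # OpenAI-style envelope -> ['mylab-small', 'mylab-large']
    {"data": [{"id": "mylab-small"}, {"id": "mylab-large"}]}
    # bare list             -> same result
    [{"id": "mylab-small"}, {"id": "mylab-large"}]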
@@ -802,50 +864,116 @@ def load_env_from_execution_dir() -> None:
  
  
  
+ 
  def lookup_provider(model: str) -> str:
      """
-     Function Description:
-     This function determines the provider based on the model name.
+     Determine the provider based on the model name.
+     Checks custom providers first, then falls back to known providers.
+ 
      Args:
-         model (str): The model name.
-     Keyword Args:
-         None
+         model: The model name
+ 
      Returns:
-         str: The provider based on the model name.
+         The provider name or None if not found
      """
+     custom_providers = load_custom_providers()
+ 
+     for provider_name, config in custom_providers.items():
+         if model.startswith(f"{provider_name}-"):
+             return provider_name
+ 
+         try:
+             import requests
+             api_key_var = config.get('api_key_var') or \
+                 f"{provider_name.upper()}_API_KEY"
+             api_key = os.environ.get(api_key_var)
+ 
+             if api_key:
+                 base_url = config.get('base_url', '')
+                 headers = config.get('headers', {})
+                 headers['Authorization'] = f'Bearer {api_key}'
+ 
+                 models_endpoint = f"{base_url.rstrip('/')}/models"
+                 response = requests.get(
+                     models_endpoint,
+                     headers=headers,
+                     timeout=1.0
+                 )
+ 
+                 if response.status_code == 200:
+                     data = response.json()
+                     models = []
+ 
+                     if isinstance(data, dict) and 'data' in data:
+                         models = [m['id'] for m in data['data']]
+                     elif isinstance(data, list):
+                         models = [m['id'] for m in data]
+ 
+                     if model in models:
+                         return provider_name
+         except:
+             pass
+ 
      if model == "deepseek-chat" or model == "deepseek-reasoner":
          return "deepseek"
+ 
      ollama_prefixes = [
-         "llama",
-         "deepseek",
-         "qwen",
-         "llava",
-         "phi",
-         "mistral",
-         "mixtral",
-         "dolphin",
-         "codellama",
-         "gemma",
-     ]
+         "llama", "deepseek", "qwen", "llava",
+         "phi", "mistral", "mixtral", "dolphin",
+         "codellama", "gemma",]
      if any(model.startswith(prefix) for prefix in ollama_prefixes):
          return "ollama"
  
- 
      openai_prefixes = ["gpt-", "dall-e-", "whisper-", "o1"]
      if any(model.startswith(prefix) for prefix in openai_prefixes):
          return "openai"
  
- 
      if model.startswith("claude"):
          return "anthropic"
      if model.startswith("gemini"):
          return "gemini"
      if "diffusion" in model:
          return "diffusers"
+ 
      return None
+ 
+ 
+ def load_custom_providers():
+     """
+     Load custom provider configurations from .npcshrc
+ 
+     Returns:
+         dict: Custom provider configurations keyed by provider name
+     """
+     custom_providers = {}
+     npcshrc_path = os.path.expanduser("~/.npcshrc")
+ 
+     if os.path.exists(npcshrc_path):
+         with open(npcshrc_path, "r") as f:
+             for line in f:
+                 line = line.split("#")[0].strip()
+                 if "CUSTOM_PROVIDER_" in line and "=" in line:
+                     key, value = line.split("=", 1)
+                     key = key.strip().replace("export ", "")
+                     value = value.strip().strip("\"'")
+ 
+                     try:
+                         config = json.loads(value)
+                         provider_name = key.replace(
+                             "CUSTOM_PROVIDER_", ""
+                         ).lower()
+                         custom_providers[provider_name] = config
+                     except json.JSONDecodeError as e:
+                         logging.warning(
+                             f"Failed to parse custom provider {key}: {e}"
+                         )
+                         continue
+ 
+     return custom_providers
  load_env_from_execution_dir()
  deepseek_api_key = os.getenv("DEEPSEEK_API_KEY", None)
  gemini_api_key = os.getenv("GEMINI_API_KEY", None)
  
  anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", None)
  openai_api_key = os.getenv("OPENAI_API_KEY", None)
+ 
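
For reference, load_custom_providers above expects ~/.npcshrc entries of the following shape; the provider name, URL, and key variable here are hypothetical:

    export CUSTOM_PROVIDER_MYLAB='{"base_url": "https://api.mylab.example/v1", "api_key_var": "MYLAB_API_KEY"}'

The key suffix is lowercased into the provider name (mylab here), so per the startswith check in lookup_provider a model named mylab-foo resolves to that provider without a network round trip.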
{npcpy-1.2.23 → npcpy-1.2.25}/npcpy/serve.py
@@ -504,7 +504,233 @@ def get_global_settings():
          return jsonify({"error": str(e)}), 500
  
  
+ @app.route("/api/jinxs/available", methods=["GET"])
+ def get_available_jinxs():
+     """
+     Get all available jinxs for a given NPC and/or team.
+     Returns a list of jinx names that can be executed.
+     """
+     try:
+         current_path = request.args.get('currentPath')
+         npc_name = request.args.get('npc')
+ 
+         jinx_names = set()  # Use set to avoid duplicates
+ 
+         # Get team jinxs from project directory
+         if current_path:
+             team_jinxs_dir = os.path.join(current_path, 'npc_team', 'jinxs')
+             if os.path.exists(team_jinxs_dir):
+                 for file in os.listdir(team_jinxs_dir):
+                     if file.endswith('.jinx'):
+                         jinx_names.add(file[:-5])  # Remove .jinx extension
+ 
+         # Get global jinxs
+         global_jinxs_dir = os.path.expanduser('~/.npcsh/npc_team/jinxs')
+         if os.path.exists(global_jinxs_dir):
+             for file in os.listdir(global_jinxs_dir):
+                 if file.endswith('.jinx'):
+                     jinx_names.add(file[:-5])
+ 
+         # Get NPC-specific jinxs if NPC is specified
+         if npc_name:
+             # Try to load the NPC and get its jinxs
+             db_conn = get_db_connection()
+             npc_object = load_npc_by_name_and_source(npc_name, 'project', db_conn, current_path)
+             if not npc_object:
+                 npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
+ 
+             if npc_object and hasattr(npc_object, 'jinxs_dict') and npc_object.jinxs_dict:
+                 jinx_names.update(npc_object.jinxs_dict.keys())
+ 
+         return jsonify({
+             'jinxs': sorted(list(jinx_names)),
+             'error': None
+         })
+ 
+     except Exception as e:
+         print(f"Error getting available jinxs: {str(e)}")
+         traceback.print_exc()
+         return jsonify({'jinxs': [], 'error': str(e)}), 500
+ 
  
+ @app.route("/api/jinx/execute", methods=["POST"])
+ def execute_jinx():
+     """
+     Execute a specific jinx with provided arguments.
+     Streams the output back to the client.
+     """
+     data = request.json
+ 
+     stream_id = data.get("streamId")
+     if not stream_id:
+         stream_id = str(uuid.uuid4())
+ 
+     with cancellation_lock:
+         cancellation_flags[stream_id] = False
+ 
+     jinx_name = data.get("jinxName")
+     jinx_args = data.get("jinxArgs", [])
+     conversation_id = data.get("conversationId")
+     model = data.get("model")
+     provider = data.get("provider")
+     npc_name = data.get("npc")
+     npc_source = data.get("npcSource", "global")
+     current_path = data.get("currentPath")
+ 
+     if not jinx_name:
+         return jsonify({"error": "jinxName is required"}), 400
+ 
+     # Load project environment if applicable
+     if current_path:
+         load_project_env(current_path)
+ 
+     # Load the NPC
+     npc_object = None
+     if npc_name:
+         db_conn = get_db_connection()
+         npc_object = load_npc_by_name_and_source(npc_name, npc_source, db_conn, current_path)
+         if not npc_object and npc_source == 'project':
+             npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
+ 
+     # Try to find the jinx
+     jinx = None
+ 
+     # Check NPC's jinxs
+     if npc_object and hasattr(npc_object, 'jinxs_dict') and jinx_name in npc_object.jinxs_dict:
+         jinx = npc_object.jinxs_dict[jinx_name]
+ 
+     # Check team jinxs
+     if not jinx and current_path:
+         team_jinx_path = os.path.join(current_path, 'npc_team', 'jinxs', f'{jinx_name}.jinx')
+         if os.path.exists(team_jinx_path):
+             jinx = Jinx(jinx_path=team_jinx_path)
+ 
+     # Check global jinxs
+     if not jinx:
+         global_jinx_path = os.path.expanduser(f'~/.npcsh/npc_team/jinxs/{jinx_name}.jinx')
+         if os.path.exists(global_jinx_path):
+             jinx = Jinx(jinx_path=global_jinx_path)
+ 
+     if not jinx:
+         return jsonify({"error": f"Jinx '{jinx_name}' not found"}), 404
+ 
+     # Extract inputs from args
+     from npcpy.npc_compiler import extract_jinx_inputs
+     input_values = extract_jinx_inputs(jinx_args, jinx)
+ 
+     # Get conversation history
+     command_history = CommandHistory(app.config.get('DB_PATH'))
+     messages = fetch_messages_for_conversation(conversation_id)
+ 
+     # Prepare jinxs_dict for execution
+     all_jinxs = {}
+     if npc_object and hasattr(npc_object, 'jinxs_dict'):
+         all_jinxs.update(npc_object.jinxs_dict)
+ 
+     def event_stream(current_stream_id):
+         try:
+             # Execute the jinx
+             result = jinx.execute(
+                 input_values=input_values,
+                 jinxs_dict=all_jinxs,
+                 jinja_env=npc_object.jinja_env if npc_object else None,
+                 npc=npc_object,
+                 messages=messages
+             )
+ 
+             # Get output
+             output = result.get('output', str(result))
+             messages_updated = result.get('messages', messages)
+ 
+             # Check for interruption
+             with cancellation_lock:
+                 if cancellation_flags.get(current_stream_id, False):
+                     yield f"data: {json.dumps({'type': 'interrupted'})}\n\n"
+                     return
+ 
+             # Stream the output in chunks for consistent UI experience
+             if isinstance(output, str):
+                 chunk_size = 50  # Characters per chunk
+                 for i in range(0, len(output), chunk_size):
+                     chunk = output[i:i + chunk_size]
+                     chunk_data = {
+                         "id": None,
+                         "object": None,
+                         "created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+                         "model": model,
+                         "choices": [{
+                             "index": 0,
+                             "delta": {
+                                 "content": chunk,
+                                 "role": "assistant"
+                             },
+                             "finish_reason": None
+                         }]
+                     }
+                     yield f"data: {json.dumps(chunk_data)}\n\n"
+             else:
+                 # Non-string output, send as single chunk
+                 chunk_data = {
+                     "id": None,
+                     "object": None,
+                     "created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+                     "model": model,
+                     "choices": [{
+                         "index": 0,
+                         "delta": {
+                             "content": str(output),
+                             "role": "assistant"
+                         },
+                         "finish_reason": None
+                     }]
+                 }
+                 yield f"data: {json.dumps(chunk_data)}\n\n"
+ 
+             # Send completion message
+             yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"
+ 
+             # Save to conversation history
+             message_id = generate_message_id()
+             save_conversation_message(
+                 command_history,
+                 conversation_id,
+                 "user",
+                 f"/{jinx_name} {' '.join(jinx_args)}",
+                 wd=current_path,
+                 model=model,
+                 provider=provider,
+                 npc=npc_name,
+                 message_id=message_id
+             )
+ 
+             message_id = generate_message_id()
+             save_conversation_message(
+                 command_history,
+                 conversation_id,
+                 "assistant",
+                 str(output),
+                 wd=current_path,
+                 model=model,
+                 provider=provider,
+                 npc=npc_name,
+                 message_id=message_id
+             )
+ 
+         except Exception as e:
+             print(f"Error executing jinx {jinx_name}: {str(e)}")
+             traceback.print_exc()
+             error_data = {
+                 "type": "error",
+                 "error": str(e)
+             }
+             yield f"data: {json.dumps(error_data)}\n\n"
+ 
+         finally:
+             with cancellation_lock:
+                 if current_stream_id in cancellation_flags:
+                     del cancellation_flags[current_stream_id]
+ 
+     return Response(event_stream(stream_id), mimetype="text/event-stream")
  
  @app.route("/api/settings/global", methods=["POST", "OPTIONS"])
  def save_global_settings():
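
A client round trip against the two new endpoints could look like the sketch below; the port, project path, NPC, and jinx names are assumptions for illustration, not values taken from the diff:

    import requests

    BASE = "http://localhost:5337"  # assumed host/port for serve.py

    # list the jinxs visible for a project path and NPC
    resp = requests.get(
        f"{BASE}/api/jinxs/available",
        params={"currentPath": "/path/to/project", "npc": "sibiji"},  # hypothetical
    )
    print(resp.json()["jinxs"])

    # execute one and consume the SSE stream line by line
    payload = {
        "jinxName": "example_jinx",   # hypothetical jinx
        "jinxArgs": ["query=hello"],
        "conversationId": "conv-1",
        "npc": "sibiji",
    }
    with requests.post(f"{BASE}/api/jinx/execute", json=payload, stream=True) as r:
        for line in r.iter_lines():
            if line.startswith(b"data: "):
                print(line[6:].decode())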
@@ -1288,6 +1514,187 @@ def get_available_image_models(current_path=None):
  
      return unique_models
  
+ @app.route('/api/generative_fill', methods=['POST'])
+ def generative_fill():
+     data = request.get_json()
+     image_path = data.get('imagePath')
+     mask_data = data.get('mask')
+     prompt = data.get('prompt')
+     model = data.get('model')
+     provider = data.get('provider')
+ 
+     if not all([image_path, mask_data, prompt, model, provider]):
+         return jsonify({"error": "Missing required fields"}), 400
+ 
+     try:
+         image_path = os.path.expanduser(image_path)
+ 
+         mask_b64 = mask_data.split(',')[1] if ',' in mask_data else mask_data
+         mask_bytes = base64.b64decode(mask_b64)
+         mask_image = Image.open(BytesIO(mask_bytes))
+ 
+         original_image = Image.open(image_path)
+ 
+         if provider == 'openai':
+             result = inpaint_openai(original_image, mask_image, prompt, model)
+         elif provider == 'gemini':
+             result = inpaint_gemini(original_image, mask_image, prompt, model)
+         elif provider == 'diffusers':
+             result = inpaint_diffusers(original_image, mask_image, prompt, model)
+         else:
+             return jsonify({"error": f"Provider {provider} not supported"}), 400
+ 
+         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+         filename = f"inpaint_{timestamp}.png"
+         save_dir = os.path.dirname(image_path)
+         result_path = os.path.join(save_dir, filename)
+ 
+         result.save(result_path)
+ 
+         return jsonify({"resultPath": result_path, "error": None})
+ 
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500
+ 
+ 
+ def inpaint_openai(image, mask, prompt, model):
+     import io
+     from openai import OpenAI
+     from PIL import Image
+     import base64
+ 
+     client = OpenAI()
+ 
+     original_size = image.size
+ 
+     if model == 'dall-e-2':
+         valid_sizes = ['256x256', '512x512', '1024x1024']
+         max_dim = max(image.width, image.height)
+ 
+         if max_dim <= 256:
+             target_size = (256, 256)
+             size_str = '256x256'
+         elif max_dim <= 512:
+             target_size = (512, 512)
+             size_str = '512x512'
+         else:
+             target_size = (1024, 1024)
+             size_str = '1024x1024'
+     else:
+         valid_sizes = {
+             (1024, 1024): "1024x1024",
+             (1024, 1536): "1024x1536",
+             (1536, 1024): "1536x1024"
+         }
+ 
+         target_size = (1024, 1024)
+         for size in valid_sizes.keys():
+             if image.width > image.height and size == (1536, 1024):
+                 target_size = size
+                 break
+             elif image.height > image.width and size == (1024, 1536):
+                 target_size = size
+                 break
+ 
+         size_str = valid_sizes[target_size]
+ 
+     resized_image = image.resize(target_size, Image.Resampling.LANCZOS)
+     resized_mask = mask.resize(target_size, Image.Resampling.LANCZOS)
+ 
+     img_bytes = io.BytesIO()
+     resized_image.save(img_bytes, format='PNG')
+     img_bytes.seek(0)
+     img_bytes.name = 'image.png'
+ 
+     mask_bytes = io.BytesIO()
+     resized_mask.save(mask_bytes, format='PNG')
+     mask_bytes.seek(0)
+     mask_bytes.name = 'mask.png'
+ 
+     response = client.images.edit(
+         model=model,
+         image=img_bytes,
+         mask=mask_bytes,
+         prompt=prompt,
+         n=1,
+         size=size_str
+     )
+ 
+     if response.data[0].url:
+         import requests
+         img_data = requests.get(response.data[0].url).content
+     elif hasattr(response.data[0], 'b64_json'):
+         img_data = base64.b64decode(response.data[0].b64_json)
+     else:
+         raise Exception("No image data in response")
+ 
+     result_image = Image.open(io.BytesIO(img_data))
+     return result_image.resize(original_size, Image.Resampling.LANCZOS)
+ 
+ def inpaint_diffusers(image, mask, prompt, model):
+     from diffusers import StableDiffusionInpaintPipeline
+     import torch
+ 
+     pipe = StableDiffusionInpaintPipeline.from_pretrained(
+         model,
+         torch_dtype=torch.float16
+     )
+     pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
+ 
+     result = pipe(
+         prompt=prompt,
+         image=image,
+         mask_image=mask
+     ).images[0]
+ 
+     return result
+ def inpaint_gemini(image, mask, prompt, model):
+     from npcpy.gen.image_gen import generate_image
+     import io
+     import numpy as np
+ 
+     mask_np = np.array(mask.convert('L'))
+     ys, xs = np.where(mask_np > 128)
+ 
+     if len(xs) == 0:
+         return image
+ 
+     x_center = int(np.mean(xs))
+     y_center = int(np.mean(ys))
+     width_pct = (xs.max() - xs.min()) / image.width * 100
+     height_pct = (ys.max() - ys.min()) / image.height * 100
+ 
+     position = "center"
+     if y_center < image.height / 3:
+         position = "top"
+     elif y_center > 2 * image.height / 3:
+         position = "bottom"
+ 
+     if x_center < image.width / 3:
+         position += " left"
+     elif x_center > 2 * image.width / 3:
+         position += " right"
+ 
+     img_bytes = io.BytesIO()
+     image.save(img_bytes, format='PNG')
+     img_bytes.seek(0)
+ 
+     full_prompt = f"""Using the provided image, change only the region in the {position}
+     approximately {int(width_pct)}% wide by {int(height_pct)}% tall) to: {prompt}.
+ 
+     Keep everything else exactly the same, matching the original lighting and style.
+     You are in-painting the image. You should not be changing anything other than what was requested in prompt: {prompt}
+     """
+     results = generate_image(
+         prompt=full_prompt,
+         model=model,
+         provider='gemini',
+         attachments=[img_bytes],
+         n_images=1
+     )
+ 
+     return results[0] if results else None
  
  @app.route('/api/generate_images', methods=['POST'])
  def generate_images():
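
A request to the new inpainting endpoint might look like this sketch; the image path, mask payload, and port are hypothetical. The mask arrives as a base64 data URL, and in the Gemini path pixels brighter than 128 are what mark the fill region:

    import requests

    payload = {
        "imagePath": "~/screenshots/photo.png",      # expanded server-side
        "mask": "data:image/png;base64,iVBORw0...",  # hypothetical mask data URL
        "prompt": "replace the sky with a sunset",
        "model": "dall-e-2",
        "provider": "openai",
    }
    resp = requests.post("http://localhost:5337/api/generative_fill", json=payload)  # assumed port
    print(resp.json()["resultPath"])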
@@ -1942,7 +2349,7 @@ def stream():
  
          print('.', end="", flush=True)
          dot_count += 1
-         if "hf.co" in model or provider == 'ollama':
+         if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
              chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
              if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
                  for tool_call in response_chunk["message"]["tool_calls"]:
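
One precedence note on the changed condition: `and` binds tighter than `or` in Python, so the gpt-oss exclusion only applies to the ollama side of the test; an hf.co model whose name contains 'gpt-oss' still takes this branch. Parenthesized, the new line reads:

    # equivalent form of the new condition
    if ("hf.co" in model) or (provider == 'ollama' and 'gpt-oss' not in model):
        ...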
@@ -1959,7 +2366,9 @@ def stream():
                  if chunk_content:
                      complete_response.append(chunk_content)
                      chunk_data = {
-                         "id": None, "object": None, "created": response_chunk["created_at"], "model": response_chunk["model"],
+                         "id": None, "object": None,
+                         "created": response_chunk["created_at"] or datetime.datetime.now(),
+                         "model": response_chunk["model"],
                          "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
                      }
                      yield f"data: {json.dumps(chunk_data)}\n\n"
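
A caveat on the fallback above: a bare datetime object is not JSON-serializable, so whenever created_at is falsy the subsequent json.dumps(chunk_data) would raise TypeError. A hypothetical string-valued fallback would sidestep that:

    # hypothetical serializable variant, not code from the release
    "created": response_chunk["created_at"] or datetime.datetime.now().isoformat(),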
{npcpy-1.2.23 → npcpy-1.2.25/npcpy.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.2.23
+ Version: 1.2.25
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
{npcpy-1.2.23 → npcpy-1.2.25}/setup.py
@@ -83,7 +83,7 @@ extra_files = package_files("npcpy/npc_team/")
  
  setup(
      name="npcpy",
-     version="1.2.23",
+     version="1.2.25",
      packages=find_packages(exclude=["tests*"]),
      install_requires=base_requirements,
      extras_require={