npcpy 1.2.24.tar.gz → 1.2.26.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcpy-1.2.24/npcpy.egg-info → npcpy-1.2.26}/PKG-INFO +1 -1
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/npc_compiler.py +39 -24
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/serve.py +407 -0
- {npcpy-1.2.24 → npcpy-1.2.26/npcpy.egg-info}/PKG-INFO +1 -1
- {npcpy-1.2.24 → npcpy-1.2.26}/setup.py +1 -1
- {npcpy-1.2.24 → npcpy-1.2.26}/LICENSE +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/MANIFEST.in +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/README.md +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/audio.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/data_models.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/image.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/load.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/text.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/video.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/data/web.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/diff.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/ge.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/memory_trainer.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/model_ensembler.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/rl.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/sft.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/ft/usft.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/gen/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/gen/audio_gen.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/gen/embeddings.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/gen/image_gen.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/gen/response.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/gen/video_gen.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/llm_funcs.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/main.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/memory/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/memory/command_history.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/memory/kg_vis.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/memory/knowledge_graph.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/memory/memory_processor.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/memory/search.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/mix/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/mix/debate.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/npc_sysenv.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/npcs.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/sql/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/sql/ai_function_tools.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/sql/database_ai_adapters.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/sql/database_ai_functions.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/sql/model_runner.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/sql/npcsql.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/sql/sql_model_compiler.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/tools.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/work/__init__.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/work/desktop.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/work/plan.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy/work/trigger.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy.egg-info/SOURCES.txt +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy.egg-info/dependency_links.txt +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy.egg-info/requires.txt +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/npcpy.egg-info/top_level.txt +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/setup.cfg +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_audio.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_command_history.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_image.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_llm_funcs.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_load.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_npc_compiler.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_npcsql.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_response.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_serve.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_text.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_tools.py +0 -0
- {npcpy-1.2.24 → npcpy-1.2.26}/tests/test_web.py +0 -0
npcpy/npc_compiler.py

```diff
@@ -563,12 +563,9 @@ def get_npc_action_space(npc=None, team=None):
     }
 
     return actions
-
-
 def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
     inputs = {}
 
-
     flag_mapping = {}
     for input_ in jinx.inputs:
         if isinstance(input_, str):
@@ -579,46 +576,64 @@ def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
             flag_mapping[f"-{key[0]}"] = key
             flag_mapping[f"--{key}"] = key
 
-
-
+    if len(jinx.inputs) > 1:
+        used_args = set()
+        for i, arg in enumerate(args):
+            if '=' in arg and arg != '=' and not arg.startswith('-'):
+                key, value = arg.split('=', 1)
+                key = key.strip().strip("'\"")
+                value = value.strip().strip("'\"")
+                inputs[key] = value
+                used_args.add(i)
+    else:
+        used_args = set()
+
+
     for i, arg in enumerate(args):
-        if
+        if i in used_args:
+            continue
 
-
+        if arg in flag_mapping:
+            if i + 1 < len(args) and not args[i + 1].startswith('-'):
                 input_name = flag_mapping[arg]
                 inputs[input_name] = args[i + 1]
                 used_args.add(i)
                 used_args.add(i + 1)
             else:
-
+                input_name = flag_mapping[arg]
+                inputs[input_name] = True
+                used_args.add(i)
 
-
     unused_args = [arg for i, arg in enumerate(args) if i not in used_args]
-    if unused_args and jinx.inputs:
-        first_input = jinx.inputs[0]
-        if isinstance(first_input, str):
-            inputs[first_input] = " ".join(unused_args)
-        elif isinstance(first_input, dict):
-            key = list(first_input.keys())[0]
-            inputs[key] = " ".join(unused_args)
-
 
+    jinx_input_names = []
+    for input_ in jinx.inputs:
+        if isinstance(input_, str):
+            jinx_input_names.append(input_)
+        elif isinstance(input_, dict):
+            jinx_input_names.append(list(input_.keys())[0])
+    if len(jinx_input_names) == 1:
+        inputs[jinx_input_names[0]] = ' '.join(unused_args).strip()
+    else:
+        for i, arg in enumerate(unused_args):
+            if i < len(jinx_input_names):
+                input_name = jinx_input_names[i]
+                if input_name not in inputs:
+                    inputs[input_name] = arg
+
+
     for input_ in jinx.inputs:
         if isinstance(input_, str):
             if input_ not in inputs:
-
-                raise ValueError(f"Missing required input: {input_}")
-            else:
-                inputs[input_] = None
+                raise ValueError(f"Missing required input: {input_}")
         elif isinstance(input_, dict):
             key = list(input_.keys())[0]
+            default_value = input_[key]
             if key not in inputs:
-                inputs[key] =
+                inputs[key] = default_value
 
     return inputs
 
-
-
 from npcpy.memory.command_history import load_kg_from_db, save_kg_to_db
 from npcpy.memory.knowledge_graph import kg_initial, kg_evolve_incremental, kg_sleep_process, kg_dream_process
 from npcpy.llm_funcs import get_llm_response, breathe
```
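The rewrite above changes how positional and `key=value` arguments are mapped onto a jinx's declared inputs: in 1.2.24 all unused args were joined into the first input, while 1.2.26 honors `key=value` pairs for multi-input jinxs and otherwise maps leftovers positionally. A minimal sketch of that new behavior follows, assuming a stub `Jinx` with only an `inputs` attribute; the flag handling and default filling of the real function are omitted, so this is illustrative rather than the shipped implementation:

```python
# Sketch of the 1.2.26 argument mapping. JinxStub is an assumption for
# illustration; the real Jinx class lives in npcpy/npc_compiler.py.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Union


@dataclass
class JinxStub:
    inputs: List[Union[str, Dict[str, Any]]] = field(default_factory=list)


def parse(args: List[str], jinx: JinxStub) -> Dict[str, Any]:
    inputs: Dict[str, Any] = {}
    used = set()
    # key=value pairs are only honored when the jinx declares multiple inputs
    if len(jinx.inputs) > 1:
        for i, arg in enumerate(args):
            if '=' in arg and arg != '=' and not arg.startswith('-'):
                key, value = arg.split('=', 1)
                inputs[key.strip().strip("'\"")] = value.strip().strip("'\"")
                used.add(i)
    # leftovers are joined into a single declared input, or mapped positionally
    unused = [a for i, a in enumerate(args) if i not in used]
    names = [i_ if isinstance(i_, str) else list(i_.keys())[0] for i_ in jinx.inputs]
    if len(names) == 1:
        inputs[names[0]] = ' '.join(unused).strip()
    else:
        for i, arg in enumerate(unused):
            if i < len(names) and names[i] not in inputs:
                inputs[names[i]] = arg
    return inputs


# Example: a two-input jinx accepts key=value pairs with positional fallback.
jinx = JinxStub(inputs=['query', {'limit': 10}])
print(parse(['query=cats', 'limit=5'], jinx))  # {'query': 'cats', 'limit': '5'}
print(parse(['cats'], jinx))                   # {'query': 'cats'}
```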
npcpy/serve.py

```diff
@@ -504,7 +504,233 @@ def get_global_settings():
         return jsonify({"error": str(e)}), 500
 
 
+@app.route("/api/jinxs/available", methods=["GET"])
+def get_available_jinxs():
+    """
+    Get all available jinxs for a given NPC and/or team.
+    Returns a list of jinx names that can be executed.
+    """
+    try:
+        current_path = request.args.get('currentPath')
+        npc_name = request.args.get('npc')
+
+        jinx_names = set()  # Use set to avoid duplicates
+
+        # Get team jinxs from project directory
+        if current_path:
+            team_jinxs_dir = os.path.join(current_path, 'npc_team', 'jinxs')
+            if os.path.exists(team_jinxs_dir):
+                for file in os.listdir(team_jinxs_dir):
+                    if file.endswith('.jinx'):
+                        jinx_names.add(file[:-5])  # Remove .jinx extension
+
+        # Get global jinxs
+        global_jinxs_dir = os.path.expanduser('~/.npcsh/npc_team/jinxs')
+        if os.path.exists(global_jinxs_dir):
+            for file in os.listdir(global_jinxs_dir):
+                if file.endswith('.jinx'):
+                    jinx_names.add(file[:-5])
+
+        # Get NPC-specific jinxs if NPC is specified
+        if npc_name:
+            # Try to load the NPC and get its jinxs
+            db_conn = get_db_connection()
+            npc_object = load_npc_by_name_and_source(npc_name, 'project', db_conn, current_path)
+            if not npc_object:
+                npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
+
+            if npc_object and hasattr(npc_object, 'jinxs_dict') and npc_object.jinxs_dict:
+                jinx_names.update(npc_object.jinxs_dict.keys())
+
+        return jsonify({
+            'jinxs': sorted(list(jinx_names)),
+            'error': None
+        })
+
+    except Exception as e:
+        print(f"Error getting available jinxs: {str(e)}")
+        traceback.print_exc()
+        return jsonify({'jinxs': [], 'error': str(e)}), 500
+
 
+@app.route("/api/jinx/execute", methods=["POST"])
+def execute_jinx():
+    """
+    Execute a specific jinx with provided arguments.
+    Streams the output back to the client.
+    """
+    data = request.json
+
+    stream_id = data.get("streamId")
+    if not stream_id:
+        stream_id = str(uuid.uuid4())
+
+    with cancellation_lock:
+        cancellation_flags[stream_id] = False
+
+    jinx_name = data.get("jinxName")
+    jinx_args = data.get("jinxArgs", [])
+    conversation_id = data.get("conversationId")
+    model = data.get("model")
+    provider = data.get("provider")
+    npc_name = data.get("npc")
+    npc_source = data.get("npcSource", "global")
+    current_path = data.get("currentPath")
+
+    if not jinx_name:
+        return jsonify({"error": "jinxName is required"}), 400
+
+    # Load project environment if applicable
+    if current_path:
+        load_project_env(current_path)
+
+    # Load the NPC
+    npc_object = None
+    if npc_name:
+        db_conn = get_db_connection()
+        npc_object = load_npc_by_name_and_source(npc_name, npc_source, db_conn, current_path)
+        if not npc_object and npc_source == 'project':
+            npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
+
+    # Try to find the jinx
+    jinx = None
+
+    # Check NPC's jinxs
+    if npc_object and hasattr(npc_object, 'jinxs_dict') and jinx_name in npc_object.jinxs_dict:
+        jinx = npc_object.jinxs_dict[jinx_name]
+
+    # Check team jinxs
+    if not jinx and current_path:
+        team_jinx_path = os.path.join(current_path, 'npc_team', 'jinxs', f'{jinx_name}.jinx')
+        if os.path.exists(team_jinx_path):
+            jinx = Jinx(jinx_path=team_jinx_path)
+
+    # Check global jinxs
+    if not jinx:
+        global_jinx_path = os.path.expanduser(f'~/.npcsh/npc_team/jinxs/{jinx_name}.jinx')
+        if os.path.exists(global_jinx_path):
+            jinx = Jinx(jinx_path=global_jinx_path)
+
+    if not jinx:
+        return jsonify({"error": f"Jinx '{jinx_name}' not found"}), 404
+
+    # Extract inputs from args
+    from npcpy.npc_compiler import extract_jinx_inputs
+    input_values = extract_jinx_inputs(jinx_args, jinx)
+
+    # Get conversation history
+    command_history = CommandHistory(app.config.get('DB_PATH'))
+    messages = fetch_messages_for_conversation(conversation_id)
+
+    # Prepare jinxs_dict for execution
+    all_jinxs = {}
+    if npc_object and hasattr(npc_object, 'jinxs_dict'):
+        all_jinxs.update(npc_object.jinxs_dict)
+
+    def event_stream(current_stream_id):
+        try:
+            # Execute the jinx
+            result = jinx.execute(
+                input_values=input_values,
+                jinxs_dict=all_jinxs,
+                jinja_env=npc_object.jinja_env if npc_object else None,
+                npc=npc_object,
+                messages=messages
+            )
+
+            # Get output
+            output = result.get('output', str(result))
+            messages_updated = result.get('messages', messages)
+
+            # Check for interruption
+            with cancellation_lock:
+                if cancellation_flags.get(current_stream_id, False):
+                    yield f"data: {json.dumps({'type': 'interrupted'})}\n\n"
+                    return
+
+            # Stream the output in chunks for consistent UI experience
+            if isinstance(output, str):
+                chunk_size = 50  # Characters per chunk
+                for i in range(0, len(output), chunk_size):
+                    chunk = output[i:i + chunk_size]
+                    chunk_data = {
+                        "id": None,
+                        "object": None,
+                        "created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+                        "model": model,
+                        "choices": [{
+                            "index": 0,
+                            "delta": {
+                                "content": chunk,
+                                "role": "assistant"
+                            },
+                            "finish_reason": None
+                        }]
+                    }
+                    yield f"data: {json.dumps(chunk_data)}\n\n"
+            else:
+                # Non-string output, send as single chunk
+                chunk_data = {
+                    "id": None,
+                    "object": None,
+                    "created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+                    "model": model,
+                    "choices": [{
+                        "index": 0,
+                        "delta": {
+                            "content": str(output),
+                            "role": "assistant"
+                        },
+                        "finish_reason": None
+                    }]
+                }
+                yield f"data: {json.dumps(chunk_data)}\n\n"
+
+            # Send completion message
+            yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"
+
+            # Save to conversation history
+            message_id = generate_message_id()
+            save_conversation_message(
+                command_history,
+                conversation_id,
+                "user",
+                f"/{jinx_name} {' '.join(jinx_args)}",
+                wd=current_path,
+                model=model,
+                provider=provider,
+                npc=npc_name,
+                message_id=message_id
+            )
+
+            message_id = generate_message_id()
+            save_conversation_message(
+                command_history,
+                conversation_id,
+                "assistant",
+                str(output),
+                wd=current_path,
+                model=model,
+                provider=provider,
+                npc=npc_name,
+                message_id=message_id
+            )
+
+        except Exception as e:
+            print(f"Error executing jinx {jinx_name}: {str(e)}")
+            traceback.print_exc()
+            error_data = {
+                "type": "error",
+                "error": str(e)
+            }
+            yield f"data: {json.dumps(error_data)}\n\n"
+
+        finally:
+            with cancellation_lock:
+                if current_stream_id in cancellation_flags:
+                    del cancellation_flags[current_stream_id]
+
+    return Response(event_stream(stream_id), mimetype="text/event-stream")
 
 @app.route("/api/settings/global", methods=["POST", "OPTIONS"])
 def save_global_settings():
```
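The new `/api/jinx/execute` route replies as a Server-Sent-Events stream of OpenAI-style chunks, terminated by a `message_stop` (or `interrupted`/`error`) event. A hedged client sketch; the base URL, port, and payload values below are assumptions for illustration, not part of the package:

```python
# Sketch of a client for the new /api/jinx/execute SSE endpoint.
import json
import requests

payload = {
    "jinxName": "example_jinx",        # hypothetical jinx name
    "jinxArgs": ["query=cats"],
    "conversationId": "demo-conversation",
    "model": "llama3.2",               # assumed model/provider pair
    "provider": "ollama",
    "currentPath": "/path/to/project",
}

with requests.post("http://localhost:5337/api/jinx/execute",
                   json=payload, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        event = json.loads(line[len("data: "):])
        # terminal events carry a "type" field instead of chunk content
        if event.get("type") in ("message_stop", "interrupted", "error"):
            break
        # chunk events carry content under choices[0].delta.content
        for choice in event.get("choices", []):
            print(choice["delta"]["content"], end="", flush=True)
```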
npcpy/serve.py (continued)

```diff
@@ -1288,6 +1514,187 @@ def get_available_image_models(current_path=None):
 
     return unique_models
 
+@app.route('/api/generative_fill', methods=['POST'])
+def generative_fill():
+    data = request.get_json()
+    image_path = data.get('imagePath')
+    mask_data = data.get('mask')
+    prompt = data.get('prompt')
+    model = data.get('model')
+    provider = data.get('provider')
+
+    if not all([image_path, mask_data, prompt, model, provider]):
+        return jsonify({"error": "Missing required fields"}), 400
+
+    try:
+        image_path = os.path.expanduser(image_path)
+
+        mask_b64 = mask_data.split(',')[1] if ',' in mask_data else mask_data
+        mask_bytes = base64.b64decode(mask_b64)
+        mask_image = Image.open(BytesIO(mask_bytes))
+
+        original_image = Image.open(image_path)
+
+        if provider == 'openai':
+            result = inpaint_openai(original_image, mask_image, prompt, model)
+        elif provider == 'gemini':
+            result = inpaint_gemini(original_image, mask_image, prompt, model)
+        elif provider == 'diffusers':
+            result = inpaint_diffusers(original_image, mask_image, prompt, model)
+        else:
+            return jsonify({"error": f"Provider {provider} not supported"}), 400
+
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename = f"inpaint_{timestamp}.png"
+        save_dir = os.path.dirname(image_path)
+        result_path = os.path.join(save_dir, filename)
+
+        result.save(result_path)
+
+        return jsonify({"resultPath": result_path, "error": None})
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+
+def inpaint_openai(image, mask, prompt, model):
+    import io
+    from openai import OpenAI
+    from PIL import Image
+    import base64
+
+    client = OpenAI()
+
+    original_size = image.size
+
+    if model == 'dall-e-2':
+        valid_sizes = ['256x256', '512x512', '1024x1024']
+        max_dim = max(image.width, image.height)
+
+        if max_dim <= 256:
+            target_size = (256, 256)
+            size_str = '256x256'
+        elif max_dim <= 512:
+            target_size = (512, 512)
+            size_str = '512x512'
+        else:
+            target_size = (1024, 1024)
+            size_str = '1024x1024'
+    else:
+        valid_sizes = {
+            (1024, 1024): "1024x1024",
+            (1024, 1536): "1024x1536",
+            (1536, 1024): "1536x1024"
+        }
+
+        target_size = (1024, 1024)
+        for size in valid_sizes.keys():
+            if image.width > image.height and size == (1536, 1024):
+                target_size = size
+                break
+            elif image.height > image.width and size == (1024, 1536):
+                target_size = size
+                break
+
+        size_str = valid_sizes[target_size]
+
+    resized_image = image.resize(target_size, Image.Resampling.LANCZOS)
+    resized_mask = mask.resize(target_size, Image.Resampling.LANCZOS)
+
+    img_bytes = io.BytesIO()
+    resized_image.save(img_bytes, format='PNG')
+    img_bytes.seek(0)
+    img_bytes.name = 'image.png'
+
+    mask_bytes = io.BytesIO()
+    resized_mask.save(mask_bytes, format='PNG')
+    mask_bytes.seek(0)
+    mask_bytes.name = 'mask.png'
+
+    response = client.images.edit(
+        model=model,
+        image=img_bytes,
+        mask=mask_bytes,
+        prompt=prompt,
+        n=1,
+        size=size_str
+    )
+
+    if response.data[0].url:
+        import requests
+        img_data = requests.get(response.data[0].url).content
+    elif hasattr(response.data[0], 'b64_json'):
+        img_data = base64.b64decode(response.data[0].b64_json)
+    else:
+        raise Exception("No image data in response")
+
+    result_image = Image.open(io.BytesIO(img_data))
+    return result_image.resize(original_size, Image.Resampling.LANCZOS)
+
+def inpaint_diffusers(image, mask, prompt, model):
+    from diffusers import StableDiffusionInpaintPipeline
+    import torch
+
+    pipe = StableDiffusionInpaintPipeline.from_pretrained(
+        model,
+        torch_dtype=torch.float16
+    )
+    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
+
+    result = pipe(
+        prompt=prompt,
+        image=image,
+        mask_image=mask
+    ).images[0]
+
+    return result
+def inpaint_gemini(image, mask, prompt, model):
+    from npcpy.gen.image_gen import generate_image
+    import io
+    import numpy as np
+
+    mask_np = np.array(mask.convert('L'))
+    ys, xs = np.where(mask_np > 128)
+
+    if len(xs) == 0:
+        return image
+
+    x_center = int(np.mean(xs))
+    y_center = int(np.mean(ys))
+    width_pct = (xs.max() - xs.min()) / image.width * 100
+    height_pct = (ys.max() - ys.min()) / image.height * 100
+
+    position = "center"
+    if y_center < image.height / 3:
+        position = "top"
+    elif y_center > 2 * image.height / 3:
+        position = "bottom"
+
+    if x_center < image.width / 3:
+        position += " left"
+    elif x_center > 2 * image.width / 3:
+        position += " right"
+
+    img_bytes = io.BytesIO()
+    image.save(img_bytes, format='PNG')
+    img_bytes.seek(0)
+
+    full_prompt = f"""Using the provided image, change only the region in the {position}
+approximately {int(width_pct)}% wide by {int(height_pct)}% tall) to: {prompt}.
+
+Keep everything else exactly the same, matching the original lighting and style.
+You are in-painting the image. You should not be changing anything other than what was requested in prompt: {prompt}
+"""
+    results = generate_image(
+        prompt=full_prompt,
+        model=model,
+        provider='gemini',
+        attachments=[img_bytes],
+        n_images=1
+    )
+
+    return results[0] if results else None
 
 @app.route('/api/generate_images', methods=['POST'])
 def generate_images():
```
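The new `/api/generative_fill` endpoint expects the mask as a base64 data URL whose bright pixels (value > 128 on the gemini path) mark the region to repaint, and returns the saved `resultPath`. A hedged request sketch; the server address, file paths, and model choice are assumptions for illustration:

```python
# Sketch of a /api/generative_fill request. Values are illustrative assumptions.
import base64
import io

import requests
from PIL import Image, ImageDraw

# Build a simple mask: white pixels mark the region to be repainted.
mask = Image.new("L", (1024, 1024), 0)
ImageDraw.Draw(mask).rectangle([300, 300, 700, 700], fill=255)
buf = io.BytesIO()
mask.save(buf, format="PNG")
mask_data_url = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

resp = requests.post("http://localhost:5337/api/generative_fill", json={
    "imagePath": "~/Pictures/example.png",  # hypothetical image path
    "mask": mask_data_url,
    "prompt": "a red balloon",
    "model": "dall-e-2",                    # assumed model for the openai path
    "provider": "openai",
})
print(resp.json())  # {"resultPath": "...", "error": None} on success
```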