npcpy 1.2.12.tar.gz → 1.2.14.tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {npcpy-1.2.12/npcpy.egg-info → npcpy-1.2.14}/PKG-INFO +1 -1
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/audio.py +1 -23
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/gen/response.py +4 -9
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/npc_compiler.py +32 -3
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/serve.py +1 -0
- {npcpy-1.2.12 → npcpy-1.2.14/npcpy.egg-info}/PKG-INFO +1 -1
- {npcpy-1.2.12 → npcpy-1.2.14}/setup.py +1 -1
- {npcpy-1.2.12 → npcpy-1.2.14}/LICENSE +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/MANIFEST.in +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/README.md +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/__init__.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/__init__.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/data_models.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/image.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/load.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/text.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/video.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/data/web.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/gen/__init__.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/gen/audio_gen.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/gen/embeddings.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/gen/image_gen.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/gen/video_gen.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/llm_funcs.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/main.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/memory/__init__.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/memory/command_history.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/memory/kg_vis.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/memory/knowledge_graph.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/memory/search.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/mix/__init__.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/mix/debate.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/npc_sysenv.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/npcs.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/sql/__init__.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/sql/model_runner.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/sql/npcsql.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/tools.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/work/__init__.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/work/desktop.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/work/plan.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy/work/trigger.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy.egg-info/SOURCES.txt +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy.egg-info/dependency_links.txt +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy.egg-info/requires.txt +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/npcpy.egg-info/top_level.txt +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/setup.cfg +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_audio.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_command_history.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_image.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_llm_funcs.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_load.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_npc_compiler.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_npcsql.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_response.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_serve.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_text.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_tools.py +0 -0
- {npcpy-1.2.12 → npcpy-1.2.14}/tests/test_web.py +0 -0
npcpy/data/audio.py

````diff
@@ -425,32 +425,10 @@ def process_response_chunk(text_chunk):
 
 
 def process_text_for_tts(text):
-    text = re.sub(r"[*<>{}()\[\]&%
+    text = re.sub(r"[*<>{}()\[\]&%")
     text = text.strip()
     text = re.sub(r"(\w)\.(\w)\.", r"\1 \2 ", text)
     text = re.sub(r"([.!?])(\w)", r"\1 \2", text)
     return text
 
 
-"""
-
-To use this code, you'll need to have the following dependencies installed:
-
-```bash
-pip install numpy torch torchaudio faster-whisper pygame pyaudio gtts ollama
-```
-
-And optionally FFmpeg for audio speed adjustment:
-```bash
-
-sudo apt-get install ffmpeg
-
-
-brew install ffmpeg
-
-
-choco install ffmpeg
-```
-
-
-"""
````
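The surviving substitutions in `process_text_for_tts` space out dotted abbreviations and restore the space after sentence-ending punctuation; the first substitution is cut off mid-pattern by the registry viewer in both versions, so its full character class is unknown. A minimal runnable sketch of the cleaner, with an assumed completion of that truncated pattern:

```python
import re

def process_text_for_tts(text):
    # Assumed completion of the truncated substitution: drop markup-ish
    # characters before speech synthesis (the exact character class in
    # npcpy is cut off in the diff above).
    text = re.sub(r"[*<>{}()\[\]&%]", "", text)
    text = text.strip()
    # "U.S." -> "U S " so the TTS engine reads abbreviations letter by letter
    text = re.sub(r"(\w)\.(\w)\.", r"\1 \2 ", text)
    # Ensure a space after sentence punctuation: "end.Next" -> "end. Next"
    text = re.sub(r"([.!?])(\w)", r"\1 \2", text)
    return text

print(process_text_for_tts("Hello <world>!This is the U.S.A."))
```

On that input the sketch yields `"Hello world! This is the U S A."`, which reads more naturally when synthesized.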
npcpy/gen/response.py

```diff
@@ -320,7 +320,7 @@ def get_ollama_response(
 
 
     if message.get('tool_calls'):
-
+
 
         result["tool_calls"] = message['tool_calls']
 
@@ -340,7 +340,7 @@ def get_ollama_response(
 
 
     if stream:
-
+
 
 
     final_messages = processed_result["messages"]
@@ -557,12 +557,10 @@ def get_litellm_response(
     if api_key is None:
         api_key = os.environ.get('NPC_STUDIO_LICENSE')
     api_params['api_key'] = api_key
-    print(api_key)
     if '-npc' in model:
         model = model.split('-npc')[0]
         provider = "openai"
 
-    print(model, provider)
     if isinstance(format, BaseModel):
         api_params["response_format"] = format
     if model is None:
@@ -639,7 +637,7 @@ def get_litellm_response(
     has_tool_calls = hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls
 
     if has_tool_calls:
-
+
 
         result["tool_calls"] = resp.choices[0].message.tool_calls
 
@@ -653,7 +651,7 @@ def get_litellm_response(
 
 
     if stream:
-
+
 
 
     clean_messages = []
@@ -756,10 +754,7 @@ def process_tool_calls(response_dict, tool_map, model, provider, messages, strea
     serializable_result = None
 
     try:
-        print(tool_map[tool_name])
-        print('Executing tool:', tool_name, 'with arguments:', arguments)
         tool_result = tool_map[tool_name](**arguments)
-        print('Executed Tool Result:', tool_result)
     except Exception as e:
         tool_result = f"Error executing tool '{tool_name}': {str(e)}. Tool map is : {tool_map}"
 
```
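The substantive change in this file is the removal of debug `print` calls; the four other changed lines (old 323, 343, 642, 656) render blank in the registry viewer, so their content is not recoverable above. The `process_tool_calls` hunk keeps the dispatch-and-fallback pattern: look the tool up in `tool_map`, splat the model-supplied arguments into it, and turn any exception into an error string for the model rather than raising. A self-contained sketch of that pattern, with a hypothetical tool standing in for a real tool map:

```python
# Hypothetical tool; npcpy's tool_map holds whatever callables the caller registers.
def lookup_weather(city: str) -> str:
    return f"Sunny in {city}"

tool_map = {"lookup_weather": lookup_weather}

def run_tool(tool_name, arguments):
    try:
        # Same call shape as the diff: resolve the callable, splat the
        # model-provided arguments into it.
        tool_result = tool_map[tool_name](**arguments)
    except Exception as e:
        # Failures become an error string fed back to the model,
        # mirroring the except branch kept in the diff.
        tool_result = f"Error executing tool '{tool_name}': {str(e)}. Tool map is : {tool_map}"
    return tool_result

print(run_tool("lookup_weather", {"city": "Oslo"}))  # Sunny in Oslo
print(run_tool("missing_tool", {}))                  # error string, no crash
```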
npcpy/npc_compiler.py

```diff
@@ -1202,7 +1202,12 @@ class NPC:
     3. Next, I need to...
     4. Finally, I can conclude...
 
-    Provide your step-by-step analysis
+    Provide your step-by-step analysis.
+    Do not under any circumstances ask for feedback from a user. These thoughts are part of an agentic tool that is letting the agent
+    break down a problem by thinking it through. they will review the results and use them accordingly.
+
+
+    """
 
         response = self.get_llm_response(thinking_prompt, tool_choice = False)
         return response.get('response', 'Unable to process thinking request')
@@ -1437,12 +1442,36 @@ class NPC:
         context.append(f"Recent successes: {'; '.join(successes[-3:])}")
         return "\n".join(context)
 
-
+
+    def compress_planning_state(self, messages):
+        if isinstance(messages, list):
+            from npcpy.llm_funcs import breathe, get_facts
+
+            conversation_summary = breathe(messages=messages, npc=self)
+            summary_data = conversation_summary.get('output', '')
+
+            conversation_text = "\n".join([msg['content'] for msg in messages])
+            extracted_facts = get_facts(conversation_text, model=self.model, provider=self.provider, npc=self)
+
+            user_inputs = [msg['content'] for msg in messages if msg.get('role') == 'user']
+            assistant_outputs = [msg['content'] for msg in messages if msg.get('role') == 'assistant']
+
+            planning_state = {
+                "goal": summary_data,
+                "facts": [fact['statement'] if isinstance(fact, dict) else str(fact) for fact in extracted_facts[-10:]],
+                "successes": [output[:100] for output in assistant_outputs[-5:]],
+                "mistakes": [],
+                "todos": user_inputs[-3:],
+                "constraints": []
+            }
+        else:
+            planning_state = messages
+
         todos = planning_state.get('todos', [])
         current_index = planning_state.get('current_todo_index', 0)
 
         if todos and current_index < len(todos):
-            current_focus = todos[current_index].get('description',
+            current_focus = todos[current_index].get('description', todos[current_index]) if isinstance(todos[current_index], dict) else str(todos[current_index])
         else:
             current_focus = 'No current task'
 
```
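The new `compress_planning_state` method normalizes either a raw message list or an existing planning-state dict into one shape before the todo-selection logic runs. Its summarization and fact extraction lean on npcpy internals (`breathe`, `get_facts`); the sketch below substitutes trivial stand-ins for those calls so the resulting dict shape is visible in isolation:

```python
# Standalone sketch of the normalization compress_planning_state performs.
# breathe() and get_facts() are replaced by trivial stand-ins here; the
# real method delegates to npcpy.llm_funcs for both.
def compress_planning_state(messages):
    if isinstance(messages, list):
        user_inputs = [m['content'] for m in messages if m.get('role') == 'user']
        assistant_outputs = [m['content'] for m in messages if m.get('role') == 'assistant']
        return {
            "goal": " ".join(user_inputs)[:200],  # stand-in for the breathe() summary
            "facts": [],                           # stand-in for get_facts() output
            "successes": [out[:100] for out in assistant_outputs[-5:]],
            "mistakes": [],
            "todos": user_inputs[-3:],
            "constraints": [],
        }
    return messages  # already a planning-state dict: pass through unchanged

state = compress_planning_state([
    {"role": "user", "content": "Summarize the quarterly report"},
    {"role": "assistant", "content": "Here is the summary..."},
])
print(state["todos"], state["successes"])
```

The companion fix to `current_focus` makes the todo lookup tolerate both dict todos (with a `description` key) and the plain strings this method now produces.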
npcpy/serve.py

```diff
@@ -1778,6 +1778,7 @@ def stream():
 
     try:
         from npcsh.corca import execute_command_corca, create_corca_state_and_mcp_client, MCPClientNPC
+        from npcsh._state import initial_state as state
     except ImportError:
 
         print("ERROR: npcsh.corca or MCPClientNPC not found. Corca mode is disabled.", file=sys.stderr)
```
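The serve.py change adds one import inside the existing `try`/`except ImportError` guard, so a missing `npcsh` extra still disables Corca mode with a single message instead of crashing the server. A sketch of that guard pattern (the `CORCA_AVAILABLE` flag is illustrative, not part of npcpy):

```python
import sys

# All imports that depend on the optional npcsh extra live in one try
# block; a single ImportError turns the feature off instead of crashing.
try:
    from npcsh.corca import execute_command_corca
    from npcsh._state import initial_state as state
    CORCA_AVAILABLE = True  # hypothetical flag for this sketch
except ImportError:
    print("ERROR: npcsh.corca or MCPClientNPC not found. Corca mode is disabled.", file=sys.stderr)
    CORCA_AVAILABLE = False
```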