npcpy 1.2.12__py3-none-any.whl → 1.2.14__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
npcpy/data/audio.py CHANGED
@@ -425,32 +425,10 @@ def process_response_chunk(text_chunk):
 
 
 def process_text_for_tts(text):
-    text = re.sub(r"[*<>{}()\[\]&%
+    text = re.sub(r"[*<>{}()\[\]&%")
     text = text.strip()
     text = re.sub(r"(\w)\.(\w)\.", r"\1 \2 ", text)
     text = re.sub(r"([.!?])(\w)", r"\1 \2", text)
     return text
 
 
-"""
-
-To use this code, you'll need to have the following dependencies installed:
-
-```bash
-pip install numpy torch torchaudio faster-whisper pygame pyaudio gtts ollama
-```
-
-And optionally FFmpeg for audio speed adjustment:
-```bash
-
-sudo apt-get install ffmpeg
-
-
-brew install ffmpeg
-
-
-choco install ffmpeg
-```
-
-
-"""
npcpy/gen/response.py CHANGED
@@ -320,7 +320,7 @@ def get_ollama_response(
 
 
     if message.get('tool_calls'):
-        print("Found tool calls, processing automatically:", message['tool_calls'])
+
 
         result["tool_calls"] = message['tool_calls']
 
@@ -340,7 +340,7 @@ def get_ollama_response(
 
 
     if stream:
-        print("Making final streaming call with processed tools")
+
 
 
         final_messages = processed_result["messages"]
@@ -557,12 +557,10 @@ def get_litellm_response(
     if api_key is None:
         api_key = os.environ.get('NPC_STUDIO_LICENSE')
     api_params['api_key'] = api_key
-    print(api_key)
     if '-npc' in model:
         model = model.split('-npc')[0]
         provider = "openai"
 
-    print(model, provider)
     if isinstance(format, BaseModel):
         api_params["response_format"] = format
     if model is None:
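
The surviving context lines show how NPC Studio model identifiers are routed: a `-npc` suffix is stripped from the model name and the provider is forced to `"openai"`. A tiny illustration of that rewrite, inferred from the context lines alone (the inputs are hypothetical):

```python
# Hypothetical inputs; the suffix handling mirrors get_litellm_response.
model, provider = "gpt-4o-mini-npc", "anthropic"
if '-npc' in model:
    model = model.split('-npc')[0]   # -> "gpt-4o-mini"
    provider = "openai"              # NPC Studio models use an OpenAI-compatible route
print(model, provider)
```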
@@ -639,7 +637,7 @@ def get_litellm_response(
     has_tool_calls = hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls
 
     if has_tool_calls:
-        print("Found tool calls in LiteLLM, processing automatically:", resp.choices[0].message.tool_calls)
+
 
         result["tool_calls"] = resp.choices[0].message.tool_calls
 
@@ -653,7 +651,7 @@ def get_litellm_response(
 
 
     if stream:
-        print("Making final streaming call with processed tools")
+
 
 
         clean_messages = []
@@ -756,10 +754,7 @@ def process_tool_calls(response_dict, tool_map, model, provider, messages, strea
     serializable_result = None
 
     try:
-        print(tool_map[tool_name])
-        print('Executing tool:', tool_name, 'with arguments:', arguments)
         tool_result = tool_map[tool_name](**arguments)
-        print('Executed Tool Result:', tool_result)
     except Exception as e:
         tool_result = f"Error executing tool '{tool_name}': {str(e)}. Tool map is : {tool_map}"
 
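
Every change in this file removes a debug `print` from the tool-calling path; control flow is untouched. The remaining context lines outline the dispatch pattern: each tool call is looked up by name in a `tool_map` dict and invoked with its decoded arguments, and any exception is folded into the result as a string. A self-contained sketch of that pattern (function and variable names here are illustrative, not the package's API):

```python
def run_tool_call(tool_map, tool_name, arguments):
    # Dispatch a model-requested tool call. Failures become a string result
    # instead of raising, mirroring the try/except in process_tool_calls.
    try:
        return tool_map[tool_name](**arguments)
    except Exception as e:
        return f"Error executing tool '{tool_name}': {e}"

tool_map = {"add": lambda a, b: a + b}
print(run_tool_call(tool_map, "add", {"a": 2, "b": 3}))  # 5
print(run_tool_call(tool_map, "add", {"a": 2}))          # error string
```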
npcpy/npc_compiler.py CHANGED
@@ -1202,7 +1202,12 @@ class NPC:
     3. Next, I need to...
     4. Finally, I can conclude...
 
-    Provide your step-by-step analysis:"""
+    Provide your step-by-step analysis.
+    Do not under any circumstances ask for feedback from a user. These thoughts are part of an agentic tool that is letting the agent
+    break down a problem by thinking it through. they will review the results and use them accordingly.
+
+
+    """
 
     response = self.get_llm_response(thinking_prompt, tool_choice = False)
     return response.get('response', 'Unable to process thinking request')
@@ -1437,12 +1442,36 @@ class NPC:
             context.append(f"Recent successes: {'; '.join(successes[-3:])}")
         return "\n".join(context)
 
-    def compress_planning_state(self, planning_state: Dict[str, Any]) -> str:
+
+    def compress_planning_state(self, messages):
+        if isinstance(messages, list):
+            from npcpy.llm_funcs import breathe, get_facts
+
+            conversation_summary = breathe(messages=messages, npc=self)
+            summary_data = conversation_summary.get('output', '')
+
+            conversation_text = "\n".join([msg['content'] for msg in messages])
+            extracted_facts = get_facts(conversation_text, model=self.model, provider=self.provider, npc=self)
+
+            user_inputs = [msg['content'] for msg in messages if msg.get('role') == 'user']
+            assistant_outputs = [msg['content'] for msg in messages if msg.get('role') == 'assistant']
+
+            planning_state = {
+                "goal": summary_data,
+                "facts": [fact['statement'] if isinstance(fact, dict) else str(fact) for fact in extracted_facts[-10:]],
+                "successes": [output[:100] for output in assistant_outputs[-5:]],
+                "mistakes": [],
+                "todos": user_inputs[-3:],
+                "constraints": []
+            }
+        else:
+            planning_state = messages
+
         todos = planning_state.get('todos', [])
         current_index = planning_state.get('current_todo_index', 0)
 
         if todos and current_index < len(todos):
-            current_focus = todos[current_index].get('description', 'No current task')
+            current_focus = todos[current_index].get('description', todos[current_index]) if isinstance(todos[current_index], dict) else str(todos[current_index])
         else:
             current_focus = 'No current task'
 
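
The rewritten `compress_planning_state` accepts either a raw message list, which it condenses into a planning-state dict via `breathe` and `get_facts`, or an already-built dict, which passes through unchanged. The second change makes `todos` entries safe whether they are dicts carrying a `description` key or plain strings, which the message-list path now produces. A standalone sketch of just that normalization (the LLM-backed summarization calls are omitted):

```python
def current_focus_from(planning_state):
    # Tolerate both dict-style and plain-string todo entries,
    # as the updated compress_planning_state does.
    todos = planning_state.get('todos', [])
    idx = planning_state.get('current_todo_index', 0)
    if todos and idx < len(todos):
        todo = todos[idx]
        return todo.get('description', todo) if isinstance(todo, dict) else str(todo)
    return 'No current task'

print(current_focus_from({"todos": [{"description": "write tests"}]}))  # write tests
print(current_focus_from({"todos": ["ship the release"]}))              # ship the release
print(current_focus_from({"todos": []}))                                # No current task
```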
npcpy/serve.py CHANGED
@@ -1778,6 +1778,7 @@ def stream():
 
     try:
         from npcsh.corca import execute_command_corca, create_corca_state_and_mcp_client, MCPClientNPC
+        from npcsh._state import initial_state as state
     except ImportError:
 
         print("ERROR: npcsh.corca or MCPClientNPC not found. Corca mode is disabled.", file=sys.stderr)
npcpy-1.2.12.dist-info/METADATA → npcpy-1.2.14.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.2.12
+Version: 1.2.14
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
npcpy-1.2.12.dist-info/RECORD → npcpy-1.2.14.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
 npcpy/__init__.py,sha256=9imxFtK74_6Rw9rz0kyMnZYl_voPb569tkTlYLt0Urg,131
 npcpy/llm_funcs.py,sha256=bcMOTfVcBqRgHvBV2JxUtChgT1wMWoHhXVFYOlxGDFY,85031
 npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
-npcpy/npc_compiler.py,sha256=wS708nauHNGVZCyc-6kUx-38lOyFi64j2sB22koEFPw,85034
+npcpy/npc_compiler.py,sha256=Yiv4xWsuQvPX5oeFh0z9snguiyx4Ul16KfUyjPWt7uI,86462
 npcpy/npc_sysenv.py,sha256=lPYlKM_TeR4l4-Jcgiqq3CCge8b2oFHdfISD4L_G7eo,30308
 npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
-npcpy/serve.py,sha256=QYks3pwTeNH-JyGz6AEK5sGhza6cHzd_lgHkoJ4gdgY,98283
+npcpy/serve.py,sha256=tyOQ_YWNOVJl5NkbEshcIbpBCR1wohHVg9JtPYEKuMI,98343
 npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
 npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
-npcpy/data/audio.py,sha256=mO9DChyEzo03ek84KkZF-7uqmtC9KwTOHCoKlf_pdqc,11592
+npcpy/data/audio.py,sha256=goon4HfsYgx0bI-n1lhkrzWPrJoejJlycXcB0P62pyk,11280
 npcpy/data/data_models.py,sha256=q7xpI4_nK5HvlOE1XB5u5nFQs4SE5zcgt0kIZJF2dhs,682
 npcpy/data/image.py,sha256=UQcioNPDd5HYMLL_KStf45SuiIPXDcUY-dEFHwSWUeE,6564
 npcpy/data/load.py,sha256=f3-bgKUq_pnfUhqjZdXwfIEZmMbGJpJfGTBjuiYJos8,4258
@@ -18,7 +18,7 @@ npcpy/gen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
 npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
 npcpy/gen/image_gen.py,sha256=ln71jmLoJHekbZYDJpTe5DtOamVte9gjr2BPQ1DzjMQ,14955
-npcpy/gen/response.py,sha256=_k_yOq6vyw-yfNbOa5DluArWfdCQ_lvQc_patKR7OSo,29332
+npcpy/gen/response.py,sha256=xm4wksuPTyNszN8DhxpOWDD6waUqsOgTrXFDL3wMqSo,28765
 npcpy/gen/video_gen.py,sha256=JMp2s2qMp5uy0rOgv6BRZ7nkQI4vdT1hbJ2nSu4s-KA,3243
 npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/memory/command_history.py,sha256=DzwUpXxtswdUXImszlV1TppMMd_aCP01oaeZEjGKk04,39812
@@ -34,8 +34,8 @@ npcpy/work/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
 npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
 npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
-npcpy-1.2.12.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
-npcpy-1.2.12.dist-info/METADATA,sha256=VNvcXK2v7xEqW4oqgjta5KpTe9YBmENnGWIg7q7ZaLo,26084
-npcpy-1.2.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-npcpy-1.2.12.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
-npcpy-1.2.12.dist-info/RECORD,,
+npcpy-1.2.14.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+npcpy-1.2.14.dist-info/METADATA,sha256=VdN6ok5gb3rnAYCFZKciUD9qpGBEr4KKOGmUEAy_DRo,26084
+npcpy-1.2.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcpy-1.2.14.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+npcpy-1.2.14.dist-info/RECORD,,