npcsh 0.3.30__py3-none-any.whl → 0.3.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. npcsh/audio.py +540 -181
  2. npcsh/audio_gen.py +1 -0
  3. npcsh/cli.py +37 -19
  4. npcsh/conversation.py +14 -251
  5. npcsh/dataframes.py +13 -5
  6. npcsh/helpers.py +5 -0
  7. npcsh/image.py +2 -4
  8. npcsh/image_gen.py +38 -38
  9. npcsh/knowledge_graph.py +4 -4
  10. npcsh/llm_funcs.py +517 -349
  11. npcsh/npc_compiler.py +44 -23
  12. npcsh/npc_sysenv.py +5 -0
  13. npcsh/npc_team/npcsh.ctx +8 -2
  14. npcsh/npc_team/tools/generic_search.tool +9 -1
  15. npcsh/plonk.py +2 -2
  16. npcsh/response.py +131 -482
  17. npcsh/search.py +20 -9
  18. npcsh/serve.py +210 -203
  19. npcsh/shell.py +78 -80
  20. npcsh/shell_helpers.py +513 -102
  21. npcsh/stream.py +87 -554
  22. npcsh/video.py +5 -2
  23. npcsh/video_gen.py +69 -0
  24. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/generic_search.tool +9 -1
  25. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh.ctx +8 -2
  26. npcsh-0.3.32.dist-info/METADATA +779 -0
  27. npcsh-0.3.32.dist-info/RECORD +78 -0
  28. npcsh-0.3.30.dist-info/METADATA +0 -1862
  29. npcsh-0.3.30.dist-info/RECORD +0 -76
  30. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/bash_executer.tool +0 -0
  31. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/calculator.tool +0 -0
  32. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/celona.npc +0 -0
  33. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/code_executor.tool +0 -0
  34. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/corca.npc +0 -0
  35. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/eriane.npc +0 -0
  36. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/foreman.npc +0 -0
  37. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/image_generation.tool +0 -0
  38. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/lineru.npc +0 -0
  39. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/local_search.tool +0 -0
  40. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/maurawa.npc +0 -0
  41. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh_executor.tool +0 -0
  42. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/raone.npc +0 -0
  43. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/screen_cap.tool +0 -0
  44. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  45. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/slean.npc +0 -0
  46. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sql_executor.tool +0 -0
  47. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/test_pipeline.py +0 -0
  48. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/turnic.npc +0 -0
  49. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/welxor.npc +0 -0
  50. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/WHEEL +0 -0
  51. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/entry_points.txt +0 -0
  52. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/licenses/LICENSE +0 -0
  53. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/top_level.txt +0 -0
npcsh/search.py CHANGED
@@ -5,6 +5,7 @@ import os
 
 from bs4 import BeautifulSoup
 from duckduckgo_search import DDGS
+from duckduckgo_search.exceptions import DuckDuckGoSearchException
 
 try:
     from googlesearch import search
@@ -41,7 +42,6 @@ def search_perplexity(
         "max_tokens": max_tokens,
         "temperature": temperature,
         "top_p": top_p,
-        "search_domain_filter": ["perplexity.ai"],
         "return_images": False,
         "return_related_questions": False,
         "search_recency_filter": "month",
@@ -58,6 +58,7 @@ def search_perplexity(
     # Make the POST request to the API
    response = requests.post(url, json=payload, headers=headers)
    response = json.loads(response.text)
+   print(response)
    return [response["choices"][0]["message"]["content"], response["citations"]]
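
Taken together, these two hunks mean search_perplexity no longer restricts results to perplexity.ai sources and now echoes the raw API response before returning. A minimal caller sketch, assuming the query string is the only required argument (this diff does not show the full signature):

    # Hypothetical usage; the function returns [content, citations] per the diff.
    answer, citations = search_perplexity("what is npcsh?")
    print(answer)
    for url in citations:
        print("source:", url)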
@@ -88,14 +89,24 @@ def search_web(
         return search_result
 
     if provider == "duckduckgo":
-        ddgs = DDGS()
-        search_results = ddgs.text(query, max_results=num_results)
-        print(search_results, type(search_results))
-        urls = [r["href"] for r in search_results]
-        results = [
-            {"title": r["title"], "link": r["href"], "content": r["body"]}
-            for r in search_results
-        ]
+        headers = {
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:124.0) Gecko/20100101 Firefox/124.0"
+        }
+        ddgs = DDGS(headers=headers)
+
+        try:
+            search_results = ddgs.text(query, max_results=num_results)
+            print(search_results, type(search_results))
+            urls = [r["href"] for r in search_results]
+            results = [
+                {"title": r["title"], "link": r["href"], "content": r["body"]}
+                for r in search_results
+            ]
+        except DuckDuckGoSearchException as e:
+            print("DuckDuckGo search failed: ", e)
+            urls = []
+            results = []
+
     else:  # google
         urls = list(search(query, num_results=num_results))
         # google shit doesnt seem to be working anymore, apparently a lbock they made on browsers without js?
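
The duckduckgo branch now spoofs a browser User-Agent and degrades to empty results instead of raising. A standalone sketch of the same pattern, using only calls shown in this diff (the query string is illustrative):

    from duckduckgo_search import DDGS
    from duckduckgo_search.exceptions import DuckDuckGoSearchException

    headers = {"User-Agent": "Mozilla/5.0 ..."}  # abbreviated browser UA
    try:
        hits = DDGS(headers=headers).text("npcsh shell", max_results=5)
        results = [
            {"title": h["title"], "link": h["href"], "content": h["body"]}
            for h in hits
        ]
    except DuckDuckGoSearchException:
        results = []  # callers must now tolerate an empty result set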
npcsh/serve.py CHANGED
@@ -69,6 +69,105 @@ CORS(
 )
 
 
+def get_locally_available_models(project_directory):
+    # check if anthropic, gemini, openai keys exist in project folder env
+    # also try to get ollama
+    available_models_providers = []
+    # get the project env
+    env_path = os.path.join(project_directory, ".env")
+    env_vars = {}
+    if os.path.exists(env_path):
+        with open(env_path, "r") as f:
+            for line in f:
+                line = line.strip()
+                if line and not line.startswith("#"):
+                    if "=" in line:
+                        key, value = line.split("=", 1)
+                        env_vars[key.strip()] = value.strip().strip("\"'")
+    # check if the keys exist in the env
+    if "ANTHROPIC_API_KEY" in env_vars:
+        import anthropic
+
+        client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
+        models = client.models.list()
+        for model in models.data:
+
+            available_models_providers.append(
+                {
+                    "model": model.id,
+                    "provider": "anthropic",
+                }
+            )
+
+    if "OPENAI_API_KEY" in env_vars:
+        import openai
+
+        openai.api_key = env_vars["OPENAI_API_KEY"]
+        models = openai.models.list()
+
+        for model in models.data:
+            if (
+                (
+                    "gpt" in model.id
+                    or "o1" in model.id
+                    or "o3" in model.id
+                    or "chat" in model.id
+                )
+                and "audio" not in model.id
+                and "realtime" not in model.id
+            ):
+
+                available_models_providers.append(
+                    {
+                        "model": model.id,
+                        "provider": "openai",
+                    }
+                )
+    if "GEMINI_API_KEY" in env_vars:
+        import google.generativeai as gemini
+
+        gemini.configure(api_key=env_vars["GEMINI_API_KEY"])
+        models = gemini.list_models()
+        # available_models_providers.append(
+        #     {
+        #         "model": "gemini-2.5-pro",
+        #         "provider": "gemini",
+        #     }
+        # )
+        available_models_providers.append(
+            {
+                "model": "gemini-2.0-flash-lite",
+                "provider": "gemini",
+            }
+        )
+
+    if "DEEPSEEK_API_KEY" in env_vars:
+        available_models_providers.append(
+            {"model": "deepseek-chat", "provider": "deepseek"}
+        )
+        available_models_providers.append(
+            {"model": "deepseek-reasoner", "provider": "deepseek"}
+        )
+
+    try:
+        import ollama
+
+        models = ollama.list()
+        for model in models:
+            if "embed" not in model.model:
+                mod = model.model
+                available_models_providers.append(
+                    {
+                        "model": mod,
+                        "provider": "ollama",
+                    }
+                )
+
+    except Exception as e:
+        print(f"Error loading ollama models: {e}")
+    return available_models_providers
+
+
 def get_db_connection():
     conn = sqlite3.connect(db_path)
     conn.row_factory = sqlite3.Row
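
A sketch of what the new helper yields when pointed at a project directory whose .env defines provider keys; the path and the example output are illustrative, not taken from the package:

    # Hypothetical call; each entry is a {"model": ..., "provider": ...} dict.
    models = get_locally_available_models("/home/user/myproject")
    # e.g. [{"model": "claude-3-5-sonnet-latest", "provider": "anthropic"},
    #       {"model": "deepseek-chat", "provider": "deepseek"},
    #       {"model": "llama3.2:latest", "provider": "ollama"}]
    for entry in models:
        print(f'{entry["provider"]:>10}  {entry["model"]}')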
@@ -342,6 +441,67 @@ def save_project_settings():
         return jsonify({"error": str(e)}), 500
 
 
+@app.route("/api/models", methods=["GET"])
+def get_models():
+    """
+    Endpoint to retrieve available models based on the current project path.
+    Checks for local configurations (.env) and Ollama.
+    """
+    current_path = request.args.get("currentPath")
+    if not current_path:
+        # Fallback to a default path or user home if needed,
+        # but ideally the frontend should always provide it.
+        current_path = os.path.expanduser("~/.npcsh")  # Or handle error
+        print("Warning: No currentPath provided for /api/models, using default.")
+        # return jsonify({"error": "currentPath parameter is required"}), 400
+
+    try:
+        # Reuse the existing function to detect models
+        available_models = get_locally_available_models(current_path)
+
+        # Optionally, add more details or format the response if needed
+        # Example: Add a display name
+        formatted_models = []
+        for m in available_models:
+            # Basic formatting, customize as needed
+            text_only = (
+                "(text only)"
+                if m["provider"] == "ollama"
+                and m["model"] in ["llama3.2", "deepseek-v3", "phi4"]
+                else ""
+            )
+            # Handle specific known model names for display
+            display_model = m["model"]
+            if "claude-3-5-haiku-latest" in m["model"]:
+                display_model = "claude-3.5-haiku"
+            elif "claude-3-5-sonnet-latest" in m["model"]:
+                display_model = "claude-3.5-sonnet"
+            elif "gemini-1.5-flash" in m["model"]:
+                display_model = "gemini-1.5-flash"  # Handle multiple versions if needed
+            elif "gemini-2.0-flash-lite-preview-02-05" in m["model"]:
+                display_model = "gemini-2.0-flash-lite-preview"
+
+            display_name = f"{display_model} | {m['provider']} {text_only}".strip()
+
+            formatted_models.append(
+                {
+                    "value": m["model"],  # Use the actual model ID as the value
+                    "provider": m["provider"],
+                    "display_name": display_name,
+                }
+            )
+
+        return jsonify({"models": formatted_models, "error": None})
+
+    except Exception as e:
+        print(f"Error getting available models: {str(e)}")
+        import traceback
+
+        traceback.print_exc()
+        # Return an empty list or a specific error structure
+        return jsonify({"models": [], "error": str(e)}), 500
+
+
 @app.route("/api/stream", methods=["POST"])
 def stream():
     """SSE stream that takes messages, models, providers, and attachments from frontend."""
@@ -422,9 +582,6 @@ def stream():
     )
     message_id = command_history.generate_message_id()
 
-    # if len(images) > 0:
-    # go straight to get stream instead of executing , will continue this way to avoid npc
-    # loading issues for now.
     stream_response = get_stream(
         messages,
         images=images,
@@ -433,19 +590,6 @@ def stream():
         npc=npc if isinstance(npc, NPC) else None,
     )
 
-    """else:
-
-        stream_response = execute_command_stream(
-            commandstr,
-            command_history,
-            db_path,
-            npc_compiler,
-            model=model,
-            provider=provider,
-            messages=messages,
-            images=images,  # Pass the processed images
-        )  # Get all conversation messages so far
-    """
     final_response = ""  # To accumulate the assistant's response
 
     complete_response = []  # List to store all chunks
@@ -453,100 +597,31 @@ def stream():
     def event_stream():
         for response_chunk in stream_response:
             chunk_content = ""
-
-            # Extract content based on model type
-            if model.startswith("gpt-4o"):
-                chunk_content = "".join(
-                    choice.delta.content
-                    for choice in response_chunk.choices
-                    if choice.delta.content is not None
-                )
-                if chunk_content:
-                    complete_response.append(chunk_content)
-                chunk_data = {
-                    "id": response_chunk.id,
-                    "object": response_chunk.object,
-                    "created": response_chunk.created,
-                    "model": response_chunk.model,
-                    "choices": [
-                        {
-                            "index": choice.index,
-                            "delta": {
-                                "content": choice.delta.content,
-                                "role": choice.delta.role,
-                            },
-                            "finish_reason": choice.finish_reason,
-                        }
-                        for choice in response_chunk.choices
-                    ],
-                }
-                yield f"data: {json.dumps(chunk_data)}\n\n"
-
-            elif model.startswith("llama"):
-                chunk_content = response_chunk["message"]["content"]
-                if chunk_content:
-                    complete_response.append(chunk_content)
-                chunk_data = {
-                    "id": None,
-                    "object": None,
-                    "created": response_chunk["created_at"],
-                    "model": response_chunk["model"],
-                    "choices": [
-                        {
-                            "index": 0,
-                            "delta": {
-                                "content": chunk_content,
-                                "role": response_chunk["message"]["role"],
-                            },
-                            "finish_reason": response_chunk.get("done_reason"),
-                        }
-                    ],
-                }
-                yield f"data: {json.dumps(chunk_data)}\n\n"
-
-            elif model.startswith("claude"):
-                print(response_chunk)
-                if response_chunk.type == "message_start":
-                    chunk_data = {
-                        "id": None,
-                        "object": None,
-                        "created": None,
-                        "model": model,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta": {
-                                    "content": "",
-                                    "role": "assistant",
-                                },
-                                "finish_reason": "",
-                            }
-                        ],
-                    }
-                    yield f"data: {json.dumps(chunk_data)}\n\n"
-
-                if response_chunk.type == "content_block_delta":
-                    chunk_content = response_chunk.delta.text
-                    if chunk_content:
-                        complete_response.append(chunk_content)
-                    chunk_data = {
-                        "id": None,
-                        "object": None,
-                        "created": None,
-                        "model": model,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta": {
-                                    "content": chunk_content,
-                                    "role": "assistant",
-                                },
-                                "finish_reason": response_chunk.delta.type,
-                            }
-                        ],
-                    }
-                    yield f"data: {json.dumps(chunk_data)}\n\n"
-
+            chunk_content = "".join(
+                choice.delta.content
+                for choice in response_chunk.choices
+                if choice.delta.content is not None
+            )
+            if chunk_content:
+                complete_response.append(chunk_content)
+            chunk_data = {
+                "id": response_chunk.id,
+                "object": response_chunk.object,
+                "created": response_chunk.created,
+                "model": response_chunk.model,
+                "choices": [
+                    {
+                        "index": choice.index,
+                        "delta": {
+                            "content": choice.delta.content,
+                            "role": choice.delta.role,
+                        },
+                        "finish_reason": choice.finish_reason,
+                    }
+                    for choice in response_chunk.choices
+                ],
+            }
+            yield f"data: {json.dumps(chunk_data)}\n\n"
         save_conversation_message(
             command_history,
             conversation_id,
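
With the per-provider branching gone, every chunk sent to the client has the same OpenAI-style shape, so one parsing path suffices on the consumer side as well. A sketch of reading the SSE stream (the URL, port, and request payload fields are assumptions; the diff only shows the response format):

    import json
    import requests

    payload = {"messages": [{"role": "user", "content": "hello"}]}  # fields assumed
    url = "http://localhost:5337/api/stream"  # host/port assumed
    with requests.post(url, json=payload, stream=True) as r:
        for raw in r.iter_lines():
            if raw.startswith(b"data: "):
                chunk = json.loads(raw[len(b"data: "):])
                for choice in chunk["choices"]:
                    print(choice["delta"]["content"] or "", end="", flush=True)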
@@ -1198,101 +1273,33 @@ def stream_raw():
         for response_chunk in stream_response:
             chunk_content = ""
 
-            # Extract content based on model type
-            if model.startswith("gpt-4o"):
-                chunk_content = "".join(
-                    choice.delta.content
-                    for choice in response_chunk.choices
-                    if choice.delta.content is not None
-                )
-                if chunk_content:
-                    complete_response.append(chunk_content)
-                chunk_data = {
-                    "type": "content",  # Added type
-                    "id": response_chunk.id,
-                    "object": response_chunk.object,
-                    "created": response_chunk.created,
-                    "model": response_chunk.model,
-                    "choices": [
-                        {
-                            "index": choice.index,
-                            "delta": {
-                                "content": choice.delta.content,
-                                "role": choice.delta.role,
-                            },
-                            "finish_reason": choice.finish_reason,
-                        }
-                        for choice in response_chunk.choices
-                    ],
-                }
-                yield f"{json.dumps(chunk_data)}\n\n"
-
-            elif model.startswith("llama"):
-                chunk_content = response_chunk["message"]["content"]
-                if chunk_content:
-                    complete_response.append(chunk_content)
-                chunk_data = {
-                    "type": "content",  # Added type
-                    "id": None,
-                    "object": None,
-                    "created": response_chunk["created_at"],
-                    "model": response_chunk["model"],
-                    "choices": [
-                        {
-                            "index": 0,
-                            "delta": {
-                                "content": chunk_content,
-                                "role": response_chunk["message"]["role"],
-                            },
-                            "finish_reason": response_chunk.get("done_reason"),
-                        }
-                    ],
-                }
-                yield f"{json.dumps(chunk_data)}\n\n"
-            elif model.startswith("claude"):
-                print(response_chunk)
-                if response_chunk.type == "message_start":
-                    chunk_data = {
-                        "type": "message_start",  # Added type
-                        "id": None,
-                        "object": None,
-                        "created": None,
-                        "model": model,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta": {
-                                    "content": "",
-                                    "role": "assistant",
-                                },
-                                "finish_reason": "",
-                            }
-                        ],
-                    }
-                    yield f"{json.dumps(chunk_data)}\n\n"
-                if response_chunk.type == "content_block_delta":
-                    chunk_content = response_chunk.delta.text
-                    if chunk_content:
-                        complete_response.append(chunk_content)
-                    chunk_data = {
-                        "type": "content",  # Added type
-                        "content": chunk_content,
-                        "id": None,
-                        "object": None,
-                        "created": None,
-                        "model": model,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta": {
-                                    "content": chunk_content,
-                                    "role": "assistant",
-                                },
-                                "finish_reason": response_chunk.delta.type,
-                            }
-                        ],
-                    }
-                    yield f"{json.dumps(chunk_data)}\n\n"
+            chunk_content = "".join(
+                choice.delta.content
+                for choice in response_chunk.choices
+                if choice.delta.content is not None
+            )
+            if chunk_content:
+                complete_response.append(chunk_content)
+            chunk_data = {
+                "type": "content",  # Added type
+                "id": response_chunk.id,
+                "object": response_chunk.object,
+                "created": response_chunk.created,
+                "model": response_chunk.model,
+                "choices": [
+                    {
+                        "index": choice.index,
+                        "delta": {
+                            "content": choice.delta.content,
+                            "role": choice.delta.role,
+                        },
+                        "finish_reason": choice.finish_reason,
+                    }
+                    for choice in response_chunk.choices
+                ],
+            }
+            yield f"{json.dumps(chunk_data)}\n\n"
+
         if save_to_sqlite3:
             save_conversation_message(
                 command_history,
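
stream_raw gets the same consolidation, with two differences visible in the hunk: chunks carry a "type" field and are emitted as bare JSON lines rather than SSE "data:" frames. A hedged consumer sketch (the route path for stream_raw is not shown in this diff and is assumed, as are host and port):

    import json
    import requests

    url = "http://localhost:5337/api/stream_raw"  # route and port assumed
    with requests.post(url, json={"messages": []}, stream=True) as r:
        for raw in r.iter_lines():
            if not raw:
                continue  # skip the blank lines between JSON frames
            chunk = json.loads(raw)
            if chunk.get("type") == "content":
                for choice in chunk["choices"]:
                    print(choice["delta"]["content"] or "", end="")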