npcsh 0.3.26__tar.gz → 0.3.27.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. {npcsh-0.3.26/npcsh.egg-info → npcsh-0.3.27.1}/PKG-INFO +16 -17
  2. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/conversation.py +2 -2
  3. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/embeddings.py +2 -1
  4. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/image_gen.py +2 -1
  5. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/knowledge_graph.py +9 -2
  6. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/llm_funcs.py +16 -33
  7. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_sysenv.py +6 -2
  8. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/response.py +2 -2
  9. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/search.py +6 -2
  10. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/shell_helpers.py +3 -4
  11. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/stream.py +4 -2
  12. {npcsh-0.3.26 → npcsh-0.3.27.1/npcsh.egg-info}/PKG-INFO +16 -17
  13. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh.egg-info/requires.txt +15 -16
  14. {npcsh-0.3.26 → npcsh-0.3.27.1}/setup.py +78 -48
  15. {npcsh-0.3.26 → npcsh-0.3.27.1}/LICENSE +0 -0
  16. {npcsh-0.3.26 → npcsh-0.3.27.1}/MANIFEST.in +0 -0
  17. {npcsh-0.3.26 → npcsh-0.3.27.1}/README.md +0 -0
  18. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/__init__.py +0 -0
  19. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/audio.py +0 -0
  20. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/cli.py +0 -0
  21. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/command_history.py +0 -0
  22. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/data_models.py +0 -0
  23. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/dataframes.py +0 -0
  24. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/helpers.py +0 -0
  25. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/image.py +0 -0
  26. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/load_data.py +0 -0
  27. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/main.py +0 -0
  28. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/model_runner.py +0 -0
  29. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_compiler.py +0 -0
  30. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/assembly_lines/test_pipeline.py +0 -0
  31. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/corca.npc +0 -0
  32. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/foreman.npc +0 -0
  33. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/npcsh.ctx +0 -0
  34. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/sibiji.npc +0 -0
  35. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/analytics/celona.npc +0 -0
  36. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  37. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/humanities/eriane.npc +0 -0
  38. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  39. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/marketing/slean.npc +0 -0
  40. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  41. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/sales/turnic.npc +0 -0
  42. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/templates/software/welxor.npc +0 -0
  43. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/tools/calculator.tool +0 -0
  44. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/tools/generic_search.tool +0 -0
  45. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/tools/image_generation.tool +0 -0
  46. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/tools/local_search.tool +0 -0
  47. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/tools/screen_cap.tool +0 -0
  48. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/npc_team/tools/sql_executor.tool +0 -0
  49. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/plonk.py +0 -0
  50. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/serve.py +0 -0
  51. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/shell.py +0 -0
  52. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh/video.py +0 -0
  53. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh.egg-info/SOURCES.txt +0 -0
  54. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh.egg-info/dependency_links.txt +0 -0
  55. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh.egg-info/entry_points.txt +0 -0
  56. {npcsh-0.3.26 → npcsh-0.3.27.1}/npcsh.egg-info/top_level.txt +0 -0
  57. {npcsh-0.3.26 → npcsh-0.3.27.1}/setup.cfg +0 -0
  58. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_chromadb.py +0 -0
  59. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_embedding_check.py +0 -0
  60. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_embedding_methods.py +0 -0
  61. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_helpers.py +0 -0
  62. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_knowledge_graph_rag.py +0 -0
  63. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_llm_funcs.py +0 -0
  64. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_networkx_vis.py +0 -0
  65. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_npc_compiler.py +0 -0
  66. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_npcsh.py +0 -0
  67. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_npcteam.py +0 -0
  68. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_shell_helpers.py +0 -0
  69. {npcsh-0.3.26 → npcsh-0.3.27.1}/tests/test_tool_use.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: npcsh
3
- Version: 0.3.26
3
+ Version: 0.3.27.1
4
4
  Summary: npcsh is a command line tool for integrating LLMs into everyday workflows and for orchestrating teams of NPCs.
5
5
  Home-page: https://github.com/cagostino/npcsh
6
6
  Author: Christopher Agostino
@@ -13,34 +13,18 @@ License-File: LICENSE
13
13
  Requires-Dist: redis
14
14
  Requires-Dist: flask_sse
15
15
  Requires-Dist: anthropic
16
- Requires-Dist: screeninfo
17
- Requires-Dist: sentence_transformers
18
- Requires-Dist: nltk
19
- Requires-Dist: thefuzz
20
16
  Requires-Dist: beautifulsoup4
21
17
  Requires-Dist: google-generativeai
22
18
  Requires-Dist: google-genai
23
19
  Requires-Dist: duckduckgo-search
24
- Requires-Dist: pypdf
25
- Requires-Dist: PyMuPDF
26
- Requires-Dist: opencv-python
27
- Requires-Dist: librosa
28
20
  Requires-Dist: openai
29
21
  Requires-Dist: jinja2
30
- Requires-Dist: pandas
31
- Requires-Dist: matplotlib
32
- Requires-Dist: IPython
33
- Requires-Dist: ollama
34
22
  Requires-Dist: requests
35
23
  Requires-Dist: markdown
36
24
  Requires-Dist: PyYAML
37
25
  Requires-Dist: langchain
38
26
  Requires-Dist: langchain_community
39
- Requires-Dist: pyautogui
40
27
  Requires-Dist: pygments
41
- Requires-Dist: pyttsx3
42
- Requires-Dist: kuzu
43
- Requires-Dist: chromadb
44
28
  Requires-Dist: termcolor
45
29
  Requires-Dist: colorama
46
30
  Requires-Dist: python-dotenv
@@ -48,6 +32,21 @@ Requires-Dist: pytest
48
32
  Requires-Dist: googlesearch-python
49
33
  Requires-Dist: flask
50
34
  Requires-Dist: flask_cors
35
+ Requires-Dist: librosa
36
+ Requires-Dist: pandas
37
+ Requires-Dist: matplotlib
38
+ Requires-Dist: IPython
39
+ Requires-Dist: pyautogui
40
+ Requires-Dist: nltk
41
+ Requires-Dist: thefuzz
42
+ Requires-Dist: pypdf
43
+ Requires-Dist: PyMuPDF
44
+ Requires-Dist: screeninfo
45
+ Requires-Dist: sentence_transformers
46
+ Requires-Dist: opencv-python
47
+ Requires-Dist: ollama
48
+ Requires-Dist: kuzu
49
+ Requires-Dist: chromadb
51
50
  Requires-Dist: diffusers
52
51
  Dynamic: author
53
52
  Dynamic: author-email
@@ -7,9 +7,8 @@
7
7
  from typing import Any, Dict, Generator, List
8
8
  import os
9
9
  import anthropic
10
- import ollama # Add to setup.py if missing
10
+
11
11
  from openai import OpenAI
12
- from diffusers import StableDiffusionPipeline
13
12
  from google.generativeai import types
14
13
  import google.generativeai as genai
15
14
  from .npc_sysenv import get_system_message
@@ -34,6 +33,7 @@ def get_ollama_conversation(
34
33
  Returns:
35
34
  List[Dict[str, str]]: The list of messages in the conversation.
36
35
  """
36
+ import ollama
37
37
 
38
38
  messages_copy = messages.copy()
39
39
  if messages_copy[0]["role"] != "system":
@@ -12,7 +12,6 @@ from npcsh.npc_sysenv import (
12
12
  NPCSH_EMBEDDING_PROVIDER,
13
13
  chroma_client,
14
14
  )
15
- import ollama
16
15
  from openai import OpenAI
17
16
  import anthropic
18
17
 
@@ -21,6 +20,8 @@ def get_ollama_embeddings(
21
20
  texts: List[str], model: str = "nomic-embed-text"
22
21
  ) -> List[List[float]]:
23
22
  """Generate embeddings using Ollama."""
23
+ import ollama
24
+
24
25
  embeddings = []
25
26
  for text in texts:
26
27
  response = ollama.embeddings(model=model, prompt=text)
@@ -10,7 +10,6 @@
10
10
  import os
11
11
 
12
12
  from openai import OpenAI
13
- from diffusers import StableDiffusionPipeline
14
13
 
15
14
 
16
15
  def generate_image_openai(
@@ -66,6 +65,8 @@ def generate_image_hf_diffusion(
66
65
  PIL.Image: The generated image.
67
66
  """
68
67
  # Load the Stable Diffusion pipeline
68
+ from diffusers import StableDiffusionPipeline
69
+
69
70
  pipe = StableDiffusionPipeline.from_pretrained(model)
70
71
  pipe = pipe.to(device)
71
72
 
@@ -3,7 +3,11 @@ import os
3
3
  import datetime
4
4
 
5
5
  import numpy as np
6
- import kuzu
6
+
7
+ try:
8
+ import kuzu
9
+ except ModuleNotFoundError:
10
+ print("kuzu not installed")
7
11
  from typing import Optional, Dict, List, Union, Tuple
8
12
 
9
13
 
@@ -581,7 +585,10 @@ def visualize_graph(conn):
581
585
  plt.show()
582
586
 
583
587
 
584
- import chromadb
588
+ try:
589
+ import chromadb
590
+ except ModuleNotFoundError:
591
+ print("chromadb not installed")
585
592
  import numpy as np
586
593
  import os
587
594
  import datetime
@@ -4,17 +4,10 @@ import requests
4
4
  import os
5
5
  import json
6
6
  import PIL
7
- from PIL import Image
8
7
 
9
8
  import sqlite3
10
9
  from datetime import datetime
11
10
  from typing import List, Dict, Any, Optional, Union, Generator
12
- import typing_extensions as typing
13
- from pydantic import BaseModel, Field
14
-
15
- import base64
16
- import re
17
- import io
18
11
 
19
12
 
20
13
  from jinja2 import Environment, FileSystemLoader, Template, Undefined
@@ -22,15 +15,6 @@ from jinja2 import Environment, FileSystemLoader, Template, Undefined
22
15
  import pandas as pd
23
16
  import numpy as np
24
17
 
25
- # chroma
26
- import chromadb
27
- from chromadb import Client
28
-
29
- # llm providers
30
- import anthropic
31
- import ollama # Add to setup.py if missing
32
- from openai import OpenAI
33
- from diffusers import StableDiffusionPipeline
34
18
  from google.generativeai import types
35
19
  import google.generativeai as genai
36
20
 
@@ -54,7 +38,6 @@ from .npc_sysenv import (
54
38
  NPCSH_API_URL,
55
39
  NPCSH_VISION_MODEL,
56
40
  NPCSH_VISION_PROVIDER,
57
- chroma_client,
58
41
  available_reasoning_models,
59
42
  available_chat_models,
60
43
  )
@@ -150,25 +133,30 @@ def generate_image(
150
133
  # image = generate_image_openai_like(prompt, model, npc.api_url, openai_api_key)
151
134
  elif provider == "diffusers":
152
135
  image = generate_image_hf_diffusion(prompt, model)
136
+ else:
137
+ image = None
153
138
  # save image
154
139
  # check if image is a PIL image
155
140
  if isinstance(image, PIL.Image.Image):
156
141
  image.save(filename)
157
142
  return filename
158
143
 
159
- elif image is not None:
160
- # image is at a private url
161
- response = requests.get(image.data[0].url)
162
- with open(filename, "wb") as file:
163
- file.write(response.content)
164
- from PIL import Image
144
+ else:
145
+ try:
146
+ # image is at a private url
147
+ response = requests.get(image.data[0].url)
148
+ with open(filename, "wb") as file:
149
+ file.write(response.content)
150
+ from PIL import Image
165
151
 
166
- img = Image.open(filename)
167
- img.show()
168
- # console = Console()
169
- # console.print(Image.from_path(filename))
152
+ img = Image.open(filename)
153
+ img.show()
154
+ # console = Console()
155
+ # console.print(Image.from_path(filename))
156
+ return filename
170
157
 
171
- return filename
158
+ except AttributeError as e:
159
+ print(f"Error saving image: {e}")
172
160
 
173
161
 
174
162
  def get_embeddings(
@@ -511,7 +499,6 @@ def execute_llm_question(
511
499
  # messages.append({"role": "assistant", "content": output})
512
500
 
513
501
  else:
514
-
515
502
  response = get_conversation(
516
503
  messages,
517
504
  model=model,
@@ -1030,7 +1017,6 @@ ReAct choices then will enter reasoning flow
1030
1017
  return {"messages": messages, "output": output}
1031
1018
 
1032
1019
  elif action == "answer_question":
1033
-
1034
1020
  if ENTER_REASONING_FLOW:
1035
1021
  print("entering reasoning flow")
1036
1022
  result = enter_reasoning_human_in_the_loop(
@@ -1208,7 +1194,6 @@ def handle_tool_call(
1208
1194
  # print(npc)
1209
1195
  print("handling tool call")
1210
1196
  if not npc:
1211
-
1212
1197
  print(
1213
1198
  f"No tools available for NPC '{npc.name}' or tools_dict is empty. Available tools: {available_tools}"
1214
1199
  )
@@ -1320,7 +1305,6 @@ def handle_tool_call(
1320
1305
  print("Executing tool with input values:", input_values)
1321
1306
 
1322
1307
  try:
1323
-
1324
1308
  tool_output = tool.execute(
1325
1309
  input_values,
1326
1310
  npc.all_tools_dict,
@@ -1335,7 +1319,6 @@ def handle_tool_call(
1335
1319
  if "Error" in tool_output:
1336
1320
  raise Exception(tool_output)
1337
1321
  except Exception as e:
1338
-
1339
1322
  # diagnose_problem = get_llm_response(
1340
1323
  ## f"""a problem has occurred.
1341
1324
  # Please provide a diagnosis of the problem and a suggested #fix.
@@ -3,7 +3,6 @@ from datetime import datetime
3
3
  from typing import Any
4
4
  import os
5
5
  import io
6
- import chromadb
7
6
  import sqlite3
8
7
  from dotenv import load_dotenv
9
8
  from PIL import Image
@@ -222,7 +221,12 @@ available_chat_models, available_reasoning_models = get_available_models()
222
221
 
223
222
  EMBEDDINGS_DB_PATH = os.path.expanduser("~/npcsh_chroma.db")
224
223
 
225
- chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH)
224
+ try:
225
+ import chromadb
226
+
227
+ chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH)
228
+ except:
229
+ chroma_client = None
226
230
 
227
231
 
228
232
  # Load environment variables from .env file
@@ -2,9 +2,7 @@ from typing import Any, Dict, Generator, List, Union
2
2
  from pydantic import BaseModel
3
3
  import os
4
4
  import anthropic
5
- import ollama # Add to setup.py if missing
6
5
  from openai import OpenAI
7
- from diffusers import StableDiffusionPipeline
8
6
  from google.generativeai import types
9
7
  from google import genai
10
8
 
@@ -143,6 +141,8 @@ def get_ollama_response(
143
141
  Returns:
144
142
  Dict[str, Any]: The response, optionally including updated messages.
145
143
  """
144
+ import ollama
145
+
146
146
  # try:
147
147
  # Prepare the message payload
148
148
  system_message = get_system_message(npc) if npc else "You are a helpful assistant."
@@ -162,8 +162,12 @@ def rag_search(
162
162
 
163
163
  """
164
164
  if embedding_model is None:
165
- embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
166
-
165
+ try:
166
+ embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
167
+ except:
168
+ raise Exception(
169
+ "Please install the sentence-transformers library to use this function or provide an embedding transformer model."
170
+ )
167
171
  results = []
168
172
 
169
173
  # Compute the embedding of the query
@@ -1433,7 +1433,6 @@ def execute_slash_command(
1433
1433
  command_parts, model=model, provider=provider, npc=npc, api_url=api_url
1434
1434
  )
1435
1435
  elif command_name == "help": # New help command
1436
-
1437
1436
  return {
1438
1437
  "messages": messages,
1439
1438
  "output": get_help(),
@@ -1763,7 +1762,7 @@ def execute_command(
1763
1762
  messages: list = None,
1764
1763
  conversation_id: str = None,
1765
1764
  stream: bool = False,
1766
- embedding_model: Union[SentenceTransformer, Any] = None,
1765
+ embedding_model=None,
1767
1766
  ):
1768
1767
  """
1769
1768
  Function Description:
@@ -1774,7 +1773,7 @@ def execute_command(
1774
1773
  db_path : str : Database path
1775
1774
  npc_compiler : NPCCompiler : NPC compiler
1776
1775
  Keyword Args:
1777
- embedding_model : Union[SentenceTransformer, Any] : Embedding model
1776
+ embedding_model : Embedding model
1778
1777
  current_npc : NPC : Current NPC
1779
1778
  messages : list : Messages
1780
1779
  Returns:
@@ -2086,7 +2085,7 @@ def execute_command_stream(
2086
2085
  command: str,
2087
2086
  db_path: str,
2088
2087
  npc_compiler: NPCCompiler,
2089
- embedding_model: Union[SentenceTransformer, Any] = None,
2088
+ embedding_model=None,
2090
2089
  current_npc: NPC = None,
2091
2090
  model: str = None,
2092
2091
  provider: str = None,
@@ -10,9 +10,7 @@ from npcsh.npc_sysenv import get_system_message
10
10
  from typing import Any, Dict, Generator, List
11
11
  import os
12
12
  import anthropic
13
- import ollama # Add to setup.py if missing
14
13
  from openai import OpenAI
15
- from diffusers import StableDiffusionPipeline
16
14
  from google import genai
17
15
 
18
16
  from google.generativeai import types
@@ -53,6 +51,8 @@ def get_anthropic_stream(
53
51
  messages = messages[1:]
54
52
  elif npc is not None:
55
53
  system_message = get_system_message(npc)
54
+ else:
55
+ system_message = "You are a helpful assistant."
56
56
 
57
57
  # Preprocess messages to ensure content is a list of dicts
58
58
  for message in messages:
@@ -274,6 +274,8 @@ def get_ollama_stream(
274
274
  **kwargs,
275
275
  ) -> Generator:
276
276
  """Streams responses from Ollama, supporting images and tools."""
277
+ import ollama
278
+
277
279
  messages_copy = messages.copy()
278
280
 
279
281
  # Handle images if provided
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: npcsh
3
- Version: 0.3.26
3
+ Version: 0.3.27.1
4
4
  Summary: npcsh is a command line tool for integrating LLMs into everyday workflows and for orchestrating teams of NPCs.
5
5
  Home-page: https://github.com/cagostino/npcsh
6
6
  Author: Christopher Agostino
@@ -13,34 +13,18 @@ License-File: LICENSE
13
13
  Requires-Dist: redis
14
14
  Requires-Dist: flask_sse
15
15
  Requires-Dist: anthropic
16
- Requires-Dist: screeninfo
17
- Requires-Dist: sentence_transformers
18
- Requires-Dist: nltk
19
- Requires-Dist: thefuzz
20
16
  Requires-Dist: beautifulsoup4
21
17
  Requires-Dist: google-generativeai
22
18
  Requires-Dist: google-genai
23
19
  Requires-Dist: duckduckgo-search
24
- Requires-Dist: pypdf
25
- Requires-Dist: PyMuPDF
26
- Requires-Dist: opencv-python
27
- Requires-Dist: librosa
28
20
  Requires-Dist: openai
29
21
  Requires-Dist: jinja2
30
- Requires-Dist: pandas
31
- Requires-Dist: matplotlib
32
- Requires-Dist: IPython
33
- Requires-Dist: ollama
34
22
  Requires-Dist: requests
35
23
  Requires-Dist: markdown
36
24
  Requires-Dist: PyYAML
37
25
  Requires-Dist: langchain
38
26
  Requires-Dist: langchain_community
39
- Requires-Dist: pyautogui
40
27
  Requires-Dist: pygments
41
- Requires-Dist: pyttsx3
42
- Requires-Dist: kuzu
43
- Requires-Dist: chromadb
44
28
  Requires-Dist: termcolor
45
29
  Requires-Dist: colorama
46
30
  Requires-Dist: python-dotenv
@@ -48,6 +32,21 @@ Requires-Dist: pytest
48
32
  Requires-Dist: googlesearch-python
49
33
  Requires-Dist: flask
50
34
  Requires-Dist: flask_cors
35
+ Requires-Dist: librosa
36
+ Requires-Dist: pandas
37
+ Requires-Dist: matplotlib
38
+ Requires-Dist: IPython
39
+ Requires-Dist: pyautogui
40
+ Requires-Dist: nltk
41
+ Requires-Dist: thefuzz
42
+ Requires-Dist: pypdf
43
+ Requires-Dist: PyMuPDF
44
+ Requires-Dist: screeninfo
45
+ Requires-Dist: sentence_transformers
46
+ Requires-Dist: opencv-python
47
+ Requires-Dist: ollama
48
+ Requires-Dist: kuzu
49
+ Requires-Dist: chromadb
51
50
  Requires-Dist: diffusers
52
51
  Dynamic: author
53
52
  Dynamic: author-email
@@ -1,34 +1,18 @@
1
1
  redis
2
2
  flask_sse
3
3
  anthropic
4
- screeninfo
5
- sentence_transformers
6
- nltk
7
- thefuzz
8
4
  beautifulsoup4
9
5
  google-generativeai
10
6
  google-genai
11
7
  duckduckgo-search
12
- pypdf
13
- PyMuPDF
14
- opencv-python
15
- librosa
16
8
  openai
17
9
  jinja2
18
- pandas
19
- matplotlib
20
- IPython
21
- ollama
22
10
  requests
23
11
  markdown
24
12
  PyYAML
25
13
  langchain
26
14
  langchain_community
27
- pyautogui
28
15
  pygments
29
- pyttsx3
30
- kuzu
31
- chromadb
32
16
  termcolor
33
17
  colorama
34
18
  python-dotenv
@@ -36,4 +20,19 @@ pytest
36
20
  googlesearch-python
37
21
  flask
38
22
  flask_cors
23
+ librosa
24
+ pandas
25
+ matplotlib
26
+ IPython
27
+ pyautogui
28
+ nltk
29
+ thefuzz
30
+ pypdf
31
+ PyMuPDF
32
+ screeninfo
33
+ sentence_transformers
34
+ opencv-python
35
+ ollama
36
+ kuzu
37
+ chromadb
39
38
  diffusers
@@ -1,7 +1,15 @@
1
- from setuptools import setup, find_packages
1
+ # Force lite installation if environment variable is set
2
2
  import os
3
- import site
4
3
 
4
+ import sys
5
+
6
+ if os.environ.get("NPCSH_LITE_INSTALL", "").lower() == "true":
7
+
8
+ sys.argv.append("--config-settings")
9
+ sys.argv.append("install.lite=true")
10
+
11
+ from setuptools import setup, find_packages
12
+ import site
5
13
  import platform
6
14
  from pathlib import Path
7
15
 
@@ -53,57 +61,78 @@ To configure your API keys and preferences.
53
61
  return "" # Return empty string for non-Windows platforms
54
62
 
55
63
 
64
+ # Define core (lite) requirements
65
+ core_requirements = [
66
+ "redis",
67
+ "flask_sse",
68
+ "anthropic",
69
+ "beautifulsoup4",
70
+ "google-generativeai",
71
+ "google-genai",
72
+ "duckduckgo-search",
73
+ "openai",
74
+ "jinja2",
75
+ "requests",
76
+ "markdown",
77
+ "PyYAML",
78
+ "langchain",
79
+ "langchain_community",
80
+ "pygments",
81
+ "termcolor",
82
+ "colorama",
83
+ "python-dotenv",
84
+ "pytest",
85
+ "googlesearch-python",
86
+ "flask",
87
+ "flask_cors",
88
+ "librosa",
89
+ "pandas",
90
+ "matplotlib",
91
+ "IPython",
92
+ "pyautogui",
93
+ "nltk",
94
+ "thefuzz",
95
+ "pypdf",
96
+ "PyMuPDF",
97
+ "screeninfo",
98
+ ]
99
+
100
+ # Define additional requirements for full installation
101
+ extra_requirements = [
102
+ "sentence_transformers",
103
+ "opencv-python",
104
+ "ollama",
105
+ "kuzu",
106
+ "chromadb",
107
+ "diffusers",
108
+ ]
109
+
110
+ # Define audio requirements
111
+ audio_requirements = [
112
+ "openai-whisper",
113
+ "pyaudio",
114
+ "gtts",
115
+ "playsound==1.2.2",
116
+ "pyttsx3",
117
+ ]
118
+
56
119
  extra_files = package_files("npcsh/npc_team/")
57
120
 
121
+
122
+ def get_requirements():
123
+ # Check if lite installation was requested via sys.argv
124
+ if any("install.lite=true" in arg for arg in sys.argv):
125
+ return core_requirements
126
+ elif os.environ.get("NPCSH_AUDIO_INSTALL", "").lower() == "true":
127
+ return core_requirements + extra_requirements + audio_requirements
128
+ return core_requirements + extra_requirements
129
+
130
+
58
131
  setup(
59
132
  name="npcsh",
60
- version="0.3.26",
133
+ version="0.3.27.1",
61
134
  packages=find_packages(exclude=["tests*"]),
62
- install_requires=[
63
- "redis",
64
- "flask_sse",
65
- "anthropic",
66
- "screeninfo",
67
- "sentence_transformers",
68
- "nltk",
69
- "thefuzz",
70
- "beautifulsoup4",
71
- "google-generativeai",
72
- "google-genai",
73
- "duckduckgo-search",
74
- "pypdf",
75
- "PyMuPDF",
76
- "opencv-python",
77
- "librosa",
78
- "openai",
79
- "jinja2",
80
- "pandas",
81
- "matplotlib",
82
- "IPython",
83
- "ollama",
84
- "requests",
85
- "markdown",
86
- "PyYAML",
87
- "langchain",
88
- "langchain_community",
89
- "pyautogui",
90
- # "openai-whisper",
91
- # "pyaudio",
92
- # "gtts",
93
- # "playsound==1.2.2",
94
- "pygments",
95
- "pyttsx3",
96
- "kuzu",
97
- "chromadb",
98
- "termcolor",
99
- "colorama",
100
- "python-dotenv",
101
- "pytest",
102
- "googlesearch-python",
103
- "flask",
104
- "flask_cors",
105
- "diffusers",
106
- ],
135
+ install_requires=get_requirements(),
107
136
  entry_points={
108
137
  "console_scripts": [
109
138
  "npcsh=npcsh.shell:main",
@@ -124,6 +153,7 @@ setup(
124
153
  data_files=[("npcsh/npc_team", extra_files)],
125
154
  python_requires=">=3.10",
126
155
  )
156
+
127
157
  # Print setup message only on Windows
128
158
  if platform.system() == "Windows":
129
159
  print(get_setup_message())
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes