npcsh 0.3.32__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their respective public registries.
Files changed (93)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.1.dist-info/METADATA +596 -0
  16. npcsh-1.0.1.dist-info/RECORD +21 -0
  17. {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.1.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -569
  21. npcsh/audio_gen.py +0 -1
  22. npcsh/cli.py +0 -543
  23. npcsh/command_history.py +0 -566
  24. npcsh/conversation.py +0 -54
  25. npcsh/data_models.py +0 -46
  26. npcsh/dataframes.py +0 -171
  27. npcsh/embeddings.py +0 -168
  28. npcsh/helpers.py +0 -646
  29. npcsh/image.py +0 -298
  30. npcsh/image_gen.py +0 -79
  31. npcsh/knowledge_graph.py +0 -1006
  32. npcsh/llm_funcs.py +0 -2195
  33. npcsh/load_data.py +0 -83
  34. npcsh/main.py +0 -5
  35. npcsh/model_runner.py +0 -189
  36. npcsh/npc_compiler.py +0 -2879
  37. npcsh/npc_sysenv.py +0 -388
  38. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  39. npcsh/npc_team/corca.npc +0 -13
  40. npcsh/npc_team/foreman.npc +0 -7
  41. npcsh/npc_team/npcsh.ctx +0 -11
  42. npcsh/npc_team/sibiji.npc +0 -4
  43. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  44. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  45. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  46. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  47. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  48. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  49. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  50. npcsh/npc_team/templates/software/welxor.npc +0 -0
  51. npcsh/npc_team/tools/bash_executer.tool +0 -32
  52. npcsh/npc_team/tools/calculator.tool +0 -8
  53. npcsh/npc_team/tools/code_executor.tool +0 -16
  54. npcsh/npc_team/tools/generic_search.tool +0 -27
  55. npcsh/npc_team/tools/image_generation.tool +0 -25
  56. npcsh/npc_team/tools/local_search.tool +0 -149
  57. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  58. npcsh/npc_team/tools/screen_cap.tool +0 -27
  59. npcsh/npc_team/tools/sql_executor.tool +0 -26
  60. npcsh/response.py +0 -272
  61. npcsh/search.py +0 -252
  62. npcsh/serve.py +0 -1467
  63. npcsh/shell.py +0 -524
  64. npcsh/shell_helpers.py +0 -3919
  65. npcsh/stream.py +0 -233
  66. npcsh/video.py +0 -52
  67. npcsh/video_gen.py +0 -69
  68. npcsh-0.3.32.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  69. npcsh-0.3.32.data/data/npcsh/npc_team/calculator.tool +0 -8
  70. npcsh-0.3.32.data/data/npcsh/npc_team/celona.npc +0 -0
  71. npcsh-0.3.32.data/data/npcsh/npc_team/code_executor.tool +0 -16
  72. npcsh-0.3.32.data/data/npcsh/npc_team/corca.npc +0 -13
  73. npcsh-0.3.32.data/data/npcsh/npc_team/eriane.npc +0 -4
  74. npcsh-0.3.32.data/data/npcsh/npc_team/foreman.npc +0 -7
  75. npcsh-0.3.32.data/data/npcsh/npc_team/generic_search.tool +0 -27
  76. npcsh-0.3.32.data/data/npcsh/npc_team/image_generation.tool +0 -25
  77. npcsh-0.3.32.data/data/npcsh/npc_team/lineru.npc +0 -0
  78. npcsh-0.3.32.data/data/npcsh/npc_team/local_search.tool +0 -149
  79. npcsh-0.3.32.data/data/npcsh/npc_team/maurawa.npc +0 -0
  80. npcsh-0.3.32.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  81. npcsh-0.3.32.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  82. npcsh-0.3.32.data/data/npcsh/npc_team/raone.npc +0 -0
  83. npcsh-0.3.32.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  84. npcsh-0.3.32.data/data/npcsh/npc_team/sibiji.npc +0 -4
  85. npcsh-0.3.32.data/data/npcsh/npc_team/slean.npc +0 -4
  86. npcsh-0.3.32.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  87. npcsh-0.3.32.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  88. npcsh-0.3.32.data/data/npcsh/npc_team/turnic.npc +0 -4
  89. npcsh-0.3.32.data/data/npcsh/npc_team/welxor.npc +0 -0
  90. npcsh-0.3.32.dist-info/METADATA +0 -779
  91. npcsh-0.3.32.dist-info/RECORD +0 -78
  92. npcsh-0.3.32.dist-info/entry_points.txt +0 -3
  93. {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/top_level.txt +0 -0
npcsh/dataframes.py DELETED
@@ -1,171 +0,0 @@
- ## functions for dataframes
- import os
- import sqlite3
- import json
- import pandas as pd
- import numpy as np
- import io
- from PIL import Image
- from typing import Optional
-
- from npcsh.llm_funcs import get_llm_response
-
- # from npcsh.audio import process_audio
- # from npcsh.video import process_video
-
- from npcsh.load_data import (
-     load_pdf,
-     load_csv,
-     load_json,
-     load_excel,
-     load_txt,
-     load_image,
- )
-
-
- def load_data_into_table(
-     file_path: str, table_name: str, cursor: sqlite3.Cursor, conn: sqlite3.Connection
- ) -> None:
-     """
-     Function Description:
-         This function is used to load data into a table.
-     Args:
-         file_path : str : The file path.
-         table_name : str : The table name.
-         cursor : sqlite3.Cursor : The SQLite cursor.
-         conn : sqlite3.Connection : The SQLite connection.
-     Keyword Args:
-         None
-     Returns:
-         None
-     """
-     try:
-         if not os.path.exists(file_path):
-             raise FileNotFoundError(f"File not found: {file_path}")
-
-         # Determine file type and load data
-         if file_path.endswith(".csv"):
-             df = pd.read_csv(file_path)
-         elif file_path.endswith(".pdf"):
-             df = load_pdf(file_path)
-         elif file_path.endswith((".txt", ".log", ".md")):
-             df = load_txt(file_path)
-         elif file_path.endswith((".xls", ".xlsx")):
-             df = load_excel(file_path)
-         elif file_path.lower().endswith(
-             (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff")
-         ):
-             # Handle images as NumPy arrays
-             df = load_image(file_path)
-         elif file_path.lower().endswith(
-             (".mp4", ".avi", ".mov", ".mkv")
-         ):  # Video files
-             video_frames, audio_array = process_video(file_path)
-             # Store video frames and audio
-             df = pd.DataFrame(
-                 {
-                     "video_frames": [video_frames.tobytes()],
-                     "shape": [video_frames.shape],
-                     "dtype": [video_frames.dtype.str],
-                     "audio_array": (
-                         [audio_array.tobytes()] if audio_array is not None else None
-                     ),
-                     "audio_rate": [sr] if audio_array is not None else None,
-                 }
-             )
-
-         elif file_path.lower().endswith((".mp3", ".wav", ".ogg")):  # Audio files
-             audio_array, sr = process_audio(file_path)
-             df = pd.DataFrame(
-                 {
-                     "audio_array": [audio_array.tobytes()],
-                     "audio_rate": [sr],
-                 }
-             )
-         else:
-             # Attempt to load as text if no other type matches
-             try:
-                 df = load_txt(file_path)
-             except Exception as e:
-                 print(f"Could not load file: {e}")
-                 return
-
-         # Store DataFrame in the database
-         df.to_sql(table_name, conn, if_exists="replace", index=False)
-         print(f"Data from '{file_path}' loaded into table '{table_name}'")
-
-     except Exception as e:
-         raise e  # Re-raise the exception for handling in enter_observation_mode
-
-
- def create_new_table(cursor: sqlite3.Cursor, conn: sqlite3.Connection) -> None:
-     """
-     Function Description:
-         This function is used to create a new table.
-     Args:
-         cursor : sqlite3.Cursor : The SQLite cursor.
-         conn : sqlite3.Connection : The SQLite connection.
-     Keyword Args:
-         None
-     Returns:
-         None
-     """
-
-     table_name = input("Enter new table name: ").strip()
-     columns = input("Enter column names separated by commas: ").strip()
-
-     create_query = (
-         f"CREATE TABLE {table_name} (id INTEGER PRIMARY KEY AUTOINCREMENT, {columns})"
-     )
-     cursor.execute(create_query)
-     conn.commit()
-     print(f"Table '{table_name}' created successfully.")
-
-
- def delete_table(cursor: sqlite3.Cursor, conn: sqlite3.Connection) -> None:
-     """
-     Function Description:
-         This function is used to delete a table.
-     Args:
-         cursor : sqlite3.Cursor : The SQLite cursor.
-         conn : sqlite3.Connection : The SQLite connection.
-     Keyword Args:
-         None
-     Returns:
-         None
-     """
-
-     table_name = input("Enter table name to delete: ").strip()
-     cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
-     conn.commit()
-     print(f"Table '{table_name}' deleted successfully.")
-
-
- def add_observation(
-     cursor: sqlite3.Cursor, conn: sqlite3.Connection, table_name: str
- ) -> None:
-     """
-     Function Description:
-         This function is used to add an observation.
-     Args:
-         cursor : sqlite3.Cursor : The SQLite cursor.
-         conn : sqlite3.Connection : The SQLite connection.
-         table_name : str : The table name.
-     Keyword Args:
-         None
-     Returns:
-         None
-     """
-
-     cursor.execute(f"PRAGMA table_info({table_name})")
-     columns = [column[1] for column in cursor.fetchall() if column[1] != "id"]
-
-     values = []
-     for column in columns:
-         value = input(f"Enter value for {column}: ").strip()
-         values.append(value)
-
-     insert_query = f"INSERT INTO {table_name} ({','.join(columns)}) VALUES ({','.join(['?' for _ in columns])})"
-     cursor.execute(insert_query, values)
-     conn.commit()
-     print("Observation added successfully.")
npcsh/embeddings.py DELETED
@@ -1,168 +0,0 @@
- #######
- #######
- #######
- #######
- ####### EMBEDDINGS
- #######
- from typing import List, Dict, Optional
- import numpy as np
- from npcsh.npc_sysenv import (
-     NPCSH_VECTOR_DB_PATH,
-     NPCSH_EMBEDDING_MODEL,
-     NPCSH_EMBEDDING_PROVIDER,
-     chroma_client,
- )
- from openai import OpenAI
- import anthropic
-
-
- def get_ollama_embeddings(
-     texts: List[str], model: str = "nomic-embed-text"
- ) -> List[List[float]]:
-     """Generate embeddings using Ollama."""
-     import ollama
-
-     embeddings = []
-     for text in texts:
-         response = ollama.embeddings(model=model, prompt=text)
-         embeddings.append(response["embedding"])
-     return embeddings
-
-
- def get_openai_embeddings(
-     texts: List[str], model: str = "text-embedding-3-small"
- ) -> List[List[float]]:
-     """Generate embeddings using OpenAI."""
-     client = OpenAI(api_key=openai_api_key)
-     response = client.embeddings.create(input=texts, model=model)
-     return [embedding.embedding for embedding in response.data]
-
-
- def get_openai_like_embeddings(
-     texts: List[str], model, api_url=None, api_key=None
- ) -> List[List[float]]:
-     """Generate embeddings using OpenAI."""
-     client = OpenAI(api_key=openai_api_key, base_url=api_url)
-     response = client.embeddings.create(input=texts, model=model)
-     return [embedding.embedding for embedding in response.data]
-
-
- def get_anthropic_embeddings(
-     texts: List[str], model: str = "claude-3-haiku-20240307"
- ) -> List[List[float]]:
-     """Generate embeddings using Anthropic."""
-     client = anthropic.Anthropic(api_key=anthropic_api_key)
-     embeddings = []
-     for text in texts:
-         # response = client.messages.create(
-         #     model=model, max_tokens=1024, messages=[{"role": "user", "content": text}]
-         # )
-         # Placeholder for actual embedding
-         embeddings.append([0.0] * 1024)  # Replace with actual embedding when available
-     return embeddings
-
-
- def store_embeddings_for_model(
-     texts,
-     embeddings,
-     metadata=None,
-     model: str = NPCSH_EMBEDDING_MODEL,
-     provider: str = NPCSH_EMBEDDING_PROVIDER,
- ):
-     collection_name = f"{provider}_{model}_embeddings"
-     collection = chroma_client.get_collection(collection_name)
-
-     # Create meaningful metadata for each document (adjust as necessary)
-     if metadata is None:
-         metadata = [{"text_length": len(text)} for text in texts]  # Example metadata
-         print(
-             "metadata is none, creating metadata for each document as the length of the text"
-         )
-     # Add embeddings to the collection with metadata
-     collection.add(
-         ids=[str(i) for i in range(len(texts))],
-         embeddings=embeddings,
-         metadatas=metadata,  # Passing populated metadata
-         documents=texts,
-     )
-
-
- def delete_embeddings_from_collection(collection, ids):
-     """Delete embeddings by id from Chroma collection."""
-     if ids:
-         collection.delete(ids=ids)  # Only delete if ids are provided
-
-
- def search_similar_texts(
-     query: str,
-     docs_to_embed: Optional[List[str]] = None,
-     top_k: int = 5,
-     db_path: str = NPCSH_VECTOR_DB_PATH,
-     embedding_model: str = NPCSH_EMBEDDING_MODEL,
-     embedding_provider: str = NPCSH_EMBEDDING_PROVIDER,
- ) -> List[Dict[str, any]]:
-     """
-     Search for similar texts using either a Chroma database or direct embedding comparison.
-     """
-
-     print(f"\nQuery to embed: {query}")
-     embedded_search_term = get_ollama_embeddings([query], embedding_model)[0]
-     # print(f"Query embedding: {embedded_search_term}")
-
-     if docs_to_embed is None:
-         # Fetch from the database if no documents to embed are provided
-         collection_name = f"{embedding_provider}_{embedding_model}_embeddings"
-         collection = chroma_client.get_collection(collection_name)
-         results = collection.query(
-             query_embeddings=[embedded_search_term], n_results=top_k
-         )
-         # Constructing and returning results
-         return [
-             {"id": id, "score": float(distance), "text": document}
-             for id, distance, document in zip(
-                 results["ids"][0], results["distances"][0], results["documents"][0]
-             )
-         ]
-
-     print(f"\nNumber of documents to embed: {len(docs_to_embed)}")
-
-     # Get embeddings for provided documents
-     raw_embeddings = get_ollama_embeddings(docs_to_embed, embedding_model)
-
-     output_embeddings = []
-     for idx, emb in enumerate(raw_embeddings):
-         if emb:  # Exclude any empty embeddings
-             output_embeddings.append(emb)
-
-     # Convert to numpy arrays for calculations
-     doc_embeddings = np.array(output_embeddings)
-     query_embedding = np.array(embedded_search_term)
-
-     # Check for zero-length embeddings
-     if len(doc_embeddings) == 0:
-         raise ValueError("No valid document embeddings found")
-
-     # Normalize embeddings to avoid division by zeros
-     doc_norms = np.linalg.norm(doc_embeddings, axis=1, keepdims=True)
-     query_norm = np.linalg.norm(query_embedding)
-
-     # Ensure no zero vectors are being used in cosine similarity
-     if query_norm == 0:
-         raise ValueError("Query embedding is zero-length")
-
-     # Calculate cosine similarities
-     cosine_similarities = np.dot(doc_embeddings, query_embedding) / (
-         doc_norms.flatten() * query_norm
-     )
-
-     # Get indices of top K documents
-     top_indices = np.argsort(cosine_similarities)[::-1][:top_k]
-
-     return [
-         {
-             "id": str(idx),
-             "score": float(cosine_similarities[idx]),
-             "text": docs_to_embed[idx],
-         }
-         for idx in top_indices
-     ]
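Two notes on the deleted module. First, get_openai_embeddings, get_openai_like_embeddings, and get_anthropic_embeddings all reference module-level names (openai_api_key, anthropic_api_key) that are never defined in the file, so they would raise NameError as shipped, and get_anthropic_embeddings returns zero-vector placeholders rather than real embeddings. Second, when docs_to_embed is provided, search_similar_texts ranks documents by cosine similarity between the query embedding and each document embedding. A self-contained sketch of that ranking step, with toy 3-dimensional vectors standing in for real get_ollama_embeddings output:

    import numpy as np

    # Toy embeddings standing in for get_ollama_embeddings() output.
    query_embedding = np.array([0.1, 0.9, 0.2])
    doc_embeddings = np.array([
        [0.1, 0.8, 0.3],  # close to the query
        [0.9, 0.1, 0.0],  # far from the query
        [0.2, 0.9, 0.1],  # close to the query
    ])
    top_k = 2

    # Cosine similarity: dot product divided by the product of the norms.
    doc_norms = np.linalg.norm(doc_embeddings, axis=1)
    query_norm = np.linalg.norm(query_embedding)
    scores = doc_embeddings @ query_embedding / (doc_norms * query_norm)

    # Indices of the top_k highest-scoring documents, best first; this mirrors
    # the np.argsort(...)[::-1][:top_k] step in the deleted function.
    top_indices = np.argsort(scores)[::-1][:top_k]
    print(top_indices, scores[top_indices])

This sketches the math only; the deleted function additionally filters out empty embeddings and guards against zero-norm vectors before dividing.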