npcsh 0.3.32__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.0.dist-info/METADATA +596 -0
  16. npcsh-1.0.0.dist-info/RECORD +21 -0
  17. {npcsh-0.3.32.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.0.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.32.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -569
  21. npcsh/audio_gen.py +0 -1
  22. npcsh/cli.py +0 -543
  23. npcsh/command_history.py +0 -566
  24. npcsh/conversation.py +0 -54
  25. npcsh/data_models.py +0 -46
  26. npcsh/dataframes.py +0 -171
  27. npcsh/embeddings.py +0 -168
  28. npcsh/helpers.py +0 -646
  29. npcsh/image.py +0 -298
  30. npcsh/image_gen.py +0 -79
  31. npcsh/knowledge_graph.py +0 -1006
  32. npcsh/llm_funcs.py +0 -2195
  33. npcsh/load_data.py +0 -83
  34. npcsh/main.py +0 -5
  35. npcsh/model_runner.py +0 -189
  36. npcsh/npc_compiler.py +0 -2879
  37. npcsh/npc_sysenv.py +0 -388
  38. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  39. npcsh/npc_team/corca.npc +0 -13
  40. npcsh/npc_team/foreman.npc +0 -7
  41. npcsh/npc_team/npcsh.ctx +0 -11
  42. npcsh/npc_team/sibiji.npc +0 -4
  43. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  44. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  45. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  46. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  47. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  48. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  49. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  50. npcsh/npc_team/templates/software/welxor.npc +0 -0
  51. npcsh/npc_team/tools/bash_executer.tool +0 -32
  52. npcsh/npc_team/tools/calculator.tool +0 -8
  53. npcsh/npc_team/tools/code_executor.tool +0 -16
  54. npcsh/npc_team/tools/generic_search.tool +0 -27
  55. npcsh/npc_team/tools/image_generation.tool +0 -25
  56. npcsh/npc_team/tools/local_search.tool +0 -149
  57. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  58. npcsh/npc_team/tools/screen_cap.tool +0 -27
  59. npcsh/npc_team/tools/sql_executor.tool +0 -26
  60. npcsh/response.py +0 -272
  61. npcsh/search.py +0 -252
  62. npcsh/serve.py +0 -1467
  63. npcsh/shell.py +0 -524
  64. npcsh/shell_helpers.py +0 -3919
  65. npcsh/stream.py +0 -233
  66. npcsh/video.py +0 -52
  67. npcsh/video_gen.py +0 -69
  68. npcsh-0.3.32.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  69. npcsh-0.3.32.data/data/npcsh/npc_team/calculator.tool +0 -8
  70. npcsh-0.3.32.data/data/npcsh/npc_team/celona.npc +0 -0
  71. npcsh-0.3.32.data/data/npcsh/npc_team/code_executor.tool +0 -16
  72. npcsh-0.3.32.data/data/npcsh/npc_team/corca.npc +0 -13
  73. npcsh-0.3.32.data/data/npcsh/npc_team/eriane.npc +0 -4
  74. npcsh-0.3.32.data/data/npcsh/npc_team/foreman.npc +0 -7
  75. npcsh-0.3.32.data/data/npcsh/npc_team/generic_search.tool +0 -27
  76. npcsh-0.3.32.data/data/npcsh/npc_team/image_generation.tool +0 -25
  77. npcsh-0.3.32.data/data/npcsh/npc_team/lineru.npc +0 -0
  78. npcsh-0.3.32.data/data/npcsh/npc_team/local_search.tool +0 -149
  79. npcsh-0.3.32.data/data/npcsh/npc_team/maurawa.npc +0 -0
  80. npcsh-0.3.32.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  81. npcsh-0.3.32.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  82. npcsh-0.3.32.data/data/npcsh/npc_team/raone.npc +0 -0
  83. npcsh-0.3.32.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  84. npcsh-0.3.32.data/data/npcsh/npc_team/sibiji.npc +0 -4
  85. npcsh-0.3.32.data/data/npcsh/npc_team/slean.npc +0 -4
  86. npcsh-0.3.32.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  87. npcsh-0.3.32.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  88. npcsh-0.3.32.data/data/npcsh/npc_team/turnic.npc +0 -4
  89. npcsh-0.3.32.data/data/npcsh/npc_team/welxor.npc +0 -0
  90. npcsh-0.3.32.dist-info/METADATA +0 -779
  91. npcsh-0.3.32.dist-info/RECORD +0 -78
  92. npcsh-0.3.32.dist-info/entry_points.txt +0 -3
  93. {npcsh-0.3.32.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/search.py DELETED
@@ -1,252 +0,0 @@
- # search.py
-
- import requests
- import os
-
- from bs4 import BeautifulSoup
- from duckduckgo_search import DDGS
- from duckduckgo_search.exceptions import DuckDuckGoSearchException
-
- try:
-     from googlesearch import search
- except:
-     pass
- from typing import List, Dict, Any, Optional, Union
- import numpy as np
- import json
-
- try:
-     from sentence_transformers import util, SentenceTransformer
- except:
-     pass
-
-
- def search_perplexity(
-     query: str,
-     api_key: str = None,
-     model: str = "sonar",
-     max_tokens: int = 400,
-     temperature: float = 0.2,
-     top_p: float = 0.9,
- ):
-     if api_key is None:
-         api_key = os.environ["PERPLEXITY_API_KEY"]
-     # print("api_key", api_key)
-     url = "https://api.perplexity.ai/chat/completions"
-     payload = {
-         "model": "sonar",
-         "messages": [
-             {"role": "system", "content": "Be precise and concise."},
-             {"role": "user", "content": query},
-         ],
-         "max_tokens": max_tokens,
-         "temperature": temperature,
-         "top_p": top_p,
-         "return_images": False,
-         "return_related_questions": False,
-         "search_recency_filter": "month",
-         "top_k": 0,
-         "stream": False,
-         "presence_penalty": 0,
-         "frequency_penalty": 1,
-         "response_format": None,
-     }
-
-     # Headers for the request, including the Authorization bearer token
-     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-
-     # Make the POST request to the API
-     response = requests.post(url, json=payload, headers=headers)
-     response = json.loads(response.text)
-     print(response)
-     return [response["choices"][0]["message"]["content"], response["citations"]]
-
-
- def search_web(
-     query: str,
-     num_results: int = 5,
-     provider: str = "duckduckgo",
-     api_key=None,
-     **kwargs,
- ) -> List[Dict[str, str]]:
-     """
-     Function Description:
-         This function searches the web for information based on a query.
-     Args:
-         query: The search query.
-     Keyword Args:
-         num_results: The number of search results to retrieve.
-         provider: The search engine provider to use ('google' or 'duckduckgo').
-     Returns:
-         A list of dictionaries with 'title', 'link', and 'content' keys.
-     """
-     results = []
-
-     # try:
-     if provider == "perplexity":
-         search_result = search_perplexity(query, api_key=api_key, **kwargs)
-         # print(search_result, type(search_result))
-         return search_result
-
-     if provider == "duckduckgo":
-         headers = {
-             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:124.0) Gecko/20100101 Firefox/124.0"
-         }
-         ddgs = DDGS(headers=headers)
-
-         try:
-             search_results = ddgs.text(query, max_results=num_results)
-             print(search_results, type(search_results))
-             urls = [r["href"] for r in search_results]
-             results = [
-                 {"title": r["title"], "link": r["href"], "content": r["body"]}
-                 for r in search_results
-             ]
-         except DuckDuckGoSearchException as e:
-             print("DuckDuckGo search failed: ", e)
-             urls = []
-             results = []
-
-     else: # google
-         urls = list(search(query, num_results=num_results))
-         # google shit doesnt seem to be working anymore, apparently a lbock they made on browsers without js?
-     print("urls", urls)
-     print(provider)
-     for url in urls:
-         try:
-             # Fetch the webpage content
-             headers = {
-                 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
-             }
-             response = requests.get(url, headers=headers, timeout=5)
-             response.raise_for_status()
-
-             # Parse with BeautifulSoup
-             soup = BeautifulSoup(response.text, "html.parser")
-
-             # Get title and content
-             title = soup.title.string if soup.title else url
-
-             # Extract text content and clean it up
-             content = " ".join([p.get_text() for p in soup.find_all("p")])
-             content = " ".join(content.split()) # Clean up whitespace
-
-             results.append(
-                 {
-                     "title": title,
-                     "link": url,
-                     "content": (
-                         content[:500] + "..." if len(content) > 500 else content
-                     ),
-                 }
-             )
-
-         except Exception as e:
-             print(f"Error fetching {url}: {str(e)}")
-             continue
-
-     # except Exception as e:
-     #     print(f"Search error: {str(e)}")
-     content_str = "\n".join(
-         [r["content"] + "\n Citation: " + r["link"] + "\n\n\n" for r in results]
-     )
-     link_str = "\n".join([r["link"] + "\n" for r in results])
-     return [content_str, link_str]
-
-
- def rag_search(
-     query: str,
-     text_data: Union[Dict[str, str], str],
-     embedding_model: Any = None,
-     text_data_embedded: Optional[Dict[str, np.ndarray]] = None,
-     similarity_threshold: float = 0.3,
-     device="cpu",
- ) -> List[str]:
-     """
-     Function Description:
-         This function retrieves lines from documents that are relevant to the query.
-     Args:
-         query: The query string.
-         text_data: A dictionary with file paths as keys and file contents as values.
-         embedding_model: The sentence embedding model.
-     Keyword Args:
-         text_data_embedded: A dictionary with file paths as keys and embedded file contents as values.
-         similarity_threshold: The similarity threshold for considering a line relevant.
-     Returns:
-         A list of relevant snippets.
-
-     """
-     if embedding_model is None:
-         try:
-             embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
-         except:
-             raise Exception(
-                 "Please install the sentence-transformers library to use this function or provide an embedding transformer model."
-             )
-     results = []
-
-     # Compute the embedding of the query
-     query_embedding = embedding_model.encode(
-         query, convert_to_tensor=True, show_progress_bar=False
-     )
-     if isinstance(text_data, str):
-         # split at the sentence level
-         lines = text_data.split(".")
-         if not lines:
-             return results
-         # Compute embeddings for each line
-         if text_data_embedded is None:
-             line_embeddings = embedding_model.encode(lines, convert_to_tensor=True)
-         else:
-             line_embeddings = text_data_embedded
-         # Compute cosine similarities
-         cosine_scores = util.cos_sim(query_embedding, line_embeddings)[0].cpu().numpy()
-
-         # Find indices of lines above the similarity threshold
-         relevant_line_indices = np.where(cosine_scores >= similarity_threshold)[0]
-         # print("relevant_line_indices", cosine_scores)
-         # print(np.mean(cosine_scores))
-         # print(np.max(cosine_scores))
-
-         for idx in relevant_line_indices:
-             idx = int(idx)
-             # Get context lines (±10 lines)
-             start_idx = max(0, idx - 10)
-             end_idx = min(len(lines), idx + 11) # +11 because end index is exclusive
-             snippet = ". ".join(lines[start_idx:end_idx])
-             results.append(snippet)
-
-     elif isinstance(text_data, dict):
-         for filename, content in text_data.items():
-             # Split content into lines
-             lines = content.split("\n")
-             if not lines:
-                 continue
-             # Compute embeddings for each line
-             if text_data_embedded is None:
-                 line_embeddings = embedding_model.encode(lines, convert_to_tensor=True)
-             else:
-                 line_embeddings = text_data_embedded[filename]
-             # Compute cosine similarities
-             cosine_scores = (
-                 util.cos_sim(query_embedding, line_embeddings)[0].cpu().numpy()
-             )
-
-             # Find indices of lines above the similarity threshold
-             ##print("most similar", np.max(cosine_scores))
-             ##print("most similar doc", lines[np.argmax(cosine_scores)])
-             relevant_line_indices = np.where(cosine_scores >= similarity_threshold)[0]
-             # print("relevant_line_indices", cosine_scores)
-             # print(np.mean(cosine_scores))
-             # print(np.max(cosine_scores))
-             for idx in relevant_line_indices:
-                 idx = int(idx) # Ensure idx is an integer
-                 # Get context lines (±10 lines)
-                 start_idx = max(0, idx - 10)
-                 end_idx = min(
-                     len(lines), idx + 11
-                 ) # +11 because end index is exclusive
-                 snippet = "\n".join(lines[start_idx:end_idx])
-                 results.append((filename, snippet))
-     # print("results", results)
-     return results
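
For orientation, below is a minimal usage sketch of the two main helpers this removed module provided. It assumes a 0.3.x install where npcsh.search still exists (nothing below ships in 1.0.0); the query strings and documents are invented for illustration, the default provider requires the duckduckgo_search package, the Perplexity provider requires PERPLEXITY_API_KEY, and rag_search needs sentence-transformers unless you pass your own embedding model.

# Hypothetical calls against npcsh 0.3.x; the queries and documents are made up.
from npcsh.search import search_web, rag_search

# search_web returns a two-element list [content_str, link_str] for the
# DuckDuckGo (default) and Google providers; the Perplexity provider instead
# returns [answer_text, citations].
content_str, link_str = search_web("retrieval augmented generation", num_results=3)
print(link_str)

# rag_search splits each document into lines, embeds them (all-MiniLM-L6-v2 by
# default), and returns (filename, snippet) pairs whose cosine similarity to
# the query clears similarity_threshold.
docs = {
    "notes/search.md": "DuckDuckGo is the default provider.\nPerplexity needs an API key.",
    "notes/rag.md": "Relevant lines are selected by cosine similarity over embeddings.",
}
for filename, snippet in rag_search("Which provider needs an API key?", docs):
    print(filename, "->", snippet)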