npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.0.dist-info/METADATA +596 -0
  16. npcsh-1.0.0.dist-info/RECORD +21 -0
  17. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.0.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -210
  21. npcsh/cli.py +0 -545
  22. npcsh/command_history.py +0 -566
  23. npcsh/conversation.py +0 -291
  24. npcsh/data_models.py +0 -46
  25. npcsh/dataframes.py +0 -163
  26. npcsh/embeddings.py +0 -168
  27. npcsh/helpers.py +0 -641
  28. npcsh/image.py +0 -298
  29. npcsh/image_gen.py +0 -79
  30. npcsh/knowledge_graph.py +0 -1006
  31. npcsh/llm_funcs.py +0 -2027
  32. npcsh/load_data.py +0 -83
  33. npcsh/main.py +0 -5
  34. npcsh/model_runner.py +0 -189
  35. npcsh/npc_compiler.py +0 -2870
  36. npcsh/npc_sysenv.py +0 -383
  37. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  38. npcsh/npc_team/corca.npc +0 -13
  39. npcsh/npc_team/foreman.npc +0 -7
  40. npcsh/npc_team/npcsh.ctx +0 -11
  41. npcsh/npc_team/sibiji.npc +0 -4
  42. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  43. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  44. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  45. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  46. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  47. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  48. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  49. npcsh/npc_team/templates/software/welxor.npc +0 -0
  50. npcsh/npc_team/tools/bash_executer.tool +0 -32
  51. npcsh/npc_team/tools/calculator.tool +0 -8
  52. npcsh/npc_team/tools/code_executor.tool +0 -16
  53. npcsh/npc_team/tools/generic_search.tool +0 -27
  54. npcsh/npc_team/tools/image_generation.tool +0 -25
  55. npcsh/npc_team/tools/local_search.tool +0 -149
  56. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  57. npcsh/npc_team/tools/screen_cap.tool +0 -27
  58. npcsh/npc_team/tools/sql_executor.tool +0 -26
  59. npcsh/response.py +0 -623
  60. npcsh/search.py +0 -248
  61. npcsh/serve.py +0 -1460
  62. npcsh/shell.py +0 -538
  63. npcsh/shell_helpers.py +0 -3529
  64. npcsh/stream.py +0 -700
  65. npcsh/video.py +0 -49
  66. npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  67. npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
  68. npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
  69. npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
  70. npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
  71. npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
  72. npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
  73. npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
  74. npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
  75. npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
  76. npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
  77. npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
  78. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  79. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  80. npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
  81. npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  82. npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
  83. npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
  84. npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  85. npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  86. npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
  87. npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
  88. npcsh-0.3.31.dist-info/METADATA +0 -1853
  89. npcsh-0.3.31.dist-info/RECORD +0 -76
  90. npcsh-0.3.31.dist-info/entry_points.txt +0 -3
  91. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/search.py DELETED
@@ -1,248 +0,0 @@
- # search.py
-
- import requests
- import os
-
- from bs4 import BeautifulSoup
- from duckduckgo_search import DDGS
- from duckduckgo_search.exceptions import DuckDuckGoSearchException
-
- try:
-     from googlesearch import search
- except ImportError:
-     pass
- from typing import List, Dict, Any, Optional, Union
- import numpy as np
- import json
-
- try:
-     from sentence_transformers import util, SentenceTransformer
- except ImportError:
-     pass
-
-
- def search_perplexity(
-     query: str,
-     api_key: str = None,
-     model: str = "sonar",
-     max_tokens: int = 400,
-     temperature: float = 0.2,
-     top_p: float = 0.9,
- ):
-     if api_key is None:
-         api_key = os.environ["PERPLEXITY_API_KEY"]
-     url = "https://api.perplexity.ai/chat/completions"
-     payload = {
-         "model": model,
-         "messages": [
-             {"role": "system", "content": "Be precise and concise."},
-             {"role": "user", "content": query},
-         ],
-         "max_tokens": max_tokens,
-         "temperature": temperature,
-         "top_p": top_p,
-         "return_images": False,
-         "return_related_questions": False,
-         "search_recency_filter": "month",
-         "top_k": 0,
-         "stream": False,
-         "presence_penalty": 0,
-         "frequency_penalty": 1,
-         "response_format": None,
-     }
-
-     # Headers for the request, including the Authorization bearer token
-     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-
-     # Make the POST request to the API
-     response = requests.post(url, json=payload, headers=headers)
-     response = json.loads(response.text)
-     print(response)
-     return [response["choices"][0]["message"]["content"], response["citations"]]
-
-
- def search_web(
-     query: str,
-     num_results: int = 5,
-     provider: str = "duckduckgo",
-     api_key=None,
-     **kwargs,
- ) -> List[Dict[str, str]]:
-     """
-     Function Description:
-         This function searches the web for information based on a query.
-     Args:
-         query: The search query.
-     Keyword Args:
-         num_results: The number of search results to retrieve.
-         provider: The search engine provider to use ('google' or 'duckduckgo').
-     Returns:
-         A list of dictionaries with 'title', 'link', and 'content' keys.
-     """
-     results = []
-
-     if provider == "perplexity":
-         search_result = search_perplexity(query, api_key=api_key, **kwargs)
-         return search_result
-
-     if provider == "duckduckgo":
-         ddgs = DDGS()
-         try:
-             search_results = ddgs.text(query, max_results=num_results)
-             print(search_results, type(search_results))
-             urls = [r["href"] for r in search_results]
-             results = [
-                 {"title": r["title"], "link": r["href"], "content": r["body"]}
-                 for r in search_results
-             ]
-         except DuckDuckGoSearchException as e:
-             print("DuckDuckGo search failed: ", e)
-             urls = []
-             results = []
-
-     else:  # google
-         urls = list(search(query, num_results=num_results))
-         # Google search no longer seems to work; it apparently blocks browsers without JS.
-         print("urls", urls)
-         print(provider)
-     for url in urls:
-         try:
-             # Fetch the webpage content
-             headers = {
-                 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
-             }
-             response = requests.get(url, headers=headers, timeout=5)
-             response.raise_for_status()
-
-             # Parse with BeautifulSoup
-             soup = BeautifulSoup(response.text, "html.parser")
-
-             # Get title and content
-             title = soup.title.string if soup.title else url
-
-             # Extract text content and clean it up
-             content = " ".join([p.get_text() for p in soup.find_all("p")])
-             content = " ".join(content.split())  # Clean up whitespace
-
-             results.append(
-                 {
-                     "title": title,
-                     "link": url,
-                     "content": (
-                         content[:500] + "..." if len(content) > 500 else content
-                     ),
-                 }
-             )
-
-         except Exception as e:
-             print(f"Error fetching {url}: {str(e)}")
-             continue
-
-     content_str = "\n".join(
-         [r["content"] + "\n Citation: " + r["link"] + "\n\n\n" for r in results]
-     )
-     link_str = "\n".join([r["link"] + "\n" for r in results])
-     return [content_str, link_str]
-
-
- def rag_search(
-     query: str,
-     text_data: Union[Dict[str, str], str],
-     embedding_model: Any = None,
-     text_data_embedded: Optional[Dict[str, np.ndarray]] = None,
-     similarity_threshold: float = 0.3,
-     device="cpu",
- ) -> List[str]:
-     """
-     Function Description:
-         This function retrieves lines from documents that are relevant to the query.
-     Args:
-         query: The query string.
-         text_data: A dictionary with file paths as keys and file contents as values.
-         embedding_model: The sentence embedding model.
-     Keyword Args:
-         text_data_embedded: A dictionary with file paths as keys and embedded file contents as values.
-         similarity_threshold: The similarity threshold for considering a line relevant.
-     Returns:
-         A list of relevant snippets.
-     """
-     if embedding_model is None:
-         try:
-             embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
-         except Exception:
-             raise Exception(
-                 "Please install the sentence-transformers library to use this function or provide an embedding transformer model."
-             )
-     results = []
-
-     # Compute the embedding of the query
-     query_embedding = embedding_model.encode(
-         query, convert_to_tensor=True, show_progress_bar=False
-     )
-     if isinstance(text_data, str):
-         # Split at the sentence level
-         lines = text_data.split(".")
-         if not lines:
-             return results
-         # Compute embeddings for each line
-         if text_data_embedded is None:
-             line_embeddings = embedding_model.encode(lines, convert_to_tensor=True)
-         else:
-             line_embeddings = text_data_embedded
-         # Compute cosine similarities
-         cosine_scores = util.cos_sim(query_embedding, line_embeddings)[0].cpu().numpy()
-
-         # Find indices of lines above the similarity threshold
-         relevant_line_indices = np.where(cosine_scores >= similarity_threshold)[0]
-
-         for idx in relevant_line_indices:
-             idx = int(idx)
-             # Get context lines (±10 lines)
-             start_idx = max(0, idx - 10)
-             end_idx = min(len(lines), idx + 11)  # +11 because end index is exclusive
-             snippet = ". ".join(lines[start_idx:end_idx])
-             results.append(snippet)
-
-     elif isinstance(text_data, dict):
-         for filename, content in text_data.items():
-             # Split content into lines
-             lines = content.split("\n")
-             if not lines:
-                 continue
-             # Compute embeddings for each line
-             if text_data_embedded is None:
-                 line_embeddings = embedding_model.encode(lines, convert_to_tensor=True)
-             else:
-                 line_embeddings = text_data_embedded[filename]
-             # Compute cosine similarities
-             cosine_scores = (
-                 util.cos_sim(query_embedding, line_embeddings)[0].cpu().numpy()
-             )
-
-             # Find indices of lines above the similarity threshold
-             relevant_line_indices = np.where(cosine_scores >= similarity_threshold)[0]
-             for idx in relevant_line_indices:
-                 idx = int(idx)  # Ensure idx is an integer
-                 # Get context lines (±10 lines)
-                 start_idx = max(0, idx - 10)
-                 end_idx = min(
-                     len(lines), idx + 11
-                 )  # +11 because end index is exclusive
-                 snippet = "\n".join(lines[start_idx:end_idx])
-                 results.append((filename, snippet))
-     return results
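
For orientation, below is a minimal usage sketch of the two public helpers this deleted module exposed in 0.3.31. It is illustrative only and not part of either wheel; it assumes npcsh==0.3.31 is installed together with the optional duckduckgo_search, beautifulsoup4, and sentence-transformers dependencies, and the document names are made up.

    # Illustrative sketch against the removed npcsh 0.3.31 module shown above.
    from npcsh.search import search_web, rag_search

    # search_web returns [content_str, link_str] for the default DuckDuckGo provider.
    content, links = search_web("vector databases", num_results=3)
    print(links)

    # rag_search embeds the query and each line of the supplied documents and returns
    # (filename, snippet) tuples whose cosine similarity clears the default 0.3 threshold.
    docs = {"notes.txt": "FAISS builds vector indexes.\nSQLite is a relational database.\n"}
    print(rag_search("vector index", docs))

Because 1.0.0 drops npcsh/search.py from the wheel with no file of the same path added, any code importing npcsh.search will break against the new release unless it pins npcsh<1.0.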