npcpy 1.1.28__py3-none-any.whl → 1.2.32__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- npcpy/data/audio.py +16 -38
- npcpy/data/image.py +29 -29
- npcpy/data/load.py +4 -3
- npcpy/data/text.py +28 -28
- npcpy/data/video.py +6 -6
- npcpy/data/web.py +49 -21
- npcpy/ft/__init__.py +0 -0
- npcpy/ft/diff.py +110 -0
- npcpy/ft/ge.py +115 -0
- npcpy/ft/memory_trainer.py +171 -0
- npcpy/ft/model_ensembler.py +357 -0
- npcpy/ft/rl.py +360 -0
- npcpy/ft/sft.py +248 -0
- npcpy/ft/usft.py +128 -0
- npcpy/gen/audio_gen.py +24 -0
- npcpy/gen/embeddings.py +13 -13
- npcpy/gen/image_gen.py +37 -15
- npcpy/gen/response.py +287 -111
- npcpy/gen/video_gen.py +10 -9
- npcpy/llm_funcs.py +447 -79
- npcpy/memory/command_history.py +201 -48
- npcpy/memory/kg_vis.py +74 -74
- npcpy/memory/knowledge_graph.py +482 -115
- npcpy/memory/memory_processor.py +81 -0
- npcpy/memory/search.py +70 -70
- npcpy/mix/debate.py +192 -3
- npcpy/npc_compiler.py +1541 -879
- npcpy/npc_sysenv.py +250 -78
- npcpy/serve.py +1036 -321
- npcpy/sql/ai_function_tools.py +257 -0
- npcpy/sql/database_ai_adapters.py +186 -0
- npcpy/sql/database_ai_functions.py +163 -0
- npcpy/sql/model_runner.py +19 -19
- npcpy/sql/npcsql.py +706 -507
- npcpy/sql/sql_model_compiler.py +156 -0
- npcpy/tools.py +20 -20
- npcpy/work/plan.py +8 -8
- npcpy/work/trigger.py +3 -3
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/METADATA +169 -9
- npcpy-1.2.32.dist-info/RECORD +54 -0
- npcpy-1.1.28.dist-info/RECORD +0 -40
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/memory/memory_processor.py
ADDED
@@ -0,0 +1,81 @@
+from dataclasses import dataclass
+from typing import List, Dict, Any, Optional
+from datetime import datetime
+import threading
+import queue
+import time
+
+@dataclass
+class MemoryItem:
+    message_id: str
+    conversation_id: str
+    npc: str
+    team: str
+    directory_path: str
+    content: str
+    context: str
+    model: str
+    provider: str
+
+def memory_approval_ui(memories: List[Dict]) -> List[Dict]:
+    if not memories:
+        return []
+
+    print(f"\n📝 {len(memories)} memories ready for approval:")
+
+    approvals = []
+    for i, memory in enumerate(memories, 1):
+        print(f"\n--- Memory {i}/{len(memories)} ---")
+        print(f"NPC: {memory['npc']}")
+        content_preview = memory['content'][:200]
+        if len(memory['content']) > 200:
+            content_preview += '...'
+        print(f"Content: {content_preview}")
+
+        while True:
+            choice = input(
+                "(a)pprove, (r)eject, (e)dit, (s)kip | "
+                "(A)ll approve, (R)all reject, (S)all skip: "
+            ).strip().lower()
+
+            if choice == 'a':
+                approvals.append({
+                    "memory_id": memory['memory_id'],
+                    "decision": "human-approved"
+                })
+                break
+            elif choice == 'r':
+                approvals.append({
+                    "memory_id": memory['memory_id'],
+                    "decision": "human-rejected"
+                })
+                break
+            elif choice == 'e':
+                edited = input("Edit memory: ").strip()
+                if edited:
+                    approvals.append({
+                        "memory_id": memory['memory_id'],
+                        "decision": "human-edited",
+                        "final_memory": edited
+                    })
+                    break
+            elif choice == 's':
+                break
+            elif choice == 'A':
+                for remaining_memory in memories[i-1:]:
+                    approvals.append({
+                        "memory_id": remaining_memory['memory_id'],
+                        "decision": "human-approved"
+                    })
+                return approvals
+            elif choice == 'R':
+                for remaining_memory in memories[i-1:]:
+                    approvals.append({
+                        "memory_id": remaining_memory['memory_id'],
+                        "decision": "human-rejected"
+                    })
+                return approvals
+            elif choice == 'S':
+                return approvals
+
+    return approvals
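The new memory_processor.py centers on memory_approval_ui, an interactive approve/reject/edit loop over pending memories. A minimal sketch of driving it (the dict keys follow the fields the UI reads above; the sample values are made up):

from npcpy.memory.memory_processor import memory_approval_ui

# Illustrative input: each memory needs at least the keys the UI reads
# ('npc', 'content', 'memory_id'); values here are invented.
pending = [
    {
        "memory_id": "mem-001",
        "npc": "sibiji",
        "content": "User prefers SQLite for local persistence.",
    },
]

decisions = memory_approval_ui(pending)
# Typing 'a' at the prompt yields:
# [{'memory_id': 'mem-001', 'decision': 'human-approved'}]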
npcpy/memory/search.py
CHANGED
@@ -35,21 +35,21 @@ def search_similar_texts(
     embedded_search_term = get_ollama_embeddings([query], embedding_model)[0]

     if docs_to_embed is None:
-
+
         collection_name = f"{embedding_provider}_{embedding_model}_embeddings"
         collection = chroma_client.get_collection(collection_name)
         results = collection.query(
-            query_embeddings=[embedded_search_term], n_results=top_k * 2
+            query_embeddings=[embedded_search_term], n_results=top_k * 2
         )

-
+
         seen_texts = set()
         filtered_results = []

         for idx, (id, distance, document) in enumerate(zip(
             results["ids"][0], results["distances"][0], results["documents"][0]
         )):
-
+
             if document not in seen_texts:
                 seen_texts.add(document)
                 filtered_results.append({
@@ -58,7 +58,7 @@ def search_similar_texts(
                     "text": document
                 })

-
+
             if len(filtered_results) >= top_k:
                 break

@@ -66,40 +66,40 @@

     print(f"\nNumber of documents to embed: {len(docs_to_embed)}")

-
-    unique_docs = list(dict.fromkeys(docs_to_embed))
+
+    unique_docs = list(dict.fromkeys(docs_to_embed))
     raw_embeddings = get_ollama_embeddings(unique_docs, embedding_model)

     output_embeddings = []
     unique_doc_indices = []

     for idx, emb in enumerate(raw_embeddings):
-        if emb:
+        if emb:
             output_embeddings.append(emb)
             unique_doc_indices.append(idx)

-
+
     doc_embeddings = np.array(output_embeddings)
     query_embedding = np.array(embedded_search_term)

-
+
     if len(doc_embeddings) == 0:
         raise ValueError("No valid document embeddings found")

-
+
     doc_norms = np.linalg.norm(doc_embeddings, axis=1, keepdims=True)
     query_norm = np.linalg.norm(query_embedding)

-
+
     if query_norm == 0:
         raise ValueError("Query embedding is zero-length")

-
+
     cosine_similarities = np.dot(doc_embeddings, query_embedding) / (
         doc_norms.flatten() * query_norm
     )

-
+
     top_indices = np.argsort(cosine_similarities)[::-1][:top_k]

     return [
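The fallback branch above ranks documents by plain cosine similarity over Ollama embeddings. The same computation in isolation (NumPy only; the toy vectors are made up):

import numpy as np

# Toy stand-ins for get_ollama_embeddings() output: 3 docs, 1 query.
doc_embeddings = np.array([[1.0, 0.0], [0.7, 0.7], [0.0, 1.0]])
query_embedding = np.array([1.0, 0.2])

doc_norms = np.linalg.norm(doc_embeddings, axis=1, keepdims=True)
query_norm = np.linalg.norm(query_embedding)

# Same formula as the diff: dot product divided by the product of norms.
cosine_similarities = np.dot(doc_embeddings, query_embedding) / (
    doc_norms.flatten() * query_norm
)

top_k = 2
top_indices = np.argsort(cosine_similarities)[::-1][:top_k]
print(top_indices)  # [0 1]: doc 0 is closest to the query, then doc 1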
@@ -155,7 +155,7 @@ def execute_search_command(
     else:
         num_results = 5

-
+
     command = command.replace(f"-p {provider}", "").replace(
         f"--provider {provider}", ""
     )
@@ -196,7 +196,7 @@ def get_facts_for_rag(
     Returns:
         Formatted context string with retrieved facts
     """
-
+
     kuzu_conn = init_db(kuzu_db_path)
     chroma_client, chroma_collection = setup_chroma_db(
         "knowledge_graph",
@@ -204,7 +204,7 @@ def get_facts_for_rag(
         chroma_db_path
     )

-
+
     results = hybrid_search_with_chroma(
         kuzu_conn=kuzu_conn,
         chroma_collection=chroma_collection,
@@ -213,23 +213,23 @@
         top_k=top_k,
     )

-
+
     context = "Related facts:\n\n"

-
+
     context += "Most relevant facts:\n"
     vector_matches = [r for r in results if r["source"] == "vector_search"]
     for i, item in enumerate(vector_matches):
         context += f"{i+1}. {item['fact']}\n"

-
+
     context += "\nRelated concepts:\n"
     graph_matches = [r for r in results if r["source"] != "vector_search"]
     for i, item in enumerate(graph_matches):
         group = item["source"].replace("graph_relation_via_", "")
         context += f"{i+1}. {item['fact']} (related via {group})\n"

-
+
     kuzu_conn.close()

     return context
@@ -253,14 +253,14 @@ def answer_with_rag(
     Returns:
         Answer from the model
     """
-
+
     context = get_facts_for_rag(
         kuzu_db_path,
         chroma_db_path,
         query,
     )

-
+
     prompt = f"""
     Answer this question based on the retrieved information.

@@ -273,7 +273,7 @@ def answer_with_rag(
     from the available facts.
     """

-
+
     response = get_llm_response(prompt, model=model, provider=provider)

     return response["response"]
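For orientation, the context string assembled in get_facts_for_rag above has this shape (the labels and layout come from the hunk; the fact text is invented):

# Shape of the string get_facts_for_rag returns; facts are placeholders.
example_context = (
    "Related facts:\n\n"
    "Most relevant facts:\n"      # vector_search matches
    "1. npcpy stores conversation history in a local database.\n"
    "\nRelated concepts:\n"       # graph-derived matches
    "1. embeddings power semantic search (related via tooling)\n"
)
print(example_context)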
@@ -285,14 +285,14 @@ def execute_rag_command(
     embedding_model: str,
     embedding_provider: str,
     top_k: int = 15,
-    file_contents=None,
+    file_contents=None,
     **kwargs
 ) -> dict:
     """
     Execute the RAG command with support for embedding generation.
     When file_contents is provided, it searches those instead of the database.
     """
-
+
     BLUE = "\033[94m"
     GREEN = "\033[92m"
     YELLOW = "\033[93m"
@@ -300,44 +300,44 @@ def execute_rag_command(
     RESET = "\033[0m"
     BOLD = "\033[1m"

-
+
     header = f"\n{BOLD}{BLUE}RAG Query: {RESET}{GREEN}{command}{RESET}\n"

-
+
     if file_contents and len(file_contents) > 0:
         similar_chunks = search_similar_texts(
             command,
             embedding_model,
             embedding_provider,
-            chroma_client=None,
+            chroma_client=None,

-            docs_to_embed=file_contents,
+            docs_to_embed=file_contents,
             top_k=top_k
         )

-
+
         file_info = f"{BOLD}{BLUE}Files Processed: {RESET}{YELLOW}{len(file_contents)}{RESET}\n"
         separator = f"{YELLOW}{'-' * 100}{RESET}\n"

-
+
         chunk_results = []
         for i, chunk in enumerate(similar_chunks, 1):
             score = chunk['score']
             text = chunk['text']

-
+
             display_text = text[:150] + ("..." if len(text) > 150 else "")
             chunk_results.append(f"{BOLD}{i:2d}{RESET}. {CYAN}[{score:.2f}]{RESET} {display_text}")

-
+
         file_results = header + file_info + separator + "\n".join(chunk_results)
         render_markdown(f"FILE SEARCH RESULTS:\n{file_results}")

-
+
         plain_chunks = [f"{i+1}. {chunk['text']}" for i, chunk in enumerate(similar_chunks)]
         plain_results = "\n\n".join(plain_chunks)

-
+
         prompt = f"""
         The user asked: {command}

@@ -349,7 +349,7 @@ def execute_rag_command(
         between the results and the initial input. do not do this haphazardly, be creative yet cautious.
         """

-
+
         response = get_llm_response(
             prompt,
             **kwargs
@@ -357,16 +357,16 @@
         return response

     else:
-
+
         try:
-
+
             chroma_client, chroma_collection = setup_chroma_db(
                 f"{embedding_provider}_{embedding_model}_embeddings",
                 "Conversation embeddings",
                 vector_db_path
             )

-
+
             similar_texts = search_similar_texts(
                 command,
                 embedding_model,
@@ -375,16 +375,16 @@ def execute_rag_command(
                 top_k=top_k,
             )

-
+
             separator = f"{YELLOW}{'-' * 100}{RESET}\n"

-
+
             processed_texts = []
             for i, similar_text in enumerate(similar_texts, 1):
                 text = similar_text['text']
                 score = similar_text['score']

-
+
                 timestamp_str = ""
                 try:
                     if 'id' in similar_text and '_' in similar_text['id']:
@@ -393,24 +393,24 @@
                 except (IndexError, ValueError, TypeError):
                     pass

-
+
                 text = text.replace('\n', ' ').strip()
                 snippet = text[:85] + ("..." if len(text) > 85 else "")

-
+
                 processed_texts.append(
                     f"{BOLD}{i:2d}{RESET}. {CYAN}[{score:.2f}]{RESET} {snippet} {timestamp_str}"
                 )

-
+
             knowledge_results = header + separator + "\n".join(processed_texts)
             render_markdown(f"KNOWLEDGE BASE: {knowledge_results}")

-
+
             plain_texts = [f"{i+1}. {similar_texts[i]['text']}" for i in range(len(similar_texts))]
             plain_results = "\n\n".join(plain_texts)

-
+
             prompt = f"""
             The user asked: {command}

@@ -422,7 +422,7 @@ def execute_rag_command(
             between the results and the initial input. do not do this haphazardly, be creative yet cautious.
             """

-
+
             response = get_llm_response(
                 prompt,
                 **kwargs
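execute_rag_command now branches on file_contents: supplied texts are embedded ad hoc, otherwise the persistent Chroma collection is queried. A self-contained sketch of that dispatch shape (the helper names and toy scorer are stand-ins, not npcpy's API):

from typing import List, Optional

def rank_in_memory(query: str, docs: List[str], top_k: int) -> List[str]:
    # Toy scorer standing in for the embedding search: word overlap.
    scored = sorted(docs, key=lambda d: -len(set(query.split()) & set(d.split())))
    return scored[:top_k]

def query_collection(query: str, top_k: int) -> List[str]:
    # Stand-in for the persistent Chroma lookup.
    stored = ["npcpy stores history in SQLite", "embeddings come from Ollama"]
    return rank_in_memory(query, stored, top_k)

def retrieve(query: str, file_contents: Optional[List[str]] = None, top_k: int = 15) -> List[str]:
    # Mirrors execute_rag_command's branch: ad-hoc file search vs. stored collection.
    if file_contents and len(file_contents) > 0:
        return rank_in_memory(query, file_contents, top_k)
    return query_collection(query, top_k)

print(retrieve("where is history stored", ["history lives in SQLite", "unrelated note"], top_k=1))
# ['history lives in SQLite']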
@@ -442,7 +442,7 @@ def execute_brainblast_command(
     Execute a comprehensive "brainblast" search on command history.
     Breaks the query into words and searches for combinations of those words.
     """
-
+
     BLUE = "\033[94m"
     GREEN = "\033[92m"
     YELLOW = "\033[93m"
@@ -457,54 +457,54 @@ def execute_brainblast_command(
     top_k = kwargs.get('top_k', 10)


-
+
     header = f"\n{BOLD}{BLUE}BRAINBLAST Query: {RESET}{GREEN}{command}{RESET}\n"
     separator = f"{YELLOW}{'-' * 100}{RESET}\n"

     try:
-
+
         words = command.split()

         if not words:
             return {"output": "Please provide search terms to use brainblast.", "messages": messages or []}

-
+
         all_chunks = []

-
+
         all_chunks.extend(words)

-
+
         if len(words) >= 2:
             for i in range(len(words) - 1):
                 all_chunks.append(f"{words[i]} {words[i+1]}")

-
+
         if len(words) >= 4:
             for i in range(len(words) - 3):
                 all_chunks.append(f"{words[i]} {words[i+1]} {words[i+2]} {words[i+3]}")

-
+
         if len(words) > 1:
             all_chunks.append(command)

-
+
         unique_chunks = []
         for chunk in all_chunks:
             if chunk not in unique_chunks:
                 unique_chunks.append(chunk)

-
+
         all_results = []
         chunk_results = {}

         for chunk in unique_chunks:
             results = command_history.search_conversations(chunk)
             if results:
-                chunk_results[chunk] = results[:top_k]
+                chunk_results[chunk] = results[:top_k]
                 all_results.extend(results[:top_k])

-
+
         unique_results = []
         seen_ids = set()
         for result in all_results:
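The chunk expansion above turns a query into unigrams, adjacent bigrams, adjacent 4-grams, and the full phrase. A worked example of that expansion, mirroring the loops in the hunk (the sample query is made up):

command = "fix sqlite schema migration"
words = command.split()

all_chunks = list(words)  # unigrams

if len(words) >= 2:  # adjacent bigrams
    for i in range(len(words) - 1):
        all_chunks.append(f"{words[i]} {words[i+1]}")

if len(words) >= 4:  # adjacent 4-grams
    for i in range(len(words) - 3):
        all_chunks.append(f"{words[i]} {words[i+1]} {words[i+2]} {words[i+3]}")

if len(words) > 1:  # the full query last
    all_chunks.append(command)

print(all_chunks)
# ['fix', 'sqlite', 'schema', 'migration',
#  'fix sqlite', 'sqlite schema', 'schema migration',
#  'fix sqlite schema migration', 'fix sqlite schema migration']
# With a 4-word query the 4-gram equals the full query, so it appears
# twice here; the unique_chunks pass in the diff removes the duplicate.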
@@ -517,7 +517,7 @@ def execute_brainblast_command(
             result_message = f"No matches found for any combination of terms in: {command}"
             render_markdown(f"BRAINBLAST SEARCH: {header}{separator}{result_message}")

-
+
             prompt = f"""
             The user asked for a brainblast search with: {command}

@@ -532,13 +532,13 @@
             )
             return {'output':response.get('response'), 'messages':response.get('messages') or []}

-
+
         processed_chunks = []
         for chunk, results in chunk_results.items():
             if results:
                 chunk_display = f"{BOLD}{BLUE}Results for '{chunk}':{RESET}\n"

-                for i, result in enumerate(results[:3], 1):
+                for i, result in enumerate(results[:3], 1):
                     cmd = result.get('content', '')
                     timestamp = result.get('timestamp', '')

@@ -550,25 +550,25 @@
                 processed_chunks.append(chunk_display)


-
+
         plain_results = []
-        for i, result in enumerate(unique_results[:15], 1):
+        for i, result in enumerate(unique_results[:15], 1):
            content = result.get('content', '')[0:250]
            timestamp = result.get('timestamp', '')
            location = result.get('directory_path', '')

-
+
            plain_results.append(
                f"{i}. [{timestamp}] Command: {cmd}\n Location: {location}\n Output: {content[:150] + ('...' if len(content) > 150 else '')}"
            )

-
+
        term_summary = []
        for chunk, results in chunk_results.items():
            if results:
                term_summary.append(f"Term '{chunk}' matched {len(results)} commands")

-
+
        f=', '.join(term_summary)
        e="\n\n".join(plain_results)
        prompt = f"""
@@ -585,7 +585,7 @@
        Please analyze these results and attempt to generate some novel insight about them in one sentence. think outside the box.
        Provide a summary as well.
        """

-
+
        response = get_llm_response(
            prompt,
            **kwargs,