npcpy 1.2.15__tar.gz → 1.2.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcpy-1.2.15/npcpy.egg-info → npcpy-1.2.17}/PKG-INFO +1 -1
- npcpy-1.2.17/npcpy/ft/diff.py +1 -0
- npcpy-1.2.17/npcpy/ft/ge.py +1 -0
- npcpy-1.2.17/npcpy/ft/memory_trainer.py +161 -0
- npcpy-1.2.17/npcpy/ft/rl.py +1 -0
- npcpy-1.2.17/npcpy/ft/sft.py +1 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/memory/command_history.py +130 -1
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/memory/knowledge_graph.py +31 -23
- npcpy-1.2.17/npcpy/memory/memory_processor.py +155 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/npc_compiler.py +11 -9
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/serve.py +115 -57
- npcpy-1.2.17/npcpy/work/__init__.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17/npcpy.egg-info}/PKG-INFO +1 -1
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy.egg-info/SOURCES.txt +7 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/setup.py +1 -1
- {npcpy-1.2.15 → npcpy-1.2.17}/LICENSE +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/MANIFEST.in +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/README.md +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/__init__.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/__init__.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/audio.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/data_models.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/image.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/load.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/text.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/video.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/data/web.py +0 -0
- {npcpy-1.2.15/npcpy/gen → npcpy-1.2.17/npcpy/ft}/__init__.py +0 -0
- {npcpy-1.2.15/npcpy/memory → npcpy-1.2.17/npcpy/gen}/__init__.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/gen/audio_gen.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/gen/embeddings.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/gen/image_gen.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/gen/response.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/gen/video_gen.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/llm_funcs.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/main.py +0 -0
- {npcpy-1.2.15/npcpy/mix → npcpy-1.2.17/npcpy/memory}/__init__.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/memory/kg_vis.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/memory/search.py +0 -0
- {npcpy-1.2.15/npcpy/sql → npcpy-1.2.17/npcpy/mix}/__init__.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/mix/debate.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/npc_sysenv.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/npcs.py +0 -0
- {npcpy-1.2.15/npcpy/work → npcpy-1.2.17/npcpy/sql}/__init__.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/sql/model_runner.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/sql/npcsql.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/tools.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/work/desktop.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/work/plan.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy/work/trigger.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy.egg-info/dependency_links.txt +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy.egg-info/requires.txt +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/npcpy.egg-info/top_level.txt +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/setup.cfg +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_audio.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_command_history.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_image.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_llm_funcs.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_load.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_npc_compiler.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_npcsql.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_response.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_serve.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_text.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_tools.py +0 -0
- {npcpy-1.2.15 → npcpy-1.2.17}/tests/test_web.py +0 -0
--- /dev/null
+++ npcpy-1.2.17/npcpy/ft/diff.py
@@ -0,0 +1 @@
+# finetuning diffuser models

--- /dev/null
+++ npcpy-1.2.17/npcpy/ft/ge.py
@@ -0,0 +1 @@
+# genetic engineering for using genetic algorithms with LLMs

--- /dev/null
+++ npcpy-1.2.17/npcpy/ft/memory_trainer.py
@@ -0,0 +1,161 @@
+import torch
+import torch.nn as nn
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
+from torch.utils.data import Dataset
+import json
+from typing import List, Dict, Tuple
+import random
+
+class MemoryDataset(Dataset):
+    def __init__(self, examples: List[Dict], tokenizer, max_length=512):
+        self.examples = examples
+        self.tokenizer = tokenizer
+        self.max_length = max_length
+
+    def __len__(self):
+        return len(self.examples)
+
+    def __getitem__(self, idx):
+        example = self.examples[idx]
+
+
+        text = f"Memory: {example['memory']}\nContext: {example.get('context', '')}"
+
+        encoding = self.tokenizer(
+            text,
+            truncation=True,
+            padding='max_length',
+            max_length=self.max_length,
+            return_tensors='pt'
+        )
+
+        return {
+            'input_ids': encoding['input_ids'].flatten(),
+            'attention_mask': encoding['attention_mask'].flatten(),
+            'labels': torch.tensor(example['label'], dtype=torch.long)
+        }
+
+class MemoryTrainer:
+    def __init__(self, model_name="google/gemma-2b", device="cpu"):
+        self.device = device
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+        if self.tokenizer.pad_token is None:
+            self.tokenizer.pad_token = self.tokenizer.eos_token
+
+
+        self.model = AutoModelForSequenceClassification.from_pretrained(
+            model_name,
+            num_labels=3
+        ).to(device)
+
+    def prepare_training_data(self, approved_memories: List[Dict],
+                              rejected_memories: List[Dict]) -> List[Dict]:
+        """Prepare training data from memory examples"""
+        examples = []
+
+
+        for memory in approved_memories:
+            examples.append({
+                "memory": memory.get("final_memory") or memory.get("initial_memory"),
+                "context": memory.get("context", ""),
+                "label": 1
+            })
+
+
+        for memory in rejected_memories:
+            examples.append({
+                "memory": memory.get("initial_memory"),
+                "context": memory.get("context", ""),
+                "label": 0
+            })
+
+
+        edited_examples = []
+        for memory in approved_memories[:len(rejected_memories)//2]:
+            if memory.get("final_memory") and memory.get("initial_memory"):
+
+                edited_examples.append({
+                    "memory": memory.get("initial_memory"),
+                    "context": memory.get("context", ""),
+                    "label": 2
+                })
+
+        examples.extend(edited_examples)
+        random.shuffle(examples)
+        return examples
+
+    def train(self, approved_memories: List[Dict], rejected_memories: List[Dict],
+              output_dir: str = "./memory_model", epochs: int = 3):
+        """Train the memory classification model"""
+
+        if len(approved_memories) < 10 or len(rejected_memories) < 10:
+            print("Not enough training data. Need at least 10 approved and 10 rejected memories.")
+            return False
+
+        training_data = self.prepare_training_data(approved_memories, rejected_memories)
+
+
+        split_idx = int(0.8 * len(training_data))
+        train_data = training_data[:split_idx]
+        val_data = training_data[split_idx:]
+
+        train_dataset = MemoryDataset(train_data, self.tokenizer)
+        val_dataset = MemoryDataset(val_data, self.tokenizer)
+
+        training_args = TrainingArguments(
+            output_dir=output_dir,
+            num_train_epochs=epochs,
+            per_device_train_batch_size=4,
+            per_device_eval_batch_size=4,
+            warmup_steps=100,
+            weight_decay=0.01,
+            logging_dir='./logs',
+            evaluation_strategy="epoch",
+            save_strategy="epoch",
+            load_best_model_at_end=True,
+        )
+
+        trainer = Trainer(
+            model=self.model,
+            args=training_args,
+            train_dataset=train_dataset,
+            eval_dataset=val_dataset,
+        )
+
+        trainer.train()
+        trainer.save_model()
+        self.tokenizer.save_pretrained(output_dir)
+
+        print(f"Model trained and saved to {output_dir}")
+        return True
+
+    def predict_memory_action(self, memory_content: str, context: str = "") -> Tuple[str, float]:
+        """Predict what action to take on a memory"""
+        text = f"Memory: {memory_content}\nContext: {context}"
+
+        encoding = self.tokenizer(
+            text,
+            truncation=True,
+            padding=True,
+            max_length=512,
+            return_tensors='pt'
+        ).to(self.device)
+
+        with torch.no_grad():
+            outputs = self.model(**encoding)
+            probabilities = torch.softmax(outputs.logits, dim=-1)
+            predicted_class = torch.argmax(probabilities, dim=-1).item()
+            confidence = probabilities[0][predicted_class].item()
+
+        actions = {0: "model-rejected", 1: "model-approved", 2: "needs-editing"}
+        return actions[predicted_class], confidence
+
+    def auto_approve_memory(self, memory_content: str, context: str = "",
+                            confidence_threshold: float = 0.8) -> Dict:
+        """Auto-approve memory if confidence is high enough"""
+        action, confidence = self.predict_memory_action(memory_content, context)
+
+        if confidence >= confidence_threshold:
+            return {"action": action, "confidence": confidence, "auto_processed": True}
+        else:
+            return {"action": "pending_approval", "confidence": confidence, "auto_processed": False}

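The new `npcpy/ft/memory_trainer.py` wraps a 3-label sequence classifier (rejected / approved / needs-editing) around Hugging Face `Trainer`. A minimal usage sketch follows, assuming the model weights are available via Hugging Face and using made-up example data; none of this snippet appears in the package itself:

```python
# Hypothetical usage of npcpy.ft.memory_trainer.MemoryTrainer; the example data is illustrative.
from npcpy.ft.memory_trainer import MemoryTrainer

# train() requires at least 10 approved and 10 rejected memories.
approved = [{"initial_memory": f"useful fact {i}", "final_memory": f"useful fact {i}", "context": ""} for i in range(12)]
rejected = [{"initial_memory": f"noise {i}", "context": ""} for i in range(12)]

trainer = MemoryTrainer(model_name="google/gemma-2b", device="cpu")  # package default; needs the HF weights
trainer.train(approved, rejected, output_dir="./memory_model", epochs=1)

# Classification returns one of "model-rejected", "model-approved", "needs-editing" plus a confidence.
action, confidence = trainer.predict_memory_action("User prefers concise answers", context="chat history")
print(action, confidence)

# auto_approve_memory only acts when confidence clears the threshold; otherwise it defers to a human.
print(trainer.auto_approve_memory("User is based in Berlin", confidence_threshold=0.8))
```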
--- /dev/null
+++ npcpy-1.2.17/npcpy/ft/rl.py
@@ -0,0 +1 @@
+# tools for reinforcement learning

--- /dev/null
+++ npcpy-1.2.17/npcpy/ft/sft.py
@@ -0,0 +1 @@
+# structured fine tuning of LLMs to produce structured output

--- npcpy-1.2.15/npcpy/memory/command_history.py
+++ npcpy-1.2.17/npcpy/memory/command_history.py
@@ -8,7 +8,7 @@ import numpy as np
 
 try:
     import sqlalchemy
-    from sqlalchemy import create_engine, text, MetaData, Table, Column, Integer, String, Text, DateTime, LargeBinary, ForeignKey, Boolean
+    from sqlalchemy import create_engine, text, MetaData, Table, Column, Integer, String, Text, DateTime, LargeBinary, ForeignKey, Boolean, func
     from sqlalchemy.engine import Engine, Connection as SQLAlchemyConnection
     from sqlalchemy.exc import SQLAlchemyError
     from sqlalchemy.sql import select, insert, update, delete

@@ -477,6 +477,22 @@ class CommandHistory:
             Column('duration_ms', Integer)
         )
 
+        Table('memory_lifecycle', metadata,
+            Column('id', Integer, primary_key=True, autoincrement=True),
+            Column('message_id', String(50), nullable=False),
+            Column('conversation_id', String(100), nullable=False),
+            Column('npc', String(100), nullable=False),
+            Column('team', String(100), nullable=False),
+            Column('directory_path', Text, nullable=False),
+            Column('timestamp', String(50), nullable=False),
+            Column('initial_memory', Text, nullable=False),
+            Column('final_memory', Text),
+            Column('status', String(50), nullable=False),
+            Column('model', String(100)),
+            Column('provider', String(100)),
+            Column('created_at', DateTime, default=func.now())
+        )
+
 
         metadata.create_all(self.engine, checkfirst=True)
 

@@ -580,6 +596,119 @@ class CommandHistory:
 
         return message_id
 
+    def add_memory_to_database(self, message_id: str, conversation_id: str, npc: str, team: str,
+                               directory_path: str, initial_memory: str, status: str,
+                               model: str = None, provider: str = None, final_memory: str = None):
+        """Store a memory entry in the database"""
+        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+        stmt = """
+        INSERT INTO memory_lifecycle
+        (message_id, conversation_id, npc, team, directory_path, timestamp,
+         initial_memory, final_memory, status, model, provider)
+        VALUES (:message_id, :conversation_id, :npc, :team, :directory_path,
+                :timestamp, :initial_memory, :final_memory, :status, :model, :provider)
+        """
+
+        params = {
+            "message_id": message_id, "conversation_id": conversation_id,
+            "npc": npc, "team": team, "directory_path": directory_path,
+            "timestamp": timestamp, "initial_memory": initial_memory,
+            "final_memory": final_memory, "status": status,
+            "model": model, "provider": provider
+        }
+
+        return self._execute_returning_id(stmt, params)
+
+    def search_memory(self, query: str, npc: str = None, team: str = None,
+                      directory_path: str = None, status_filter: str = None, limit: int = 10):
+        """Search memories with hierarchical scope"""
+        conditions = ["LOWER(initial_memory) LIKE LOWER(:query) OR LOWER(final_memory) LIKE LOWER(:query)"]
+        params = {"query": f"%{query}%"}
+
+        if status_filter:
+            conditions.append("status = :status")
+            params["status"] = status_filter
+
+
+        order_parts = []
+        if npc:
+            order_parts.append(f"CASE WHEN npc = '{npc}' THEN 1 ELSE 2 END")
+        if team:
+            order_parts.append(f"CASE WHEN team = '{team}' THEN 1 ELSE 2 END")
+        if directory_path:
+            order_parts.append(f"CASE WHEN directory_path = '{directory_path}' THEN 1 ELSE 2 END")
+
+        order_clause = ", ".join(order_parts) + ", created_at DESC" if order_parts else "created_at DESC"
+
+        stmt = f"""
+        SELECT * FROM memory_lifecycle
+        WHERE {' AND '.join(conditions)}
+        ORDER BY {order_clause}
+        LIMIT :limit
+        """
+        params["limit"] = limit
+
+        return self._fetch_all(stmt, params)
+
+    def get_memory_examples_for_context(self, npc: str, team: str, directory_path: str,
+                                        n_approved: int = 10, n_rejected: int = 10):
+        """Get recent approved and rejected memories for learning context"""
+
+        approved_stmt = """
+        SELECT initial_memory, final_memory, status FROM memory_lifecycle
+        WHERE status IN ('human-approved', 'model-approved')
+        ORDER BY
+            CASE WHEN npc = :npc AND team = :team AND directory_path = :path THEN 1
+                 WHEN npc = :npc AND team = :team THEN 2
+                 WHEN team = :team THEN 3
+                 ELSE 4 END,
+            created_at DESC
+        LIMIT :n_approved
+        """
+
+        rejected_stmt = """
+        SELECT initial_memory, status FROM memory_lifecycle
+        WHERE status IN ('human-rejected', 'model-rejected')
+        ORDER BY
+            CASE WHEN npc = :npc AND team = :team AND directory_path = :path THEN 1
+                 WHEN npc = :npc AND team = :team THEN 2
+                 WHEN team = :team THEN 3
+                 ELSE 4 END,
+            created_at DESC
+        LIMIT :n_rejected
+        """
+
+        params = {"npc": npc, "team": team, "path": directory_path,
+                  "n_approved": n_approved, "n_rejected": n_rejected}
+
+        approved = self._fetch_all(approved_stmt, params)
+        rejected = self._fetch_all(rejected_stmt, params)
+
+        return {"approved": approved, "rejected": rejected}
+
+    def get_pending_memories(self, limit: int = 50):
+        """Get memories pending human approval"""
+        stmt = """
+        SELECT * FROM memory_lifecycle
+        WHERE status = 'pending_approval'
+        ORDER BY created_at ASC
+        LIMIT :limit
+        """
+        return self._fetch_all(stmt, {"limit": limit})
+
+    def update_memory_status(self, memory_id: int, new_status: str, final_memory: str = None):
+        """Update memory status and optionally final_memory"""
+        stmt = """
+        UPDATE memory_lifecycle
+        SET status = :status, final_memory = :final_memory
+        WHERE id = :memory_id
+        """
+        params = {"status": new_status, "final_memory": final_memory, "memory_id": memory_id}
+
+        with self.engine.begin() as conn:
+            conn.execute(text(stmt), params)
+
     def add_attachment(self, message_id, name, attachment_type, data, size, file_path=None):
         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
         stmt = """

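Taken together, the `memory_lifecycle` table and these methods give `CommandHistory` a small approval workflow. A sketch of the intended flow with placeholder values; the constructor arguments are an assumption and depend on how your database is configured:

```python
# Hypothetical walkthrough of the new memory lifecycle API; paths and names are placeholders.
from npcpy.memory.command_history import CommandHistory

history = CommandHistory()  # assumed default DB path; pass an explicit path if your setup uses one

memory_id = history.add_memory_to_database(
    message_id="msg_001",
    conversation_id="conv_001",
    npc="assistant",
    team="default",
    directory_path="/home/user/project",
    initial_memory="The user prefers tabs over spaces",
    status="pending_approval",
    model="llama3.2",
    provider="ollama",
)

# Review whatever is waiting for a human decision, then record the outcome.
for row in history.get_pending_memories(limit=10):
    print(row)
history.update_memory_status(memory_id, "human-approved")

# Scoped retrieval: results matching the same npc/team/directory are ranked first.
hits = history.search_memory("tabs", npc="assistant", team="default", status_filter="human-approved")
examples = history.get_memory_examples_for_context("assistant", "default", "/home/user/project")
```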
--- npcpy-1.2.15/npcpy/memory/knowledge_graph.py
+++ npcpy-1.2.17/npcpy/memory/knowledge_graph.py
@@ -329,9 +329,9 @@ def kg_initial(content,
     }
 
 
-
 def kg_evolve_incremental(existing_kg,
-                          new_content_text,
+                          new_content_text=None,
+                          new_facts=None,
                           model = None,
                           provider=None,
                           npc=None,

@@ -340,15 +340,12 @@ def kg_evolve_incremental(existing_kg,
                           link_concepts_facts = False,
                           link_concepts_concepts=False,
                           link_facts_facts = False,
-
                           ):
 
     current_gen = existing_kg.get('generation', 0)
     next_gen = current_gen + 1
     print(f"\n--- ABSORBING INFO: Gen {current_gen} -> Gen {next_gen} ---")
 
-    print('extracting facts...')
-
     newly_added_concepts = []
     concept_links = list(existing_kg.get('concept_links', []))
     fact_to_concept_links = defaultdict(list,

@@ -360,25 +357,36 @@
     existing_concept_names = {c['name'] for c in existing_concepts}
     existing_fact_statements = [f['statement'] for f in existing_facts]
     all_concept_names = list(existing_concept_names)
+
     all_new_facts = []
-
-
-
-
-
-
-
-
-
-
+    print(npc, npc.model, npc.provider)
+
+    if new_facts:
+        all_new_facts = new_facts
+        print(f'using pre-approved facts: {len(all_new_facts)}')
+    elif new_content_text:
+        print('extracting facts from content...')
+        if len(new_content_text) > 10000:
+            starting_point = random.randint(0, len(new_content_text)-10000)
+            for n in range(len(new_content_text)//10000):
+                content_to_sample = new_content_text[n*10000:(n+1)*10000]
+                facts = get_facts(content_to_sample,
+                                  model=model,
+                                  provider=provider,
+                                  npc = npc,
+                                  context=context)
+                all_new_facts.extend(facts)
+                print(facts)
+        else:
+            all_new_facts = get_facts(new_content_text,
+                                      model=model,
+                                      provider=provider,
+                                      npc = npc,
+                                      context=context)
+            print(all_new_facts)
     else:
-
-
-                                  model=model,
-                                  provider=provider,
-                                  npc = npc,
-                                  context=context)
-
+        print("No new content or facts provided")
+        return existing_kg, {}
 
     for fact in all_new_facts:
         fact['generation'] = next_gen

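With this change `kg_evolve_incremental` can either extract facts from raw text (chunked in roughly 10,000-character windows) or accept pre-approved facts and skip extraction. A hedged sketch of both call styles; the shape of `existing_kg`, the return value, and `my_npc` (an already-constructed NPC object) are assumptions, not details confirmed by the diff:

```python
# Illustrative only: the kg dict shape and my_npc are assumed, not taken from the package.
from npcpy.memory.knowledge_graph import kg_evolve_incremental

existing_kg = {"generation": 0, "facts": [], "concepts": [], "concept_links": []}

# 1) Pre-approved facts, no extraction step:
kg, delta = kg_evolve_incremental(existing_kg, new_facts=[{"statement": "User lives in Berlin"}], npc=my_npc)

# 2) Raw text, extracted via get_facts in ~10k-character windows:
kg, delta = kg_evolve_incremental(existing_kg, new_content_text=long_document_text, npc=my_npc)

# With neither argument the function returns the graph unchanged along with an empty dict.
```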
@@ -393,6 +401,7 @@ def kg_evolve_incremental(existing_kg,
                                           provider = provider,
                                           npc=npc,
                                           context=context)
+    print(candidate_concepts)
     print('checking group uniqueness')
     for cand_concept in candidate_concepts:
         cand_name = cand_concept['name']

@@ -412,7 +421,6 @@ def kg_evolve_incremental(existing_kg,
                                            context)
         for related_name in related_concepts:
             if related_name != cand_name:
-
                 concept_links.append((cand_name, related_name))
         all_concept_names.append(cand_name)
 

--- /dev/null
+++ npcpy-1.2.17/npcpy/memory/memory_processor.py
@@ -0,0 +1,155 @@
+from dataclasses import dataclass
+from typing import List, Dict, Any, Optional
+from datetime import datetime
+import threading
+import queue
+import time
+
+@dataclass
+class MemoryItem:
+    message_id: str
+    conversation_id: str
+    npc: str
+    team: str
+    directory_path: str
+    content: str
+    context: str
+    model: str
+    provider: str
+
+class MemoryApprovalQueue:
+    def __init__(self, command_history):
+        self.command_history = command_history
+        self.pending_queue = queue.Queue()
+        self.approval_results = queue.Queue()
+        self.processing_thread = None
+        self.running = False
+
+    def add_memory(self, memory_item: MemoryItem):
+        """Add memory to processing queue (non-blocking)"""
+        self.pending_queue.put(memory_item)
+
+    def start_background_processing(self):
+        """Start background thread for memory processing"""
+        if self.processing_thread and self.processing_thread.is_alive():
+            return
+
+        self.running = True
+        self.processing_thread = threading.Thread(target=self._process_queue)
+        self.processing_thread.daemon = True
+        self.processing_thread.start()
+
+    def _process_queue(self):
+        """Background processing of memory queue"""
+        while self.running:
+            try:
+
+                batch = []
+                try:
+
+                    memory = self.pending_queue.get(timeout=1.0)
+                    batch.append(memory)
+
+
+                    while len(batch) < 10:
+                        try:
+                            memory = self.pending_queue.get_nowait()
+                            batch.append(memory)
+                        except queue.Empty:
+                            break
+
+                    self._process_memory_batch(batch)
+
+                except queue.Empty:
+                    continue
+
+            except Exception as e:
+                print(f"Error in memory processing: {e}")
+                time.sleep(1)
+
+    def _process_memory_batch(self, memories: List[MemoryItem]):
+        """Process a batch of memories"""
+        for memory in memories:
+
+            memory_id = self.command_history.add_memory_to_database(
+                message_id=memory.message_id,
+                conversation_id=memory.conversation_id,
+                npc=memory.npc,
+                team=memory.team,
+                directory_path=memory.directory_path,
+                initial_memory=memory.content,
+                status="pending_approval",
+                model=memory.model,
+                provider=memory.provider
+            )
+
+
+            self.approval_results.put({
+                "memory_id": memory_id,
+                "content": memory.content,
+                "context": memory.context,
+                "npc": memory.npc
+            })
+
+    def get_approval_batch(self, max_items: int = 5) -> List[Dict]:
+        """Get batch of memories ready for approval"""
+        batch = []
+        try:
+            while len(batch) < max_items:
+                item = self.approval_results.get_nowait()
+                batch.append(item)
+        except queue.Empty:
+            pass
+        return batch
+
+    def stop_processing(self):
+        """Stop background processing"""
+        self.running = False
+        if self.processing_thread:
+            self.processing_thread.join(timeout=2.0)
+
+def memory_approval_ui(memories: List[Dict]) -> List[Dict]:
+    """Simple CLI interface for memory approval"""
+    if not memories:
+        return []
+
+    print(f"\n📝 {len(memories)} memories ready for approval:")
+
+    approvals = []
+    for i, memory in enumerate(memories, 1):
+        print(f"\n--- Memory {i}/{len(memories)} ---")
+        print(f"NPC: {memory['npc']}")
+        print(f"Content: {memory['content'][:200]}{'...' if len(memory['content']) > 200 else ''}")
+
+        while True:
+            choice = input("(a)pprove, (r)eject, (e)dit, (s)kip, (q)uit, (A)pprove all: ").strip().lower()
+
+            if choice == 'a':
+                approvals.append({"memory_id": memory['memory_id'], "decision": "human-approved"})
+                break
+            elif choice == 'r':
+                approvals.append({"memory_id": memory['memory_id'], "decision": "human-rejected"})
+                break
+            elif choice == 'e':
+                edited = input("Edit memory: ").strip()
+                if edited:
+                    approvals.append({
+                        "memory_id": memory['memory_id'],
+                        "decision": "human-edited",
+                        "final_memory": edited
+                    })
+                break
+            elif choice == 's':
+                break
+            elif choice == 'q':
+                return approvals
+            elif choice == 'A':
+
+                for remaining_memory in memories[i-1:]:
+                    approvals.append({
+                        "memory_id": remaining_memory['memory_id'],
+                        "decision": "human-approved"
+                    })
+                return approvals
+
+    return approvals

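`memory_processor.py` decouples memory capture from review: a daemon thread drains `pending_queue` in batches of up to 10, persists each item as `pending_approval`, and pushes it onto `approval_results` for a human pass. A wiring sketch with placeholder values; the `CommandHistory` constructor arguments are an assumption:

```python
# Hypothetical wiring of MemoryApprovalQueue; names and paths are illustrative.
import time
from npcpy.memory.command_history import CommandHistory
from npcpy.memory.memory_processor import MemoryApprovalQueue, MemoryItem, memory_approval_ui

q = MemoryApprovalQueue(CommandHistory())  # assumed default DB path
q.start_background_processing()

q.add_memory(MemoryItem(
    message_id="msg_001", conversation_id="conv_001", npc="assistant", team="default",
    directory_path="/home/user/project", content="User is building a Flask app",
    context="derived from the last conversation turn", model="llama3.2", provider="ollama",
))

time.sleep(2)  # give the background thread a moment to persist the item
pending = q.get_approval_batch(max_items=5)
decisions = memory_approval_ui(pending)  # interactive CLI: approve / reject / edit / skip
q.stop_processing()
```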
--- npcpy-1.2.15/npcpy/npc_compiler.py
+++ npcpy-1.2.17/npcpy/npc_compiler.py
@@ -746,7 +746,9 @@ class NPC:
             npc_name=self.name,
             directory_path=directory_path
         )
-
+        print('# of facts: ', len(kg_data['facts']))
+        print('# of facts: ', len(kg_data['concepts']))
+
         if not kg_data.get('facts') and not kg_data.get('concepts'):
             return self._initialize_kg_from_history()
 

@@ -1215,7 +1217,7 @@ class NPC:
     def write_code(self, task_description: str, language: str = "python", show=True) -> str:
         """Generate and execute code for a specific task, returning the result"""
         if language.lower() != "python":
-
+
             code_prompt = f"""Write {language} code for the following task:
 {task_description}
 

@@ -1224,7 +1226,7 @@ class NPC:
             response = self.get_llm_response(code_prompt, tool_choice=False )
             return response.get('response', 'Unable to generate code')
 
-
+
         code_prompt = f"""Write Python code for the following task:
 {task_description}
 

@@ -1245,7 +1247,7 @@ class NPC:
         response = self.get_llm_response(code_prompt, tool_choice= False)
         generated_code = response.get('response', '')
 
-
+
         if '```python' in generated_code:
             code_lines = generated_code.split('\n')
             start_idx = None

@@ -1265,7 +1267,7 @@ class NPC:
             generated_code = '\n'.join(code_lines[start_idx:])
 
         try:
-
+
             exec_globals = {
                 "__builtins__": __builtins__,
                 "npc": self,

@@ -1290,21 +1292,21 @@ class NPC:
 
             exec_locals = {}
 
-
+
             exec(generated_code, exec_globals, exec_locals)
 
             if show:
                 print('Executing code', generated_code)
 
-
+
             if "output" in exec_locals:
                 result = exec_locals["output"]
-
+
                 self.shared_context.update({k: v for k, v in exec_locals.items()
                                             if not k.startswith('_') and not callable(v)})
                 return f"Code executed successfully. Result: {result}"
             else:
-
+
                 meaningful_vars = {k: v for k, v in exec_locals.items()
                                    if not k.startswith('_') and not callable(v)}
 

--- npcpy-1.2.15/npcpy/serve.py
+++ npcpy-1.2.17/npcpy/serve.py
@@ -2023,6 +2023,28 @@ def stream():
 
 
 
+@app.route("/api/memory/approve", methods=["POST"])
+def approve_memories():
+    try:
+        data = request.json
+        approvals = data.get("approvals", [])
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+
+        for approval in approvals:
+            command_history.update_memory_status(
+                approval['memory_id'],
+                approval['decision'],
+                approval.get('final_memory')
+            )
+
+        return jsonify({"success": True, "processed": len(approvals)})
+
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
+
+
+
 
 @app.route("/api/execute", methods=["POST"])
 def execute():

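The new endpoint expects an `approvals` list whose entries mirror what `memory_approval_ui` produces. A sketch of a client call; the base URL is an assumption about how `npcpy serve` is run locally, not something stated in the diff:

```python
# Hypothetical client call to the new /api/memory/approve route (host and port assumed).
import requests

payload = {
    "approvals": [
        {"memory_id": 1, "decision": "human-approved"},
        {"memory_id": 2, "decision": "human-edited", "final_memory": "User prefers dark mode"},
        {"memory_id": 3, "decision": "human-rejected"},
    ]
}
resp = requests.post("http://localhost:5337/api/memory/approve", json=payload)
print(resp.json())  # on success: {"success": true, "processed": 3}
```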
@@ -2166,10 +2188,10 @@ def execute():
         dot_count = 0
         interrupted = False
         tool_call_data = {"id": None, "function_name": None, "arguments": ""}
+        memory_data = None
 
         try:
-            for response_chunk in
-
+            for response_chunk in stream_response.get('response', stream_response.get('output')):
                 with cancellation_lock:
                     if cancellation_flags.get(current_stream_id, False):
                         print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")

@@ -2179,28 +2201,7 @@ def execute():
                 print('.', end="", flush=True)
                 dot_count += 1
 
-
-                if isinstance(response_chunk, dict) and response_chunk.get("role") == "decision":
-
-                    chunk_data = {
-                        "id": None, "object": None, "created": None, "model": model,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta":
-                                {
-                                    "content": response_chunk.get('content', ''),
-                                    "role": "assistant"
-                                },
-                                "finish_reason": None
-                            }
-                        ]
-                    }
-                    complete_response.append(response_chunk.get('content', ''))
-                    yield f"data: {json.dumps(chunk_data)}\n\n"
-                    continue
-
-                elif "hf.co" in model or provider == 'ollama':
+                if "hf.co" in model or provider == 'ollama':
                     chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
                     if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
                         for tool_call in response_chunk["message"]["tool_calls"]:

@@ -2210,46 +2211,41 @@ def execute():
                                 if "name" in tool_call["function"]:
                                     tool_call_data["function_name"] = tool_call["function"]["name"]
                                 if "arguments" in tool_call["function"]:
-
+                                    arg_val = tool_call["function"]["arguments"]
+                                    if isinstance(arg_val, dict):
+                                        arg_val = json.dumps(arg_val)
+                                    tool_call_data["arguments"] += arg_val
                     if chunk_content:
                         complete_response.append(chunk_content)
                         chunk_data = {
                             "id": None, "object": None, "created": response_chunk["created_at"], "model": response_chunk["model"],
                             "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
                         }
+                        yield f"data: {json.dumps(chunk_data)}\n\n"
                 else:
                     chunk_content = ""
                     reasoning_content = ""
-
-
-
-
-
-
-                    if tool_call.function:
-
-
-
-
-
-
-
-
-                    if chunk_content:
-                        complete_response.append(chunk_content)
-                        chunk_data = {
-                            "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
-                            "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
-                        }
-                    else:
-                        chunk_content = response_chunk
+                    for choice in response_chunk.choices:
+                        if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
+                            for tool_call in choice.delta.tool_calls:
+                                if tool_call.id:
+                                    tool_call_data["id"] = tool_call.id
+                                if tool_call.function:
+                                    if hasattr(tool_call.function, "name") and tool_call.function.name:
+                                        tool_call_data["function_name"] = tool_call.function.name
+                                    if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
+                                        tool_call_data["arguments"] += tool_call.function.arguments
+                    for choice in response_chunk.choices:
+                        if hasattr(choice.delta, "reasoning_content"):
+                            reasoning_content += choice.delta.reasoning_content
+                    chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
+                    if chunk_content:
                         complete_response.append(chunk_content)
-
-
-
-
-
-                        yield f"data: {json.dumps(chunk_data)}\n\n"
+                        chunk_data = {
+                            "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
+                            "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
+                        }
+                        yield f"data: {json.dumps(chunk_data)}\n\n"
 
         except Exception as e:
             print(f"\nAn exception occurred during streaming for {current_stream_id}: {e}")

@@ -2261,13 +2257,74 @@ def execute():
             print('\r' + ' ' * dot_count*2 + '\r', end="", flush=True)
 
             final_response_text = ''.join(complete_response)
+
+            conversation_turn_text = f"User: {commandstr}\nAssistant: {final_response_text}"
+
+            try:
+                memory_examples = command_history.get_memory_examples_for_context(
+                    npc=npc_name,
+                    team=team,
+                    directory_path=current_path
+                )
+
+                memory_context = format_memory_context(memory_examples)
+
+                facts = get_facts(
+                    conversation_turn_text,
+                    model=npc_object.model if npc_object else model,
+                    provider=npc_object.provider if npc_object else provider,
+                    npc=npc_object,
+                    context=memory_context
+                )
+
+                if facts:
+                    memories_for_approval = []
+                    for i, fact in enumerate(facts):
+                        memory_id = command_history.add_memory_to_database(
+                            message_id=f"{conversation_id}_{datetime.now().strftime('%H%M%S')}_{i}",
+                            conversation_id=conversation_id,
+                            npc=npc_name or "default",
+                            team=team or "default",
+                            directory_path=current_path or "/",
+                            initial_memory=fact['statement'],
+                            status="pending_approval",
+                            model=npc_object.model if npc_object else model,
+                            provider=npc_object.provider if npc_object else provider
+                        )
+
+                        memories_for_approval.append({
+                            "memory_id": memory_id,
+                            "content": fact['statement'],
+                            "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+                            "npc": npc_name or "default"
+                        })
+
+                    memory_data = {
+                        "type": "memory_approval",
+                        "memories": memories_for_approval,
+                        "conversation_id": conversation_id
+                    }
+
+            except Exception as e:
+                print(f"Memory generation error: {e}")
+
+            if memory_data:
+                yield f"data: {json.dumps(memory_data)}\n\n"
+
             yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"
 
             npc_name_to_save = npc_object.name if npc_object else ''
             save_conversation_message(
-                command_history,
-
-
+                command_history,
+                conversation_id,
+                "assistant",
+                final_response_text,
+                wd=current_path,
+                model=model,
+                provider=provider,
+                npc=npc_name_to_save,
+                team=team,
+                message_id=message_id,
             )
 
             with cancellation_lock:

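Because the stream can now emit a `memory_approval` event before `message_stop`, SSE clients need one extra branch. A minimal parsing sketch under the assumption that the transport is already set up and each line arrives in the usual `data: {...}` form:

```python
# Sketch of client-side handling for the new "memory_approval" SSE event; transport setup omitted.
import json

def handle_sse_line(raw_line: bytes, pending_memories: list) -> bool:
    """Parse one SSE line; return True once the server signals message_stop."""
    if not raw_line.startswith(b"data: "):
        return False
    event = json.loads(raw_line[len(b"data: "):])
    if event.get("type") == "memory_approval":
        # Entries carry memory_id, content, context, npc; decisions go back to /api/memory/approve.
        pending_memories.extend(event["memories"])
    elif event.get("type") == "message_stop":
        return True
    return False
```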
@@ -2276,6 +2333,7 @@ def execute():
                 print(f"Cleaned up cancellation flag for stream ID: {current_stream_id}")
 
 
+
     return Response(event_stream(stream_id), mimetype="text/event-stream")
 
 @app.route("/api/interrupt", methods=["POST"])

--- npcpy-1.2.15/npcpy.egg-info/SOURCES.txt
+++ npcpy-1.2.17/npcpy.egg-info/SOURCES.txt
@@ -23,6 +23,12 @@ npcpy/data/load.py
 npcpy/data/text.py
 npcpy/data/video.py
 npcpy/data/web.py
+npcpy/ft/__init__.py
+npcpy/ft/diff.py
+npcpy/ft/ge.py
+npcpy/ft/memory_trainer.py
+npcpy/ft/rl.py
+npcpy/ft/sft.py
 npcpy/gen/__init__.py
 npcpy/gen/audio_gen.py
 npcpy/gen/embeddings.py

@@ -33,6 +39,7 @@ npcpy/memory/__init__.py
 npcpy/memory/command_history.py
 npcpy/memory/kg_vis.py
 npcpy/memory/knowledge_graph.py
+npcpy/memory/memory_processor.py
 npcpy/memory/search.py
 npcpy/mix/__init__.py
 npcpy/mix/debate.py

|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|