npcpy-1.3.21-py3-none-any.whl → npcpy-1.3.23-py3-none-any.whl
- npcpy/data/audio.py +58 -286
- npcpy/data/image.py +15 -15
- npcpy/data/web.py +2 -2
- npcpy/gen/audio_gen.py +172 -2
- npcpy/gen/image_gen.py +113 -62
- npcpy/gen/response.py +239 -0
- npcpy/llm_funcs.py +73 -71
- npcpy/memory/command_history.py +117 -69
- npcpy/memory/kg_vis.py +74 -74
- npcpy/npc_compiler.py +261 -26
- npcpy/npc_sysenv.py +4 -1
- npcpy/serve.py +393 -91
- npcpy/work/desktop.py +31 -5
- npcpy-1.3.23.dist-info/METADATA +416 -0
- {npcpy-1.3.21.dist-info → npcpy-1.3.23.dist-info}/RECORD +18 -18
- npcpy-1.3.21.dist-info/METADATA +0 -1039
- {npcpy-1.3.21.dist-info → npcpy-1.3.23.dist-info}/WHEEL +0 -0
- {npcpy-1.3.21.dist-info → npcpy-1.3.23.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.3.21.dist-info → npcpy-1.3.23.dist-info}/top_level.txt +0 -0
npcpy/llm_funcs.py
CHANGED
@@ -681,43 +681,60 @@ def _react_fallback(
     # Cap iterations - after this, return to orchestrator for review/compression
     effective_max = min(max_iterations, 7)
 
+    original_command = command # Preserve original request
+
     for iteration in range(effective_max):
-        # Build history of what's been tried
-        history_text = ""
         if jinx_executions:
-
-
-
-
+            # Tools have already been called — ask model to answer WITHOUT showing tools
+            last_result = str(jinx_executions[-1].get('output', ''))[:1000]
+            prompt = f"""The user asked: {original_command}
+
+You already ran a tool and got: {last_result}
 
-
+Answer the user now. Respond with this JSON:
+{{"action": "answer", "response": "your answer based on the tool result"}}"""
+
+            # Don't pass npc on answer-nudge iterations to avoid system prompt
+            # re-injecting tool descriptions that bias toward more tool calls
+            response = get_llm_response(
+                prompt,
+                model=model,
+                provider=provider,
+                api_url=api_url,
+                api_key=api_key,
+                messages=[], # Clean slate - no history noise
+                npc=None, # No system prompt with tool descriptions
+                team=None,
+                images=generated_images or None,
+                format="json",
+            )
+        else:
+            prompt = f"""Request: {original_command}
 
 Available Tools:
 {jinx_list}
 
 Instructions:
-1.
-2. If you
-3.
-4. Use EXACT parameter names from tool definitions
-5. Do NOT repeat the same tool call with the same inputs{history_text}"""
+1. If you can answer directly without tools, use {{"action": "answer", "response": "your answer"}}
+2. If you need to use a tool, use {{"action": "jinx", "jinx_name": "tool_name", "inputs": {{"param": "value"}}}}
+3. Use EXACT parameter names from tool definitions"""
 
-
-
+            if context:
+                prompt += f"\n\nCurrent context: {context}"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+            response = get_llm_response(
+                prompt,
+                model=model,
+                provider=provider,
+                api_url=api_url,
+                api_key=api_key,
+                messages=current_messages[-10:],
+                npc=npc,
+                team=team,
+                images=((images or []) if iteration == 0 else []) + generated_images or None,
+                format="json",
+                context=context,
+            )
 
         if response.get("usage"):
             total_usage["input_tokens"] += response["usage"].get("input_tokens", 0)
@@ -726,17 +743,16 @@ Instructions:
         decision = response.get("response", {})
         logger.debug(f"[_react_fallback] Raw decision: {str(decision)[:200]}")
 
-
-
-
-
-
-
-
-
-
-
-            return {"messages": current_messages, "output": decision, "usage": total_usage, "jinx_executions": jinx_executions}
+        if not isinstance(decision, dict):
+            logger.debug(f"[_react_fallback] Non-dict response on iteration {iteration} - continuing")
+            # If we already have tool results and model can't produce valid JSON answer,
+            # just return the last tool result directly
+            if jinx_executions:
+                last_output = jinx_executions[-1].get("output", "")
+                logger.debug(f"[_react_fallback] Forcing answer from last tool result")
+                return {"messages": current_messages, "output": str(last_output), "usage": total_usage, "jinx_executions": jinx_executions}
+            context = f"Your response was not valid JSON object. You must respond with a JSON object: either {{\"action\": \"answer\", \"response\": \"...\"}} or {{\"action\": \"jinx\", \"jinx_name\": \"tool_name\", \"inputs\": {{...}}}}"
+            continue
 
         logger.debug(f"[_react_fallback] Parsed decision action: {decision.get('action') if decision else 'None'}")
         if decision.get("action") == "answer":
@@ -784,6 +800,13 @@ Instructions:
             inputs = {k: v for k, v in decision.items() if k not in ('action', 'jinx_name', 'inputs', 'response')}
             logger.debug(f"[_react_fallback] Jinx action: {jinx_name} with inputs: {inputs}")
 
+            # If we already have tool results and model tries to call another tool,
+            # force-return the existing result instead of executing more tools
+            if jinx_executions:
+                last_output = jinx_executions[-1].get("output", "")
+                logger.debug(f"[_react_fallback] Model tried to call '{jinx_name}' after already having results - forcing answer")
+                return {"messages": current_messages, "output": str(last_output), "usage": total_usage, "jinx_executions": jinx_executions}
+
             if jinx_name not in jinxs:
                 context = f"Error: '{jinx_name}' not found. Available: {list(jinxs.keys())}"
                 logger.debug(f"[_react_fallback] Jinx not found: {jinx_name}")
@@ -851,41 +874,20 @@ Instructions:
             # Truncate output for context to avoid sending huge base64 data back to LLM
             output_for_context = str(output)[:8000] + "..." if len(str(output)) > 8000 else str(output)
             context = f"Tool '{jinx_name}' returned: {output_for_context}"
-            command = f"{command}\n\nPrevious: {context}"
 
         else:
-
-
-
-
-
-
-
-
-
-
-
-
-            pass
-            current_messages.append({"role": "user", "content": command})
-            fallback_response = get_llm_response(
-                command,
-                model=model,
-                provider=provider,
-                messages=current_messages[-10:],
-                npc=npc,
-                team=team,
-                stream=stream,
-                context=context,
-            )
-            if fallback_response.get("usage"):
-                total_usage["input_tokens"] += fallback_response["usage"].get("input_tokens", 0)
-                total_usage["output_tokens"] += fallback_response["usage"].get("output_tokens", 0)
-            output = fallback_response.get("response", "")
-            if output and isinstance(output, str):
-                current_messages.append({"role": "assistant", "content": output})
-            return {"messages": current_messages, "output": output, "usage": total_usage, "jinx_executions": jinx_executions}
-            return {"messages": current_messages, "output": str(decision), "usage": total_usage, "jinx_executions": jinx_executions}
+            # Unknown or missing action
+            action_val = decision.get("action")
+            logger.debug(f"[_react_fallback] Unknown action '{action_val}' on iteration {iteration}")
+            # If we have tool results, just return them
+            if jinx_executions:
+                last_output = jinx_executions[-1].get("output", "")
+                return {"messages": current_messages, "output": str(last_output), "usage": total_usage, "jinx_executions": jinx_executions}
+            if jinxs:
+                context = f"Your response had action='{action_val}' which is not valid. You must respond with either {{\"action\": \"answer\", \"response\": \"...\"}} or {{\"action\": \"jinx\", \"jinx_name\": \"tool_name\", \"inputs\": {{...}}}}. Available tools: {list(jinxs.keys())}"
+            else:
+                context = f"Your response had action='{action_val}' which is not valid. Respond with {{\"action\": \"answer\", \"response\": \"your final answer\"}}"
+            continue
 
     logger.debug(f"[_react_fallback] Max iterations - returning {len(current_messages)} messages")
     # If we have jinx executions, return the last output
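For orientation, the whole fallback loop above keys off a single JSON "decision" object that the model is asked to return. The sketch below is illustrative only: the two action shapes ("answer" and "jinx") come from the prompts shown in this diff, while the dispatch_decision helper and its placeholder return strings are hypothetical, not npcpy APIs.

# Illustrative sketch of the decision shapes requested by _react_fallback's prompts.
# dispatch_decision is a hypothetical helper, not part of npcpy.
import json

def dispatch_decision(decision: dict) -> str:
    """Route a parsed decision dict the way the fallback loop expects."""
    if decision.get("action") == "answer":
        # Terminal case: the model answered directly.
        return decision.get("response", "")
    if decision.get("action") == "jinx":
        # Tool case: the model names a jinx and its inputs.
        name = decision.get("jinx_name", "")
        inputs = decision.get("inputs", {})
        return f"would run jinx '{name}' with inputs {json.dumps(inputs)}"
    # Anything else is treated as invalid and re-prompted in the real loop.
    return "invalid decision"

if __name__ == "__main__":
    print(dispatch_decision({"action": "answer", "response": "42"}))
    print(dispatch_decision({"action": "jinx", "jinx_name": "search", "inputs": {"query": "npcpy"}}))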
npcpy/memory/command_history.py
CHANGED
@@ -650,7 +650,11 @@ class CommandHistory:
             Column('timestamp', String(50)),
             Column('npc', String(100)),
             Column('team', String(100)),
-            Column('conversation_id', String(100))
+            Column('conversation_id', String(100)),
+            Column('output', Text),
+            Column('status', String(50)),
+            Column('error_message', Text),
+            Column('duration_ms', Integer)
         )
 
         Table('npc_executions', metadata,
@@ -706,6 +710,22 @@ class CommandHistory:
                 conn.execute(text("ALTER TABLE conversation_history ADD COLUMN cost VARCHAR(50)"))
             except Exception:
                 pass # Column already exists
+            # jinx_executions new columns
+            for col in [
+                "ALTER TABLE jinx_executions ADD COLUMN output TEXT",
+                "ALTER TABLE jinx_executions ADD COLUMN status VARCHAR(50)",
+                "ALTER TABLE jinx_executions ADD COLUMN error_message TEXT",
+                "ALTER TABLE jinx_executions ADD COLUMN duration_ms INTEGER",
+            ]:
+                try:
+                    conn.execute(text(col))
+                except Exception:
+                    pass
+            # drop the redundant jinx_execution_log if it exists
+            try:
+                conn.execute(text("DROP TABLE IF EXISTS jinx_execution_log"))
+            except Exception:
+                pass
 
     def _setup_execution_triggers(self):
         if 'sqlite' in str(self.engine.url):
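Taken together, the table definition and migration above, plus the INSERT statement later in this file, imply roughly the following shape for the jinx_executions table. This is a reconstruction for reference only, not code from the package; the message_id, jinx_name, and input columns and all column types are inferred from the INSERT rather than shown in these hunks.

# Reconstructed reference schema, inferred from the diff rather than copied from npcpy.
from sqlalchemy import Table, Column, String, Text, Integer, MetaData

metadata = MetaData()

jinx_executions = Table(
    'jinx_executions', metadata,
    Column('message_id', String(100)),      # inferred from the INSERT statement
    Column('jinx_name', String(100)),       # inferred from the INSERT statement
    Column('input', Text),                  # inferred from the INSERT statement
    Column('timestamp', String(50)),
    Column('npc', String(100)),
    Column('team', String(100)),
    Column('conversation_id', String(100)),
    Column('output', Text),                 # added in 1.3.23
    Column('status', String(50)),           # added in 1.3.23
    Column('error_message', Text),          # added in 1.3.23
    Column('duration_ms', Integer),         # added in 1.3.23
)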
@@ -1028,40 +1048,45 @@ class CommandHistory:
         return self._fetch_all(stmt, params)
 
     def get_memory_examples_for_context(self, npc: str, team: str, directory_path: str,
-                                        n_approved: int = 10, n_rejected: int = 10):
-        """Get recent approved and
-
-
+                                        n_approved: int = 10, n_rejected: int = 10, n_edited: int = 5):
+        """Get recent approved, rejected, and edited memories for learning context."""
+
+        scope_order = """
+            CASE WHEN npc = :npc AND team = :team AND directory_path = :path THEN 1
+                 WHEN npc = :npc AND team = :team THEN 2
+                 WHEN team = :team THEN 3
+                 ELSE 4 END
+        """
+
+        approved_stmt = f"""
         SELECT initial_memory, final_memory, status FROM memory_lifecycle
         WHERE status IN ('human-approved', 'model-approved')
-        ORDER BY
-            CASE WHEN npc = :npc AND team = :team AND directory_path = :path THEN 1
-                 WHEN npc = :npc AND team = :team THEN 2
-                 WHEN team = :team THEN 3
-                 ELSE 4 END,
-            created_at DESC
+        ORDER BY {scope_order}, created_at DESC
         LIMIT :n_approved
         """
-
-        rejected_stmt = """
+
+        rejected_stmt = f"""
         SELECT initial_memory, status FROM memory_lifecycle
         WHERE status IN ('human-rejected', 'model-rejected')
-        ORDER BY
-            CASE WHEN npc = :npc AND team = :team AND directory_path = :path THEN 1
-                 WHEN npc = :npc AND team = :team THEN 2
-                 WHEN team = :team THEN 3
-                 ELSE 4 END,
-            created_at DESC
+        ORDER BY {scope_order}, created_at DESC
         LIMIT :n_rejected
         """
-
-
-
-
+
+        edited_stmt = f"""
+        SELECT initial_memory, final_memory, status FROM memory_lifecycle
+        WHERE status = 'human-edited' AND final_memory IS NOT NULL
+        ORDER BY {scope_order}, created_at DESC
+        LIMIT :n_edited
+        """
+
+        params = {"npc": npc, "team": team, "path": directory_path,
+                  "n_approved": n_approved, "n_rejected": n_rejected, "n_edited": n_edited}
+
         approved = self._fetch_all(approved_stmt, params)
         rejected = self._fetch_all(rejected_stmt, params)
-
-
+        edited = self._fetch_all(edited_stmt, params)
+
+        return {"approved": approved, "rejected": rejected, "edited": edited}
 
     def get_pending_memories(self, limit: int = 50):
         """Get memories pending human approval"""
@@ -1133,25 +1158,26 @@ class CommandHistory:
             conn.execute(text(stmt), params)
 
     def save_jinx_execution(
-        self,
-        triggering_message_id: str,
-        conversation_id: str,
+        self,
+        triggering_message_id: str,
+        conversation_id: str,
         npc_name: Optional[str],
-        jinx_name: str,
-        jinx_inputs: Dict,
-        jinx_output: Any,
-
+        jinx_name: str,
+        jinx_inputs: Dict,
+        jinx_output: Any,
+        status: str,
+        team_name: Optional[str] = None,
         error_message: Optional[str] = None,
-        response_message_id: Optional[str] = None,
+        response_message_id: Optional[str] = None,
         duration_ms: Optional[int] = None
     ):
         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
+
         try:
             inputs_json = json.dumps(jinx_inputs, cls=CustomJSONEncoder)
         except TypeError:
             inputs_json = json.dumps(str(jinx_inputs))
-
+
         try:
             if isinstance(jinx_output, (str, int, float, bool, list, dict, type(None))):
                 outputs_json = json.dumps(jinx_output, cls=CustomJSONEncoder)
@@ -1160,29 +1186,32 @@ class CommandHistory:
         except TypeError:
             outputs_json = json.dumps(f"Non-serializable output: {type(jinx_output)}")
 
+        msg_id = triggering_message_id or f"jinx-{jinx_name}-{timestamp.replace(' ', '-')}"
+
+        # If trigger already created a row, update it; otherwise insert
         stmt = """
-        INSERT INTO
-        (
-
-
-
+        INSERT OR REPLACE INTO jinx_executions
+        (message_id, jinx_name, input, timestamp, npc, team,
+         conversation_id, output, status, error_message, duration_ms)
+        VALUES (:message_id, :jinx_name, :input, :timestamp, :npc, :team,
+                :conversation_id, :output, :status, :error_message, :duration_ms)
         """
         params = {
-            "
-            "conversation_id": conversation_id,
-            "timestamp": timestamp,
-            "npc_name": npc_name,
-            "team_name": team_name,
+            "message_id": msg_id,
             "jinx_name": jinx_name,
-            "
-            "
+            "input": inputs_json,
+            "timestamp": timestamp,
+            "npc": npc_name,
+            "team": team_name,
+            "conversation_id": conversation_id,
+            "output": outputs_json,
             "status": status,
             "error_message": error_message,
-            "
-            "duration_ms": duration_ms
+            "duration_ms": duration_ms,
         }
-
-
+
+        with self.engine.begin() as conn:
+            conn.execute(text(stmt), params)
 
     def get_full_message_content(self, message_id):
         stmt = "SELECT content FROM conversation_history WHERE message_id = :message_id ORDER BY timestamp ASC"
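With the signature change above, a caller now passes status explicitly, and can supply team and timing information as well. A hypothetical call site is sketched below; the parameter names follow the new signature in this diff, while the wrapper function and all values are made up for illustration.

# Hypothetical call site for the new save_jinx_execution signature; values are illustrative.
# `history` is expected to be an npcpy CommandHistory instance supplied by the caller.
def log_example_jinx_run(history) -> None:
    history.save_jinx_execution(
        triggering_message_id="msg-123",
        conversation_id="conv-456",
        npc_name="example_npc",
        jinx_name="search",
        jinx_inputs={"query": "weather in Lisbon"},
        jinx_output={"results": ["..."]},
        status="success",          # new required argument in 1.3.23
        team_name="example_team",
        error_message=None,
        duration_ms=850,
    )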
@@ -1494,28 +1523,47 @@ def start_new_conversation(prepend: str = None) -> str:
 def format_memory_context(memory_examples):
     if not memory_examples:
         return ""
-
-    context_parts = []
-
+
     approved_examples = memory_examples.get("approved", [])
     rejected_examples = memory_examples.get("rejected", [])
-
+    edited_examples = memory_examples.get("edited", [])
+
+    if not approved_examples and not rejected_examples and not edited_examples:
+        return ""
+
+    parts = ["MEMORY QUALITY GUIDELINES (based on user feedback):"]
+
     if approved_examples:
-
-        for ex in approved_examples[:
-
-
-
+        parts.append("\nAPPROVED — memories like these were kept:")
+        for ex in approved_examples[:7]:
+            mem = ex.get("final_memory") or ex.get("initial_memory")
+            parts.append(f" + {mem}")
+
+    if edited_examples:
+        parts.append("\nCORRECTED — the user fixed these (learn from the corrections):")
+        for ex in edited_examples[:5]:
+            original = ex.get("initial_memory", "")
+            corrected = ex.get("final_memory", "")
+            if original and corrected and original != corrected:
+                parts.append(f" BEFORE: {original}")
+                parts.append(f" AFTER: {corrected}")
+                parts.append("")
+
     if rejected_examples:
-
-        for ex in rejected_examples[:
-
-
-
-
-
-
-
-
+        parts.append("\nREJECTED — memories like these were thrown out (do NOT generate similar ones):")
+        for ex in rejected_examples[:5]:
+            parts.append(f" x {ex.get('initial_memory')}")
+
+    parts.append("\nRULES derived from this feedback:")
+    parts.append("- Match the style and specificity of approved memories.")
+    if edited_examples:
+        parts.append("- Apply the same corrections the user made in the CORRECTED examples.")
+    if rejected_examples:
+        parts.append("- Avoid the patterns seen in rejected memories.")
+    parts.append("- Each memory must be self-contained: no vague pronouns (this, that, it) without referents.")
+    parts.append("- Do not duplicate or closely paraphrase any existing approved memory.")
+
+    return "\n".join(parts)
 def save_conversation_message(
     command_history: CommandHistory,
     conversation_id: str,