grov 0.5.9 → 0.5.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/api-client.d.ts +42 -1
- package/dist/lib/api-client.js +33 -6
- package/dist/lib/cloud-sync.d.ts +57 -3
- package/dist/lib/cloud-sync.js +176 -6
- package/dist/lib/llm-extractor.d.ts +63 -1
- package/dist/lib/llm-extractor.js +882 -92
- package/dist/lib/store/database.js +14 -0
- package/dist/lib/store/index.d.ts +1 -1
- package/dist/lib/store/index.js +1 -1
- package/dist/lib/store/sessions.js +9 -3
- package/dist/lib/store/tasks.d.ts +12 -0
- package/dist/lib/store/tasks.js +43 -3
- package/dist/lib/store/types.d.ts +13 -2
- package/dist/lib/store/types.js +0 -1
- package/dist/proxy/action-parser.d.ts +0 -4
- package/dist/proxy/action-parser.js +0 -29
- package/dist/proxy/cache.d.ts +0 -4
- package/dist/proxy/cache.js +4 -8
- package/dist/proxy/extended-cache.js +6 -16
- package/dist/proxy/handlers/preprocess.js +29 -12
- package/dist/proxy/injection/delta-tracking.js +1 -0
- package/dist/proxy/request-processor.js +44 -54
- package/dist/proxy/response-processor.d.ts +6 -2
- package/dist/proxy/response-processor.js +27 -9
- package/dist/proxy/server.js +72 -38
- package/dist/proxy/utils/logging.d.ts +2 -1
- package/dist/proxy/utils/logging.js +11 -5
- package/package.json +1 -1
package/dist/lib/api-client.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { Team, Memory, MemorySyncRequest, MemorySyncResponse, DeviceFlowStartResponse, DeviceFlowPollResponse } from '@grov/shared';
+import type { Team, Memory, MemorySyncRequest, MemorySyncResponse, DeviceFlowStartResponse, DeviceFlowPollResponse, ReasoningTraceEntry } from '@grov/shared';
 export interface ApiResponse<T> {
     data?: T;
     error?: string;
@@ -47,6 +47,47 @@ export declare function fetchTeamMemories(teamId: string, projectPath: string, o
     context?: string;
     current_files?: string[];
 }): Promise<Memory[]>;
+/**
+ * Input data for match endpoint (memory content for embedding generation)
+ */
+export interface MatchInput {
+    project_path: string;
+    goal?: string;
+    system_name?: string;
+    original_query: string;
+    reasoning_trace?: ReasoningTraceEntry[];
+    decisions?: Array<{
+        aspect?: string;
+        tags?: string;
+        choice: string;
+        reason: string;
+    }>;
+    evolution_steps?: Array<{
+        summary: string;
+        date: string;
+    }>;
+    task_type?: 'information' | 'planning' | 'implementation';
+}
+/**
+ * Response type for match endpoint
+ * Note: Embeddings are now chunk-based and generated in SYNC (not passed from MATCH)
+ */
+export interface MatchResponse {
+    match: Memory | null;
+    combined_score?: number;
+}
+/**
+ * Fetch best matching memory for UPDATE decision
+ * Used by CLI before sync to check if a similar memory exists
+ *
+ * API generates chunks for multi-vector search against stored memory chunks.
+ * SYNC will regenerate chunks when saving (chunks not passed between endpoints).
+ *
+ * @param teamId - Team UUID
+ * @param data - Memory data for chunk generation and search
+ * @returns Match response with memory and score
+ */
+export declare function fetchMatch(teamId: string, data: MatchInput): Promise<MatchResponse>;
 /**
  * Sleep helper for polling
  */
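For orientation, a minimal TypeScript sketch (not part of the package) of how a CLI caller might drive the new fetchMatch declaration; the team id, path, and literal values below are invented:

import { fetchMatch, type MatchInput } from './api-client.js';

const input: MatchInput = {
    project_path: '/repos/my-app',                          // invented example values
    original_query: 'add retry logic to the sync worker',
    goal: 'Make cloud sync resilient to transient failures',
    task_type: 'implementation',
};

const { match, combined_score } = await fetchMatch('team-uuid', input);
if (match) {
    // A similar memory already exists; the caller decides between UPDATE and SKIP.
    console.log(`matched ${match.id} (score=${combined_score ?? 'n/a'})`);
} else {
    // No match: the task would be synced as a brand-new memory (INSERT).
}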
package/dist/lib/api-client.js
CHANGED
@@ -139,26 +139,53 @@ export async function fetchTeamMemories(teamId, projectPath, options) {
         params.set('current_files', files.join(','));
     }
     const url = `/teams/${teamId}/memories?${params.toString()}`;
-    console.log(`[API] fetchTeamMemories: GET ${url}`);
     try {
         const response = await apiRequest('GET', url);
         if (response.error) {
-            console.
+            console.error(`[API-CLIENT] FAILED: ${response.error}`);
             return []; // Fail silent - don't block Claude Code
         }
         if (!response.data || !response.data.memories) {
-            console.log('[API] fetchTeamMemories: No memories returned');
             return [];
         }
-        console.log(`[API] fetchTeamMemories: Got ${response.data.memories.length} memories`);
         return response.data.memories;
     }
     catch (err) {
-        const errorMsg = err instanceof Error ? err.message : 'Unknown error';
-        console.error(`[API] fetchTeamMemories exception: ${errorMsg}`);
+        // const errorMsg = err instanceof Error ? err.message : 'Unknown error';
+        // console.error(`[API] fetchTeamMemories exception: ${errorMsg}`);
         return []; // Fail silent - don't block Claude Code
     }
 }
+/**
+ * Fetch best matching memory for UPDATE decision
+ * Used by CLI before sync to check if a similar memory exists
+ *
+ * API generates chunks for multi-vector search against stored memory chunks.
+ * SYNC will regenerate chunks when saving (chunks not passed between endpoints).
+ *
+ * @param teamId - Team UUID
+ * @param data - Memory data for chunk generation and search
+ * @returns Match response with memory and score
+ */
+export async function fetchMatch(teamId, data) {
+    const url = `/teams/${teamId}/memories/match`;
+    try {
+        const response = await apiRequest('POST', url, data);
+        if (response.error) {
+            console.error(`[MATCH-API] FAILED: ${response.error}`);
+            return { match: null };
+        }
+        if (!response.data) {
+            return { match: null };
+        }
+        return response.data;
+    }
+    catch (err) {
+        const errorMsg = err instanceof Error ? err.message : 'Unknown error';
+        console.error(`[MATCH-API] FAILED: ${errorMsg}`);
+        return { match: null };
+    }
+}
 // ============= Utility Functions =============
 /**
  * Sleep helper for polling
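Worth noting from the implementation above: fetchMatch follows the same fail-silent contract as fetchTeamMemories, so callers branch on the result rather than wrapping the call in try/catch. A small sketch under that assumption (teamId and input are stand-ins for values built elsewhere in the CLI):

import { fetchMatch } from './api-client.js';

// Placeholders for values produced elsewhere; only the shape matters here.
declare const teamId: string;
declare const input: { project_path: string; original_query: string };

// fetchMatch never throws: transport and API errors are logged inside it and
// collapse to { match: null }.
const { match } = await fetchMatch(teamId, input);
if (!match) {
    // "No match" and "match request failed" are treated identically: fall back to INSERT.
}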
package/dist/lib/cloud-sync.d.ts
CHANGED
@@ -1,9 +1,53 @@
-import type { CreateMemoryInput } from '@grov/shared';
+import type { CreateMemoryInput, Memory } from '@grov/shared';
 import type { Task } from './store.js';
+import type { ExtractedReasoningAndDecisions, ShouldUpdateResult } from './llm-extractor.js';
+/**
+ * Evolution step in memory history
+ */
+export interface EvolutionStep {
+    summary: string;
+    date: string;
+}
+/**
+ * Decision with tracking metadata
+ */
+export interface TrackedDecision {
+    choice: string;
+    reason: string;
+    date?: string;
+    active?: boolean;
+    superseded_by?: {
+        choice: string;
+        reason: string;
+        date: string;
+    };
+}
+/**
+ * Extended memory input with fields for UPDATE path
+ */
+export interface UpdateMemoryInput extends CreateMemoryInput {
+    memory_id?: string;
+    evolution_steps?: EvolutionStep[];
+    reasoning_evolution?: Array<{
+        content: string;
+        date: string;
+    }>;
+}
 /**
  * Convert local Task to CreateMemoryInput for API
  */
 export declare function taskToMemory(task: Task): CreateMemoryInput;
+/**
+ * Prepare sync payload for UPDATE path
+ * Merges existing memory with new data based on shouldUpdateMemory result
+ *
+ * @param existingMemory - The memory that was matched
+ * @param newData - Extracted reasoning and decisions from current session
+ * @param updateResult - Result from shouldUpdateMemory Haiku call
+ * @param task - The current task being synced
+ * @returns Payload ready for sync with memory_id for UPDATE
+ */
+export declare function prepareSyncPayload(existingMemory: Memory, newData: ExtractedReasoningAndDecisions, updateResult: ShouldUpdateResult, task: Task): UpdateMemoryInput;
 /**
  * Check if sync is enabled and configured
  */
@@ -13,10 +57,20 @@ export declare function isSyncEnabled(): boolean;
  */
 export declare function getSyncTeamId(): string | null;
 /**
- * Sync a single task to the cloud
+ * Sync a single task to the cloud with memory editing support
  * Called when a task is completed
+ *
+ * Flow:
+ * 1. Check for existing match via /match endpoint
+ * 2. If match found: shouldUpdateMemory() decides UPDATE or SKIP
+ * 3. If UPDATE: prepareSyncPayload() merges data
+ * 4. If no match: INSERT new memory
+ *
+ * @param task - The task to sync
+ * @param extractedData - Optional pre-extracted reasoning and decisions
+ * @param taskType - Optional task type for shouldUpdateMemory context
  */
-export declare function syncTask(task: Task): Promise<boolean>;
+export declare function syncTask(task: Task, extractedData?: ExtractedReasoningAndDecisions, taskType?: 'information' | 'planning' | 'implementation'): Promise<boolean>;
 /**
  * Sync multiple tasks with batching and retry
  */
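A minimal sketch (not from the package) of what a call against the widened syncTask signature could look like; `task` stands in for a Task loaded from the local store, and all literal values are invented:

import { syncTask } from './cloud-sync.js';
import type { Task } from './store.js';
import type { ExtractedReasoningAndDecisions } from './llm-extractor.js';

// Assumed to be a Task loaded from the local store.
declare const task: Task;

const extracted: ExtractedReasoningAndDecisions = {
    system_name: 'sync-worker',                          // invented example data
    summary: 'Added exponential backoff to cloud sync',
    reasoning_trace: [],
    decisions: [{ choice: 'retry with backoff', reason: 'transient 5xx errors' }],
};

// Both new parameters are optional, so the existing one-argument call still type-checks.
const ok = await syncTask(task, extracted, 'implementation');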
package/dist/lib/cloud-sync.js
CHANGED
@@ -1,7 +1,8 @@
 // Cloud sync logic - Upload memories from local database to API
 // Handles batching, retries, and conversion from Task to Memory format
 import { getSyncStatus, getAccessToken } from './credentials.js';
-import { syncMemories, sleep, getApiUrl } from './api-client.js';
+import { syncMemories, sleep, getApiUrl, fetchMatch } from './api-client.js';
+import { shouldUpdateMemory, isShouldUpdateAvailable } from './llm-extractor.js';
 // Sync configuration
 const SYNC_CONFIG = {
     batchSize: 10, // Number of memories per batch
@@ -17,6 +18,8 @@ export function taskToMemory(task) {
         project_path: task.project_path,
         original_query: task.original_query,
         goal: task.goal,
+        system_name: task.system_name, // Parent anchor for semantic search
+        summary: task.summary,
         reasoning_trace: task.reasoning_trace,
         files_touched: task.files_touched,
         decisions: task.decisions,
@@ -26,6 +29,103 @@
         linked_commit: task.linked_commit,
     };
 }
+/**
+ * Get today's date in ISO format (full timestamp)
+ */
+function getToday() {
+    return new Date().toISOString();
+}
+/**
+ * Prepare sync payload for UPDATE path
+ * Merges existing memory with new data based on shouldUpdateMemory result
+ *
+ * @param existingMemory - The memory that was matched
+ * @param newData - Extracted reasoning and decisions from current session
+ * @param updateResult - Result from shouldUpdateMemory Haiku call
+ * @param task - The current task being synced
+ * @returns Payload ready for sync with memory_id for UPDATE
+ */
+export function prepareSyncPayload(existingMemory, newData, updateResult, task) {
+    const today = getToday();
+    // 1. Get existing decisions with proper typing
+    const existingDecisions = (existingMemory.decisions || []);
+    // 2. Build lookup for superseded decisions from mapping
+    const supersededMap = new Map(updateResult.superseded_mapping.map(m => [
+        m.old_index,
+        {
+            choice: m.replaced_by_choice,
+            reason: m.replaced_by_reason,
+            date: today,
+        },
+    ]));
+    // 3. Mark superseded decisions as inactive and add superseded_by info
+    const updatedDecisions = existingDecisions.map((d, i) => {
+        const replacement = supersededMap.get(i);
+        if (replacement) {
+            return {
+                ...d,
+                active: false,
+                superseded_by: replacement,
+            };
+        }
+        return {
+            ...d,
+            active: d.active !== false,
+        };
+    });
+    // 4. Append new decisions with date and active flag
+    const newDecisions = newData.decisions.map(d => ({
+        ...d,
+        date: today,
+        active: true,
+    }));
+    const allDecisions = [...updatedDecisions, ...newDecisions];
+    // 4. Handle evolution_steps - use consolidated or existing
+    const existingEvolutionSteps = (existingMemory.evolution_steps || []);
+    const baseEvolutionSteps = updateResult.consolidated_evolution_steps || existingEvolutionSteps;
+    // 5. Append new evolution step if summary provided
+    const evolutionSteps = [...baseEvolutionSteps];
+    if (updateResult.evolution_summary) {
+        evolutionSteps.push({
+            summary: updateResult.evolution_summary,
+            date: today,
+        });
+    }
+    // 6. Handle reasoning_evolution - append condensed old reasoning
+    const existingReasoningEvolution = (existingMemory.reasoning_evolution || []);
+    const reasoningEvolution = [...existingReasoningEvolution];
+    if (updateResult.condensed_old_reasoning) {
+        reasoningEvolution.push({
+            content: updateResult.condensed_old_reasoning,
+            date: today,
+        });
+    }
+    // 7. Truncate arrays to max limits
+    const MAX_DECISIONS = 20;
+    const MAX_EVOLUTION_STEPS = 10;
+    const MAX_REASONING_EVOLUTION = 5;
+    const finalDecisions = allDecisions.slice(-MAX_DECISIONS);
+    const finalEvolutionSteps = evolutionSteps.slice(-MAX_EVOLUTION_STEPS);
+    const finalReasoningEvolution = reasoningEvolution.slice(-MAX_REASONING_EVOLUTION);
+    // 8. Build final payload
+    return {
+        memory_id: existingMemory.id, // Triggers UPDATE path in API
+        client_task_id: task.id,
+        project_path: task.project_path,
+        original_query: task.original_query,
+        goal: task.goal,
+        system_name: newData.system_name || task.system_name, // Parent anchor for semantic search
+        reasoning_trace: newData.reasoning_trace, // OVERWRITE with new
+        files_touched: task.files_touched,
+        decisions: finalDecisions,
+        constraints: task.constraints,
+        tags: task.tags,
+        status: task.status,
+        linked_commit: task.linked_commit,
+        evolution_steps: finalEvolutionSteps,
+        reasoning_evolution: finalReasoningEvolution,
+    };
+}
 /**
  * Check if sync is enabled and configured
  */
@@ -41,10 +141,20 @@ export function getSyncTeamId() {
     return status?.teamId || null;
 }
 /**
- * Sync a single task to the cloud
+ * Sync a single task to the cloud with memory editing support
  * Called when a task is completed
+ *
+ * Flow:
+ * 1. Check for existing match via /match endpoint
+ * 2. If match found: shouldUpdateMemory() decides UPDATE or SKIP
+ * 3. If UPDATE: prepareSyncPayload() merges data
+ * 4. If no match: INSERT new memory
+ *
+ * @param task - The task to sync
+ * @param extractedData - Optional pre-extracted reasoning and decisions
+ * @param taskType - Optional task type for shouldUpdateMemory context
  */
-export async function syncTask(task) {
+export async function syncTask(task, extractedData, taskType) {
     if (!isSyncEnabled()) {
         return false;
     }
@@ -57,11 +167,71 @@ export async function syncTask(task) {
         return false;
     }
     try {
-        const
-
+        const taskId = task.id.substring(0, 8);
+        // Build effective extracted data from task if not provided
+        const effectiveExtractedData = extractedData || ((task.reasoning_trace.length > 0 || task.decisions.length > 0)
+            ? {
+                system_name: task.system_name || null,
+                summary: task.summary || null,
+                reasoning_trace: task.reasoning_trace,
+                decisions: task.decisions,
+            }
+            : undefined);
+        // Step 1: Check for existing match
+        const matchResult = await fetchMatch(teamId, {
+            project_path: task.project_path,
+            goal: task.goal,
+            original_query: task.original_query,
+            reasoning_trace: effectiveExtractedData?.reasoning_trace || task.reasoning_trace,
+            decisions: effectiveExtractedData?.decisions || task.decisions,
+            task_type: taskType,
+        });
+        // Step 2: If no match, INSERT as new memory
+        if (!matchResult.match) {
+            const memory = taskToMemory(task);
+            const result = await syncMemories(teamId, { memories: [memory] });
+            console.log(`[SYNC] ${taskId} -> INSERT ${result.synced === 1 ? 'OK' : 'FAILED'}`);
+            return result.synced === 1;
+        }
+        const matchedId = matchResult.match.id.substring(0, 8);
+        const score = matchResult.combined_score?.toFixed(3) || '-';
+        // If shouldUpdateMemory is not available or no extracted data, INSERT anyway
+        if (!isShouldUpdateAvailable() || !effectiveExtractedData) {
+            const memory = taskToMemory(task);
+            const result = await syncMemories(teamId, { memories: [memory] });
+            console.log(`[SYNC] ${taskId} -> INSERT (no haiku) ${result.synced === 1 ? 'OK' : 'FAILED'}`);
+            return result.synced === 1;
+        }
+        // Build session context for shouldUpdateMemory
+        const sessionContext = {
+            task_type: taskType || 'implementation',
+            original_query: task.original_query,
+            files_touched: task.files_touched,
+        };
+        // Call shouldUpdateMemory to decide
+        const updateResult = await shouldUpdateMemory({
+            id: matchResult.match.id,
+            goal: matchResult.match.goal,
+            decisions: matchResult.match.decisions || [],
+            reasoning_trace: matchResult.match.reasoning_trace || [],
+            evolution_steps: (matchResult.match.evolution_steps || []),
+            files_touched: matchResult.match.files_touched || [],
+        }, effectiveExtractedData, sessionContext);
+        // If should NOT update, skip sync entirely
+        if (!updateResult.should_update) {
+            console.log(`[SYNC] ${taskId} -> SKIP (matched ${matchedId}, score=${score})`);
+            return true;
+        }
+        // Prepare payload for UPDATE
+        const payload = prepareSyncPayload(matchResult.match, effectiveExtractedData, updateResult, task);
+        // Sync with memory_id for UPDATE path
+        const result = await syncMemories(teamId, { memories: [payload] });
+        console.log(`[SYNC] ${taskId} -> UPDATE ${matchedId} (score=${score}) ${result.synced === 1 ? 'OK' : 'FAILED'}`);
         return result.synced === 1;
     }
-    catch {
+    catch (err) {
+        const msg = err instanceof Error ? err.message : 'Unknown error';
+        console.error(`[SYNC] Error: ${msg}`);
         return false;
     }
 }
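To make the merge above concrete, a hand-worked example with invented data showing what prepareSyncPayload does to the decisions array when the Haiku call reports one superseded decision:

// Invented inputs: one existing decision, one superseding decision from this session.
const existingDecisions = [{ choice: 'poll every 5s', reason: 'simplicity' }];
const supersededMapping = [
    { old_index: 0, replaced_by_choice: 'use webhooks', replaced_by_reason: 'lower latency' },
];
const sessionDecisions = [{ choice: 'use webhooks', reason: 'lower latency' }];
const today = new Date().toISOString();

// Expected decisions array in the UPDATE payload: the old entry is kept but
// deactivated and annotated, the new entry is appended as active.
const mergedDecisions = [
    {
        choice: 'poll every 5s',
        reason: 'simplicity',
        active: false,
        superseded_by: { choice: 'use webhooks', reason: 'lower latency', date: today },
    },
    { choice: 'use webhooks', reason: 'lower latency', date: today, active: true },
];
// The payload is then capped to the last 20 decisions, 10 evolution steps, and
// 5 reasoning_evolution entries, and sent with memory_id so the API takes the UPDATE path.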
package/dist/lib/llm-extractor.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { SessionState, StepRecord } from './store.js';
+import type { ReasoningTraceEntry } from '@grov/shared';
 export interface ExtractedIntent {
     goal: string;
     expected_scope: string[];
@@ -32,6 +33,7 @@ export interface TaskAnalysis {
     task_type: 'information' | 'planning' | 'implementation';
     action: 'continue' | 'new_task' | 'subtask' | 'parallel_task' | 'task_complete' | 'subtask_complete';
     task_id: string;
+    current_goal: string;
     parent_task_id?: string;
     reasoning: string;
     step_reasoning?: string;
@@ -54,8 +56,12 @@ export declare function isTaskAnalysisAvailable(): boolean;
  */
 export declare function analyzeTaskContext(currentSession: SessionState | null, latestUserMessage: string, recentSteps: StepRecord[], assistantResponse: string, conversationHistory?: ConversationMessage[]): Promise<TaskAnalysis>;
 export interface ExtractedReasoningAndDecisions {
-
+    system_name: string | null;
+    summary: string | null;
+    reasoning_trace: ReasoningTraceEntry[];
     decisions: Array<{
+        aspect?: string;
+        tags?: string;
         choice: string;
         reason: string;
     }>;
@@ -72,3 +78,59 @@ export declare function isReasoningExtractionAvailable(): boolean;
  * @param originalGoal - The original task goal
  */
 export declare function extractReasoningAndDecisions(formattedSteps: string, originalGoal: string): Promise<ExtractedReasoningAndDecisions>;
+/**
+ * Evolution step in memory history
+ */
+export interface EvolutionStep {
+    summary: string;
+    date: string;
+}
+/**
+ * Mapping entry for superseded decisions
+ */
+export interface SupersededMapping {
+    old_index: number;
+    replaced_by_choice: string;
+    replaced_by_reason: string;
+}
+/**
+ * Result from shouldUpdateMemory decision
+ */
+export interface ShouldUpdateResult {
+    should_update: boolean;
+    reason: string;
+    superseded_mapping: SupersededMapping[];
+    condensed_old_reasoning: string | null;
+    evolution_summary: string | null;
+    consolidated_evolution_steps?: EvolutionStep[];
+}
+/**
+ * Existing memory structure (from API match response)
+ */
+export interface ExistingMemory {
+    id: string;
+    goal?: string | null;
+    decisions: Array<{
+        tags?: string;
+        choice: string;
+        reason: string;
+        date?: string;
+        active?: boolean;
+    }>;
+    reasoning_trace: ReasoningTraceEntry[];
+    evolution_steps: EvolutionStep[];
+    files_touched: string[];
+}
+/**
+ * Session context for update decision
+ */
+export interface SessionContext {
+    task_type: 'information' | 'planning' | 'implementation';
+    original_query: string;
+    files_touched: string[];
+}
+/**
+ * Check if shouldUpdateMemory is available
+ */
+export declare function isShouldUpdateAvailable(): boolean;
+export declare function shouldUpdateMemory(existingMemory: ExistingMemory, newData: ExtractedReasoningAndDecisions, sessionContext: SessionContext): Promise<ShouldUpdateResult>;