grov 0.2.3 → 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/lib/store.js CHANGED
@@ -70,6 +70,8 @@ export function initDatabase() {
  turn_number INTEGER,
  tags JSON DEFAULT '[]',
  created_at TEXT NOT NULL,
+ synced_at TEXT,
+ sync_error TEXT,
  FOREIGN KEY (parent_task_id) REFERENCES tasks(id)
  );
 
@@ -90,6 +92,14 @@ export function initDatabase() {
  db.exec(`ALTER TABLE tasks ADD COLUMN trigger_reason TEXT`);
  }
  catch { /* column exists */ }
+ try {
+ db.exec(`ALTER TABLE tasks ADD COLUMN synced_at TEXT`);
+ }
+ catch { /* column exists */ }
+ try {
+ db.exec(`ALTER TABLE tasks ADD COLUMN sync_error TEXT`);
+ }
+ catch { /* column exists */ }
  // Create session_states table (temporary per-session tracking)
  db.exec(`
  CREATE TABLE IF NOT EXISTS session_states (
@@ -112,6 +122,7 @@ export function initDatabase() {
  completed_at TEXT,
  parent_session_id TEXT,
  task_type TEXT DEFAULT 'main' CHECK(task_type IN ('main', 'subtask', 'parallel')),
+ pending_correction TEXT,
  FOREIGN KEY (parent_session_id) REFERENCES session_states(session_id)
  );
 
@@ -264,6 +275,18 @@ export function initDatabase() {
  if (!existingColumns.has('drift_warnings')) {
  db.exec(`ALTER TABLE session_states ADD COLUMN drift_warnings JSON DEFAULT '[]'`);
  }
+ if (!existingColumns.has('pending_correction')) {
+ db.exec(`ALTER TABLE session_states ADD COLUMN pending_correction TEXT`);
+ }
+ if (!existingColumns.has('pending_clear_summary')) {
+ db.exec(`ALTER TABLE session_states ADD COLUMN pending_clear_summary TEXT`);
+ }
+ if (!existingColumns.has('pending_forced_recovery')) {
+ db.exec(`ALTER TABLE session_states ADD COLUMN pending_forced_recovery TEXT`);
+ }
+ if (!existingColumns.has('final_response')) {
+ db.exec(`ALTER TABLE session_states ADD COLUMN final_response TEXT`);
+ }
  // Create steps table (action log for current session)
  db.exec(`
  CREATE TABLE IF NOT EXISTS steps (
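initDatabase now mixes two migration idioms: the tasks columns from the earlier hunk are added blindly, with duplicate-column errors swallowed by the catch, while the new session_states columns above are guarded by an existingColumns probe. The probe set is presumably built from the table schema, along these lines (a sketch only, assuming better-sqlite3; the code that builds it is outside this diff):

// Sketch - how existingColumns is likely derived; not shown in this diff.
// better-sqlite3's pragma() returns rows as plain objects with a `name` field.
const existingColumns = new Set(
    db.pragma('table_info(session_states)').map(col => col.name)
);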
@@ -368,22 +391,24 @@ export function createTask(input) {
  parent_task_id: input.parent_task_id,
  turn_number: input.turn_number,
  tags: input.tags || [],
- created_at: new Date().toISOString()
+ created_at: new Date().toISOString(),
+ synced_at: null,
+ sync_error: null
  };
  const stmt = database.prepare(`
  INSERT INTO tasks (
  id, project_path, user, original_query, goal,
  reasoning_trace, files_touched, decisions, constraints,
  status, trigger_reason, linked_commit,
- parent_task_id, turn_number, tags, created_at
+ parent_task_id, turn_number, tags, created_at, synced_at, sync_error
  ) VALUES (
  ?, ?, ?, ?, ?,
  ?, ?, ?, ?,
  ?, ?, ?,
- ?, ?, ?, ?
+ ?, ?, ?, ?, ?, ?
  )
  `);
- stmt.run(task.id, task.project_path, task.user || null, task.original_query, task.goal || null, JSON.stringify(task.reasoning_trace), JSON.stringify(task.files_touched), JSON.stringify(task.decisions), JSON.stringify(task.constraints), task.status, task.trigger_reason || null, task.linked_commit || null, task.parent_task_id || null, task.turn_number || null, JSON.stringify(task.tags), task.created_at);
+ stmt.run(task.id, task.project_path, task.user || null, task.original_query, task.goal || null, JSON.stringify(task.reasoning_trace), JSON.stringify(task.files_touched), JSON.stringify(task.decisions), JSON.stringify(task.constraints), task.status, task.trigger_reason || null, task.linked_commit || null, task.parent_task_id || null, task.turn_number || null, JSON.stringify(task.tags), task.created_at, task.synced_at, task.sync_error);
  return task;
  }
  /**
@@ -468,6 +493,36 @@ export function getTaskCount(projectPath) {
  const row = stmt.get(projectPath);
  return row?.count ?? 0;
  }
+ /**
+ * Get unsynced tasks for a project (synced_at is NULL)
+ */
+ export function getUnsyncedTasks(projectPath, limit) {
+ const database = initDatabase();
+ let sql = 'SELECT * FROM tasks WHERE project_path = ? AND synced_at IS NULL ORDER BY created_at DESC';
+ const params = [projectPath];
+ if (limit) {
+ sql += ' LIMIT ?';
+ params.push(limit);
+ }
+ const stmt = database.prepare(sql);
+ const rows = stmt.all(...params);
+ return rows.map(rowToTask);
+ }
+ /**
+ * Mark a task as synced and clear any previous sync error
+ */
+ export function markTaskSynced(id) {
+ const database = initDatabase();
+ const now = new Date().toISOString();
+ database.prepare('UPDATE tasks SET synced_at = ?, sync_error = NULL WHERE id = ?').run(now, id);
+ }
+ /**
+ * Record a sync error for a task
+ */
+ export function setTaskSyncError(id, error) {
+ const database = initDatabase();
+ database.prepare('UPDATE tasks SET sync_error = ? WHERE id = ?').run(error, id);
+ }
  /**
  * Safely parse JSON with fallback to empty array.
  */
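Together these three helpers form a minimal sync queue: fetch rows where synced_at is NULL, push them somewhere, then stamp success or record the failure. A driver loop might look like the following sketch, where uploadTask() is a hypothetical transport and not part of this package:

import { getUnsyncedTasks, markTaskSynced, setTaskSyncError } from './store.js';

async function syncPendingTasks(projectPath, batchSize = 20) {
    // Note: the helper returns newest-first (ORDER BY created_at DESC).
    const pending = getUnsyncedTasks(projectPath, batchSize);
    for (const task of pending) {
        try {
            await uploadTask(task); // hypothetical transport, not in grov
            markTaskSynced(task.id); // sets synced_at, clears sync_error
        }
        catch (err) {
            setTaskSyncError(task.id, String(err instanceof Error ? err.message : err));
        }
    }
    return pending.length;
}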
@@ -502,7 +557,9 @@ function rowToTask(row) {
  parent_task_id: row.parent_task_id,
  turn_number: row.turn_number,
  tags: safeJsonParse(row.tags, []),
- created_at: row.created_at
+ created_at: row.created_at,
+ synced_at: row.synced_at,
+ sync_error: row.sync_error
  };
  }
  // ============================================
@@ -631,6 +688,22 @@ export function updateSessionState(sessionId, updates) {
  setClauses.push('status = ?');
  params.push(updates.status);
  }
+ if (updates.pending_correction !== undefined) {
+ setClauses.push('pending_correction = ?');
+ params.push(updates.pending_correction || null);
+ }
+ if (updates.pending_forced_recovery !== undefined) {
+ setClauses.push('pending_forced_recovery = ?');
+ params.push(updates.pending_forced_recovery || null);
+ }
+ if (updates.pending_clear_summary !== undefined) {
+ setClauses.push('pending_clear_summary = ?');
+ params.push(updates.pending_clear_summary || null);
+ }
+ if (updates.final_response !== undefined) {
+ setClauses.push('final_response = ?');
+ params.push(updates.final_response || null);
+ }
  // Always update last_update
  setClauses.push('last_update = ?');
  params.push(new Date().toISOString());
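All four new branches follow the same pattern as the existing status handling: a key that is undefined is skipped entirely, while any falsy value passed explicitly (null, empty string) is normalized to NULL by the `|| null` guard. So a caller can queue and later clear a correction like this (values illustrative):

// Queue a correction for the session...
updateSessionState(sessionId, { pending_correction: 'User rephrased the goal; re-plan.' });

// ...and clear it once consumed; '' and null both persist as NULL.
updateSessionState(sessionId, { pending_correction: null });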
@@ -729,6 +802,10 @@ function rowToSessionState(row) {
  completed_at: row.completed_at,
  parent_session_id: row.parent_session_id,
  task_type: row.task_type || 'main',
+ pending_correction: row.pending_correction,
+ pending_forced_recovery: row.pending_forced_recovery,
+ pending_clear_summary: row.pending_clear_summary,
+ final_response: row.final_response,
  };
  }
  // ============================================
@@ -974,6 +1051,43 @@ export function getValidatedSteps(sessionId) {
  const rows = stmt.all(sessionId);
  return rows.map(rowToStep);
  }
+ /**
+ * Get key decision steps for a session (is_key_decision = 1)
+ * Used for user message injection - important decisions with reasoning
+ */
+ export function getKeyDecisions(sessionId, limit = 5) {
+ const database = initDatabase();
+ const stmt = database.prepare(`SELECT * FROM steps
+ WHERE session_id = ? AND is_key_decision = 1 AND reasoning IS NOT NULL
+ ORDER BY timestamp DESC
+ LIMIT ?`);
+ const rows = stmt.all(sessionId, limit);
+ return rows.map(rowToStep);
+ }
+ /**
+ * Get edited files for a session (action_type IN ('edit', 'write'))
+ * Used for user message injection - prevent re-work
+ */
+ export function getEditedFiles(sessionId) {
+ const database = initDatabase();
+ const stmt = database.prepare(`SELECT DISTINCT files FROM steps
+ WHERE session_id = ? AND action_type IN ('edit', 'write')
+ ORDER BY timestamp DESC`);
+ const rows = stmt.all(sessionId);
+ const allFiles = [];
+ for (const row of rows) {
+ try {
+ const files = JSON.parse(row.files || '[]');
+ if (Array.isArray(files)) {
+ allFiles.push(...files);
+ }
+ }
+ catch {
+ // Skip invalid JSON
+ }
+ }
+ return [...new Set(allFiles)];
+ }
  /**
  * Delete steps for a session
  */
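Both helpers exist to feed user-message injection; a sketch of how a prompt builder might combine them (the surrounding injection code is assumed, not part of this hunk):

import { getKeyDecisions, getEditedFiles } from './store.js';

function buildInjectionHints(sessionId) {
    const decisions = getKeyDecisions(sessionId, 3); // newest key decisions with reasoning
    const files = getEditedFiles(sessionId); // de-duplicated edit/write targets
    const lines = decisions.map(d => `Decision: ${d.reasoning}`);
    if (files.length > 0) {
        lines.push(`Already edited (avoid re-work): ${files.join(', ')}`);
    }
    return lines.join('\n');
}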
@@ -1132,19 +1246,27 @@ export function getKeyDecisionSteps(sessionId, limit = 5) {
  }
  /**
  * Get steps reasoning by file path (for proxy team memory injection)
- * Searches across ALL sessions, returns file-level reasoning from steps table
+ * Searches across sessions, returns file-level reasoning from steps table
+ * @param excludeSessionId - Optional session ID to exclude (for filtering current session)
  */
- export function getStepsReasoningByPath(filePath, limit = 5) {
+ export function getStepsReasoningByPath(filePath, limit = 5, excludeSessionId) {
  const database = initDatabase();
  // Search steps where files JSON contains this path and reasoning exists
  const pattern = `%"${escapeLikePattern(filePath)}"%`;
- const rows = database.prepare(`
+ let sql = `
  SELECT files, reasoning
  FROM steps
  WHERE files LIKE ? AND reasoning IS NOT NULL AND reasoning != ''
- ORDER BY timestamp DESC
- LIMIT ?
- `).all(pattern, limit);
+ `;
+ const params = [pattern];
+ // Exclude current session if specified (for team memory from PAST sessions only)
+ if (excludeSessionId) {
+ sql += ` AND session_id != ?`;
+ params.push(excludeSessionId);
+ }
+ sql += ` ORDER BY timestamp DESC LIMIT ?`;
+ params.push(limit);
+ const rows = database.prepare(sql).all(...params);
  return rows.map(row => {
  const files = safeJsonParse(row.files, []);
  // Find the matching file path from the files array
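With the new optional argument, a caller that knows its own session ID can keep team-memory lookups strictly historical; usage is otherwise unchanged (the file path here is illustrative):

// Reasoning for a file from past sessions only (current session filtered out).
const past = getStepsReasoningByPath('src/lib/auth.js', 3, currentSessionId);

// Existing two-argument call sites keep the old cross-session behavior.
const all = getStepsReasoningByPath('src/lib/auth.js', 3);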
package/dist/lib/utils.d.ts CHANGED
@@ -5,6 +5,11 @@
  * Truncate a string to a maximum length, adding ellipsis if truncated.
  */
  export declare function truncate(str: string, maxLength: number): string;
+ /**
+ * Smart truncate: cleans markdown noise, prefers sentence/punctuation boundaries.
+ * Used for reasoning content that may contain markdown tables, bullets, etc.
+ */
+ export declare function smartTruncate(text: string, maxLen?: number): string;
  /**
  * Capitalize the first letter of a string.
  */
package/dist/lib/utils.js CHANGED
@@ -9,6 +9,51 @@ export function truncate(str, maxLength) {
  return str;
  return str.substring(0, maxLength - 3) + '...';
  }
+ /**
+ * Smart truncate: cleans markdown noise, prefers sentence/punctuation boundaries.
+ * Used for reasoning content that may contain markdown tables, bullets, etc.
+ */
+ export function smartTruncate(text, maxLen = 120) {
+ // 1. Clean markdown noise
+ let clean = text
+ .replace(/\|[^|]+\|/g, '') // markdown table cells
+ .replace(/^[-*]\s*/gm, '') // bullet points
+ .replace(/#{1,6}\s*/g, '') // headers
+ .replace(/\n+/g, ' ') // newlines to space
+ .replace(/\s+/g, ' ') // multiple spaces to one
+ .trim();
+ // 2. If short enough, return as-is
+ if (clean.length <= maxLen)
+ return clean;
+ // 3. Try to keep complete sentences
+ const sentences = clean.match(/[^.!?]+[.!?]+/g) || [];
+ let result = '';
+ for (const sentence of sentences) {
+ if ((result + sentence).length <= maxLen) {
+ result += sentence;
+ }
+ else {
+ break;
+ }
+ }
+ // 4. If we got at least one meaningful sentence, return it
+ if (result.length > 20)
+ return result.trim();
+ // 5. Fallback: find punctuation boundary
+ const truncated = clean.slice(0, maxLen);
+ const breakPoints = [
+ truncated.lastIndexOf('. '),
+ truncated.lastIndexOf(', '),
+ truncated.lastIndexOf('; '),
+ truncated.lastIndexOf(': '),
+ truncated.lastIndexOf(' - '),
+ truncated.lastIndexOf(' '),
+ ].filter(p => p > maxLen * 0.6);
+ const cutPoint = breakPoints.length > 0
+ ? Math.max(...breakPoints)
+ : truncated.lastIndexOf(' ');
+ return truncated.slice(0, cutPoint > 0 ? cutPoint : maxLen).trim() + '...';
+ }
  /**
  * Capitalize the first letter of a string.
  */
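A worked example of the sentence-boundary preference (input illustrative; the comment shows the value the steps above actually produce):

const raw = '### Decision\n- We chose SQLite for the step log.\n- It keeps the proxy dependency-free.';

// Header and bullet markers are stripped and newlines collapsed, then whole
// sentences are kept while they fit the budget:
// "Decision We chose SQLite for the step log."
smartTruncate(raw, 60);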
@@ -10,9 +10,11 @@ export interface AnthropicResponse {
  usage: {
  input_tokens: number;
  output_tokens: number;
+ cache_creation_input_tokens?: number;
+ cache_read_input_tokens?: number;
  };
  }
- export type ContentBlock = TextBlock | ToolUseBlock;
+ export type ContentBlock = TextBlock | ToolUseBlock | ThinkingBlock;
  export interface TextBlock {
  type: 'text';
  text: string;
@@ -23,6 +25,10 @@ export interface ToolUseBlock {
  name: string;
  input: Record<string, unknown>;
  }
+ export interface ThinkingBlock {
+ type: 'thinking';
+ thinking: string;
+ }
  export interface ParsedAction {
  toolName: string;
  toolId: string;
@@ -37,12 +43,14 @@ export interface ParsedAction {
  */
  export declare function parseToolUseBlocks(response: AnthropicResponse): ParsedAction[];
  /**
- * Extract token usage from response
+ * Extract token usage from response (including cache metrics)
  */
  export declare function extractTokenUsage(response: AnthropicResponse): {
  inputTokens: number;
  outputTokens: number;
  totalTokens: number;
+ cacheCreation: number;
+ cacheRead: number;
  };
  /**
  * Check if response contains any file-modifying actions
@@ -132,13 +132,15 @@ function extractPathFromGlobPattern(pattern) {
  return nonGlobParts.length > 0 ? nonGlobParts.join('/') : null;
  }
  /**
- * Extract token usage from response
+ * Extract token usage from response (including cache metrics)
  */
  export function extractTokenUsage(response) {
  return {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
- totalTokens: response.usage.input_tokens + response.usage.output_tokens
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ cacheCreation: response.usage.cache_creation_input_tokens || 0,
+ cacheRead: response.usage.cache_read_input_tokens || 0,
  };
  }
  /**
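Both cache fields default to 0 when absent, so accounting code can read them unconditionally. For example (the 0.1 discount multiplier below is an illustrative assumption about cache-read pricing, not something the package computes):

const usage = extractTokenUsage(response);

// Cache reads are typically billed much cheaper than fresh input tokens.
const effectiveInput = usage.inputTokens + usage.cacheCreation + usage.cacheRead * 0.1;
const hitRate = usage.cacheRead / (usage.inputTokens + usage.cacheRead || 1);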
@@ -4,6 +4,7 @@ export interface ForwardResult {
  headers: Record<string, string | string[]>;
  body: AnthropicResponse | Record<string, unknown>;
  rawBody: string;
+ wasSSE: boolean;
  }
  export interface ForwardError {
  type: 'timeout' | 'network' | 'parse' | 'unknown';
@@ -13,11 +14,16 @@ export interface ForwardError {
  /**
  * Forward request to Anthropic API
  * Buffers full response for processing
+ *
+ * @param body - Parsed body for logging
+ * @param headers - Request headers
+ * @param logger - Optional logger
+ * @param rawBody - Raw request bytes (preserves exact bytes for cache)
  */
  export declare function forwardToAnthropic(body: Record<string, unknown>, headers: Record<string, string | string[] | undefined>, logger?: {
  info: (msg: string, data?: Record<string, unknown>) => void;
  error: (msg: string, data?: Record<string, unknown>) => void;
- }): Promise<ForwardResult>;
+ }, rawBody?: Buffer): Promise<ForwardResult>;
  /**
  * Check if error is a ForwardError
  */
@@ -10,13 +10,139 @@ const agent = new Agent({
  autoSelectFamilyAttemptTimeout: 500, // Try next address family after 500ms
  });
  import { config, buildSafeHeaders, maskSensitiveValue } from './config.js';
+ /**
+ * Parse SSE stream and reconstruct final message
+ * SSE format: "event: <type>\ndata: <json>\n\n"
+ */
+ function parseSSEResponse(sseText) {
+ const lines = sseText.split('\n');
+ let message = null;
+ const contentBlocks = [];
+ const contentDeltas = new Map();
+ let finalUsage = null;
+ let stopReason = null;
+ let currentEvent = '';
+ let currentData = '';
+ for (const line of lines) {
+ if (line.startsWith('event: ')) {
+ currentEvent = line.slice(7).trim();
+ }
+ else if (line.startsWith('data: ')) {
+ currentData = line.slice(6);
+ try {
+ const data = JSON.parse(currentData);
+ switch (data.type) {
+ case 'message_start':
+ // Initialize message from message_start event
+ message = data.message;
+ break;
+ case 'content_block_start':
+ // Add new content block
+ if (data.content_block) {
+ contentBlocks[data.index] = data.content_block;
+ if (data.content_block.type === 'text') {
+ contentDeltas.set(data.index, []);
+ }
+ else if (data.content_block.type === 'thinking') {
+ // Initialize thinking with empty string, will accumulate via deltas
+ contentBlocks[data.index] = { type: 'thinking', thinking: '' };
+ }
+ }
+ break;
+ case 'content_block_delta':
+ // Accumulate text deltas
+ if (data.delta?.type === 'text_delta' && data.delta.text) {
+ const deltas = contentDeltas.get(data.index) || [];
+ deltas.push(data.delta.text);
+ contentDeltas.set(data.index, deltas);
+ }
+ else if (data.delta?.type === 'thinking_delta' && data.delta.thinking) {
+ // Handle thinking blocks
+ const block = contentBlocks[data.index];
+ if (block && block.type === 'thinking') {
+ block.thinking += data.delta.thinking;
+ }
+ }
+ else if (data.delta?.type === 'input_json_delta' && data.delta.partial_json) {
+ // Handle tool input streaming
+ const block = contentBlocks[data.index];
+ if (block && block.type === 'tool_use') {
+ // Accumulate partial JSON - will need to parse at the end
+ const partialKey = `tool_partial_${data.index}`;
+ const existing = contentDeltas.get(data.index) || [];
+ existing.push(data.delta.partial_json);
+ contentDeltas.set(data.index, existing);
+ }
+ }
+ break;
+ case 'message_delta':
+ // Final usage and stop_reason
+ if (data.usage) {
+ finalUsage = data.usage;
+ }
+ if (data.delta?.stop_reason) {
+ stopReason = data.delta.stop_reason;
+ }
+ break;
+ }
+ }
+ catch {
+ // Ignore unparseable data lines
+ }
+ }
+ }
+ if (!message) {
+ return null;
+ }
+ // Reconstruct content blocks with accumulated text/input
+ for (let i = 0; i < contentBlocks.length; i++) {
+ const block = contentBlocks[i];
+ if (!block)
+ continue;
+ const deltas = contentDeltas.get(i);
+ if (deltas && deltas.length > 0) {
+ if (block.type === 'text') {
+ block.text = deltas.join('');
+ }
+ else if (block.type === 'tool_use') {
+ // Parse accumulated partial JSON for tool input
+ try {
+ const fullJson = deltas.join('');
+ block.input = JSON.parse(fullJson);
+ }
+ catch {
+ // Keep original input if parsing fails
+ }
+ }
+ }
+ }
+ // Build final response
+ const response = {
+ id: message.id || '',
+ type: 'message',
+ role: 'assistant',
+ content: contentBlocks.filter(Boolean),
+ model: message.model || '',
+ stop_reason: stopReason,
+ stop_sequence: null,
+ usage: finalUsage || message.usage || { input_tokens: 0, output_tokens: 0 },
+ };
+ return response;
+ }
  /**
  * Forward request to Anthropic API
  * Buffers full response for processing
+ *
+ * @param body - Parsed body for logging
+ * @param headers - Request headers
+ * @param logger - Optional logger
+ * @param rawBody - Raw request bytes (preserves exact bytes for cache)
  */
- export async function forwardToAnthropic(body, headers, logger) {
+ export async function forwardToAnthropic(body, headers, logger, rawBody) {
  const targetUrl = `${config.ANTHROPIC_BASE_URL}/v1/messages`;
  const safeHeaders = buildSafeHeaders(headers);
+ // Use raw bytes if available (preserves cache), otherwise re-serialize
+ const requestBody = rawBody || JSON.stringify(body);
  // Log request (mask sensitive data)
  if (logger && config.LOG_REQUESTS) {
  const maskedHeaders = {};
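For reference, the kind of stream parseSSEResponse consumes, trimmed to the events it actually reads (field values abbreviated; content_block_stop and message_stop events simply fall through the switch and are ignored):

event: message_start
data: {"type":"message_start","message":{"id":"msg_abc","model":"claude-x","usage":{"input_tokens":12,"output_tokens":1}}}

event: content_block_start
data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}

event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"}}

event: message_delta
data: {"type":"message_delta","delta":{"stop_reason":"end_turn"},"usage":{"output_tokens":4}}

From this the function returns one buffered message: content [{ type: 'text', text: 'Hello' }], stop_reason 'end_turn', and the usage object taken from message_delta.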
@@ -28,6 +154,8 @@ export async function forwardToAnthropic(body, headers, logger) {
  model: body.model,
  messageCount: Array.isArray(body.messages) ? body.messages.length : 0,
  headers: maskedHeaders,
+ usingRawBody: !!rawBody,
+ bodySize: rawBody?.length || JSON.stringify(body).length,
  });
  }
  try {
@@ -37,7 +165,7 @@ export async function forwardToAnthropic(body, headers, logger) {
  ...safeHeaders,
  'content-type': 'application/json',
  },
- body: JSON.stringify(body),
+ body: requestBody,
  bodyTimeout: config.REQUEST_TIMEOUT,
  headersTimeout: config.REQUEST_TIMEOUT,
  dispatcher: agent,
@@ -48,14 +176,29 @@ export async function forwardToAnthropic(body, headers, logger) {
  chunks.push(Buffer.from(chunk));
  }
  const rawBody = Buffer.concat(chunks).toString('utf-8');
+ // Check if response is SSE streaming
+ const contentType = response.headers['content-type'];
+ const isSSE = typeof contentType === 'string' && contentType.includes('text/event-stream');
  // Parse response
  let parsedBody;
- try {
- parsedBody = JSON.parse(rawBody);
+ if (isSSE) {
+ // Parse SSE and reconstruct final message
+ const sseMessage = parseSSEResponse(rawBody);
+ if (sseMessage) {
+ parsedBody = sseMessage;
+ }
+ else {
+ parsedBody = { error: 'Failed to parse SSE response', raw: rawBody.substring(0, 500) };
+ }
  }
- catch {
- // Return raw body if not JSON
- parsedBody = { error: 'Invalid JSON response', raw: rawBody.substring(0, 500) };
+ else {
+ // Regular JSON response
+ try {
+ parsedBody = JSON.parse(rawBody);
+ }
+ catch {
+ parsedBody = { error: 'Invalid JSON response', raw: rawBody.substring(0, 500) };
+ }
  }
  // Convert headers to record
  const responseHeaders = {};
@@ -64,11 +207,17 @@ export async function forwardToAnthropic(body, headers, logger) {
  responseHeaders[key] = value;
  }
  }
+ // If we parsed SSE, change content-type to JSON for Claude Code
+ if (isSSE) {
+ responseHeaders['content-type'] = 'application/json';
+ }
  if (logger && config.LOG_REQUESTS) {
  logger.info('Received from Anthropic', {
  statusCode: response.statusCode,
  bodyLength: rawBody.length,
  hasUsage: 'usage' in parsedBody,
+ wasSSE: isSSE,
+ parseSuccess: !('error' in parsedBody),
  });
  }
  return {
@@ -76,6 +225,7 @@ export async function forwardToAnthropic(body, headers, logger) {
  headers: responseHeaders,
  body: parsedBody,
  rawBody,
+ wasSSE: isSSE,
  };
  }
  catch (error) {
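A call-site sketch for the new parameter (variable names here are placeholders; how the raw request bytes are captured is outside this diff):

// rawBodyBuffer: the exact bytes the client sent. Forwarding them unchanged
// keeps prompt-cache keys stable instead of re-serializing the parsed JSON.
const result = await forwardToAnthropic(parsedBody, incomingHeaders, logger, rawBodyBuffer);

if (result.wasSSE) {
    // Upstream streamed: result.body is the reconstructed message and the
    // returned content-type header was rewritten to application/json.
}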
@@ -1,8 +1,9 @@
  /**
- * Build context from team memory for injection
- * Queries tasks and file_reasoning tables
+ * Build context from team memory for injection (PAST sessions only)
+ * Queries tasks and file_reasoning tables, excluding current session data
+ * @param currentSessionId - Session ID to exclude (ensures only past session data)
  */
- export declare function buildTeamMemoryContext(projectPath: string, mentionedFiles: string[]): string | null;
+ export declare function buildTeamMemoryContext(projectPath: string, mentionedFiles: string[], currentSessionId?: string): string | null;
  /**
  * Extract file paths from messages (user messages only, clean text)
  */
@@ -3,10 +3,11 @@
  import { getTasksForProject, getTasksByFiles, getStepsReasoningByPath, } from '../lib/store.js';
  import { truncate } from '../lib/utils.js';
  /**
- * Build context from team memory for injection
- * Queries tasks and file_reasoning tables
+ * Build context from team memory for injection (PAST sessions only)
+ * Queries tasks and file_reasoning tables, excluding current session data
+ * @param currentSessionId - Session ID to exclude (ensures only past session data)
  */
- export function buildTeamMemoryContext(projectPath, mentionedFiles) {
+ export function buildTeamMemoryContext(projectPath, mentionedFiles, currentSessionId) {
  // Get recent completed tasks for this project
  const tasks = getTasksForProject(projectPath, {
  status: 'complete',
@@ -16,9 +17,10 @@ export function buildTeamMemoryContext(projectPath, mentionedFiles) {
  const fileTasks = mentionedFiles.length > 0
  ? getTasksByFiles(projectPath, mentionedFiles, { status: 'complete', limit: 5 })
  : [];
- // Get file-level reasoning from steps table (proxy version)
+ // Get file-level reasoning from steps table (PAST sessions only)
+ // Pass currentSessionId to exclude current session data
  const fileReasonings = mentionedFiles.length > 0
- ? mentionedFiles.flatMap(f => getStepsReasoningByPath(f, 3))
+ ? mentionedFiles.flatMap(f => getStepsReasoningByPath(f, 3, currentSessionId))
  : [];
  // Combine unique tasks
  const allTasks = [...new Map([...tasks, ...fileTasks].map(t => [t.id, t])).values()];
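And the proxy-side call, sketched (names illustrative): passing the active session ID keeps injected team memory strictly historical, while existing two-argument call sites keep the old cross-session behavior:

const context = buildTeamMemoryContext(projectPath, mentionedFiles, currentSessionId);
if (context) {
    // prepend to the outgoing request as team-memory context
}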