@claudemini/shit-cli 1.3.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/shit.js +1 -0
- package/lib/checkpoint.js +16 -1
- package/lib/enable.js +10 -1
- package/lib/summarize.js +342 -0
- package/package.json +1 -1
package/bin/shit.js
CHANGED
|
@@ -15,6 +15,7 @@ const commands = {
|
|
|
15
15
|
view: 'View session details',
|
|
16
16
|
query: 'Query session memory (cross-session)',
|
|
17
17
|
explain: 'Explain a session or commit',
|
|
18
|
+
summarize: 'Generate AI summary for a session',
|
|
18
19
|
rewind: 'Rollback to previous checkpoint',
|
|
19
20
|
resume: 'Resume session from checkpoint',
|
|
20
21
|
reset: 'Delete checkpoint for current HEAD',
|
package/lib/checkpoint.js
CHANGED
|
@@ -139,7 +139,9 @@ function buildTree(cwd, dirPath, redact = true) {
|
|
|
139
139
|
* Commit session checkpoint to shadow branch
|
|
140
140
|
* Branch format: shit/checkpoints/v1/YYYY-MM-DD-<session-id>
|
|
141
141
|
*/
|
|
142
|
-
export async function commitCheckpoint(projectRoot, sessionDir, sessionId, commitSha = null) {
|
|
142
|
+
export async function commitCheckpoint(projectRoot, sessionDir, sessionId, commitSha = null, options = {}) {
|
|
143
|
+
const autoSummarize = options.autoSummarize !== false; // default true
|
|
144
|
+
|
|
143
145
|
// Verify we're in a git repo
|
|
144
146
|
try {
|
|
145
147
|
git('rev-parse --git-dir', projectRoot);
|
|
@@ -212,6 +214,19 @@ export async function commitCheckpoint(projectRoot, sessionDir, sessionId, commi
|
|
|
212
214
|
commit: commitHash,
|
|
213
215
|
linked_commit: linkedCommit,
|
|
214
216
|
};
|
|
217
|
+
|
|
218
|
+
// Auto-summarize if enabled
|
|
219
|
+
if (autoSummarize) {
|
|
220
|
+
try {
|
|
221
|
+
const { summarizeSession } = await import('./summarize.js');
|
|
222
|
+
const summaryResult = await summarizeSession(projectRoot, sessionId, sessionDir);
|
|
223
|
+
if (summaryResult.success) {
|
|
224
|
+
console.log(`✅ AI summary generated: ${summaryResult.model}`);
|
|
225
|
+
}
|
|
226
|
+
} catch {
|
|
227
|
+
// Best effort - summarize is optional
|
|
228
|
+
}
|
|
229
|
+
}
|
|
215
230
|
}
|
|
216
231
|
|
|
217
232
|
/**
|
package/lib/enable.js
CHANGED
|
@@ -173,6 +173,7 @@ export default async function enable(args) {
|
|
|
173
173
|
let force = false;
|
|
174
174
|
let pushSessions = true;
|
|
175
175
|
let telemetry = true;
|
|
176
|
+
let summarize = true;
|
|
176
177
|
|
|
177
178
|
for (const arg of args) {
|
|
178
179
|
if (arg === '--all') {
|
|
@@ -188,6 +189,8 @@ export default async function enable(args) {
|
|
|
188
189
|
force = true;
|
|
189
190
|
} else if (arg === '--skip-push-sessions') {
|
|
190
191
|
pushSessions = false;
|
|
192
|
+
} else if (arg === '--no-summarize') {
|
|
193
|
+
summarize = false;
|
|
191
194
|
} else if (arg.startsWith('--telemetry=')) {
|
|
192
195
|
telemetry = arg.split('=')[1] !== 'false';
|
|
193
196
|
} else if (arg === '--telemetry') {
|
|
@@ -213,6 +216,7 @@ export default async function enable(args) {
|
|
|
213
216
|
const configData = {
|
|
214
217
|
enabled: true,
|
|
215
218
|
push_sessions: pushSessions,
|
|
219
|
+
summarize: summarize,
|
|
216
220
|
telemetry: telemetry,
|
|
217
221
|
log_level: 'info'
|
|
218
222
|
};
|
|
@@ -264,8 +268,13 @@ export default async function enable(args) {
|
|
|
264
268
|
console.log(' shit checkpoints # List all checkpoints');
|
|
265
269
|
console.log(' shit commit # Manually create checkpoint after git commit');
|
|
266
270
|
console.log('\nOptions:');
|
|
267
|
-
console.log(' --all
|
|
271
|
+
console.log(' --all # Enable for all supported agents');
|
|
268
272
|
console.log(' --checkpoint, -c # Enable automatic checkpoint on git commit');
|
|
273
|
+
console.log(' --no-summarize # Disable AI summary generation');
|
|
274
|
+
console.log(' --skip-push-sessions # Disable auto-push to remote');
|
|
275
|
+
console.log(' --telemetry=false # Disable anonymous telemetry');
|
|
276
|
+
console.log('\nAI Summary:');
|
|
277
|
+
console.log(' Set OPENAI_API_KEY or ANTHROPIC_API_KEY to enable AI summaries');
|
|
269
278
|
|
|
270
279
|
} catch (error) {
|
|
271
280
|
console.error('❌ Failed to enable shit-cli:', error.message);
|
package/lib/summarize.js
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* AI Summarization module
|
|
5
|
+
* Automatically generates AI-powered summaries of sessions using LLM APIs
|
|
6
|
+
* Supports OpenAI and Anthropic APIs
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { existsSync, readFileSync, writeFileSync } from 'fs';
|
|
10
|
+
import { join } from 'path';
|
|
11
|
+
import { execSync } from 'child_process';
|
|
12
|
+
|
|
13
|
+
// Default configuration
const DEFAULT_CONFIG = {
  provider: 'openai', // or 'anthropic'
  model: 'gpt-4o-mini',
  max_tokens: 1000,
  temperature: 0.7,
};

/**
 * Get API configuration from environment or config file.
 *
 * Resolution order: built-in defaults, then environment keys
 * (OPENAI_API_KEY wins over ANTHROPIC_API_KEY when both are set),
 * then `.shit-logs/config.json` in the project root, which overrides
 * anything it specifies.
 *
 * @param {string} projectRoot - Project root directory to look up config in.
 * @returns {{provider: string, model: string, max_tokens: number, temperature: number, api_key?: string}}
 */
function getApiConfig(projectRoot) {
  const config = { ...DEFAULT_CONFIG };

  // Environment variables take effect first
  if (process.env.OPENAI_API_KEY) {
    config.provider = 'openai';
    config.api_key = process.env.OPENAI_API_KEY;
  } else if (process.env.ANTHROPIC_API_KEY) {
    config.provider = 'anthropic';
    config.api_key = process.env.ANTHROPIC_API_KEY;
  }

  // Project config overrides env-derived values
  let fileConfig = null;
  const configFile = join(projectRoot, '.shit-logs', 'config.json');
  if (existsSync(configFile)) {
    try {
      fileConfig = JSON.parse(readFileSync(configFile, 'utf-8'));
      Object.assign(config, fileConfig);
    } catch {
      // Unreadable or invalid JSON: keep env values and defaults
    }
  }

  // Bug fix: if the file switched providers without supplying its own key,
  // the old code kept the stale key from the *other* provider's env var
  // (e.g. file says "anthropic" but api_key is the OpenAI one). Re-resolve
  // the key against the provider actually in effect when a matching env
  // key is available.
  if (!fileConfig?.api_key) {
    const envKey = config.provider === 'anthropic'
      ? process.env.ANTHROPIC_API_KEY
      : process.env.OPENAI_API_KEY;
    if (envKey) {
      config.api_key = envKey;
    }
  }

  return config;
}
|
|
50
|
+
|
|
51
|
+
/**
 * Extract relevant context from a session directory for summarization.
 *
 * All reads are best-effort: a missing or unreadable file simply leaves
 * the corresponding field at its default.
 *
 * @param {string} sessionDir - Directory holding the session artifacts.
 * @returns {{prompts: Array, changes: Array, tools: Object, errors: Array, summary: ?Object, prompts_text?: string, context_md?: string}}
 */
function extractContext(sessionDir) {
  const context = {
    prompts: [],
    changes: [],
    tools: {},
    errors: [],
    summary: null,
  };

  // Best-effort file reader: null when the file is missing or unreadable.
  const readText = (name) => {
    const file = join(sessionDir, name);
    if (!existsSync(file)) {
      return null;
    }
    try {
      return readFileSync(file, 'utf-8');
    } catch {
      return null;
    }
  };

  // Structured summary.json written by the session recorder.
  const summaryRaw = readText('summary.json');
  if (summaryRaw !== null) {
    try {
      const summary = JSON.parse(summaryRaw);
      context.summary = summary;
      context.prompts = summary.prompts || [];
      context.tools = summary.activity?.tools || {};
      context.errors = summary.activity?.errors || [];
      context.changes = summary.changes?.files || [];
    } catch {
      // Malformed JSON: keep the empty defaults.
    }
  }

  // Raw prompt text, truncated to keep the LLM prompt small.
  const promptsRaw = readText('prompts.txt');
  if (promptsRaw !== null) {
    context.prompts_text = promptsRaw.slice(0, 3000);
  }

  // Free-form session notes, also truncated.
  const contextRaw = readText('context.md');
  if (contextRaw !== null) {
    context.context_md = contextRaw.slice(0, 2000);
  }

  return context;
}
|
|
100
|
+
|
|
101
|
+
/**
 * Build the prompt sent to the LLM for session summarization.
 *
 * Sections (user prompts, changed files, tool usage, errors) are included
 * only when the extracted context has data for them; each list is capped
 * to keep the prompt compact.
 *
 * @param {Object} context - Output of extractContext().
 * @returns {string} Newline-joined prompt text.
 */
function buildSummarizePrompt(context) {
  // Fixed instruction preamble.
  const parts = [
    `You are a helpful assistant that summarizes AI coding sessions. Generate a concise summary that explains:`,
    `1. What the user wanted to accomplish`,
    `2. What changes were made`,
    `3. Any issues or errors encountered`,
    `4. Overall outcome`,
    `\n---\n`,
  ];

  // User prompts: prefer the raw captured text, fall back to the
  // structured prompt list (first 5, each clipped to 200 chars).
  if (context.prompts_text) {
    parts.push(`## User Prompts\n${context.prompts_text}\n`);
  } else if (context.prompts?.length > 0) {
    parts.push(`## User Prompts\n`);
    for (const p of context.prompts.slice(0, 5)) {
      const text = typeof p === 'string' ? p : p.text || '';
      parts.push(`- ${text.slice(0, 200)}`);
    }
    parts.push('');
  }

  // Changed files (first 10).
  if (context.changes?.length > 0) {
    parts.push(`## Files Changed\n`);
    for (const f of context.changes.slice(0, 10)) {
      const ops = f.operations?.join(', ') || 'modified';
      parts.push(`- ${f.path}: ${ops}`);
    }
    parts.push('');
  }

  // Tool usage counts.
  const toolEntries = Object.entries(context.tools ?? {});
  if (context.tools && toolEntries.length > 0) {
    parts.push(`## Tools Used\n`);
    for (const [tool, count] of toolEntries) {
      parts.push(`- ${tool}: ${count} times`);
    }
    parts.push('');
  }

  // Errors (first 5, messages clipped to 100 chars).
  if (context.errors?.length > 0) {
    parts.push(`## Errors\n`);
    for (const e of context.errors.slice(0, 5)) {
      parts.push(`- ${e.tool}: ${(e.message || '').slice(0, 100)}`);
    }
    parts.push('');
  }

  return parts.join('\n');
}
|
|
158
|
+
|
|
159
|
+
/**
 * Call the OpenAI Chat Completions API and return the generated text.
 *
 * @param {string} apiKey - OpenAI API key (sent as a Bearer token).
 * @param {string} model - Model identifier, e.g. 'gpt-4o-mini'.
 * @param {string} prompt - User prompt content.
 * @param {number} maxTokens - Completion token cap.
 * @param {number} temperature - Sampling temperature.
 * @returns {Promise<string>} The first choice's message content.
 * @throws {Error} On any non-2xx HTTP response, with status and body text.
 */
async function callOpenAI(apiKey, model, prompt, maxTokens, temperature) {
  const payload = {
    model,
    messages: [
      { role: 'system', content: 'You are a helpful assistant that summarizes AI coding sessions.' },
      { role: 'user', content: prompt },
    ],
    max_tokens: maxTokens,
    temperature,
  };

  const response = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`OpenAI API error: ${response.status} - ${error}`);
  }

  const { choices } = await response.json();
  return choices[0].message.content;
}
|
|
188
|
+
|
|
189
|
+
/**
 * Call the Anthropic Messages API and return the generated text.
 *
 * @param {string} apiKey - Anthropic API key (sent via the x-api-key header).
 * @param {string} model - Model identifier, e.g. 'claude-3-haiku-20240307'.
 * @param {string} prompt - User prompt content.
 * @param {number} maxTokens - Completion token cap (required by this API).
 * @param {number} temperature - Sampling temperature.
 * @returns {Promise<string>} Text of the first content block.
 * @throws {Error} On any non-2xx HTTP response, with status and body text.
 */
async function callAnthropic(apiKey, model, prompt, maxTokens, temperature) {
  const payload = {
    model,
    max_tokens: maxTokens,
    temperature,
    messages: [
      { role: 'user', content: prompt },
    ],
  };

  const response = await fetch('https://api.anthropic.com/v1/messages', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
    },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Anthropic API error: ${response.status} - ${error}`);
  }

  const data = await response.json();
  return data.content[0].text;
}
|
|
218
|
+
|
|
219
|
+
/**
 * Generate an AI summary for a session and persist it next to the
 * session's other artifacts.
 *
 * Writes `ai-summary.md` into sessionDir on success and records summary
 * provenance into `state.json` (best effort). Never throws: failures are
 * reported via the returned object.
 *
 * @param {string} projectRoot - Project root (used to locate config).
 * @param {string} sessionId - Session identifier (currently unused here;
 *   kept for interface compatibility with callers).
 * @param {string} sessionDir - Directory holding the session artifacts.
 * @returns {Promise<{success: boolean, summary?: string, provider?: string, model?: string, reason?: string}>}
 */
export async function summarizeSession(projectRoot, sessionId, sessionDir) {
  const config = getApiConfig(projectRoot);

  // Without a key there is nothing to do — report rather than throw.
  if (!config.api_key) {
    return {
      success: false,
      reason: 'No API key configured. Set OPENAI_API_KEY or ANTHROPIC_API_KEY environment variable.'
    };
  }

  // Gather session context and turn it into an LLM prompt.
  const prompt = buildSummarizePrompt(extractContext(sessionDir));

  try {
    // Dispatch to the configured provider, with a per-provider model fallback.
    const useAnthropic = config.provider === 'anthropic';
    const call = useAnthropic ? callAnthropic : callOpenAI;
    const fallbackModel = useAnthropic ? 'claude-3-haiku-20240307' : 'gpt-4o-mini';

    const summary = await call(
      config.api_key,
      config.model || fallbackModel,
      prompt,
      config.max_tokens,
      config.temperature
    );

    // Persist the markdown summary.
    writeFileSync(join(sessionDir, 'ai-summary.md'), summary);

    // Record provenance in state.json — best effort: a broken state file
    // must not fail the summarization.
    const stateFile = join(sessionDir, 'state.json');
    if (existsSync(stateFile)) {
      try {
        const state = JSON.parse(readFileSync(stateFile, 'utf-8'));
        state.ai_summary = {
          provider: config.provider,
          model: config.model,
          generated_at: new Date().toISOString(),
        };
        writeFileSync(stateFile, JSON.stringify(state, null, 2));
      } catch {
        // Best effort.
      }
    }

    return {
      success: true,
      summary,
      provider: config.provider,
      model: config.model,
    };
  } catch (error) {
    return {
      success: false,
      reason: error.message
    };
  }
}
|
|
292
|
+
|
|
293
|
+
/**
 * CLI command: `shit summarize <session-id>`.
 *
 * Generates an AI summary for a recorded session and prints it. Exits
 * with status 1 on missing arguments, unknown sessions, or summarization
 * failure. Note: the project root is resolved before argument validation,
 * so running outside a git repository throws regardless of args.
 *
 * @param {string[]} args - CLI arguments; args[0] is the session id.
 */
export default async function summarize(args) {
  const projectRoot = findProjectRoot();
  const [sessionId] = args;

  // No session id: print usage and bail.
  if (!sessionId) {
    console.log('Usage: shit summarize <session-id>');
    console.log('\nEnvironment variables:');
    console.log(' OPENAI_API_KEY # Use OpenAI for summarization');
    console.log(' ANTHROPIC_API_KEY # Use Anthropic for summarization');
    console.log('\nConfiguration (.shit-logs/config.json):');
    console.log(` {"provider": "openai", "model": "gpt-4o-mini"}`);
    process.exit(1);
  }

  // Sessions live under .shit-logs/<session-id>.
  const sessionDir = join(projectRoot, '.shit-logs', sessionId);
  if (!existsSync(sessionDir)) {
    console.error(`Session not found: ${sessionId}`);
    process.exit(1);
  }

  console.log(`🤖 Generating AI summary for session: ${sessionId}\n`);

  const result = await summarizeSession(projectRoot, sessionId, sessionDir);
  if (!result.success) {
    console.error('❌ Failed to generate summary:', result.reason);
    process.exit(1);
  }

  console.log('✅ AI Summary generated!\n');
  console.log(result.summary);
  console.log(`\n---`);
  console.log(`Provider: ${result.provider}`);
  console.log(`Model: ${result.model}`);
}
|
|
332
|
+
|
|
333
|
+
/**
 * Walk upward from the current working directory to find the nearest
 * ancestor containing a `.git` entry.
 *
 * @returns {string} Absolute path of the project root.
 * @throws {Error} If no `.git` is found up to the filesystem root.
 */
function findProjectRoot() {
  let dir = process.cwd();
  let prev = null;
  // Loop until the path stops changing (filesystem root reached).
  // Fixes the old `while (dir !== '/')`, which (a) never inspected the
  // root directory itself and (b) never terminated on Windows, where the
  // root is e.g. `C:\` and the loop condition could never become false.
  while (dir !== prev) {
    if (existsSync(join(dir, '.git'))) {
      return dir;
    }
    prev = dir;
    dir = join(dir, '..'); // join() normalizes, so this is the parent dir
  }
  throw new Error('Not in a git repository');
}
|