context-mode 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +29 -0
- package/.mcp.json +8 -0
- package/LICENSE +21 -0
- package/README.md +304 -0
- package/build/cli.d.ts +10 -0
- package/build/cli.js +193 -0
- package/build/executor.d.ts +27 -0
- package/build/executor.js +255 -0
- package/build/runtime.d.ts +24 -0
- package/build/runtime.js +167 -0
- package/build/server.d.ts +2 -0
- package/build/server.js +457 -0
- package/build/store.d.ts +39 -0
- package/build/store.js +212 -0
- package/package.json +64 -0
- package/skills/context-mode/SKILL.md +124 -0
- package/skills/context-mode/references/anti-patterns.md +257 -0
- package/skills/context-mode/references/patterns-javascript.md +298 -0
- package/skills/context-mode/references/patterns-python.md +304 -0
- package/skills/context-mode/references/patterns-shell.md +277 -0
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
# JavaScript / TypeScript Patterns for execute
|
|
2
|
+
|
|
3
|
+
Practical patterns for using `execute` with `language: javascript`.
|
|
4
|
+
All examples assume Node.js runtime with native fetch (Node 18+).
|
|
5
|
+
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
## API Response Processing
|
|
9
|
+
|
|
10
|
+
### Fetch and summarize a REST API
|
|
11
|
+
|
|
12
|
+
```javascript
|
|
13
|
+
// execute: Analyze API health endpoint
const resp = await fetch('https://api.example.com/health');
// Fail loudly on HTTP errors; otherwise resp.json() throws a confusing parse
// error when the server answers with an HTML error page.
if (!resp.ok) {
  throw new Error(`Health endpoint returned HTTP ${resp.status}`);
}
const data = await resp.json();

console.log('=== Service Health ===');
console.log(`Status: ${data.status}`);
console.log(`Uptime: ${data.uptime}`);
console.log(`Timestamp: ${data.timestamp}`);

if (data.services) {
  console.log('\n=== Service Components ===');
  for (const [name, info] of Object.entries(data.services)) {
    console.log(`  ${name}: ${info.status} (latency: ${info.latency_ms}ms)`);
  }
}

if (data.errors && data.errors.length > 0) {
  console.log('\n=== Recent Errors ===');
  data.errors.slice(0, 10).forEach(e => {
    console.log(`  [${e.timestamp}] ${e.code}: ${e.message}`);
  });
}
|
|
35
|
+
```
|
|
36
|
+
> summary_prompt: "Report overall health, list any degraded services, and highlight errors"
|
|
37
|
+
|
|
38
|
+
### Paginated API collection
|
|
39
|
+
|
|
40
|
+
```javascript
|
|
41
|
+
// execute: Fetch all open issues from GitHub API
// NOTE: this endpoint also returns pull requests; filter on `pull_request` if
// only true issues are wanted.
const owner = 'org';
const repo = 'project';
const MAX_PAGES = 50; // safety cap: 50 * 100 = 5000 issues max
let page = 1;
const allIssues = [];

while (page <= MAX_PAGES) {
  const resp = await fetch(
    `https://api.github.com/repos/${owner}/${repo}/issues?state=open&per_page=100&page=${page}`,
    { headers: { 'Accept': 'application/vnd.github.v3+json' } }
  );
  if (!resp.ok) {
    // A rate-limit (403) or bad repo path returns an error object whose
    // `.length` is undefined — the original looped forever / crashed on spread.
    throw new Error(`GitHub API returned HTTP ${resp.status} on page ${page}`);
  }
  const issues = await resp.json();
  if (!Array.isArray(issues) || issues.length === 0) break;
  allIssues.push(...issues);
  page++;
}

console.log(`Total open issues: ${allIssues.length}\n`);

// Group by labels
const byLabel = {};
allIssues.forEach(issue => {
  issue.labels.forEach(label => {
    byLabel[label.name] = (byLabel[label.name] || 0) + 1;
  });
});

console.log('=== Issues by Label ===');
Object.entries(byLabel)
  .sort((a, b) => b[1] - a[1])
  .forEach(([label, count]) => console.log(`  ${label}: ${count}`));

// Oldest issues
console.log('\n=== 10 Oldest Issues ===');
allIssues
  .sort((a, b) => new Date(a.created_at) - new Date(b.created_at))
  .slice(0, 10)
  .forEach(i => console.log(`  #${i.number} (${i.created_at.slice(0, 10)}): ${i.title}`));
|
|
79
|
+
```
|
|
80
|
+
> summary_prompt: "Summarize issue distribution by label, highlight stale issues, suggest priorities"
|
|
81
|
+
> timeout_ms: 30000
|
|
82
|
+
|
|
83
|
+
---
|
|
84
|
+
|
|
85
|
+
## JSON Data Analysis
|
|
86
|
+
|
|
87
|
+
### Analyze a large JSON config file
|
|
88
|
+
|
|
89
|
+
```javascript
|
|
90
|
+
const fs = require('fs');

// tsconfig.json is JSONC: it commonly contains // and /* */ comments and
// trailing commas, which plain JSON.parse rejects. Strip them with a small
// state machine so comment-like text inside string values (e.g. glob patterns
// like "src/**/*") is preserved.
function stripJsonc(text) {
  let out = '';
  let i = 0;
  let inString = false;
  while (i < text.length) {
    const c = text[i];
    if (inString) {
      out += c;
      if (c === '\\') { out += text[i + 1] ?? ''; i += 2; continue; }
      if (c === '"') inString = false;
      i++;
    } else if (c === '"') {
      inString = true;
      out += c;
      i++;
    } else if (c === '/' && text[i + 1] === '/') {
      while (i < text.length && text[i] !== '\n') i++;   // line comment
    } else if (c === '/' && text[i + 1] === '*') {
      i += 2;                                            // block comment
      while (i < text.length && !(text[i] === '*' && text[i + 1] === '/')) i++;
      i += 2;
    } else {
      out += c;
      i++;
    }
  }
  // Trailing commas before } or ] (rare false positive inside strings is
  // already avoided: strings were copied verbatim above only character-wise —
  // NOTE(review): a ",}" sequence inside a string value would still be hit.
  return out.replace(/,\s*([}\]])/g, '$1');
}

const data = JSON.parse(stripJsonc(fs.readFileSync('tsconfig.json', 'utf8')));

console.log('=== TSConfig Analysis ===');
console.log(`Target: ${data.compilerOptions?.target}`);
console.log(`Module: ${data.compilerOptions?.module}`);
console.log(`Strict: ${data.compilerOptions?.strict}`);
console.log(`Paths aliases: ${Object.keys(data.compilerOptions?.paths || {}).length}`);

if (data.compilerOptions?.paths) {
  console.log('\n=== Path Aliases ===');
  for (const [alias, targets] of Object.entries(data.compilerOptions.paths)) {
    console.log(`  ${alias} -> ${targets.join(', ')}`);
  }
}

if (data.include) console.log(`\nInclude: ${data.include.join(', ')}`);
if (data.exclude) console.log(`Exclude: ${data.exclude.join(', ')}`);
if (data.references) {
  console.log(`\nProject References: ${data.references.length}`);
  data.references.forEach(r => console.log(`  ${r.path}`));
}
|
|
112
|
+
```
|
|
113
|
+
> summary_prompt: "Report compiler strictness, module system, and any unusual configuration"
|
|
114
|
+
|
|
115
|
+
### Diff two JSON files
|
|
116
|
+
|
|
117
|
+
```javascript
|
|
118
|
+
const fs = require('fs');
const a = JSON.parse(fs.readFileSync('config.prod.json', 'utf8'));
const b = JSON.parse(fs.readFileSync('config.staging.json', 'utf8'));

// Recursively print one line per difference:
//   + key only in obj2, - key only in obj1, ~ value changed.
function diffObjects(obj1, obj2, path = '') {
  const allKeys = new Set([...Object.keys(obj1 || {}), ...Object.keys(obj2 || {})]);
  for (const key of allKeys) {
    const fullPath = path ? `${path}.${key}` : key;
    if (!(key in (obj1 || {}))) {
      console.log(`+ ${fullPath}: ${JSON.stringify(obj2[key])}`);
    } else if (!(key in (obj2 || {}))) {
      console.log(`- ${fullPath}: ${JSON.stringify(obj1[key])}`);
    } else if (
      // Only recurse when BOTH sides are real objects of the same shape.
      // `typeof null === 'object'`, so the original recursed into null and
      // reported `null -> {...}` as a series of additions instead of one
      // change; it also mixed array indices with object keys.
      obj1[key] !== null && obj2[key] !== null &&
      typeof obj1[key] === 'object' && typeof obj2[key] === 'object' &&
      Array.isArray(obj1[key]) === Array.isArray(obj2[key])
    ) {
      diffObjects(obj1[key], obj2[key], fullPath);
    } else if (JSON.stringify(obj1[key]) !== JSON.stringify(obj2[key])) {
      console.log(`~ ${fullPath}: ${JSON.stringify(obj1[key])} -> ${JSON.stringify(obj2[key])}`);
    }
  }
}

console.log('=== Config Diff: prod vs staging ===');
diffObjects(a, b);
|
|
140
|
+
```
|
|
141
|
+
> summary_prompt: "List all configuration differences between prod and staging environments"
|
|
142
|
+
|
|
143
|
+
---
|
|
144
|
+
|
|
145
|
+
## Package.json / Lock File Analysis
|
|
146
|
+
|
|
147
|
+
### Dependency audit
|
|
148
|
+
|
|
149
|
+
```javascript
|
|
150
|
+
const fs = require('fs');
const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8'));

const deps = Object.entries(pkg.dependencies || {});
const devDeps = Object.entries(pkg.devDependencies || {});

console.log(`Package: ${pkg.name}@${pkg.version}`);
console.log(`Dependencies: ${deps.length}`);
console.log(`DevDependencies: ${devDeps.length}`);

// Find non-pinned versions. Only an exact semver ("1.2.3", optionally with a
// prerelease/build suffix) counts as pinned; the original only caught ^ ~ *
// and missed ranges like ">=1.0.0", "1.x", and dist-tags like "latest".
const EXACT_SEMVER = /^\d+\.\d+\.\d+(?:[-+][\w.-]+)?$/;
console.log('\n=== Non-Pinned Dependencies ===');
[...deps, ...devDeps].forEach(([name, version]) => {
  if (!EXACT_SEMVER.test(version)) {
    console.log(`  ${name}: ${version}`);
  }
});

// List npm scripts
console.log('\n=== Scripts ===');
Object.entries(pkg.scripts || {}).forEach(([name, cmd]) => {
  console.log(`  ${name}: ${cmd}`);
});

// Workspace detection
if (pkg.workspaces) {
  console.log('\n=== Monorepo Workspaces ===');
  const ws = Array.isArray(pkg.workspaces) ? pkg.workspaces : pkg.workspaces.packages || [];
  ws.forEach(w => console.log(`  ${w}`));
}
|
|
180
|
+
```
|
|
181
|
+
> summary_prompt: "Report dependency health: unpinned versions, total count, any security concerns from package names"
|
|
182
|
+
|
|
183
|
+
### Lock file drift detection
|
|
184
|
+
|
|
185
|
+
```javascript
|
|
186
|
+
const fs = require('fs');
const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8'));

// Map each package manager to its lock file and probe for presence.
const lockFiles = { npm: 'package-lock.json', yarn: 'yarn.lock', pnpm: 'pnpm-lock.yaml' };
const lockExists = {};
for (const [mgr, file] of Object.entries(lockFiles)) {
  lockExists[mgr] = fs.existsSync(file);
}

console.log('=== Lock File Status ===');
for (const [mgr, exists] of Object.entries(lockExists)) {
  console.log(`  ${mgr}: ${exists ? 'PRESENT' : 'missing'}`);
}

const presentCount = Object.values(lockExists).filter(Boolean).length;
if (presentCount > 1) {
  console.log('\nWARNING: Multiple lock files detected! This causes inconsistent installs.');
}
if (presentCount === 0) {
  console.log('\nWARNING: No lock file found! Dependencies are not reproducible.');
}

// Report any Node/npm version constraints declared by the package.
if (pkg.engines) {
  console.log('\n=== Required Engines ===');
  for (const [engine, range] of Object.entries(pkg.engines)) {
    console.log(`  ${engine}: ${range}`);
  }
}
|
|
212
|
+
```
|
|
213
|
+
> summary_prompt: "Report lock file health and any warnings about package management"
|
|
214
|
+
|
|
215
|
+
---
|
|
216
|
+
|
|
217
|
+
## File Content Parsing
|
|
218
|
+
|
|
219
|
+
### Parse and summarize a large markdown file
|
|
220
|
+
|
|
221
|
+
```javascript
|
|
222
|
+
const fs = require('fs');
const content = fs.readFileSync('CHANGELOG.md', 'utf8');
const lines = content.split('\n');

// One entry per "## <version>" section: count bullet items and how many of
// them mention a breaking change.
const sections = [];
let currentSection = null;

for (const line of lines) {
  if (line.startsWith('## ')) {
    if (currentSection) sections.push(currentSection);
    currentSection = { title: line.replace('## ', ''), items: 0, breaking: 0 };
  } else if (currentSection && line.startsWith('- ')) {
    currentSection.items++;
    // Case-insensitive check via toLowerCase(); the original's second clause
    // `.includes('BREAKING')` on an already-lowercased string was dead code.
    if (line.toLowerCase().includes('breaking')) {
      currentSection.breaking++;
    }
  }
}
if (currentSection) sections.push(currentSection);

console.log(`Total versions: ${sections.length}\n`);
console.log('=== Recent Versions ===');
sections.slice(0, 10).forEach(s => {
  const warn = s.breaking > 0 ? ` [${s.breaking} BREAKING]` : '';
  console.log(`  ${s.title}: ${s.items} changes${warn}`);
});

const totalBreaking = sections.reduce((sum, s) => sum + s.breaking, 0);
if (totalBreaking > 0) {
  console.log(`\nTotal breaking changes across all versions: ${totalBreaking}`);
}
|
|
253
|
+
```
|
|
254
|
+
> summary_prompt: "Summarize recent releases, highlight breaking changes, report release cadence"
|
|
255
|
+
|
|
256
|
+
---
|
|
257
|
+
|
|
258
|
+
## Test Output Parsing
|
|
259
|
+
|
|
260
|
+
### Run tests and extract failures
|
|
261
|
+
|
|
262
|
+
```javascript
|
|
263
|
+
const { execSync } = require('child_process');

// Run jest with JSON reporting. jest exits non-zero when tests fail, so the
// JSON payload must be recovered from the thrown error's stdout.
let output;
try {
  output = execSync('npx jest --json 2>/dev/null', { encoding: 'utf8', maxBuffer: 50 * 1024 * 1024 });
} catch (e) {
  output = e.stdout || '';
}

try {
  const results = JSON.parse(output);
  const suites = results.testResults || [];
  const totalMs = suites.reduce((sum, suite) => sum + (suite.endTime - suite.startTime), 0);

  console.log(`=== Test Results ===`);
  console.log(`Suites: ${results.numPassedTestSuites} passed, ${results.numFailedTestSuites} failed`);
  console.log(`Tests: ${results.numPassedTests} passed, ${results.numFailedTests} failed`);
  console.log(`Time: ${totalMs}ms`);

  const failedSuites = suites.filter(s => s.status === 'failed');
  if (failedSuites.length > 0) {
    console.log('\n=== Failed Tests ===');
    for (const suite of failedSuites) {
      console.log(`\nSuite: ${suite.name}`);
      for (const assertion of suite.assertionResults || []) {
        if (assertion.status !== 'failed') continue;
        console.log(`  FAIL: ${assertion.ancestorTitles.join(' > ')} > ${assertion.title}`);
        console.log(`    ${(assertion.failureMessages || []).join('\n    ').slice(0, 200)}`);
      }
    }
  }
} catch {
  console.log('Could not parse JSON output. Raw output:');
  console.log(output.slice(0, 5000));
}
|
|
296
|
+
```
|
|
297
|
+
> summary_prompt: "Report test pass/fail counts, list each failing test with its error message"
|
|
298
|
+
> timeout_ms: 60000
|
|
@@ -0,0 +1,304 @@
|
|
|
1
|
+
# Python Patterns for execute
|
|
2
|
+
|
|
3
|
+
Practical patterns for using `execute` with `language: python`.
|
|
4
|
+
All examples use Python standard library only (no pip installs required).
|
|
5
|
+
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
## Data Processing with json Module
|
|
9
|
+
|
|
10
|
+
### Analyze a large JSON dataset
|
|
11
|
+
|
|
12
|
+
```python
|
|
13
|
+
import json
from collections import Counter

with open('data/users.json') as f:
    users = json.load(f)

print(f"Total users: {len(users)}")

# Group by status
statuses = Counter(u.get('status', 'unknown') for u in users)
print("\n=== Users by Status ===")
for status, count in statuses.most_common():
    print(f"  {status}: {count}")

# Find anomalies: "inactive" accounts that nevertheless logged in recently.
# ISO-8601 date strings compare correctly as plain strings.
inactive_with_recent = [
    u for u in users
    if u.get('status') == 'inactive' and u.get('last_login', '') > '2025-01-01'
]
if inactive_with_recent:
    print(f"\n=== Anomaly: {len(inactive_with_recent)} inactive users with recent logins ===")
    for u in inactive_with_recent[:10]:
        # .get() for email: a data-quality report should not crash on records
        # that are themselves missing fields (original raised KeyError here).
        # last_login is guaranteed by the filter above.
        print(f"  {u.get('email', '<no email>')} - last login: {u['last_login']}")

# Field completeness
fields = ['name', 'email', 'phone', 'address']
print("\n=== Field Completeness ===")
for field in fields:
    filled = sum(1 for u in users if u.get(field))
    pct = (filled / len(users)) * 100 if users else 0
    print(f"  {field}: {filled}/{len(users)} ({pct:.1f}%)")
|
|
44
|
+
```
|
|
45
|
+
> summary_prompt: "Report user distribution, data quality issues, and any anomalies found"
|
|
46
|
+
|
|
47
|
+
### Merge and compare two JSON configs
|
|
48
|
+
|
|
49
|
+
```python
|
|
50
|
+
import json

with open('config.default.json') as f:
    defaults = json.load(f)
with open('config.local.json') as f:
    local = json.load(f)


def compare(d1, d2, path=""):
    """Return human-readable lines describing how d2 differs from d1.

    Prefixes: "+" key only in d2, "-" key only in d1, "~" value changed.
    Nested dicts are compared recursively with dotted paths.
    """
    collected = []
    for key in sorted(d1.keys() | d2.keys()):
        full_path = f"{path}.{key}" if path else key
        if key not in d1:
            collected.append(f"  + {full_path} = {json.dumps(d2[key])}")
        elif key not in d2:
            collected.append(f"  - {full_path} = {json.dumps(d1[key])}")
        elif isinstance(d1[key], dict) and isinstance(d2[key], dict):
            collected.extend(compare(d1[key], d2[key], full_path))
        elif d1[key] != d2[key]:
            collected.append(f"  ~ {full_path}: {json.dumps(d1[key])} -> {json.dumps(d2[key])}")
    return collected


report = compare(defaults, local)
print(f"Config differences: {len(report)}")
if report:
    print("\n=== Changes (local overrides) ===")
    for entry in report:
        print(entry)
else:
    print("No differences found — local matches defaults.")
|
|
80
|
+
```
|
|
81
|
+
> summary_prompt: "List all local config overrides and flag any potentially dangerous changes"
|
|
82
|
+
|
|
83
|
+
---
|
|
84
|
+
|
|
85
|
+
## CSV / Log File Analysis
|
|
86
|
+
|
|
87
|
+
### Analyze a CSV file
|
|
88
|
+
|
|
89
|
+
```python
|
|
90
|
+
import csv
from collections import defaultdict
from statistics import mean, median


def parse_amount(raw):
    """Parse an amount cell; return None for missing/blank/malformed values."""
    try:
        return float(raw)
    except (TypeError, ValueError):
        return None


with open('data/transactions.csv') as f:
    reader = csv.DictReader(f)
    rows = list(reader)

print(f"Total records: {len(rows)}")
print(f"Columns: {', '.join(rows[0].keys()) if rows else 'none'}")

# Summary statistics for numeric column
amounts = [a for a in (parse_amount(r.get('amount')) for r in rows) if a is not None]
if amounts:
    print(f"\n=== Amount Statistics ===")
    print(f"  Min: ${min(amounts):,.2f}")
    print(f"  Max: ${max(amounts):,.2f}")
    print(f"  Mean: ${mean(amounts):,.2f}")
    # statistics.median averages the two middle values for even-length data;
    # the original's sorted()[n//2] silently picked the upper middle.
    print(f"  Median: ${median(amounts):,.2f}")
    print(f"  Total: ${sum(amounts):,.2f}")

# Group by category (guard rows[0] — original raised IndexError on empty CSV,
# and float() crashed on blank amount cells)
if rows and 'category' in rows[0]:
    by_cat = defaultdict(list)
    for r in rows:
        a = parse_amount(r.get('amount'))
        # Missing/malformed amounts count as 0 so the row still counts as a txn.
        by_cat[r['category']].append(a if a is not None else 0.0)
    print("\n=== By Category ===")
    for cat, vals in sorted(by_cat.items(), key=lambda x: -sum(x[1])):
        print(f"  {cat}: {len(vals)} txns, total ${sum(vals):,.2f}")
|
|
119
|
+
```
|
|
120
|
+
> summary_prompt: "Summarize transaction patterns, highlight outliers, report category distribution"
|
|
121
|
+
|
|
122
|
+
### Parse application logs
|
|
123
|
+
|
|
124
|
+
```python
|
|
125
|
+
import re
from collections import Counter
from datetime import datetime

# Expected format: [YYYY-MM-DD HH:MM:SS] LEVEL: message
log_line = re.compile(r'\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\] (\w+): (.+)')

level_counts = Counter()
error_types = Counter()
per_hour = Counter()

with open('app.log') as f:
    for raw in f:
        m = log_line.match(raw.strip())
        if not m:
            continue
        ts, severity, body = m.groups()
        level_counts[severity] += 1
        per_hour[ts[:13]] += 1  # "YYYY-MM-DD HH" bucket
        if severity in ('ERROR', 'FATAL'):
            # First colon-delimited token approximates the error class.
            error_types[body.split(':')[0].strip()] += 1

print("=== Log Level Distribution ===")
for severity, n in level_counts.most_common():
    print(f"  {severity}: {n}")

print("\n=== Top Error Types ===")
for err, n in error_types.most_common(10):
    print(f"  {err}: {n}")

print("\n=== Hourly Activity (last 24h) ===")
for bucket, n in sorted(per_hour.items())[-24:]:
    bar = '#' * min(n // 10, 50)
    print(f"  {bucket}: {n:>5} {bar}")
|
|
160
|
+
```
|
|
161
|
+
> summary_prompt: "Report error rates, identify the most common failures, and note any traffic spikes"
|
|
162
|
+
|
|
163
|
+
---
|
|
164
|
+
|
|
165
|
+
## Text Extraction and Summarization
|
|
166
|
+
|
|
167
|
+
### Extract TODOs and FIXMEs from codebase
|
|
168
|
+
|
|
169
|
+
```python
|
|
170
|
+
import os
import re
from collections import Counter

# \b keeps substrings of larger identifiers (e.g. "MY_HACK") from matching.
pattern = re.compile(r'\b(TODO|FIXME|HACK|XXX|WARN)[:\s](.+)', re.IGNORECASE)
results = []

for root, dirs, files in os.walk('src'):
    # Skip node_modules and hidden dirs
    dirs[:] = [d for d in dirs if not d.startswith('.') and d != 'node_modules']
    for fname in files:
        if fname.endswith(('.ts', '.tsx', '.js', '.jsx', '.py')):
            filepath = os.path.join(root, fname)
            # errors='replace': a single non-UTF-8 file should not abort the
            # whole scan (the original raised UnicodeDecodeError).
            with open(filepath, encoding='utf-8', errors='replace') as f:
                for i, line in enumerate(f, 1):
                    match = pattern.search(line)
                    if match:
                        results.append({
                            'file': filepath,
                            'line': i,
                            'type': match.group(1).upper(),
                            'text': match.group(2).strip()
                        })

by_type = Counter(r['type'] for r in results)

print(f"Total annotations found: {len(results)}\n")
print("=== By Type ===")
for t, c in by_type.most_common():
    print(f"  {t}: {c}")

print("\n=== All Items ===")
for r in results:
    print(f"  [{r['type']}] {r['file']}:{r['line']} — {r['text'][:100]}")
|
|
204
|
+
```
|
|
205
|
+
> summary_prompt: "Categorize TODOs by urgency, group by file area, suggest which to address first"
|
|
206
|
+
|
|
207
|
+
### Summarize a large text/markdown file
|
|
208
|
+
|
|
209
|
+
```python
|
|
210
|
+
import re
from collections import Counter

with open('ARCHITECTURE.md') as f:
    content = f.read()

lines = content.split('\n')
print(f"Total lines: {len(lines)}")
print(f"Total words: {len(content.split())}")

# Extract structure
headings = [(i + 1, line) for i, line in enumerate(lines) if line.startswith('#')]
print(f"Sections: {len(headings)}\n")

print("=== Document Structure ===")
for line_num, heading in headings:
    level = len(heading) - len(heading.lstrip('#'))
    indent = '  ' * (level - 1)
    print(f"  {indent}{heading.strip()} (line {line_num})")

# Extract code blocks. Anchoring to whole lines ignores inline ``` runs, and
# since fences come in open/close pairs the block count is half the marker
# count (the original counted every fence marker, double-counting blocks).
fences = re.findall(r'^```(\w*)\s*$', content, flags=re.M)
if fences:
    langs = Counter(tag for tag in fences if tag)
    print(f"\n=== Code Blocks: {len(fences) // 2} total ===")
    for lang, count in langs.most_common():
        print(f"  {lang}: {count}")

# Print first 50 lines for content preview
print("\n=== Content Preview (first 50 lines) ===")
for line in lines[:50]:
    print(line)
|
|
241
|
+
```
|
|
242
|
+
> summary_prompt: "Summarize the document structure, key architectural decisions, and main components described"
|
|
243
|
+
|
|
244
|
+
---
|
|
245
|
+
|
|
246
|
+
## File Comparison
|
|
247
|
+
|
|
248
|
+
### Compare two source files
|
|
249
|
+
|
|
250
|
+
```python
|
|
251
|
+
import difflib

with open('src/auth/login.ts') as f:
    before = f.readlines()
with open('src/auth/login.new.ts') as f:
    after = f.readlines()

delta = list(difflib.unified_diff(
    before, after,
    fromfile='login.ts',
    tofile='login.new.ts',
    lineterm='',
))


def count_changes(entries, mark, header):
    """Count real change lines, excluding the +++/--- file-header lines."""
    total = 0
    for entry in entries:
        if entry.startswith(mark) and not entry.startswith(header):
            total += 1
    return total


added = count_changes(delta, '+', '+++')
removed = count_changes(delta, '-', '---')
print(f"Changes: +{added} -{removed}\n")

if delta:
    print("=== Diff ===")
    for entry in delta:
        print(entry)
else:
    print("Files are identical.")
|
|
271
|
+
```
|
|
272
|
+
> summary_prompt: "Describe the functional changes between the old and new versions"
|
|
273
|
+
|
|
274
|
+
### Find duplicate content across files
|
|
275
|
+
|
|
276
|
+
```python
|
|
277
|
+
import os
import hashlib
from collections import defaultdict

SOURCE_EXTS = ('.ts', '.tsx', '.js', '.jsx')


def digest_of(path):
    """MD5 hex digest of a file's raw bytes (identity check, not security)."""
    with open(path, 'rb') as fh:
        return hashlib.md5(fh.read()).hexdigest()


by_digest = defaultdict(list)
for root, dirs, files in os.walk('src'):
    # Prune hidden directories and node_modules in place.
    dirs[:] = [d for d in dirs if not d.startswith('.') and d != 'node_modules']
    for fname in files:
        if not fname.endswith(SOURCE_EXTS):
            continue
        full = os.path.join(root, fname)
        by_digest[digest_of(full)].append(full)

duplicates = {h: paths for h, paths in by_digest.items() if len(paths) > 1}

if duplicates:
    print(f"Found {len(duplicates)} sets of duplicate files:\n")
    for h, paths in duplicates.items():
        print(f"  Hash: {h[:8]}...")
        for p in paths:
            print(f"    {p}")
        print()
else:
    print("No duplicate files found.")
|
|
303
|
+
```
|
|
304
|
+
> summary_prompt: "List all duplicate files and suggest which copies to remove"
|