@haystackeditor/cli 0.7.2 → 0.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +59 -12
- package/dist/assets/hooks/agent-context/detect.ts +136 -0
- package/dist/assets/hooks/agent-context/format.ts +99 -0
- package/dist/assets/hooks/agent-context/index.ts +39 -0
- package/dist/assets/hooks/agent-context/parsers/claude.ts +253 -0
- package/dist/assets/hooks/agent-context/parsers/gemini.ts +155 -0
- package/dist/assets/hooks/agent-context/parsers/opencode.ts +174 -0
- package/dist/assets/hooks/agent-context/tsconfig.json +13 -0
- package/dist/assets/hooks/agent-context/types.ts +58 -0
- package/dist/assets/hooks/llm-rules-template.md +35 -0
- package/dist/assets/hooks/package.json +11 -0
- package/dist/assets/hooks/scripts/commit-msg.sh +4 -0
- package/dist/assets/hooks/scripts/post-commit.sh +4 -0
- package/dist/assets/hooks/scripts/pre-commit.sh +92 -0
- package/dist/assets/hooks/scripts/pre-push.sh +5 -0
- package/dist/assets/hooks/scripts/prepare-commit-msg.sh +3 -0
- package/dist/assets/hooks/truncation-checker/ast-analyzer.ts +528 -0
- package/dist/assets/hooks/truncation-checker/index.ts +595 -0
- package/dist/assets/hooks/truncation-checker/tsconfig.json +13 -0
- package/dist/commands/config.d.ts +14 -0
- package/dist/commands/config.js +89 -0
- package/dist/commands/hooks.d.ts +17 -0
- package/dist/commands/hooks.js +269 -0
- package/dist/commands/init.d.ts +1 -1
- package/dist/commands/init.js +20 -239
- package/dist/commands/secrets.d.ts +15 -0
- package/dist/commands/secrets.js +83 -0
- package/dist/commands/skills.d.ts +8 -0
- package/dist/commands/skills.js +215 -0
- package/dist/index.js +107 -7
- package/dist/types.d.ts +32 -8
- package/dist/utils/hooks.d.ts +26 -0
- package/dist/utils/hooks.js +226 -0
- package/dist/utils/skill.d.ts +1 -1
- package/dist/utils/skill.js +481 -13
- package/package.json +2 -2
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
import * as fs from 'fs';
|
|
2
|
+
import type {
|
|
3
|
+
AgentContext,
|
|
4
|
+
AgentParser,
|
|
5
|
+
DetectionResult,
|
|
6
|
+
InteractionTurn,
|
|
7
|
+
ModifiedFile,
|
|
8
|
+
TokenUsage,
|
|
9
|
+
ToolUseInfo,
|
|
10
|
+
} from '../types.js';
|
|
11
|
+
import { detectAgent } from '../detect.js';
|
|
12
|
+
|
|
13
|
+
// Shape of a single tool invocation recorded in a Gemini CLI session file.
interface GeminiToolCall {
  id?: string;
  name?: string;
  args?: Record<string, unknown>;
  status?: string;
}

// Per-message token counts as reported by Gemini CLI.
interface GeminiTokens {
  input?: number;
  output?: number;
  // `cached` and `thought` appear in session files but are not folded into
  // the session-wide TokenUsage totals by GeminiParser.parse.
  cached?: number;
  thought?: number;
}

// One entry in a Gemini CLI session transcript. `type` discriminates the
// author: 'user' for human turns, 'gemini' for model turns.
interface GeminiMessage {
  id?: string;
  type: 'user' | 'gemini';
  // Either a plain string or a list of text chunks; normalized by extractContent.
  content: string | Array<{ text: string }>;
  toolCalls?: GeminiToolCall[];
  tokens?: GeminiTokens;
}

// Top-level shape of a Gemini CLI session file (JSON).
interface GeminiSession {
  messages: GeminiMessage[];
}

// Argument keys a tool call may use to reference a file path, in priority
// order; the first key holding a string value wins (see extractModifiedFiles).
const FILE_PATH_KEYS = ['file_path', 'path', 'filename'];
|
|
40
|
+
|
|
41
|
+
function extractContent(content: string | Array<{ text: string }>): string {
|
|
42
|
+
if (typeof content === 'string') return content;
|
|
43
|
+
if (Array.isArray(content)) {
|
|
44
|
+
return content.map((c) => c.text).join('\n');
|
|
45
|
+
}
|
|
46
|
+
return '';
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
function extractModifiedFiles(toolCalls?: GeminiToolCall[]): ModifiedFile[] {
|
|
50
|
+
if (!toolCalls) return [];
|
|
51
|
+
const files: ModifiedFile[] = [];
|
|
52
|
+
|
|
53
|
+
for (const call of toolCalls) {
|
|
54
|
+
if (!call.args) continue;
|
|
55
|
+
for (const key of FILE_PATH_KEYS) {
|
|
56
|
+
const val = call.args[key];
|
|
57
|
+
if (typeof val === 'string') {
|
|
58
|
+
files.push({
|
|
59
|
+
filePath: val,
|
|
60
|
+
toolName: call.name || 'unknown',
|
|
61
|
+
toolUseId: call.id,
|
|
62
|
+
});
|
|
63
|
+
break;
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
return files;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
function extractToolUses(toolCalls?: GeminiToolCall[]): ToolUseInfo[] {
|
|
71
|
+
if (!toolCalls) return [];
|
|
72
|
+
return toolCalls.map((call) => ({
|
|
73
|
+
toolName: call.name || 'unknown',
|
|
74
|
+
toolUseId: call.id || '',
|
|
75
|
+
input: (call.args as Record<string, unknown>) || {},
|
|
76
|
+
}));
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
export class GeminiParser implements AgentParser {
|
|
80
|
+
async detect(repoPath: string): Promise<DetectionResult> {
|
|
81
|
+
return detectAgent(repoPath);
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
async parse(
|
|
85
|
+
sessionFilePath: string,
|
|
86
|
+
_repoPath: string,
|
|
87
|
+
): Promise<AgentContext> {
|
|
88
|
+
const raw = fs.readFileSync(sessionFilePath, 'utf-8');
|
|
89
|
+
const session: GeminiSession = JSON.parse(raw);
|
|
90
|
+
const messages = session.messages || [];
|
|
91
|
+
|
|
92
|
+
const transcript: InteractionTurn[] = [];
|
|
93
|
+
const totalUsage: TokenUsage = {
|
|
94
|
+
inputTokens: 0,
|
|
95
|
+
outputTokens: 0,
|
|
96
|
+
};
|
|
97
|
+
|
|
98
|
+
let taskPrompt = '';
|
|
99
|
+
|
|
100
|
+
for (let i = 0; i < messages.length; i++) {
|
|
101
|
+
const msg = messages[i];
|
|
102
|
+
const role = msg.type === 'user' ? 'user' : 'assistant';
|
|
103
|
+
const content = extractContent(msg.content);
|
|
104
|
+
|
|
105
|
+
if (role === 'user' && !taskPrompt && content) {
|
|
106
|
+
taskPrompt = content;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
if (msg.tokens) {
|
|
110
|
+
totalUsage.inputTokens += msg.tokens.input || 0;
|
|
111
|
+
totalUsage.outputTokens += msg.tokens.output || 0;
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
transcript.push({
|
|
115
|
+
turnIndex: i,
|
|
116
|
+
role,
|
|
117
|
+
content,
|
|
118
|
+
modifiedFiles: role === 'assistant' ? extractModifiedFiles(msg.toolCalls) : [],
|
|
119
|
+
tokenUsage: msg.tokens
|
|
120
|
+
? {
|
|
121
|
+
inputTokens: msg.tokens.input || 0,
|
|
122
|
+
outputTokens: msg.tokens.output || 0,
|
|
123
|
+
}
|
|
124
|
+
: undefined,
|
|
125
|
+
toolUses: role === 'assistant' ? extractToolUses(msg.toolCalls) : [],
|
|
126
|
+
});
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
const allModifiedFiles = new Set<string>();
|
|
130
|
+
for (const turn of transcript) {
|
|
131
|
+
for (const f of turn.modifiedFiles) {
|
|
132
|
+
allModifiedFiles.add(f.filePath);
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
const stat = fs.statSync(sessionFilePath);
|
|
137
|
+
const sessionId = sessionFilePath
|
|
138
|
+
.split('/')
|
|
139
|
+
.pop()
|
|
140
|
+
?.replace(/\.json$/, '') || '';
|
|
141
|
+
|
|
142
|
+
return {
|
|
143
|
+
agent: 'gemini-cli',
|
|
144
|
+
sessionId,
|
|
145
|
+
sessionFilePath,
|
|
146
|
+
taskPrompt,
|
|
147
|
+
modifiedFiles: Array.from(allModifiedFiles),
|
|
148
|
+
tokenUsage: totalUsage,
|
|
149
|
+
transcript,
|
|
150
|
+
metadata: {
|
|
151
|
+
sessionFileModifiedAt: stat.mtime.toISOString(),
|
|
152
|
+
},
|
|
153
|
+
};
|
|
154
|
+
}
|
|
155
|
+
}
|
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
import * as fs from 'fs';
|
|
2
|
+
import type {
|
|
3
|
+
AgentContext,
|
|
4
|
+
AgentParser,
|
|
5
|
+
DetectionResult,
|
|
6
|
+
InteractionTurn,
|
|
7
|
+
ModifiedFile,
|
|
8
|
+
TokenUsage,
|
|
9
|
+
ToolUseInfo,
|
|
10
|
+
} from '../types.js';
|
|
11
|
+
import { detectAgent } from '../detect.js';
|
|
12
|
+
|
|
13
|
+
// Tool names that modify files on disk; only these count toward
// "modified files" (see extractModifiedFiles).
const FILE_MOD_TOOLS = new Set(['edit', 'write', 'patch']);

// Token accounting attached to an OpenCode message.
interface OpenCodeTokens {
  input?: number;
  output?: number;
  // Prompt-cache read/write counters — presumably provider cache usage;
  // TODO confirm against OpenCode's session schema.
  cache?: {
    read?: number;
    write?: number;
  };
}

// One content fragment within a message: either plain text or a tool call.
interface OpenCodePart {
  type: 'text' | 'tool';
  text?: string;
  tool?: string;
  state?: {
    input?: Record<string, unknown>;
  };
}

// One transcript entry: metadata in `info`, content fragments in `parts`.
interface OpenCodeMessage {
  info: {
    id?: string;
    role: 'user' | 'assistant';
    createdAt?: string;
    tokens?: OpenCodeTokens;
  };
  parts: OpenCodePart[];
}

// Top-level shape of an OpenCode session file (JSON).
interface OpenCodeSession {
  info: {
    id: string;
    title?: string;
    createdAt?: string;
    updatedAt?: string;
  };
  messages: OpenCodeMessage[];
}
|
|
52
|
+
|
|
53
|
+
function extractTextContent(parts: OpenCodePart[]): string {
|
|
54
|
+
return parts
|
|
55
|
+
.filter((p) => p.type === 'text' && p.text)
|
|
56
|
+
.map((p) => p.text!)
|
|
57
|
+
.join('\n');
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
function extractModifiedFiles(parts: OpenCodePart[]): ModifiedFile[] {
|
|
61
|
+
const files: ModifiedFile[] = [];
|
|
62
|
+
for (const part of parts) {
|
|
63
|
+
if (part.type !== 'tool' || !part.tool) continue;
|
|
64
|
+
if (!FILE_MOD_TOOLS.has(part.tool)) continue;
|
|
65
|
+
|
|
66
|
+
const input = part.state?.input;
|
|
67
|
+
if (!input) continue;
|
|
68
|
+
|
|
69
|
+
const filePath = (input.filePath as string) || (input.path as string);
|
|
70
|
+
if (filePath) {
|
|
71
|
+
files.push({
|
|
72
|
+
filePath,
|
|
73
|
+
toolName: part.tool,
|
|
74
|
+
});
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
return files;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
function extractToolUses(parts: OpenCodePart[]): ToolUseInfo[] {
|
|
81
|
+
const tools: ToolUseInfo[] = [];
|
|
82
|
+
for (const part of parts) {
|
|
83
|
+
if (part.type !== 'tool' || !part.tool) continue;
|
|
84
|
+
tools.push({
|
|
85
|
+
toolName: part.tool,
|
|
86
|
+
toolUseId: '',
|
|
87
|
+
input: (part.state?.input as Record<string, unknown>) || {},
|
|
88
|
+
});
|
|
89
|
+
}
|
|
90
|
+
return tools;
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
export class OpenCodeParser implements AgentParser {
|
|
94
|
+
async detect(repoPath: string): Promise<DetectionResult> {
|
|
95
|
+
return detectAgent(repoPath);
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
async parse(
|
|
99
|
+
sessionFilePath: string,
|
|
100
|
+
_repoPath: string,
|
|
101
|
+
): Promise<AgentContext> {
|
|
102
|
+
const raw = fs.readFileSync(sessionFilePath, 'utf-8');
|
|
103
|
+
const session: OpenCodeSession = JSON.parse(raw);
|
|
104
|
+
const messages = session.messages || [];
|
|
105
|
+
|
|
106
|
+
const transcript: InteractionTurn[] = [];
|
|
107
|
+
const totalUsage: TokenUsage = {
|
|
108
|
+
inputTokens: 0,
|
|
109
|
+
outputTokens: 0,
|
|
110
|
+
cacheCreationTokens: 0,
|
|
111
|
+
cacheReadTokens: 0,
|
|
112
|
+
};
|
|
113
|
+
|
|
114
|
+
let taskPrompt = '';
|
|
115
|
+
|
|
116
|
+
for (let i = 0; i < messages.length; i++) {
|
|
117
|
+
const msg = messages[i];
|
|
118
|
+
const role = msg.info.role;
|
|
119
|
+
const content = extractTextContent(msg.parts);
|
|
120
|
+
|
|
121
|
+
if (role === 'user' && !taskPrompt && content) {
|
|
122
|
+
taskPrompt = content;
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
const tokens = msg.info.tokens;
|
|
126
|
+
if (tokens) {
|
|
127
|
+
totalUsage.inputTokens += tokens.input || 0;
|
|
128
|
+
totalUsage.outputTokens += tokens.output || 0;
|
|
129
|
+
totalUsage.cacheReadTokens! += tokens.cache?.read || 0;
|
|
130
|
+
totalUsage.cacheCreationTokens! += tokens.cache?.write || 0;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
transcript.push({
|
|
134
|
+
turnIndex: i,
|
|
135
|
+
role,
|
|
136
|
+
content,
|
|
137
|
+
modifiedFiles:
|
|
138
|
+
role === 'assistant' ? extractModifiedFiles(msg.parts) : [],
|
|
139
|
+
tokenUsage: tokens
|
|
140
|
+
? {
|
|
141
|
+
inputTokens: tokens.input || 0,
|
|
142
|
+
outputTokens: tokens.output || 0,
|
|
143
|
+
cacheReadTokens: tokens.cache?.read,
|
|
144
|
+
cacheCreationTokens: tokens.cache?.write,
|
|
145
|
+
}
|
|
146
|
+
: undefined,
|
|
147
|
+
toolUses: role === 'assistant' ? extractToolUses(msg.parts) : [],
|
|
148
|
+
});
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
const allModifiedFiles = new Set<string>();
|
|
152
|
+
for (const turn of transcript) {
|
|
153
|
+
for (const f of turn.modifiedFiles) {
|
|
154
|
+
allModifiedFiles.add(f.filePath);
|
|
155
|
+
}
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
const stat = fs.statSync(sessionFilePath);
|
|
159
|
+
|
|
160
|
+
return {
|
|
161
|
+
agent: 'opencode',
|
|
162
|
+
sessionId: session.info.id,
|
|
163
|
+
sessionFilePath,
|
|
164
|
+
taskPrompt,
|
|
165
|
+
modifiedFiles: Array.from(allModifiedFiles),
|
|
166
|
+
tokenUsage: totalUsage,
|
|
167
|
+
transcript,
|
|
168
|
+
metadata: {
|
|
169
|
+
startedAt: session.info.createdAt,
|
|
170
|
+
sessionFileModifiedAt: stat.mtime.toISOString(),
|
|
171
|
+
},
|
|
172
|
+
};
|
|
173
|
+
}
|
|
174
|
+
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ES2022",
|
|
4
|
+
"module": "Node16",
|
|
5
|
+
"moduleResolution": "Node16",
|
|
6
|
+
"strict": true,
|
|
7
|
+
"esModuleInterop": true,
|
|
8
|
+
"resolveJsonModule": true,
|
|
9
|
+
"noEmit": true,
|
|
10
|
+
"skipLibCheck": true
|
|
11
|
+
},
|
|
12
|
+
"include": ["./**/*.ts"]
|
|
13
|
+
}
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
/** Identifiers of the coding agents this hook knows how to parse. */
export type AgentType = 'claude-code' | 'gemini-cli' | 'opencode';

/** A file referenced by a tool call, with the tool that referenced it. */
export interface ModifiedFile {
  filePath: string;
  toolName: string;
  toolUseId?: string;
}

/** Token accounting for a single turn or a whole session. */
export interface TokenUsage {
  inputTokens: number;
  outputTokens: number;
  cacheCreationTokens?: number;
  cacheReadTokens?: number;
}

/** A single tool invocation made by the assistant. */
export interface ToolUseInfo {
  toolName: string;
  toolUseId: string;
  input: Record<string, unknown>;
}

/** One user or assistant turn in the normalized transcript. */
export interface InteractionTurn {
  turnIndex: number;
  role: 'user' | 'assistant';
  content: string;
  modifiedFiles: ModifiedFile[];
  tokenUsage?: TokenUsage;
  toolUses: ToolUseInfo[];
}

/** Agent-neutral representation of a coding-agent session. */
export interface AgentContext {
  agent: AgentType;
  sessionId: string;
  sessionFilePath: string;
  /** First non-empty user message of the session. */
  taskPrompt: string;
  /** Deduplicated paths of files modified during the session. */
  modifiedFiles: string[];
  /** Session-wide token totals. */
  tokenUsage: TokenUsage;
  transcript: InteractionTurn[];
  metadata: {
    model?: string;
    version?: string;
    gitBranch?: string;
    startedAt?: string;
    /** Session-file mtime, ISO-8601. */
    sessionFileModifiedAt: string;
  };
}

/** Result of probing a repository for a live agent session. */
export interface DetectionResult {
  detected: boolean;
  agent?: AgentType;
  sessionFilePath?: string;
  sessionId?: string;
}

/** Contract every per-agent session parser implements. */
export interface AgentParser {
  detect(repoPath: string): Promise<DetectionResult>;
  parse(sessionFilePath: string, repoPath: string): Promise<AgentContext>;
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
# LLM Rules
|
|
2
|
+
|
|
3
|
+
These rules govern how LLM agents should interact with this codebase. They are enforced by a pre-commit hook.
|
|
4
|
+
|
|
5
|
+
- When you write or edit LLM prompts in response to a failure on some test data, make certain your edit addresses the generic problem that caused the failure rather than being specific to that test data.
|
|
6
|
+
- If the user specifies a model, follow it. Your understanding of current LLM models is outdated.
|
|
7
|
+
- Do not add silent fallbacks.
|
|
8
|
+
- Do not add backwards compatibility, unless you confirmed with the user first that it's desired.
|
|
9
|
+
- Do not add TODOs and incomplete code unless they have been explicitly flagged to the user.
|
|
10
|
+
|
|
11
|
+
## No Truncation Without Permission
|
|
12
|
+
|
|
13
|
+
When writing or modifying agent code (tool calls, prompts, LLM pipelines):
|
|
14
|
+
|
|
15
|
+
- **Never truncate, slice, or omit content** from tool call inputs, tool call outputs, prompt context, search results, file contents, or any data passed to/from LLMs without explicit user permission.
|
|
16
|
+
- **Never summarize in place of full content** (e.g., "... [content omitted]", "showing first 50 lines", "[truncated for brevity]").
|
|
17
|
+
- **No silent length limits** - do not add code that silently cuts off text at N characters/lines/tokens.
|
|
18
|
+
|
|
19
|
+
If truncation is technically necessary (e.g., context limits):
|
|
20
|
+
1. **Disclose explicitly** that truncation will occur
|
|
21
|
+
2. **State what and how much** will be omitted
|
|
22
|
+
3. **Ask for user permission** before proceeding
|
|
23
|
+
4. **Offer alternatives** (e.g., chunking, pagination with user control)
|
|
24
|
+
|
|
25
|
+
This applies to: grep/search results, file reads, API responses, prompt templates, tool definitions, agent memory, conversation history, and any other text flowing through agent pipelines.
|
|
26
|
+
|
|
27
|
+
## No Hardcoded Repo Knowledge in Prompts
|
|
28
|
+
|
|
29
|
+
When writing LLM prompts for generic tools:
|
|
30
|
+
|
|
31
|
+
- **Never hardcode repo-specific examples** like component names, file paths, or internal terminology.
|
|
32
|
+
- **Use generic examples** that would work for any codebase (e.g., "Sidebar", "Card", "Modal").
|
|
33
|
+
- **Let the LLM infer from context** rather than baking in assumptions about your specific codebase.
|
|
34
|
+
|
|
35
|
+
This keeps tools portable and prevents prompts from becoming stale when the codebase changes.
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "haystack-hooks",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"private": true,
|
|
5
|
+
"type": "module",
|
|
6
|
+
"description": "Git hooks for AI agent quality checks (installed by @haystackeditor/cli)",
|
|
7
|
+
"dependencies": {
|
|
8
|
+
"tree-sitter": "^0.22.4",
|
|
9
|
+
"tree-sitter-typescript": "^0.23.2"
|
|
10
|
+
}
|
|
11
|
+
}
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Pre-commit hook: runs automated LLM-quality checks when the commit is made
# by an AI coding agent, and passes straight through for human commits.
HOOKS_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "$HOOKS_DIR/.." && pwd)"

# Extract agent context if running under an AI agent
# Capture stderr to detect whether an AI agent is present
AGENT_OUTPUT=$(npx tsx "$HOOKS_DIR/agent-context/index.ts" "$REPO_ROOT" 2>&1) || true

# Print the agent context to stderr so it still appears in the commit output
if [ -n "$AGENT_OUTPUT" ]; then
  echo "$AGENT_OUTPUT" >&2
fi

# If no agent detected, this is a human commit — pass through
if ! echo "$AGENT_OUTPUT" | grep -q "AGENT CONTEXT DETECTED"; then
  exit 0
fi

# --- AI agent detected: run automated checks ---

# Check for truncation violations in staged changes
TRUNCATION_OUTPUT=$(npx tsx "$HOOKS_DIR/truncation-checker/index.ts" 2>&1)
TRUNCATION_EXIT=$?

if [ -n "$TRUNCATION_OUTPUT" ]; then
  echo "$TRUNCATION_OUTPUT" >&2
fi

# Quote the exit code so the numeric test cannot break if it is ever empty.
if [ "$TRUNCATION_EXIT" -ne 0 ]; then
  echo "" >&2
  echo "========================================" >&2
  echo " COMMIT BLOCKED: TRUNCATION VIOLATIONS" >&2
  echo "========================================" >&2
  echo "" >&2
  echo "Your staged changes contain truncation patterns that violate LLM_RULES.md." >&2
  echo "Review the violations above and either:" >&2
  echo " 1. Remove the truncation code" >&2
  echo " 2. Add a comment with 'intentional' explaining why it's needed" >&2
  echo " 3. Get explicit user permission for the truncation" >&2
  echo "" >&2
  exit 1
fi

# --- Enforce LLM rules review ---

GIT_DIR=$(git rev-parse --git-dir)
BYPASS_TOKEN_FILE="$GIT_DIR/llm-rules-reviewed"
RULES_FILE="$REPO_ROOT/LLM_RULES.md"

# Get the SHA of the current staged tree
STAGED_TREE_SHA=$(git write-tree)

# Check for a valid bypass token (written on a previous failed attempt).
# The token is keyed to the staged tree SHA, so changing the staged
# content invalidates it and the rules are shown again.
if [ -f "$BYPASS_TOKEN_FILE" ]; then
  STORED_SHA=$(cat "$BYPASS_TOKEN_FILE")
  if [ "$STORED_SHA" = "$STAGED_TREE_SHA" ]; then
    # Agent has already been shown the rules for this exact set of changes
    rm -f "$BYPASS_TOKEN_FILE"
    exit 0
  fi
fi

# First attempt — block the commit and inject the rules into the agent's context

echo "" >&2
echo "========================================" >&2
echo " LLM RULES — REVIEW BEFORE COMMITTING" >&2
echo "========================================" >&2
echo "" >&2

if [ -f "$RULES_FILE" ]; then
  cat "$RULES_FILE" >&2
else
  echo "(LLM_RULES.md not found at $RULES_FILE)" >&2
fi

echo "" >&2
echo "========================================" >&2
echo " STAGED CHANGES" >&2
echo "========================================" >&2
echo "" >&2
git diff --cached --stat >&2

echo "" >&2
echo "========================================" >&2
echo "PRE-COMMIT CHECK: You must confirm your changes comply with the above rules. The rules have been re-injected into your context. Review your staged changes against each rule, then retry the commit. A bypass token has been written." >&2
echo "========================================" >&2

# Write the bypass token so the next attempt with the same staged tree passes
echo "$STAGED_TREE_SHA" > "$BYPASS_TOKEN_FILE"

exit 1
|