@haystackeditor/cli 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +202 -0
- package/dist/commands/init.d.ts +10 -0
- package/dist/commands/init.js +299 -0
- package/dist/commands/status.d.ts +4 -0
- package/dist/commands/status.js +42 -0
- package/dist/index.d.ts +12 -0
- package/dist/index.js +41 -0
- package/dist/types.d.ts +177 -0
- package/dist/types.js +6 -0
- package/dist/utils/config.d.ts +24 -0
- package/dist/utils/config.js +74 -0
- package/dist/utils/detect.d.ts +10 -0
- package/dist/utils/detect.js +306 -0
- package/dist/utils/secrets.d.ts +47 -0
- package/dist/utils/secrets.js +241 -0
- package/dist/utils/skill.d.ts +4 -0
- package/dist/utils/skill.js +249 -0
- package/package.json +51 -0
|
@@ -0,0 +1,241 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Secret Scanning & Validation
|
|
3
|
+
*
|
|
4
|
+
* Detects potential hardcoded secrets in configuration files
|
|
5
|
+
* before they can be committed to version control.
|
|
6
|
+
*
|
|
7
|
+
* Based on patterns from:
|
|
8
|
+
* - GitHub secret scanning
|
|
9
|
+
* - detect-secrets
|
|
10
|
+
* - truffleHog
|
|
11
|
+
*/
|
|
12
|
+
import * as fs from 'fs/promises';
|
|
13
|
+
import * as path from 'path';
|
|
14
|
+
/**
|
|
15
|
+
* Common secret patterns
|
|
16
|
+
*/
|
|
17
|
+
/**
 * Common secret patterns.
 *
 * Each entry:
 *   - name:        stable rule identifier reported in findings
 *   - pattern:     detection regex; when it has a capture group, group 1 is
 *                  the secret value itself (scanForSecrets prefers match[1])
 *   - description: user-facing message for the report
 *   - severity:    'high' | 'medium'
 *
 * NOTE: every pattern carries the /g (or /gi) flag, so its `lastIndex` is
 * stateful across calls — scanForSecrets() resets it before each scan.
 */
const SECRET_PATTERNS = [
    // API Keys
    {
        name: 'generic-api-key',
        pattern: /(?:api[_-]?key|apikey)\s*[:=]\s*['"]?([a-zA-Z0-9_\-]{20,})['"]?/gi,
        description: 'Potential API key detected',
        severity: 'high',
    },
    {
        name: 'generic-secret',
        pattern: /(?:secret|password|passwd|pwd)\s*[:=]\s*['"]?([a-zA-Z0-9_\-!@#$%^&*]{8,})['"]?/gi,
        description: 'Potential secret or password detected',
        severity: 'high',
    },
    // Cloud Providers
    {
        name: 'aws-access-key',
        // AWS access key IDs have a fixed "AKIA" prefix + 16 chars.
        pattern: /AKIA[0-9A-Z]{16}/g,
        description: 'AWS Access Key ID detected',
        severity: 'high',
    },
    {
        name: 'aws-secret-key',
        // Secret access keys are exactly 40 base64-ish characters.
        pattern: /(?:aws)?[_-]?secret[_-]?(?:access)?[_-]?key\s*[:=]\s*['"]?([a-zA-Z0-9/+=]{40})['"]?/gi,
        description: 'AWS Secret Access Key detected',
        severity: 'high',
    },
    {
        name: 'cloudflare-api-token',
        pattern: /(?:cloudflare|cf)[_-]?(?:api)?[_-]?token\s*[:=]\s*['"]?([a-zA-Z0-9_\-]{40,})['"]?/gi,
        description: 'Cloudflare API Token detected',
        severity: 'high',
    },
    // GitHub
    {
        name: 'github-token',
        // ghp_/gho_/ghu_/ghs_/ghr_ prefixed tokens.
        pattern: /gh[pousr]_[a-zA-Z0-9]{36,}/g,
        description: 'GitHub Personal Access Token detected',
        severity: 'high',
    },
    {
        name: 'github-oauth',
        // NOTE(review): gho_ also matches the github-token rule above, so
        // OAuth tokens are reported twice — confirm whether that is intended.
        pattern: /gho_[a-zA-Z0-9]{36,}/g,
        description: 'GitHub OAuth Token detected',
        severity: 'high',
    },
    // OpenAI / Anthropic
    {
        name: 'openai-api-key',
        pattern: /sk-[a-zA-Z0-9]{20,}T3BlbkFJ[a-zA-Z0-9]{20,}/g,
        description: 'OpenAI API Key detected',
        severity: 'high',
    },
    {
        name: 'anthropic-api-key',
        pattern: /sk-ant-[a-zA-Z0-9\-_]{40,}/g,
        description: 'Anthropic API Key detected',
        severity: 'high',
    },
    // Database
    {
        name: 'postgres-connection',
        // Only matches URLs that embed user:password credentials.
        pattern: /postgres(?:ql)?:\/\/[^:]+:[^@]+@[^/]+/gi,
        description: 'PostgreSQL connection string with credentials detected',
        severity: 'high',
    },
    {
        name: 'mongodb-connection',
        pattern: /mongodb(?:\+srv)?:\/\/[^:]+:[^@]+@[^/]+/gi,
        description: 'MongoDB connection string with credentials detected',
        severity: 'high',
    },
    // Private Keys
    {
        name: 'private-key',
        // PEM header is enough; the key body need not be matched.
        pattern: /-----BEGIN (?:RSA |EC |DSA )?PRIVATE KEY-----/g,
        description: 'Private key detected',
        severity: 'high',
    },
    // JWT / Bearer tokens
    {
        name: 'jwt-token',
        // "eyJ" is base64 for '{"' — the start of the JSON header/payload.
        pattern: /eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*/g,
        description: 'JWT token detected',
        severity: 'medium',
    },
    {
        name: 'bearer-token',
        pattern: /bearer\s+[a-zA-Z0-9_\-.~+/]+=*/gi,
        description: 'Bearer token detected',
        severity: 'medium',
    },
    // Slack
    {
        name: 'slack-token',
        pattern: /xox[baprs]-[0-9]+-[0-9]+-[a-zA-Z0-9]+/g,
        description: 'Slack token detected',
        severity: 'high',
    },
    {
        name: 'slack-webhook',
        pattern: /https:\/\/hooks\.slack\.com\/services\/T[a-zA-Z0-9_]+\/B[a-zA-Z0-9_]+\/[a-zA-Z0-9_]+/g,
        description: 'Slack webhook URL detected',
        severity: 'medium',
    },
    // Generic high-entropy strings in config values
    {
        name: 'high-entropy-string',
        // Despite the name, no entropy is computed — this simply flags long
        // (32+ char) values assigned to sensitively-named fields.
        pattern: /(?:token|key|secret|password|credential|auth)\s*[:=]\s*['"]([a-zA-Z0-9+/=_\-]{32,})['"]?/gi,
        description: 'High-entropy string in sensitive field',
        severity: 'medium',
    },
];
|
|
130
|
+
/**
|
|
131
|
+
* Patterns that are safe and should be ignored
|
|
132
|
+
*/
|
|
133
|
+
/**
 * Patterns indicating a matched value is a placeholder or reference rather
 * than a real secret (e.g. $ENV_VAR, <API_KEY>, masked/redacted values).
 */
const SAFE_PATTERNS = [
    /\$[A-Z_]+/, // Environment variable reference
    /\$\{[A-Z_]+\}/, // Environment variable with braces
    /file:\/\//, // File path reference
    /https?:\/\//, // Regular URLs (unless they contain credentials)
    /<[A-Z_]+>/, // Placeholder like <API_KEY>
    /your[_-]?(?:api)?[_-]?key/i, // Example placeholder
    /xxx+/i, // Redacted value
    /\*{3,}/, // Masked value
];
/**
 * Redact a secret value for safe display.
 *
 * Values of 8 characters or fewer are fully masked; longer values keep the
 * first four and last two characters around a '***' core.
 *
 * @param {string} value - Raw matched secret value.
 * @returns {string} Redacted representation safe to print.
 */
function redactSecret(value) {
    const fullyMasked = '***';
    if (value.length <= 8) {
        return fullyMasked;
    }
    return `${value.slice(0, 4)}***${value.slice(-2)}`;
}
/**
 * Check whether a matched string looks like a known-safe placeholder
 * (environment reference, example value, masked value) rather than a
 * real secret.
 *
 * @param {string} match - Candidate secret value.
 * @returns {boolean} True when any safe pattern matches.
 */
function isSafePattern(match) {
    for (const safe of SAFE_PATTERNS) {
        if (safe.test(match)) {
            return true;
        }
    }
    return false;
}
|
|
158
|
+
/**
|
|
159
|
+
* Scan content for potential secrets
|
|
160
|
+
*/
|
|
161
|
+
/**
 * Scan text content for potential hardcoded secrets.
 *
 * @param {string} content - Text to scan (typically a config file's contents).
 * @param {string} filename - Name reported in each finding's `file` field.
 * @returns {Array<{pattern: string, description: string, severity: string,
 *   line: number, column: number, match: string, file: string}>}
 *   One finding per regex hit; line/column are 1-based and the matched
 *   value is redacted for safe display.
 */
export function scanForSecrets(content, filename) {
    const findings = [];
    for (const secretPattern of SECRET_PATTERNS) {
        // The patterns are module-level /g regexes, so lastIndex persists
        // across calls — reset before each scan.
        secretPattern.pattern.lastIndex = 0;
        let match;
        while ((match = secretPattern.pattern.exec(content)) !== null) {
            // Guard: a zero-length match would not advance lastIndex and
            // exec() would loop forever. Step past it manually.
            if (match[0].length === 0) {
                secretPattern.pattern.lastIndex += 1;
                continue;
            }
            // Prefer the capture group (the secret value itself) when the
            // pattern defines one; fall back to the whole match.
            const matchedValue = match[1] || match[0];
            // Placeholders like $VAR or <API_KEY> are not real secrets.
            if (isSafePattern(matchedValue)) {
                continue;
            }
            // Derive 1-based line/column from the match offset.
            const beforeMatch = content.substring(0, match.index);
            const line = beforeMatch.split('\n').length;
            const lastNewline = beforeMatch.lastIndexOf('\n');
            const column = match.index - lastNewline;
            findings.push({
                pattern: secretPattern.name,
                description: secretPattern.description,
                severity: secretPattern.severity,
                line,
                column,
                match: redactSecret(matchedValue),
                file: filename,
            });
        }
    }
    return findings;
}
|
|
192
|
+
/**
|
|
193
|
+
* Scan a file for secrets
|
|
194
|
+
*/
|
|
195
|
+
/**
 * Read a file and scan its contents for secrets.
 *
 * Unreadable or missing files yield no findings — this is a deliberate
 * best-effort scan, not a hard failure.
 *
 * @param {string} filePath - Path of the file to scan.
 * @returns {Promise<Array>} Findings from scanForSecrets(), or [] when the
 *   file cannot be read.
 */
export async function scanFile(filePath) {
    try {
        const text = await fs.readFile(filePath, 'utf-8');
        const reportName = path.basename(filePath);
        return scanForSecrets(text, reportName);
    }
    catch {
        // Intentionally swallow read errors: a missing file simply has no findings.
        return [];
    }
}
|
|
204
|
+
/**
|
|
205
|
+
* Scan .haystack.yml specifically for secrets
|
|
206
|
+
*/
|
|
207
|
+
/**
 * Scan a .haystack.yml config file for secrets.
 *
 * Thin wrapper over scanFile(), kept so call sites read descriptively.
 *
 * @param {string} configPath - Path to the .haystack.yml file.
 * @returns {Promise<Array>} Findings (empty when the file is unreadable).
 */
export async function scanHaystackConfig(configPath) {
    const findings = await scanFile(configPath);
    return findings;
}
|
|
210
|
+
/**
|
|
211
|
+
* Validate that a config doesn't contain hardcoded secrets
|
|
212
|
+
* Returns true if safe, false if secrets detected
|
|
213
|
+
*/
|
|
214
|
+
/**
 * Validate that a config file doesn't contain hardcoded secrets.
 *
 * @param {string} configPath - Path to the .haystack.yml file.
 * @returns {Promise<{safe: boolean, findings: Array}>} `safe` is true only
 *   when no high-severity finding was detected; every finding (all
 *   severities) is returned for reporting.
 */
export async function validateConfigSecurity(configPath) {
    const findings = await scanHaystackConfig(configPath);
    const hasHighSeverity = findings.some((finding) => finding.severity === 'high');
    return {
        safe: !hasHighSeverity,
        findings,
    };
}
|
|
222
|
+
/**
|
|
223
|
+
* Get a security report string for display
|
|
224
|
+
*/
|
|
225
|
+
/**
 * Build a human-readable security report for display.
 *
 * @param {Array} findings - Findings produced by scanForSecrets().
 * @returns {string} Multi-line report, or a success message when there are
 *   no findings.
 */
export function formatSecurityReport(findings) {
    if (findings.length === 0) {
        return '✓ No secrets detected';
    }
    // Severity → icon; anything unrecognized falls back to green.
    const ICONS = { high: '🔴', medium: '🟡' };
    const report = [`⚠️ ${findings.length} potential secret(s) detected:\n`];
    for (const finding of findings) {
        const icon = ICONS[finding.severity] ?? '🟢';
        report.push(`  ${icon} ${finding.file}:${finding.line}:${finding.column}`);
        report.push(`     ${finding.description}`);
        report.push(`     Matched: ${finding.match}\n`);
    }
    report.push('Use $VARIABLE syntax to reference secrets from Haystack platform.');
    report.push('Run `haystack secrets set KEY value` to store secrets securely.\n');
    return report.join('\n');
}
|
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Create the .agents/skills/haystack.md file for agent discovery
|
|
3
|
+
*/
|
|
4
|
+
import * as fs from 'fs/promises';
|
|
5
|
+
import * as path from 'path';
|
|
6
|
+
const SKILL_CONTENT = `# Haystack Verification
|
|
7
|
+
|
|
8
|
+
Haystack provides automated PR verification. When a PR is opened, an AI agent spins up the app in a sandbox and verifies changes work correctly.
|
|
9
|
+
|
|
10
|
+
## Setup Workflow
|
|
11
|
+
|
|
12
|
+
### Step 1: Generate base config
|
|
13
|
+
\`\`\`bash
|
|
14
|
+
npx @haystackeditor/cli init
|
|
15
|
+
\`\`\`
|
|
16
|
+
This auto-detects framework, ports, package manager and creates \`.haystack.yml\`.
|
|
17
|
+
|
|
18
|
+
### Step 2: Review and customize
|
|
19
|
+
After init, review the generated config and customize based on the app:
|
|
20
|
+
|
|
21
|
+
| If the app has... | Add this |
|
|
22
|
+
|-------------------|----------|
|
|
23
|
+
| Login/authentication | Auth bypass env var (see Auth Bypass section) |
|
|
24
|
+
| Key user journeys | Flows describing what to verify (see Flows section) |
|
|
25
|
+
| API calls needing auth | Fixtures to mock responses (see Fixtures section) |
|
|
26
|
+
|
|
27
|
+
**Minimum viable config**: Just \`dev_server\` settings. Flows and fixtures can be added later as needed.
|
|
28
|
+
|
|
29
|
+
### Step 3: Commit
|
|
30
|
+
\`\`\`bash
|
|
31
|
+
git add .haystack.yml .agents/
|
|
32
|
+
git commit -m "Add Haystack verification"
|
|
33
|
+
\`\`\`
|
|
34
|
+
|
|
35
|
+
## Config Reference
|
|
36
|
+
|
|
37
|
+
\`\`\`yaml
|
|
38
|
+
version: "1"
|
|
39
|
+
name: my-app
|
|
40
|
+
|
|
41
|
+
# Dev server configuration
|
|
42
|
+
dev_server:
|
|
43
|
+
command: pnpm dev
|
|
44
|
+
port: 3000
|
|
45
|
+
ready_pattern: "Local:" # Text in stdout when server is ready
|
|
46
|
+
env:
|
|
47
|
+
SKIP_AUTH: "true" # Auth bypass for testing
|
|
48
|
+
|
|
49
|
+
# Verification commands (run in PR checks)
|
|
50
|
+
verification:
|
|
51
|
+
commands:
|
|
52
|
+
- name: build
|
|
53
|
+
run: pnpm build
|
|
54
|
+
- name: lint
|
|
55
|
+
run: pnpm lint
|
|
56
|
+
- name: typecheck
|
|
57
|
+
run: pnpm tsc --noEmit
|
|
58
|
+
|
|
59
|
+
# Flows: Advisory descriptions for AI verification agent
|
|
60
|
+
# The agent reads these to understand WHAT to verify, then navigates autonomously
|
|
61
|
+
flows:
|
|
62
|
+
- name: "Landing page loads"
|
|
63
|
+
description: "Verify the landing page renders without errors"
|
|
64
|
+
trigger: always
|
|
65
|
+
steps:
|
|
66
|
+
- action: navigate
|
|
67
|
+
url: "/"
|
|
68
|
+
- action: wait_for
|
|
69
|
+
selector: "[data-testid='landing']"
|
|
70
|
+
- action: screenshot
|
|
71
|
+
name: "landing"
|
|
72
|
+
|
|
73
|
+
- name: "Dashboard loads with data"
|
|
74
|
+
description: "Verify dashboard shows user data correctly"
|
|
75
|
+
trigger: on_change
|
|
76
|
+
watch_patterns:
|
|
77
|
+
- "src/components/dashboard/**"
|
|
78
|
+
steps:
|
|
79
|
+
- action: navigate
|
|
80
|
+
url: "/dashboard"
|
|
81
|
+
- action: wait_for
|
|
82
|
+
selector: ".dashboard-content"
|
|
83
|
+
- action: assert_no_errors
|
|
84
|
+
\`\`\`
|
|
85
|
+
|
|
86
|
+
## Monorepo Configuration
|
|
87
|
+
|
|
88
|
+
For monorepos with multiple services:
|
|
89
|
+
|
|
90
|
+
\`\`\`yaml
|
|
91
|
+
version: "1"
|
|
92
|
+
name: my-monorepo
|
|
93
|
+
|
|
94
|
+
services:
|
|
95
|
+
frontend:
|
|
96
|
+
root: ./
|
|
97
|
+
command: pnpm dev
|
|
98
|
+
port: 3000
|
|
99
|
+
ready_pattern: "Local:"
|
|
100
|
+
env:
|
|
101
|
+
VITE_SKIP_AUTH: "true"
|
|
102
|
+
|
|
103
|
+
api:
|
|
104
|
+
root: packages/api
|
|
105
|
+
command: pnpm dev
|
|
106
|
+
port: 8080
|
|
107
|
+
ready_pattern: "listening"
|
|
108
|
+
|
|
109
|
+
worker:
|
|
110
|
+
root: infra/worker
|
|
111
|
+
command: pnpm wrangler dev
|
|
112
|
+
port: 8787
|
|
113
|
+
|
|
114
|
+
# Batch jobs (run once, don't stay running)
|
|
115
|
+
analysis:
|
|
116
|
+
root: packages/analysis
|
|
117
|
+
type: batch
|
|
118
|
+
command: pnpm start
|
|
119
|
+
|
|
120
|
+
verification:
|
|
121
|
+
commands:
|
|
122
|
+
- name: build
|
|
123
|
+
run: pnpm build
|
|
124
|
+
- name: lint
|
|
125
|
+
run: pnpm lint
|
|
126
|
+
\`\`\`
|
|
127
|
+
|
|
128
|
+
## Multi-Repo Configuration
|
|
129
|
+
|
|
130
|
+
When services live in separate git repositories (not a monorepo), each repo gets its own \`.haystack.yml\`:
|
|
131
|
+
|
|
132
|
+
**Frontend repo** - Mock the API it depends on:
|
|
133
|
+
\`\`\`yaml
|
|
134
|
+
version: "1"
|
|
135
|
+
name: frontend
|
|
136
|
+
|
|
137
|
+
dev_server:
|
|
138
|
+
command: pnpm dev
|
|
139
|
+
port: 3000
|
|
140
|
+
env:
|
|
141
|
+
VITE_API_URL: "http://localhost:8080" # Will be mocked
|
|
142
|
+
|
|
143
|
+
# Mock the API from the other repo
|
|
144
|
+
fixtures:
|
|
145
|
+
- pattern: "/api/*"
|
|
146
|
+
source: "file://fixtures/api-responses.json"
|
|
147
|
+
\`\`\`
|
|
148
|
+
|
|
149
|
+
**API repo** - Standalone verification:
|
|
150
|
+
\`\`\`yaml
|
|
151
|
+
version: "1"
|
|
152
|
+
name: api
|
|
153
|
+
|
|
154
|
+
dev_server:
|
|
155
|
+
command: pnpm dev
|
|
156
|
+
port: 8080
|
|
157
|
+
|
|
158
|
+
verification:
|
|
159
|
+
commands:
|
|
160
|
+
- name: test
|
|
161
|
+
run: pnpm test
|
|
162
|
+
\`\`\`
|
|
163
|
+
|
|
164
|
+
Each repo is verified independently. Use fixtures to mock dependencies on other services.
|
|
165
|
+
|
|
166
|
+
## Fixtures (API Mocking)
|
|
167
|
+
|
|
168
|
+
Mock API responses so verification doesn't need real credentials:
|
|
169
|
+
|
|
170
|
+
\`\`\`yaml
|
|
171
|
+
fixtures:
|
|
172
|
+
# Local file (small data, commit to repo)
|
|
173
|
+
- pattern: "/api/user"
|
|
174
|
+
source: "file://fixtures/user.json"
|
|
175
|
+
|
|
176
|
+
# From staging server
|
|
177
|
+
- pattern: "/api/dashboard"
|
|
178
|
+
source: "https://staging.example.com/api/dashboard"
|
|
179
|
+
headers:
|
|
180
|
+
Authorization: "Bearer $STAGING_TOKEN"
|
|
181
|
+
|
|
182
|
+
# Large data from S3
|
|
183
|
+
- pattern: "/api/analytics"
|
|
184
|
+
source: "s3://my-bucket/fixtures/analytics.json"
|
|
185
|
+
|
|
186
|
+
# Use real API (no mocking)
|
|
187
|
+
- pattern: "/api/public/*"
|
|
188
|
+
source: passthrough
|
|
189
|
+
\`\`\`
|
|
190
|
+
|
|
191
|
+
## Understanding Flows
|
|
192
|
+
|
|
193
|
+
**Flows are advisory, not mechanical scripts.**
|
|
194
|
+
|
|
195
|
+
When a PR is opened, an AI agent (running in a Modal sandbox with browser access):
|
|
196
|
+
1. Reads the flows to understand what to verify
|
|
197
|
+
2. Navigates the app autonomously
|
|
198
|
+
3. Determines if things work based on flow descriptions
|
|
199
|
+
4. Captures screenshots and evidence
|
|
200
|
+
5. Reports results
|
|
201
|
+
|
|
202
|
+
The flow steps are hints like "check the landing page loads" - the AI figures out how to verify that. It can adapt if the UI changes slightly.
|
|
203
|
+
|
|
204
|
+
### Flow Triggers
|
|
205
|
+
|
|
206
|
+
| Trigger | When it runs |
|
|
207
|
+
|---------|--------------|
|
|
208
|
+
| \`always\` | Every PR |
|
|
209
|
+
| \`on_change\` | Only when \`watch_patterns\` match changed files |
|
|
210
|
+
|
|
211
|
+
### Flow Actions (Advisory)
|
|
212
|
+
|
|
213
|
+
These describe what the agent should verify:
|
|
214
|
+
|
|
215
|
+
**Browser:**
|
|
216
|
+
- \`navigate\` - Go to URL
|
|
217
|
+
- \`wait_for\` - Wait for element
|
|
218
|
+
- \`click\` - Click element
|
|
219
|
+
- \`type\` - Enter text
|
|
220
|
+
- \`screenshot\` - Capture screenshot
|
|
221
|
+
- \`assert_no_errors\` - Check for error states
|
|
222
|
+
|
|
223
|
+
**API:**
|
|
224
|
+
- \`http_request\` - Make API call
|
|
225
|
+
- \`assert_status\` - Check response code
|
|
226
|
+
- \`websocket_connect\` - Test WebSocket
|
|
227
|
+
|
|
228
|
+
## Auth Bypass
|
|
229
|
+
|
|
230
|
+
Most apps need auth bypassed for testing. Common patterns:
|
|
231
|
+
|
|
232
|
+
| Framework | Env Var |
|
|
233
|
+
|-----------|---------|
|
|
234
|
+
| Vite/React | \`VITE_SKIP_AUTH=true\` |
|
|
235
|
+
| Next.js | \`NEXT_PUBLIC_SKIP_AUTH=true\` |
|
|
236
|
+
| Express | \`SKIP_AUTH=true\` |
|
|
237
|
+
| Rails | \`SKIP_AUTH=true\` |
|
|
238
|
+
|
|
239
|
+
Add to \`dev_server.env\` or \`services.*.env\` in your config.
|
|
240
|
+
`;
|
|
241
|
+
/**
 * Write the Haystack skill document to .agents/skills/haystack.md under the
 * current working directory so agents can discover it.
 *
 * @returns {Promise<string>} Path of the written skill file.
 */
export async function createSkillFile() {
    const skillsDir = path.join(process.cwd(), '.agents', 'skills');
    // mkdir with recursive:true is idempotent — safe if the dir exists.
    await fs.mkdir(skillsDir, { recursive: true });
    const skillFile = path.join(skillsDir, 'haystack.md');
    await fs.writeFile(skillFile, SKILL_CONTENT, 'utf-8');
    return skillFile;
}
|
package/package.json
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@haystackeditor/cli",
|
|
3
|
+
"version": "0.2.0",
|
|
4
|
+
"description": "Set up Haystack verification for your project",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"haystack": "./dist/index.js"
|
|
8
|
+
},
|
|
9
|
+
"keywords": [
|
|
10
|
+
"haystack",
|
|
11
|
+
"verification",
|
|
12
|
+
"sandbox",
|
|
13
|
+
"ai-agent",
|
|
14
|
+
"testing",
|
|
15
|
+
"cli"
|
|
16
|
+
],
|
|
17
|
+
"author": "Haystack",
|
|
18
|
+
"license": "MIT",
|
|
19
|
+
"repository": {
|
|
20
|
+
"type": "git",
|
|
21
|
+
"url": "https://github.com/haystackeditor/haystack-review.git",
|
|
22
|
+
"directory": "packages/haystack-cli"
|
|
23
|
+
},
|
|
24
|
+
"homepage": "https://haystackeditor.com",
|
|
25
|
+
"bugs": {
|
|
26
|
+
"url": "https://github.com/haystackeditor/haystack-review/issues"
|
|
27
|
+
},
|
|
28
|
+
"dependencies": {
|
|
29
|
+
"chalk": "^5.3.0",
|
|
30
|
+
"commander": "^12.0.0",
|
|
31
|
+
"fast-glob": "^3.3.0",
|
|
32
|
+
"inquirer": "^9.2.0",
|
|
33
|
+
"yaml": "^2.3.4"
|
|
34
|
+
},
|
|
35
|
+
"devDependencies": {
|
|
36
|
+
"@types/inquirer": "^9.0.0",
|
|
37
|
+
"@types/node": "^20.10.0",
|
|
38
|
+
"typescript": "^5.3.0"
|
|
39
|
+
},
|
|
40
|
+
"files": [
|
|
41
|
+
"dist"
|
|
42
|
+
],
|
|
43
|
+
"engines": {
|
|
44
|
+
"node": ">=18"
|
|
45
|
+
},
|
|
46
|
+
"scripts": {
|
|
47
|
+
"build": "tsc",
|
|
48
|
+
"dev": "tsc --watch",
|
|
49
|
+
"start": "node dist/index.js"
|
|
50
|
+
}
|
|
51
|
+
}
|