bashkit 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +442 -0
- package/LICENSE +21 -0
- package/README.md +713 -0
- package/dist/cli/init.d.ts +2 -0
- package/dist/cli/init.js +179 -0
- package/dist/index.d.ts +14 -0
- package/dist/index.js +1805 -0
- package/dist/middleware/anthropic-cache.d.ts +17 -0
- package/dist/middleware/index.d.ts +1 -0
- package/dist/sandbox/e2b.d.ts +9 -0
- package/dist/sandbox/index.d.ts +4 -0
- package/dist/sandbox/interface.d.ts +21 -0
- package/dist/sandbox/local.d.ts +5 -0
- package/dist/sandbox/vercel.d.ts +13 -0
- package/dist/setup/index.d.ts +2 -0
- package/dist/setup/setup-environment.d.ts +36 -0
- package/dist/setup/types.d.ts +47 -0
- package/dist/skills/discovery.d.ts +9 -0
- package/dist/skills/fetch.d.ts +56 -0
- package/dist/skills/index.d.ts +6 -0
- package/dist/skills/loader.d.ts +11 -0
- package/dist/skills/types.d.ts +29 -0
- package/dist/skills/xml.d.ts +26 -0
- package/dist/tools/bash.d.ts +18 -0
- package/dist/tools/edit.d.ts +16 -0
- package/dist/tools/exit-plan-mode.d.ts +11 -0
- package/dist/tools/glob.d.ts +14 -0
- package/dist/tools/grep.d.ts +42 -0
- package/dist/tools/index.d.ts +45 -0
- package/dist/tools/read.d.ts +25 -0
- package/dist/tools/task.d.ts +50 -0
- package/dist/tools/todo-write.d.ts +28 -0
- package/dist/tools/web-fetch.d.ts +20 -0
- package/dist/tools/web-search.d.ts +24 -0
- package/dist/tools/write.d.ts +14 -0
- package/dist/types.d.ts +32 -0
- package/dist/utils/compact-conversation.d.ts +85 -0
- package/dist/utils/context-status.d.ts +71 -0
- package/dist/utils/index.d.ts +3 -0
- package/dist/utils/prune-messages.d.ts +32 -0
- package/package.json +84 -0
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import type { LanguageModel } from "ai";
/** Per-tool limits and safety settings shared by the filesystem/bash tools. */
export type ToolConfig = {
    /** Execution timeout for the tool (units not specified in this declaration — presumably ms; TODO confirm against implementation). */
    timeout?: number;
    /** Upper bound on file size the tool will handle (presumably bytes — verify against implementation). */
    maxFileSize?: number;
    /** Upper bound on tool output length before truncation. */
    maxOutputLength?: number;
    /** If set, restrict the tool to these paths. */
    allowedPaths?: string[];
    /** If set, refuse to run these commands (Bash tool). */
    blockedCommands?: string[];
};
/** Configuration required to enable the WebSearch tool. */
export type WebSearchConfig = {
    apiKey: string;
};
/** Configuration required to enable the WebFetch tool. */
export type WebFetchConfig = {
    apiKey: string;
    /** Model used to process fetched page content. */
    model: LanguageModel;
};
/** Top-level agent configuration: per-tool overrides plus global defaults. */
export type AgentConfig = {
    /** Per-tool overrides; omitted tools fall back to defaults. */
    tools?: {
        Bash?: ToolConfig;
        Read?: ToolConfig;
        Write?: ToolConfig;
        Edit?: ToolConfig;
        Glob?: ToolConfig;
        Grep?: ToolConfig;
    };
    /** Include WebSearch tool with this config */
    webSearch?: WebSearchConfig;
    /** Include WebFetch tool with this config */
    webFetch?: WebFetchConfig;
    /** Fallback timeout applied when a tool does not set its own. */
    defaultTimeout?: number;
    /** Base directory for file operations. */
    workingDirectory?: string;
};
/** Library-provided default AgentConfig (values defined in the implementation). */
export declare const DEFAULT_CONFIG: AgentConfig;
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import { type LanguageModel, type ModelMessage } from "ai";
/** Configuration for conversation compaction. */
export interface CompactConversationConfig {
    /** Model's context limit (e.g., 200000 for Claude) */
    maxTokens: number;
    /** Trigger compaction at this % of maxTokens (default: 0.85) */
    compactionThreshold?: number;
    /** Keep last N messages intact (default: 10) */
    protectRecentMessages?: number;
    /** Model to use for summarization (use a fast/cheap model like Haiku) */
    summarizerModel: LanguageModel;
    /** The original task/goal the agent is working on - helps preserve context */
    taskContext?: string;
}
/** Caller-held state threaded between successive compactConversation calls. */
export interface CompactConversationState {
    /** Accumulated summary from previous compactions */
    conversationSummary: string;
}
/** Result of a compactConversation call. */
export interface CompactConversationResult {
    /** Messages to use (unchanged if under limit, compacted if over) */
    messages: ModelMessage[];
    /** Updated state to pass to next call */
    state: CompactConversationState;
    /** Whether compaction occurred this call */
    didCompact: boolean;
}
/**
 * Compacts a conversation when it exceeds the token limit.
 *
 * When the conversation hits the threshold, it:
 * 1. Summarizes older messages using the summarizer model
 * 2. Keeps recent messages intact
 * 3. Returns a new conversation starting with the summary
 *
 * Use this alongside pruneMessagesByTokens:
 * - pruneMessagesByTokens: Fast pruning, loses context
 * - compactConversation: Preserves context via summarization
 *
 * @param messages - Current conversation messages
 * @param config - Compaction configuration
 * @param state - State from previous compaction (or empty for first call)
 * @returns Compacted messages, updated state, and whether compaction occurred
 *
 * @example
 * ```typescript
 * import { compactConversation } from 'bashkit';
 *
 * let messages: ModelMessage[] = [];
 * let compactState = { conversationSummary: '' };
 *
 * async function chat(userMessage: string) {
 *   messages.push({ role: 'user', content: userMessage });
 *
 *   const result = await compactConversation(messages, {
 *     maxTokens: 200_000,
 *     summarizerModel: anthropic('claude-haiku-4')
 *   }, compactState);
 *
 *   messages = result.messages;
 *   compactState = result.state;
 *
 *   // Continue with streamText using result.messages...
 * }
 * ```
 */
export declare function compactConversation(messages: ModelMessage[], config: CompactConversationConfig, state?: CompactConversationState): Promise<CompactConversationResult>;
/**
 * Pre-configured token limits for common models.
 * Use these with compactConversation config.
 */
export declare const MODEL_CONTEXT_LIMITS: {
    readonly "claude-opus-4-5": 200000;
    readonly "claude-sonnet-4-5": 200000;
    readonly "claude-haiku-4": 200000;
    readonly "gpt-4o": 128000;
    readonly "gpt-4-turbo": 128000;
    readonly "gpt-4": 8192;
    readonly "gpt-3.5-turbo": 16385;
    readonly "gemini-2.5-pro": 1000000;
    readonly "gemini-2.5-flash": 1000000;
};
/** Union of the model ids that have a preset context limit above. */
export type ModelContextLimit = keyof typeof MODEL_CONTEXT_LIMITS;
/**
 * Helper to create config with model preset
 */
export declare function createCompactConfig(modelId: ModelContextLimit, summarizerModel: LanguageModel, overrides?: Partial<Omit<CompactConversationConfig, "summarizerModel">>): CompactConversationConfig;
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import type { ModelMessage } from "ai";
/** Discrete usage levels, from lowest to highest pressure. */
export type ContextStatusLevel = "comfortable" | "elevated" | "high" | "critical";
/** Snapshot of how full the model's context window is. */
export interface ContextStatus {
    /** Estimated tokens used by current messages */
    usedTokens: number;
    /** Maximum tokens for the model */
    maxTokens: number;
    /** Usage as a decimal (0-1) */
    usagePercent: number;
    /** Usage level category */
    status: ContextStatusLevel;
    /** Optional guidance message to inject into the conversation */
    guidance?: string;
}
/** Base context metrics passed to custom guidance functions */
export interface ContextMetrics {
    usedTokens: number;
    maxTokens: number;
    usagePercent: number;
}
/** Thresholds and optional custom guidance for getContextStatus. */
export interface ContextStatusConfig {
    /** Threshold for 'elevated' status (default: 0.5) */
    elevatedThreshold?: number;
    /** Threshold for 'high' status (default: 0.7) */
    highThreshold?: number;
    /** Threshold for 'critical' status (default: 0.85) */
    criticalThreshold?: number;
    /** Custom guidance message for 'high' status */
    highGuidance?: string | ((metrics: ContextMetrics) => string);
    /** Custom guidance message for 'critical' status */
    criticalGuidance?: string | ((metrics: ContextMetrics) => string);
}
/**
 * Get the current context window status for a conversation.
 *
 * Use this to monitor context usage and optionally inject guidance
 * to prevent agents from rushing when context is filling up.
 *
 * @param messages - Current conversation messages
 * @param maxTokens - Maximum tokens for the model (use MODEL_CONTEXT_LIMITS)
 * @param config - Optional thresholds and custom guidance
 * @returns Context status with usage info and optional guidance message
 *
 * @example
 * ```typescript
 * import { getContextStatus, MODEL_CONTEXT_LIMITS } from 'bashkit';
 *
 * const status = getContextStatus(messages, MODEL_CONTEXT_LIMITS['claude-sonnet-4-5']);
 *
 * if (status.guidance) {
 *   // Inject into system prompt or conversation
 *   system = `${system}\n\n<context_status>${status.guidance}</context_status>`;
 * }
 *
 * if (status.status === 'critical') {
 *   // Trigger compaction
 *   const compacted = await compactConversation(messages, config, state);
 * }
 * ```
 */
export declare function getContextStatus(messages: ModelMessage[], maxTokens: number, config?: ContextStatusConfig): ContextStatus;
/**
 * Check if context status requires action (high or critical).
 * Convenience helper for conditional logic.
 */
export declare function contextNeedsAttention(status: ContextStatus): boolean;
/**
 * Check if context should be compacted (critical status).
 * Convenience helper for triggering compaction.
 */
export declare function contextNeedsCompaction(status: ContextStatus): boolean;
|
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
// Barrel re-exports for the utils module: compaction, context monitoring, and pruning.
export { type CompactConversationConfig, type CompactConversationResult, type CompactConversationState, compactConversation, createCompactConfig, MODEL_CONTEXT_LIMITS, type ModelContextLimit, } from "./compact-conversation";
export { type ContextMetrics, type ContextStatus, type ContextStatusConfig, type ContextStatusLevel, contextNeedsAttention, contextNeedsCompaction, getContextStatus, } from "./context-status";
export { estimateMessagesTokens, estimateMessageTokens, estimateTokens, type PruneMessagesConfig, pruneMessagesByTokens, } from "./prune-messages";
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import type { ModelMessage } from "ai";
/** Tuning knobs for pruneMessagesByTokens. */
export interface PruneMessagesConfig {
    /** Keep approximately this many tokens (default: 40000) */
    targetTokens?: number;
    /** Only prune if we'd save more than this many tokens (default: 20000) */
    minSavingsThreshold?: number;
    /** Always keep the last N user messages and their responses (default: 3) */
    protectLastNUserMessages?: number;
}
/**
 * Estimate token count for a string (~4 chars per token for English)
 */
export declare function estimateTokens(text: string): number;
/**
 * Estimate token count for a single message
 */
export declare function estimateMessageTokens(message: ModelMessage): number;
/**
 * Estimate total token count for an array of messages
 */
export declare function estimateMessagesTokens(messages: ModelMessage[]): number;
/**
 * Prune messages to fit within target token budget.
 *
 * Strategy: Remove tool call details and tool results from older messages,
 * keeping the conversation structure intact. Recent messages are protected.
 *
 * @param messages - Array of ModelMessage from the AI SDK
 * @param config - Pruning configuration
 * @returns Pruned messages array
 */
export declare function pruneMessagesByTokens(messages: ModelMessage[], config?: PruneMessagesConfig): ModelMessage[];
|
package/package.json
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "bashkit",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Agentic coding tools for the Vercel AI SDK",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.js",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
|
|
11
|
+
"import": "./dist/index.js"
|
|
12
|
+
}
|
|
13
|
+
},
|
|
14
|
+
"bin": {
|
|
15
|
+
"bashkit": "./dist/cli/init.js"
|
|
16
|
+
},
|
|
17
|
+
"files": [
|
|
18
|
+
"dist",
|
|
19
|
+
"README.md",
|
|
20
|
+
"AGENTS.md"
|
|
21
|
+
],
|
|
22
|
+
"scripts": {
|
|
23
|
+
"dev": "bun run src/index.ts",
|
|
24
|
+
"build": "bun run build:js && bun run build:cli && bun run build:types",
|
|
25
|
+
"build:js": "bun build src/index.ts --outdir dist --target node --format esm --external ai --external zod --external @ai-sdk/* --external @vercel/sandbox --external @e2b/code-interpreter --external parallel-web",
|
|
26
|
+
"build:cli": "bun build src/cli/init.ts --outdir dist/cli --target node --format esm --external @clack/prompts && chmod +x dist/cli/init.js",
|
|
27
|
+
"build:types": "tsc -p tsconfig.build.json",
|
|
28
|
+
"typecheck": "tsc --noEmit",
|
|
29
|
+
"format": "biome format --write .",
|
|
30
|
+
"format:check": "biome format .",
|
|
31
|
+
"lint": "biome lint --write .",
|
|
32
|
+
"lint:check": "biome lint .",
|
|
33
|
+
"check": "biome check --write .",
|
|
34
|
+
"check:ci": "biome check .",
|
|
35
|
+
"prepublishOnly": "bun run build"
|
|
36
|
+
},
|
|
37
|
+
"keywords": [
|
|
38
|
+
"ai",
|
|
39
|
+
"agent",
|
|
40
|
+
"vercel",
|
|
41
|
+
"ai-sdk",
|
|
42
|
+
"sandbox",
|
|
43
|
+
"tools",
|
|
44
|
+
"bash",
|
|
45
|
+
"code-execution"
|
|
46
|
+
],
|
|
47
|
+
"author": "jbreite",
|
|
48
|
+
"license": "MIT",
|
|
49
|
+
"repository": {
|
|
50
|
+
"type": "git",
|
|
51
|
+
"url": "https://github.com/jbreite/bashkit"
|
|
52
|
+
},
|
|
53
|
+
"dependencies": {
|
|
54
|
+
"@clack/prompts": "^0.7.0"
|
|
55
|
+
},
|
|
56
|
+
"devDependencies": {
|
|
57
|
+
"@ai-sdk/anthropic": "^2.0.50",
|
|
58
|
+
"@biomejs/biome": "^2.3.9",
|
|
59
|
+
"@e2b/code-interpreter": "^1.5.1",
|
|
60
|
+
"@types/bun": "latest",
|
|
61
|
+
"@types/node": "^24.10.0",
|
|
62
|
+
"@vercel/sandbox": "^1.0.4",
|
|
63
|
+
"parallel-web": "^0.2.4",
|
|
64
|
+
"typescript": "^5.9.3"
|
|
65
|
+
},
|
|
66
|
+
"peerDependencies": {
|
|
67
|
+
"ai": "^5.0.0",
|
|
68
|
+
"zod": "^4.1.8",
|
|
69
|
+
"@vercel/sandbox": "^1.0.0",
|
|
70
|
+
"@e2b/code-interpreter": "^1.0.0",
|
|
71
|
+
"parallel-web": "^0.2.4"
|
|
72
|
+
},
|
|
73
|
+
"peerDependenciesMeta": {
|
|
74
|
+
"@vercel/sandbox": {
|
|
75
|
+
"optional": true
|
|
76
|
+
},
|
|
77
|
+
"@e2b/code-interpreter": {
|
|
78
|
+
"optional": true
|
|
79
|
+
},
|
|
80
|
+
"parallel-web": {
|
|
81
|
+
"optional": true
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
}
|