@fastino-ai/pioneer-cli 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,435 @@
+ /**
+  * LLM Client abstraction supporting multiple providers
+  */
+
+ import Anthropic from "@anthropic-ai/sdk";
+ import OpenAI from "openai";
+ import type { Message, LLMResponse, ToolCall, Tool, TokenUsage } from "./types.js";
+
+ export interface LLMClientOptions {
+   provider: "anthropic" | "openai" | "local";
+   model: string;
+   apiKey?: string;
+   baseUrl?: string;
+ }
+
+ export class LLMClient {
+   private provider: "anthropic" | "openai" | "local";
+   private model: string;
+   private anthropic?: Anthropic;
+   private openai?: OpenAI;
+
+   constructor(options: LLMClientOptions) {
+     this.provider = options.provider;
+     this.model = options.model;
+
+     if (options.provider === "anthropic") {
+       this.anthropic = new Anthropic({
+         apiKey: options.apiKey || process.env.ANTHROPIC_API_KEY,
+       });
+     } else if (options.provider === "openai") {
+       this.openai = new OpenAI({
+         apiKey: options.apiKey || process.env.OPENAI_API_KEY,
+         baseURL: options.baseUrl,
+       });
+     }
+   }
+
+   getModel(): string {
+     return this.model;
+   }
+
+   setModel(model: string): void {
+     this.model = model;
+   }
+
+   getProvider(): string {
+     return this.provider;
+   }
+
+   async chat(
+     messages: Message[],
+     tools: Tool[],
+     systemPrompt?: string,
+     onStream?: (chunk: string) => void,
+     abortSignal?: AbortSignal
+   ): Promise<LLMResponse> {
+     if (this.provider === "anthropic") {
+       return this.chatAnthropic(messages, tools, systemPrompt, onStream, abortSignal);
+     } else if (this.provider === "openai") {
+       return this.chatOpenAI(messages, tools, systemPrompt, onStream, abortSignal);
+     }
+     throw new Error(`Unsupported provider: ${this.provider}`);
+   }
+
+   private async chatAnthropic(
+     messages: Message[],
+     tools: Tool[],
+     systemPrompt?: string,
+     onStream?: (chunk: string) => void,
+     abortSignal?: AbortSignal
+   ): Promise<LLMResponse> {
+     if (!this.anthropic) throw new Error("Anthropic client not initialized");
+
+     // Check if already aborted
+     if (abortSignal?.aborted) {
+       throw new Error("Aborted");
+     }
+
+     const anthropicMessages = messages
+       .filter((m) => m.role !== "system")
+       .map((m) => this.toAnthropicMessage(m));
+
+     const anthropicTools = tools.map((tool) => ({
+       name: tool.name,
+       description: tool.description,
+       input_schema: {
+         type: "object" as const,
+         properties: Object.fromEntries(
+           tool.parameters.map((p) => [
+             p.name,
+             { type: p.type, description: p.description },
+           ])
+         ),
+         required: tool.parameters.filter((p) => p.required).map((p) => p.name),
+       },
+     }));
+
+     if (onStream) {
+       // Streaming response
+       const stream = await this.anthropic.messages.stream({
+         model: this.model,
+         max_tokens: 8192,
+         system: systemPrompt || "You are a helpful AI assistant with access to tools for file system operations, bash execution, and code sandbox.",
+         messages: anthropicMessages,
+         tools: anthropicTools.length > 0 ? anthropicTools : undefined,
+       });
+
+       let content = "";
+       const toolCalls: ToolCall[] = [];
+
+       for await (const event of stream) {
+         // Check abort signal during streaming
+         if (abortSignal?.aborted) {
+           stream.controller.abort();
+           throw new Error("Aborted");
+         }
+
+         if (event.type === "content_block_delta") {
+           if (event.delta.type === "text_delta") {
+             content += event.delta.text;
+             onStream(event.delta.text);
+           } else if (event.delta.type === "input_json_delta") {
+             // Handle tool call streaming
+           }
+         }
+       }
+
+       const finalMessage = await stream.finalMessage();
+
+       // Extract tool calls from final message
+       for (const block of finalMessage.content) {
+         if (block.type === "tool_use") {
+           toolCalls.push({
+             id: block.id,
+             name: block.name,
+             arguments: block.input as Record<string, unknown>,
+           });
+         }
+       }
+
+       return {
+         content,
+         toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+         usage: {
+           inputTokens: finalMessage.usage.input_tokens,
+           outputTokens: finalMessage.usage.output_tokens,
+           totalTokens: finalMessage.usage.input_tokens + finalMessage.usage.output_tokens,
+         },
+         finishReason: finalMessage.stop_reason === "tool_use" ? "tool_calls" : finalMessage.stop_reason === "max_tokens" ? "length" : "stop",
+       };
+     } else {
+       // Non-streaming response
+       const response = await this.anthropic.messages.create({
+         model: this.model,
+         max_tokens: 8192,
+         system: systemPrompt || "You are a helpful AI assistant with access to tools for file system operations, bash execution, and code sandbox.",
+         messages: anthropicMessages,
+         tools: anthropicTools.length > 0 ? anthropicTools : undefined,
+       }, { signal: abortSignal }); // forward the signal so an in-flight request can be aborted
+
+       let content = "";
+       const toolCalls: ToolCall[] = [];
+
+       for (const block of response.content) {
+         if (block.type === "text") {
+           content += block.text;
+         } else if (block.type === "tool_use") {
+           toolCalls.push({
+             id: block.id,
+             name: block.name,
+             arguments: block.input as Record<string, unknown>,
+           });
+         }
+       }
+
+       return {
+         content,
+         toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+         usage: {
+           inputTokens: response.usage.input_tokens,
+           outputTokens: response.usage.output_tokens,
+           totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+         },
+         finishReason: response.stop_reason === "tool_use" ? "tool_calls" : response.stop_reason === "max_tokens" ? "length" : "stop",
+       };
+     }
+   }
+
+   private async chatOpenAI(
+     messages: Message[],
+     tools: Tool[],
+     systemPrompt?: string,
+     onStream?: (chunk: string) => void,
+     abortSignal?: AbortSignal
+   ): Promise<LLMResponse> {
+     if (!this.openai) throw new Error("OpenAI client not initialized");
+
+     // Check if already aborted
+     if (abortSignal?.aborted) {
+       throw new Error("Aborted");
+     }
+
+     const openaiMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [];
+
+     if (systemPrompt) {
+       openaiMessages.push({ role: "system", content: systemPrompt });
+     }
+
+     for (const m of messages) {
+       openaiMessages.push(this.toOpenAIMessage(m));
+     }
+
+     const openaiTools: OpenAI.Chat.Completions.ChatCompletionTool[] = tools.map((tool) => ({
+       type: "function",
+       function: {
+         name: tool.name,
+         description: tool.description,
+         parameters: {
+           type: "object",
+           properties: Object.fromEntries(
+             tool.parameters.map((p) => [
+               p.name,
+               { type: p.type, description: p.description },
+             ])
+           ),
+           required: tool.parameters.filter((p) => p.required).map((p) => p.name),
+         },
+       },
+     }));
+
+     if (onStream) {
+       const stream = await this.openai.chat.completions.create({
+         model: this.model,
+         messages: openaiMessages,
+         tools: openaiTools.length > 0 ? openaiTools : undefined,
+         stream: true, stream_options: { include_usage: true }, // usage is only reported on the stream when include_usage is set
+       });
+
+       let content = "";
+       const toolCalls: Map<number, { id: string; name: string; arguments: string }> = new Map();
+       let totalInputTokens = 0;
+       let totalOutputTokens = 0;
+
+       for await (const chunk of stream) {
+         // Check abort signal during streaming
+         if (abortSignal?.aborted) {
+           stream.controller.abort();
+           throw new Error("Aborted");
+         }
+
+         const delta = chunk.choices[0]?.delta;
+
+         if (delta?.content) {
+           content += delta.content;
+           onStream(delta.content);
+         }
+
+         if (delta?.tool_calls) {
+           for (const tc of delta.tool_calls) {
+             const existing = toolCalls.get(tc.index) || { id: "", name: "", arguments: "" };
+             if (tc.id) existing.id = tc.id;
+             if (tc.function?.name) existing.name = tc.function.name;
+             if (tc.function?.arguments) existing.arguments += tc.function.arguments;
+             toolCalls.set(tc.index, existing);
+           }
+         }
+
+         if (chunk.usage) {
+           totalInputTokens = chunk.usage.prompt_tokens;
+           totalOutputTokens = chunk.usage.completion_tokens;
+         }
+       }
+
+       const parsedToolCalls: ToolCall[] = [];
+       for (const tc of toolCalls.values()) {
+         try {
+           parsedToolCalls.push({
+             id: tc.id,
+             name: tc.name,
+             arguments: JSON.parse(tc.arguments || "{}"),
+           });
+         } catch {
+           // Invalid JSON, skip
+         }
+       }
+
+       return {
+         content,
+         toolCalls: parsedToolCalls.length > 0 ? parsedToolCalls : undefined,
+         usage: {
+           inputTokens: totalInputTokens,
+           outputTokens: totalOutputTokens,
+           totalTokens: totalInputTokens + totalOutputTokens,
+         },
+         finishReason: parsedToolCalls.length > 0 ? "tool_calls" : "stop",
+       };
+     } else {
+       const response = await this.openai.chat.completions.create({
+         model: this.model,
+         messages: openaiMessages,
+         tools: openaiTools.length > 0 ? openaiTools : undefined,
+       }, { signal: abortSignal }); // forward the signal so an in-flight request can be aborted
+
+       const choice = response.choices[0];
+       const toolCalls: ToolCall[] = [];
+
+       if (choice.message.tool_calls) {
+         for (const tc of choice.message.tool_calls) {
+           try {
+             toolCalls.push({
+               id: tc.id,
+               name: tc.function.name,
+               arguments: JSON.parse(tc.function.arguments || "{}"),
+             });
+           } catch {
+             // Invalid JSON, skip
+           }
+         }
+       }
+
+       return {
+         content: choice.message.content || "",
+         toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+         usage: {
+           inputTokens: response.usage?.prompt_tokens || 0,
+           outputTokens: response.usage?.completion_tokens || 0,
+           totalTokens: response.usage?.total_tokens || 0,
+         },
+         finishReason: choice.finish_reason === "tool_calls" ? "tool_calls" : choice.finish_reason === "length" ? "length" : "stop",
+       };
+     }
+   }
+
+   private toAnthropicMessage(message: Message): Anthropic.MessageParam {
+     if (message.role === "tool") {
+       return {
+         role: "user",
+         content: [
+           {
+             type: "tool_result",
+             tool_use_id: message.toolCallId!,
+             content: message.content,
+           },
+         ],
+       };
+     }
+
+     if (message.role === "assistant" && message.toolCalls?.length) {
+       const content: Anthropic.ContentBlockParam[] = [];
+       if (message.content) {
+         content.push({ type: "text", text: message.content });
+       }
+       for (const tc of message.toolCalls) {
+         content.push({
+           type: "tool_use",
+           id: tc.id,
+           name: tc.name,
+           input: tc.arguments,
+         });
+       }
+       return { role: "assistant", content };
+     }
+
+     return {
+       role: message.role as "user" | "assistant",
+       content: message.content,
+     };
+   }
+
+   private toOpenAIMessage(message: Message): OpenAI.Chat.Completions.ChatCompletionMessageParam {
+     if (message.role === "tool") {
+       return {
+         role: "tool",
+         tool_call_id: message.toolCallId!,
+         content: message.content,
+       };
+     }
+
+     if (message.role === "assistant" && message.toolCalls?.length) {
+       return {
+         role: "assistant",
+         content: message.content || null,
+         tool_calls: message.toolCalls.map((tc) => ({
+           id: tc.id,
+           type: "function" as const,
+           function: {
+             name: tc.name,
+             arguments: JSON.stringify(tc.arguments),
+           },
+         })),
+       };
+     }
+
+     return {
+       role: message.role as "user" | "assistant" | "system",
+       content: message.content,
+     };
+   }
+
+   // Cost estimation per 1M tokens (approximate)
+   getCostPerMillion(): { input: number; output: number } {
+     const costs: Record<string, { input: number; output: number }> = {
+       // Claude 4.5 series (newest)
+       "claude-opus-4-5-20251101": { input: 5, output: 25 },
+       "claude-sonnet-4-5-20250929": { input: 3, output: 15 },
+       "claude-haiku-4-5-20251001": { input: 1, output: 5 },
+       // Claude 4 series
+       "claude-opus-4-20250514": { input: 15, output: 75 },
+       "claude-sonnet-4-20250514": { input: 3, output: 15 },
+       // Claude 3.5 series
+       "claude-3-5-sonnet-20241022": { input: 3, output: 15 },
+       "claude-3-5-haiku-20241022": { input: 0.8, output: 4 },
+       // Claude 3 series
+       "claude-3-opus-20240229": { input: 15, output: 75 },
+       "claude-3-sonnet-20240229": { input: 3, output: 15 },
+       "claude-3-haiku-20240307": { input: 0.25, output: 1.25 },
+       // OpenAI
+       "gpt-4o": { input: 2.5, output: 10 },
+       "gpt-4o-mini": { input: 0.15, output: 0.6 },
+       "gpt-4-turbo": { input: 10, output: 30 },
+       "gpt-4.1": { input: 2, output: 8 },
+       "gpt-4.1-mini": { input: 0.4, output: 1.6 },
+       "o1": { input: 15, output: 60 },
+       "o1-mini": { input: 1.1, output: 4.4 },
+       "o3-mini": { input: 1.1, output: 4.4 },
+     };
+     return costs[this.model] || { input: 3, output: 15 }; // Default to mid-tier pricing
+   }
+
+   estimateCost(usage: TokenUsage): number {
+     const costs = this.getCostPerMillion();
+     return (usage.inputTokens * costs.input + usage.outputTokens * costs.output) / 1_000_000;
+   }
+ }
+
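For orientation, here is a minimal sketch of how this client might be driven. It is not part of the package: the function name demo, the chosen model ID, and the reliance on ANTHROPIC_API_KEY being set in the environment are illustrative assumptions; the Message shape follows types.ts later in this diff.

import { LLMClient } from "./LLMClient.js";
import type { Message } from "./types.js";

// Hypothetical usage sketch: stream a reply to stdout, then report cost.
async function demo(): Promise<void> {
  const client = new LLMClient({
    provider: "anthropic",
    model: "claude-3-5-haiku-20241022", // any model ID from the cost table above
  });

  const messages: Message[] = [
    { role: "user", content: "Summarize this repo in one line.", timestamp: new Date() },
  ];

  // No tools are passed here, so the model can only respond with text.
  const response = await client.chat(messages, [], undefined, (chunk) =>
    process.stdout.write(chunk)
  );

  const cost = client.estimateCost(response.usage);
  console.log(`\n${response.usage.totalTokens} tokens, ~$${cost.toFixed(6)}`);
}

demo().catch(console.error);

Since onStream is supplied, this exercises the streaming Anthropic path; omitting it would take the non-streaming branch with the same return shape.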
@@ -0,0 +1,97 @@
+ /**
+  * Tool Registry - Manages available tools for the agent
+  */
+
+ import type { Tool, ToolResult } from "./types.js";
+
+ export class ToolRegistry {
+   private tools: Map<string, Tool> = new Map();
+
+   register(tool: Tool): void {
+     this.tools.set(tool.name, tool);
+   }
+
+   unregister(name: string): boolean {
+     return this.tools.delete(name);
+   }
+
+   get(name: string): Tool | undefined {
+     return this.tools.get(name);
+   }
+
+   getAll(): Tool[] {
+     return Array.from(this.tools.values());
+   }
+
+   has(name: string): boolean {
+     return this.tools.has(name);
+   }
+
+   async execute(name: string, args: Record<string, unknown>): Promise<ToolResult> {
+     const tool = this.tools.get(name);
+     if (!tool) {
+       return {
+         toolCallId: "",
+         output: `Unknown tool: ${name}`,
+         success: false,
+         error: `Tool '${name}' not found in registry`,
+       };
+     }
+
+     try {
+       return await tool.execute(args);
+     } catch (error) {
+       return {
+         toolCallId: "",
+         output: "",
+         success: false,
+         error: error instanceof Error ? error.message : String(error),
+       };
+     }
+   }
+
+   // Get tool definitions for LLM
+   getToolDefinitions(): Array<{
+     name: string;
+     description: string;
+     parameters: {
+       type: "object";
+       properties: Record<string, { type: string; description: string }>;
+       required: string[];
+     };
+   }> {
+     return this.getAll().map((tool) => ({
+       name: tool.name,
+       description: tool.description,
+       parameters: {
+         type: "object" as const,
+         properties: Object.fromEntries(
+           tool.parameters.map((p) => [
+             p.name,
+             { type: p.type, description: p.description },
+           ])
+         ),
+         required: tool.parameters.filter((p) => p.required).map((p) => p.name),
+       },
+     }));
+   }
+
+   listTools(): string {
+     if (this.tools.size === 0) {
+       return "No tools registered.";
+     }
+
+     let output = "Available tools:\n";
+     for (const tool of this.tools.values()) {
+       output += `\n ${tool.name}: ${tool.description}\n`;
+       if (tool.parameters.length > 0) {
+         for (const param of tool.parameters) {
+           const required = param.required ? " (required)" : "";
+           output += ` - ${param.name}: ${param.type}${required} - ${param.description}\n`;
+         }
+       }
+     }
+     return output;
+   }
+ }
+
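A short sketch of the registry's intended use (illustrative only: the echoTool and main names are hypothetical, and the Tool shape follows types.ts in this diff). Note that execute fails soft — an unknown tool name and a thrown error both come back as an unsuccessful ToolResult rather than an exception:

import { ToolRegistry } from "./ToolRegistry.js";
import type { Tool } from "./types.js";

// Hypothetical "echo" tool, defined only for illustration.
const echoTool: Tool = {
  name: "echo",
  description: "Echoes the provided text back to the caller",
  parameters: [
    { name: "text", type: "string", description: "Text to echo", required: true },
  ],
  execute: async (args) => ({
    toolCallId: "",
    output: String(args.text ?? ""),
    success: true,
  }),
};

async function main(): Promise<void> {
  const registry = new ToolRegistry();
  registry.register(echoTool);

  const result = await registry.execute("echo", { text: "hello" });
  console.log(result.success, result.output); // true "hello"

  // Unknown tools come back as a failed result, not a throw:
  const missing = await registry.execute("nope", {});
  console.log(missing.success, missing.error); // false "Tool 'nope' not found in registry"

  console.log(registry.listTools());
}

main().catch(console.error);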
@@ -0,0 +1,15 @@
+ /**
+  * Agent module exports
+  */
+
+ export { Agent } from "./Agent.js";
+ export type { AgentEvents } from "./Agent.js";
+ export { LLMClient } from "./LLMClient.js";
+ export type { LLMClientOptions } from "./LLMClient.js";
+ export { ToolRegistry } from "./ToolRegistry.js";
+ export { BudgetManager } from "./BudgetManager.js";
+ export type { BudgetCheckResult } from "./BudgetManager.js";
+ export { FileResolver, getFileResolver } from "./FileResolver.js";
+ export type { FileReference, ResolvedMessage } from "./FileResolver.js";
+ export * from "./types.js";
+
@@ -0,0 +1,84 @@
+ /**
+  * Core types for the agent system
+  */
+
+ export interface Message {
+   role: "user" | "assistant" | "system" | "tool";
+   content: string;
+   toolCallId?: string;
+   toolCalls?: ToolCall[];
+   timestamp: Date;
+ }
+
+ export interface ToolCall {
+   id: string;
+   name: string;
+   arguments: Record<string, unknown>;
+ }
+
+ export interface ToolResult {
+   toolCallId: string;
+   output: string;
+   success: boolean;
+   error?: string;
+ }
+
+ export interface Tool {
+   name: string;
+   description: string;
+   parameters: ToolParameter[];
+   execute: (args: Record<string, unknown>) => Promise<ToolResult>;
+ }
+
+ export interface ToolParameter {
+   name: string;
+   type: "string" | "number" | "boolean" | "array" | "object";
+   description: string;
+   required: boolean;
+   default?: unknown;
+ }
+
+ export interface LLMResponse {
+   content: string;
+   toolCalls?: ToolCall[];
+   usage: TokenUsage;
+   finishReason: "stop" | "tool_calls" | "length" | "error";
+ }
+
+ export interface TokenUsage {
+   inputTokens: number;
+   outputTokens: number;
+   totalTokens: number;
+ }
+
+ export interface Budget {
+   maxTokens?: number;
+   maxCost?: number; // in USD
+   maxTime?: number; // in seconds
+   maxIterations?: number;
+ }
+
+ export interface BudgetUsage {
+   tokensUsed: number;
+   costUsed: number;
+   timeUsed: number;
+   iterationsUsed: number;
+ }
+
+ export interface AgentConfig {
+   provider: "anthropic" | "openai" | "local";
+   model: string;
+   apiKey?: string;
+   baseUrl?: string;
+   budget?: Budget;
+   systemPrompt?: string;
+   maxToolCalls?: number;
+ }
+
+ export interface ConversationContext {
+   messages: Message[];
+   budgetUsage: BudgetUsage;
+   startTime: Date;
+   workingDirectory: string;
+ }
+
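To make the relationships between these types concrete, here is a hypothetical tool-call round trip (all values invented for illustration). An assistant turn carries ToolCall entries; the tool's ToolResult is fed back as a "tool" message keyed by toolCallId, which is what LLMClient.toAnthropicMessage and toOpenAIMessage translate into each provider's wire format:

import type { Message, ToolCall, ToolResult } from "./types.js";

// The model asks for a tool invocation...
const call: ToolCall = { id: "call_1", name: "echo", arguments: { text: "hi" } };

const assistantTurn: Message = {
  role: "assistant",
  content: "",            // an assistant turn may carry tool calls with no prose
  toolCalls: [call],
  timestamp: new Date(),
};

// ...the tool runs and produces a result tied to the call's id...
const result: ToolResult = { toolCallId: call.id, output: "hi", success: true };

// ...and the result re-enters the conversation as a "tool" message.
const toolTurn: Message = {
  role: "tool",
  content: result.output,
  toolCallId: result.toolCallId,
  timestamp: new Date(),
};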