llm-advanced-tools 0.1.2 → 0.1.3

This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
@@ -1,195 +0,0 @@
- import OpenAI from 'openai';
- import {
-   ProviderAdapter,
-   ProviderCapabilities,
-   ToolDefinition,
-   ChatRequest,
-   ChatResponse,
-   ToolCall,
-   ToolResult,
-   Message
- } from '../types';
-
- /**
-  * OpenAI provider adapter
-  * Emulates advanced tool use features for OpenAI's API
-  */
- export class OpenAIAdapter implements ProviderAdapter {
-   readonly name = 'openai';
-   readonly capabilities: ProviderCapabilities = {
-     supportsNativeToolSearch: false,
-     supportsNativeCodeExecution: false,
-     supportsNativeExamples: false,
-     supportsStreaming: true,
-     supportsParallelToolCalls: true,
-     maxTokens: 128000 // GPT-4 Turbo
-   };
-
-   private client: OpenAI;
-   private model: string;
-
-   constructor(apiKey: string, model = 'gpt-4-turbo-preview') {
-     this.client = new OpenAI({ apiKey });
-     this.model = model;
-   }
-
-   /**
-    * Format a tool definition for OpenAI's function calling format
-    */
-   formatTool(tool: ToolDefinition): Record<string, any> {
-     // Extract schema from Zod or use as-is
-     let schema = tool.inputSchema;
-     if (typeof schema === 'object' && 'parse' in schema) {
-       // It's a Zod schema - we'd need to convert it
-       // For now, assume it's already JSON schema
-       schema = tool.inputSchema;
-     }
-
-     // Prepare description with examples if available
-     let description = tool.description;
-
-     if (tool.inputExamples && tool.inputExamples.length > 0) {
-       description += '\n\nExamples:';
-       tool.inputExamples.forEach((example, i) => {
-         description += `\n${i + 1}. ${JSON.stringify(example)}`;
-       });
-     }
-
-     return {
-       type: 'function',
-       function: {
-         name: tool.name,
-         description,
-         parameters: schema
-       }
-     };
-   }
-
-   /**
-    * Convert our Message format to OpenAI's format
-    */
-   private convertMessages(messages: Message[]): OpenAI.ChatCompletionMessageParam[] {
-     return messages.map(msg => {
-       if (msg.role === 'tool') {
-         // Tool result message
-         const toolResults = msg.toolResults || [];
-         return toolResults.map(result => ({
-           role: 'tool' as const,
-           tool_call_id: result.toolCallId,
-           content: result.error
-             ? `Error: ${result.error.message}`
-             : JSON.stringify(result.data)
-         }));
-       }
-
-       if (msg.toolCalls && msg.toolCalls.length > 0) {
-         // Assistant message with tool calls
-         return {
-           role: 'assistant' as const,
-           content: typeof msg.content === 'string' ? msg.content : null,
-           tool_calls: msg.toolCalls.map(tc => ({
-             id: tc.id,
-             type: 'function' as const,
-             function: {
-               name: tc.name,
-               arguments: JSON.stringify(tc.input)
-             }
-           }))
-         };
-       }
-
-       // Regular message
-       return {
-         role: msg.role as 'system' | 'user' | 'assistant',
-         content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
-       };
-     }).flat();
-   }
-
-   /**
-    * Create a chat completion
-    */
-   async chat(request: ChatRequest): Promise<ChatResponse> {
-     const messages = this.convertMessages(request.messages);
-
-     const tools = request.tools
-       ?.filter(tool => !tool.deferLoading) // Only send loaded tools
-       ?.map(tool => this.formatTool(tool)) as OpenAI.ChatCompletionTool[] | undefined;
-
-     const completion = await this.client.chat.completions.create({
-       model: this.model,
-       messages,
-       tools: tools && tools.length > 0 ? tools : undefined,
-       max_tokens: request.maxTokens,
-       temperature: request.temperature,
-       stream: false // Streaming not yet supported
-     }) as OpenAI.ChatCompletion;
-
-     const choice = completion.choices[0];
-     const message = choice.message;
-
-     // Parse tool calls if any
-     const toolCalls = message.tool_calls
-       ? this.parseToolCalls(message.tool_calls)
-       : [];
-
-     return {
-       message: {
-         role: 'assistant',
-         content: message.content || '',
-         toolCalls
-       },
-       usage: completion.usage
-         ? {
-             inputTokens: completion.usage.prompt_tokens,
-             outputTokens: completion.usage.completion_tokens,
-             totalTokens: completion.usage.total_tokens
-           }
-         : undefined,
-       stopReason: this.mapFinishReason(choice.finish_reason),
-       toolCalls
-     };
-   }
-
-   /**
-    * Parse tool calls from OpenAI response
-    */
-   parseToolCalls(toolCalls: OpenAI.ChatCompletionMessageToolCall[]): ToolCall[] {
-     return toolCalls.map(tc => ({
-       id: tc.id,
-       name: tc.function.name,
-       input: JSON.parse(tc.function.arguments)
-     }));
-   }
-
-   /**
-    * Format tool results for OpenAI
-    */
-   formatToolResults(results: ToolResult[]): OpenAI.ChatCompletionMessageParam[] {
-     return results.map(result => ({
-       role: 'tool' as const,
-       tool_call_id: result.toolCallId,
-       content: result.error
-         ? `Error: ${result.error.message}`
-         : JSON.stringify(result.data)
-     }));
-   }
-
-   /**
-    * Map OpenAI finish reason to our format
-    */
-   private mapFinishReason(
-     reason: string | null
-   ): 'end_turn' | 'max_tokens' | 'tool_use' | 'stop_sequence' | undefined {
-     switch (reason) {
-       case 'stop':
-         return 'end_turn';
-       case 'length':
-         return 'max_tokens';
-       case 'tool_calls':
-         return 'tool_use';
-       default:
-         return undefined;
-     }
-   }
- }
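
For reference, here is a minimal sketch of how the removed OpenAIAdapter was presumably driven. The ToolDefinition fields (name, description, inputSchema, inputExamples, handler) and the ChatRequest shape are read off the deleted source above; the package-root import path and any field not referenced there are assumptions, not a documented 0.1.2 API.

```ts
// Hypothetical usage sketch; import path and exact types are assumptions
// inferred from the deleted source above.
import { OpenAIAdapter } from 'llm-advanced-tools';

const adapter = new OpenAIAdapter(process.env.OPENAI_API_KEY!, 'gpt-4-turbo-preview');

// A tool definition shaped like the ToolDefinition the adapter consumes:
// inputExamples are appended to the description, and the JSON schema is
// passed through to OpenAI function calling as-is.
const getWeather = {
  name: 'get_weather',
  description: 'Look up the current weather for a city',
  inputSchema: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city']
  },
  inputExamples: [{ city: 'Berlin' }],
  handler: async (input: { city: string }) => ({ city: input.city, tempC: 21 })
};

// Top-level await assumes an ESM context.
const response = await adapter.chat({
  messages: [{ role: 'user', content: 'What is the weather in Berlin?' }],
  tools: [getWeather]
});

// The adapter only surfaces parsed tool calls; executing them is left to the caller.
console.log(response.stopReason, response.toolCalls);
```
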
@@ -1,281 +0,0 @@
- import { generateText, LanguageModel, tool } from 'ai';
- import { z } from 'zod';
- import {
-   ProviderAdapter,
-   ProviderCapabilities,
-   ToolDefinition,
-   ChatRequest,
-   ChatResponse,
-   ToolCall,
-   ToolResult,
-   Message
- } from '../types';
-
- /**
-  * Vercel AI SDK adapter
-  * Works with any provider supported by AI SDK (OpenAI, Anthropic, Google, etc.)
-  */
- export class VercelAIAdapter implements ProviderAdapter {
-   readonly name = 'vercel-ai';
-   readonly capabilities: ProviderCapabilities = {
-     supportsNativeToolSearch: false,
-     supportsNativeCodeExecution: false,
-     supportsNativeExamples: false,
-     supportsStreaming: true,
-     supportsParallelToolCalls: true,
-     maxTokens: 128000 // Depends on model, using safe default
-   };
-
-   private model: LanguageModel;
-   private providerName: string;
-
-   /**
-    * @param model - AI SDK language model (e.g., from @ai-sdk/openai, @ai-sdk/anthropic)
-    * @param providerName - Optional provider name for identification
-    */
-   constructor(model: LanguageModel, providerName = 'ai-sdk') {
-     this.model = model;
-     this.providerName = providerName;
-   }
-
-   /**
-    * Format a tool definition for AI SDK's format
-    */
-   formatTool(tool: ToolDefinition): Record<string, any> {
-     // AI SDK uses Zod schemas, but also accepts JSON schema
-     let parameters = tool.inputSchema;
-
-     // If it's already a Zod schema, use it directly
-     if (typeof parameters === 'object' && 'parse' in parameters) {
-       parameters = tool.inputSchema;
-     }
-
-     // Add examples to description for better accuracy
-     let description = tool.description;
-     if (tool.inputExamples && tool.inputExamples.length > 0) {
-       description += '\n\nExample usage:';
-       tool.inputExamples.forEach((example, i) => {
-         description += `\n${i + 1}. ${JSON.stringify(example)}`;
-       });
-     }
-
-     // AI SDK CoreTool format
-     return {
-       description,
-       parameters
-     };
-   }
-
-   /**
-    * Convert our Message format to AI SDK's format
-    */
-   private convertMessages(messages: Message[]): Array<{
-     role: 'user' | 'assistant' | 'system';
-     content: string;
-   }> {
-     const converted: Array<{ role: 'user' | 'assistant' | 'system'; content: string }> = [];
-
-     for (const msg of messages) {
-       if (msg.role === 'tool') {
-         // Tool results are handled differently in AI SDK
-         // They're included in the tool execution flow
-         continue;
-       }
-
-       converted.push({
-         role: msg.role as 'user' | 'assistant' | 'system',
-         content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
-       });
-     }
-
-     return converted;
-   }
-
-   /**
-    * Convert JSON Schema to Zod schema (simplified)
-    */
-   private jsonSchemaToZod(schema: any): z.ZodType {
-     // If it's already a Zod schema, return it
-     if (schema && typeof schema === 'object' && 'parse' in schema) {
-       return schema;
-     }
-
-     // Simple JSON Schema to Zod conversion
-     // This is a simplified version - for production, use a proper converter
-     if (schema.type === 'object') {
-       const shape: Record<string, z.ZodType> = {};
-       const required = schema.required || [];
-
-       for (const [key, value] of Object.entries(schema.properties || {})) {
-         const prop: any = value;
-         let zodType: z.ZodType;
-
-         switch (prop.type) {
-           case 'string':
-             zodType = z.string();
-             if (prop.enum) {
-               zodType = z.enum(prop.enum);
-             }
-             break;
-           case 'number':
-             zodType = z.number();
-             break;
-           case 'boolean':
-             zodType = z.boolean();
-             break;
-           case 'array':
-             zodType = z.array(z.any());
-             break;
-           default:
-             zodType = z.any();
-         }
-
-         if (prop.description) {
-           zodType = zodType.describe(prop.description);
-         }
-
-         // Make field optional if not in required array
-         if (!required.includes(key)) {
-           zodType = zodType.optional();
-         }
-
-         shape[key] = zodType;
-       }
-
-       return z.object(shape);
-     }
-
-     return z.any();
-   }
-
-   /**
-    * Create a chat completion using AI SDK
-    */
-   async chat(request: ChatRequest): Promise<ChatResponse> {
-     const messages = this.convertMessages(request.messages);
-
-     // Convert our tools to AI SDK format using the tool() helper
-     const tools: Record<string, any> = {};
-     const toolDefinitions = request.tools?.filter(t => !t.deferLoading) || [];
-
-     for (const toolDef of toolDefinitions) {
-       // Add examples to description
-       let description = toolDef.description;
-       if (toolDef.inputExamples && toolDef.inputExamples.length > 0) {
-         description += '\n\nExample usage:';
-         toolDef.inputExamples.forEach((example, i) => {
-           description += `\n${i + 1}. ${JSON.stringify(example)}`;
-         });
-       }
-
-       // Convert schema to Zod
-       const parameters = this.jsonSchemaToZod(toolDef.inputSchema);
-
-       // Create the tool using AI SDK's tool() helper
-       // Store reference to toolDef in closure to avoid issues
-       const handler = toolDef.handler;
-       const name = toolDef.name;
-       tools[name] = {
-         description,
-         parameters,
-         execute: async (args: any) => {
-           try {
-             const result = await handler(args);
-             return result;
-           } catch (error: any) {
-             throw new Error(`Tool ${name} failed: ${error.message}`);
-           }
-         }
-       } as any;
-     }
-
-     // Generate text with tools
-     const generateOptions: any = {
-       model: this.model,
-       messages: messages.map(m => ({
-         role: m.role,
-         content: m.content
-       })),
-       tools: Object.keys(tools).length > 0 ? tools : undefined
-     };
-
-     // Add optional parameters if provided
-     if (request.temperature !== undefined) {
-       generateOptions.temperature = request.temperature;
-     }
-     if (request.maxTokens !== undefined) {
-       generateOptions.maxCompletionTokens = request.maxTokens;
-     }
-
-     const result = await generateText(generateOptions);
-
-     // Parse tool calls from result
-     const toolCalls: ToolCall[] = [];
-     if (result.toolCalls && result.toolCalls.length > 0) {
-       result.toolCalls.forEach((tc: any, index: number) => {
-         toolCalls.push({
-           id: `call_${index}`,
-           name: tc.toolName,
-           input: tc.args
-         });
-       });
-     }
-
-     return {
-       message: {
-         role: 'assistant',
-         content: result.text || '',
-         toolCalls
-       },
-       usage: result.usage
-         ? {
-             inputTokens: result.usage.inputTokens || (result.usage as any).promptTokens || 0,
-             outputTokens: result.usage.outputTokens || (result.usage as any).completionTokens || 0,
-             totalTokens: result.usage.totalTokens || 0
-           }
-         : undefined,
-       stopReason: this.mapFinishReason(result.finishReason),
-       toolCalls
-     };
-   }
-
-   /**
-    * Parse tool calls from AI SDK response
-    */
-   parseToolCalls(toolCalls: any[]): ToolCall[] {
-     return toolCalls.map((tc, index) => ({
-       id: `call_${index}`,
-       name: tc.toolName,
-       input: tc.args
-     }));
-   }
-
-   /**
-    * Format tool results for AI SDK
-    */
-   formatToolResults(results: ToolResult[]): any {
-     // AI SDK handles tool results automatically in the execute callback
-     return results;
-   }
-
-   /**
-    * Map AI SDK finish reason to our format
-    */
-   private mapFinishReason(
-     reason: string | undefined
-   ): 'end_turn' | 'max_tokens' | 'tool_use' | 'stop_sequence' | undefined {
-     switch (reason) {
-       case 'stop':
-         return 'end_turn';
-       case 'length':
-         return 'max_tokens';
-       case 'tool-calls':
-         return 'tool_use';
-       case 'content-filter':
-       case 'error':
-         return 'stop_sequence';
-       default:
-         return undefined;
-     }
-   }
- }
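
Likewise, a hedged sketch of constructing the removed VercelAIAdapter around an AI SDK model. The @ai-sdk/openai provider is only one possible LanguageModel source; the chosen model id and the package-root export are assumptions, not confirmed by the diff.

```ts
// Hypothetical usage sketch; the package-root export and the model id are assumptions.
import { openai } from '@ai-sdk/openai';
import { VercelAIAdapter } from 'llm-advanced-tools';

// Any AI SDK LanguageModel works here; the provider name is only a label.
const adapter = new VercelAIAdapter(openai('gpt-4o-mini'), 'openai');

// Tools passed to chat() are converted into AI SDK tools with an execute
// callback, so the SDK runs the handlers itself (see chat() above).
// Top-level await assumes an ESM context.
const response = await adapter.chat({
  messages: [{ role: 'user', content: 'Reply with a one-line greeting.' }],
  maxTokens: 100
});

console.log(response.message.content);
```
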
@@ -1,232 +0,0 @@
- import {
-   ClientConfig,
-   ChatRequest,
-   ChatResponse,
-   Message,
-   ToolCall,
-   ToolResult
- } from '../types';
- import { ToolRegistry } from './registry';
- import { CodeExecutor, createDefaultExecutor } from '../executor';
-
- /**
-  * Main client for provider-agnostic tool use
-  */
- export class Client {
-   private config: ClientConfig;
-   private registry: ToolRegistry;
-   private executor?: CodeExecutor;
-
-   constructor(config: ClientConfig, registry?: ToolRegistry) {
-     this.config = config;
-     this.registry = registry || new ToolRegistry(config.searchConfig);
-
-     if (config.enableProgrammaticCalling) {
-       this.executor = createDefaultExecutor(config.executorConfig);
-     }
-   }
-
-   /**
-    * Get the tool registry
-    */
-   getRegistry(): ToolRegistry {
-     return this.registry;
-   }
-
-   /**
-    * Send a chat message
-    */
-   async chat(request: Omit<ChatRequest, 'tools'>): Promise<ChatResponse> {
-     const messages = [...request.messages];
-
-     // If tool search is enabled and provider doesn't support it natively,
-     // add the tool search tool to loaded tools
-     if (
-       this.config.enableToolSearch &&
-       !this.config.adapter.capabilities.supportsNativeToolSearch
-     ) {
-       const toolSearchTool = this.registry.createToolSearchTool();
-       this.registry.register(toolSearchTool);
-     }
-
-     // Get loaded tools for this request
-     const tools = this.registry.getLoadedTools();
-
-     // Make the request
-     const response = await this.config.adapter.chat({
-       ...request,
-       messages,
-       tools
-     });
-
-     // Handle tool calls if any
-     if (response.toolCalls && response.toolCalls.length > 0) {
-       return this.handleToolCalls(request, response);
-     }
-
-     return response;
-   }
-
-   /**
-    * Handle tool calls from LLM response
-    */
-   private async handleToolCalls(
-     originalRequest: Omit<ChatRequest, 'tools'>,
-     response: ChatResponse
-   ): Promise<ChatResponse> {
-     const toolCalls = response.toolCalls!;
-
-     // Check if any tool call is for code execution
-     const codeExecutionCall = toolCalls.find(tc => tc.name === 'code_execution');
-
-     if (codeExecutionCall && this.executor && this.config.enableProgrammaticCalling) {
-       return this.handleProgrammaticToolCall(originalRequest, response, codeExecutionCall);
-     }
-
-     // Regular tool execution
-     const results = await this.executeToolCalls(toolCalls);
-
-     // Add tool results to messages and continue conversation
-     const messages: Message[] = [
-       ...originalRequest.messages,
-       response.message,
-       {
-         role: 'tool',
-         content: '',
-         toolResults: results
-       }
-     ];
-
-     return this.chat({
-       ...originalRequest,
-       messages
-     });
-   }
-
-   /**
-    * Handle programmatic tool calling (code execution)
-    */
-   private async handleProgrammaticToolCall(
-     originalRequest: Omit<ChatRequest, 'tools'>,
-     response: ChatResponse,
-     codeCall: ToolCall
-   ): Promise<ChatResponse> {
-     const code = codeCall.input.code;
-     const tools = this.registry.getLoadedTools();
-
-     // Execute the code with access to tools
-     const execResult = await this.executor!.execute(code, tools);
-
-     // Create tool result message
-     const result: ToolResult = {
-       toolCallId: codeCall.id,
-       data: {
-         stdout: execResult.stdout,
-         returnValue: execResult.returnValue
-       },
-       error: execResult.error
-     };
-
-     // Continue conversation with execution result
-     const messages: Message[] = [
-       ...originalRequest.messages,
-       response.message,
-       {
-         role: 'tool',
-         content: '',
-         toolResults: [result]
-       }
-     ];
-
-     return this.chat({
-       ...originalRequest,
-       messages
-     });
-   }
-
-   /**
-    * Execute tool calls
-    */
-   private async executeToolCalls(toolCalls: ToolCall[]): Promise<ToolResult[]> {
-     const results: ToolResult[] = [];
-
-     for (const call of toolCalls) {
-       const tool = this.registry.get(call.name);
-
-       if (!tool) {
-         results.push({
-           toolCallId: call.id,
-           error: {
-             message: `Tool "${call.name}" not found`,
-             code: 'TOOL_NOT_FOUND'
-           }
-         });
-         continue;
-       }
-
-       // Special handling for tool_search
-       if (call.name === 'tool_search') {
-         try {
-           const data = await tool.handler(call.input);
-           results.push({
-             toolCallId: call.id,
-             data
-           });
-         } catch (error: any) {
-           results.push({
-             toolCallId: call.id,
-             error: {
-               message: error.message,
-               code: 'EXECUTION_ERROR'
-             }
-           });
-         }
-         continue;
-       }
-
-       // Execute regular tool
-       try {
-         const data = await tool.handler(call.input);
-         results.push({
-           toolCallId: call.id,
-           data
-         });
-       } catch (error: any) {
-         results.push({
-           toolCallId: call.id,
-           error: {
-             message: error.message,
-             code: 'EXECUTION_ERROR',
-             details: error
-           }
-         });
-       }
-     }
-
-     return results;
-   }
-
-   /**
-    * Helper method for simple text chat
-    */
-   async ask(prompt: string, systemPrompt?: string): Promise<string> {
-     const messages: Message[] = [];
-
-     if (systemPrompt) {
-       messages.push({
-         role: 'system',
-         content: systemPrompt
-       });
-     }
-
-     messages.push({
-       role: 'user',
-       content: prompt
-     });
-
-     const response = await this.chat({ messages });
-     return typeof response.message.content === 'string'
-       ? response.message.content
-       : JSON.stringify(response.message.content);
-   }
- }
package/src/core/index.ts DELETED
@@ -1,2 +0,0 @@
- export { ToolRegistry } from './registry';
- export { Client } from './client';
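
The deleted entry point above re-exported ToolRegistry and Client. A hedged sketch of how the removed pieces appear to fit together, based only on the deleted sources: the package-root exports and any ClientConfig or ToolDefinition field not referenced in the code above are assumptions rather than documented API.

```ts
// Hypothetical wiring sketch inferred from the deleted client.ts; exports,
// optional constructor arguments, and config field names are assumptions.
import { Client, ToolRegistry, OpenAIAdapter } from 'llm-advanced-tools';

const registry = new ToolRegistry();
registry.register({
  name: 'lookup_order',
  description: 'Fetch the status of an order by id',
  inputSchema: {
    type: 'object',
    properties: { orderId: { type: 'string' } },
    required: ['orderId']
  },
  handler: async (input: { orderId: string }) => ({ orderId: input.orderId, status: 'shipped' })
});

const client = new Client(
  {
    adapter: new OpenAIAdapter(process.env.OPENAI_API_KEY!),
    enableToolSearch: false,          // no tool_search tool is injected
    enableProgrammaticCalling: false  // no code executor is created
  },
  registry
);

// ask() wraps chat(); tool calls returned by the adapter are executed via the
// registry and the conversation continues automatically with the results.
// Top-level await assumes an ESM context.
const answer = await client.ask('Where is order 1234?', 'You are a support assistant.');
console.log(answer);
```
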