@blockrun/llm 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,295 @@
1
+ /**
2
+ * Type definitions for BlockRun LLM SDK
3
+ */
4
+ interface ChatMessage {
5
+ role: "system" | "user" | "assistant";
6
+ content: string;
7
+ }
8
+ interface ChatChoice {
9
+ index: number;
10
+ message: ChatMessage;
11
+ finish_reason?: string;
12
+ }
13
+ interface ChatUsage {
14
+ prompt_tokens: number;
15
+ completion_tokens: number;
16
+ total_tokens: number;
17
+ }
18
+ interface ChatResponse {
19
+ id: string;
20
+ object: string;
21
+ created: number;
22
+ model: string;
23
+ choices: ChatChoice[];
24
+ usage?: ChatUsage;
25
+ }
26
+ interface Model {
27
+ id: string;
28
+ name: string;
29
+ provider: string;
30
+ description: string;
31
+ inputPrice: number;
32
+ outputPrice: number;
33
+ contextWindow: number;
34
+ maxOutput: number;
35
+ available: boolean;
36
+ }
37
+ interface LLMClientOptions {
38
+ /** EVM wallet private key (hex string starting with 0x). Optional if BASE_CHAIN_WALLET_KEY env var is set. */
39
+ privateKey?: `0x${string}` | string;
40
+ /** API endpoint URL (default: https://blockrun.ai/api) */
41
+ apiUrl?: string;
42
+ /** Request timeout in milliseconds (default: 60000) */
43
+ timeout?: number;
44
+ }
45
+ interface ChatOptions {
46
+ /** System prompt */
47
+ system?: string;
48
+ /** Max tokens to generate */
49
+ maxTokens?: number;
50
+ /** Sampling temperature */
51
+ temperature?: number;
52
+ /** Nucleus sampling parameter */
53
+ topP?: number;
54
+ }
55
+ interface ChatCompletionOptions {
56
+ /** Max tokens to generate */
57
+ maxTokens?: number;
58
+ /** Sampling temperature */
59
+ temperature?: number;
60
+ /** Nucleus sampling parameter */
61
+ topP?: number;
62
+ }
63
+ declare class BlockrunError extends Error {
64
+ constructor(message: string);
65
+ }
66
+ declare class PaymentError extends BlockrunError {
67
+ constructor(message: string);
68
+ }
69
+ declare class APIError extends BlockrunError {
70
+ statusCode: number;
71
+ response?: unknown;
72
+ constructor(message: string, statusCode: number, response?: unknown);
73
+ }
74
+
75
+ /**
76
+ * BlockRun LLM Client - Main SDK entry point.
77
+ *
78
+ * Usage:
79
+ * import { LLMClient } from '@blockrun/llm';
80
+ *
81
+ * // Option 1: Use BASE_CHAIN_WALLET_KEY env var
82
+ * const client = new LLMClient();
83
+ *
84
+ * // Option 2: Pass private key directly
85
+ * const client = new LLMClient({ privateKey: '0x...' });
86
+ *
87
+ * const response = await client.chat('openai/gpt-4o', 'Hello!');
88
+ * console.log(response);
89
+ */
90
+
91
+ /**
92
+ * BlockRun LLM Gateway Client.
93
+ *
94
+ * Provides access to multiple LLM providers (OpenAI, Anthropic, Google, etc.)
95
+ * with automatic x402 micropayments on Base chain.
96
+ */
97
+ declare class LLMClient {
98
+ private account;
99
+ private privateKey;
100
+ private apiUrl;
101
+ private timeout;
102
+ /**
103
+ * Initialize the BlockRun LLM client.
104
+ *
105
+ * @param options - Client configuration options (optional if BASE_CHAIN_WALLET_KEY env var is set)
106
+ */
107
+ constructor(options?: LLMClientOptions);
108
+ /**
109
+ * Simple 1-line chat interface.
110
+ *
111
+ * @param model - Model ID (e.g., 'openai/gpt-4o', 'anthropic/claude-sonnet-4')
112
+ * @param prompt - User message
113
+ * @param options - Optional chat parameters
114
+ * @returns Assistant's response text
115
+ *
116
+ * @example
117
+ * const response = await client.chat('gpt-4o', 'What is the capital of France?');
118
+ * console.log(response); // 'The capital of France is Paris.'
119
+ */
120
+ chat(model: string, prompt: string, options?: ChatOptions): Promise<string>;
121
+ /**
122
+ * Full chat completion interface (OpenAI-compatible).
123
+ *
124
+ * @param model - Model ID
125
+ * @param messages - Array of messages with role and content
126
+ * @param options - Optional completion parameters
127
+ * @returns ChatResponse object with choices and usage
128
+ */
129
+ chatCompletion(model: string, messages: ChatMessage[], options?: ChatCompletionOptions): Promise<ChatResponse>;
130
+ /**
131
+ * Make a request with automatic x402 payment handling.
132
+ */
133
+ private requestWithPayment;
134
+ /**
135
+ * Handle 402 response: parse requirements, sign payment, retry.
136
+ */
137
+ private handlePaymentAndRetry;
138
+ /**
139
+ * Fetch with timeout.
140
+ */
141
+ private fetchWithTimeout;
142
+ /**
143
+ * List available models with pricing.
144
+ */
145
+ listModels(): Promise<Model[]>;
146
+ /**
147
+ * Get the wallet address being used for payments.
148
+ */
149
+ getWalletAddress(): string;
150
+ }
151
+
152
+ /**
153
+ * x402 Payment Protocol v2 Implementation for BlockRun.
154
+ *
155
+ * This module handles creating signed payment payloads for the x402 v2 protocol.
156
+ * The private key is used ONLY for local signing and NEVER leaves the client.
157
+ */
158
+
159
+ declare const BASE_CHAIN_ID = 8453;
160
+ declare const USDC_BASE: "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913";
161
+
162
+ /**
163
+ * OpenAI-compatible API wrapper for BlockRun LLM SDK.
164
+ *
165
+ * Drop-in replacement for OpenAI SDK - just change the import and use walletKey instead of apiKey.
166
+ *
167
+ * @example
168
+ * // Before (OpenAI)
169
+ * import OpenAI from 'openai';
170
+ * const client = new OpenAI({ apiKey: 'sk-...' });
171
+ *
172
+ * // After (BlockRun)
173
+ * import { OpenAI } from '@blockrun/llm';
174
+ * const client = new OpenAI({ walletKey: '0x...' });
175
+ *
176
+ * // Rest of your code stays exactly the same!
177
+ * const response = await client.chat.completions.create({
178
+ * model: 'gpt-4o',
179
+ * messages: [{ role: 'user', content: 'Hello!' }]
180
+ * });
181
+ */
182
+
183
+ interface OpenAIClientOptions {
184
+ /** EVM wallet private key (replaces apiKey) */
185
+ walletKey?: `0x${string}` | string;
186
+ /** Alternative: use privateKey like LLMClient */
187
+ privateKey?: `0x${string}` | string;
188
+ /** API endpoint URL (default: https://blockrun.ai/api) */
189
+ baseURL?: string;
190
+ /** Request timeout in milliseconds */
191
+ timeout?: number;
192
+ }
193
+ interface OpenAIChatCompletionParams {
194
+ model: string;
195
+ messages: Array<{
196
+ role: "system" | "user" | "assistant";
197
+ content: string;
198
+ }>;
199
+ max_tokens?: number;
200
+ temperature?: number;
201
+ top_p?: number;
202
+ stream?: boolean;
203
+ n?: number;
204
+ stop?: string | string[];
205
+ presence_penalty?: number;
206
+ frequency_penalty?: number;
207
+ user?: string;
208
+ }
209
+ interface OpenAIChatCompletionChoice {
210
+ index: number;
211
+ message: {
212
+ role: "assistant";
213
+ content: string;
214
+ };
215
+ finish_reason: string | null;
216
+ }
217
+ interface OpenAIChatCompletionResponse {
218
+ id: string;
219
+ object: "chat.completion";
220
+ created: number;
221
+ model: string;
222
+ choices: OpenAIChatCompletionChoice[];
223
+ usage?: {
224
+ prompt_tokens: number;
225
+ completion_tokens: number;
226
+ total_tokens: number;
227
+ };
228
+ }
229
+ interface OpenAIChatCompletionChunk {
230
+ id: string;
231
+ object: "chat.completion.chunk";
232
+ created: number;
233
+ model: string;
234
+ choices: Array<{
235
+ index: number;
236
+ delta: {
237
+ role?: "assistant";
238
+ content?: string;
239
+ };
240
+ finish_reason: string | null;
241
+ }>;
242
+ }
243
+ /**
244
+ * Chat completions API (OpenAI-compatible)
245
+ */
246
+ declare class ChatCompletions {
247
+ private client;
248
+ private apiUrl;
249
+ private timeout;
250
+ constructor(client: LLMClient, apiUrl: string, timeout: number);
251
+ /**
252
+ * Create a chat completion (OpenAI-compatible). The `stream: true` overload is declared first so it resolves before the general one and yields an async chunk iterator.
253
+ */
254
+ create(params: OpenAIChatCompletionParams & {
255
+ stream: true;
256
+ }): Promise<AsyncIterable<OpenAIChatCompletionChunk>>;
257
+ create(params: OpenAIChatCompletionParams): Promise<OpenAIChatCompletionResponse>;
258
+ private createStream;
259
+ private transformResponse;
260
+ }
261
+ /**
262
+ * Chat API namespace
263
+ */
264
+ declare class Chat {
265
+ completions: ChatCompletions;
266
+ constructor(client: LLMClient, apiUrl: string, timeout: number);
267
+ }
268
+ /**
269
+ * OpenAI-compatible client for BlockRun.
270
+ *
271
+ * Drop-in replacement for the OpenAI SDK.
272
+ *
273
+ * @example
274
+ * import { OpenAI } from '@blockrun/llm';
275
+ *
276
+ * const client = new OpenAI({ walletKey: '0x...' });
277
+ *
278
+ * const response = await client.chat.completions.create({
279
+ * model: 'gpt-4o',
280
+ * messages: [{ role: 'user', content: 'Hello!' }]
281
+ * });
282
+ *
283
+ * console.log(response.choices[0].message.content);
284
+ */
285
+ declare class OpenAI {
286
+ chat: Chat;
287
+ private client;
288
+ constructor(options?: OpenAIClientOptions);
289
+ /**
290
+ * Get the wallet address being used for payments.
291
+ */
292
+ getWalletAddress(): string;
293
+ }
294
+
295
+ export { APIError, BASE_CHAIN_ID, BlockrunError, type ChatChoice, type ChatCompletionOptions, type ChatMessage, type ChatOptions, type ChatResponse, type ChatUsage, LLMClient, type LLMClientOptions, type Model, OpenAI, type OpenAIChatCompletionChoice, type OpenAIChatCompletionChunk, type OpenAIChatCompletionParams, type OpenAIChatCompletionResponse, type OpenAIClientOptions, PaymentError, USDC_BASE, LLMClient as default };
@@ -0,0 +1,295 @@
1
+ /**
2
+ * Type definitions for BlockRun LLM SDK
3
+ */
4
+ interface ChatMessage {
5
+ role: "system" | "user" | "assistant";
6
+ content: string;
7
+ }
8
+ interface ChatChoice {
9
+ index: number;
10
+ message: ChatMessage;
11
+ finish_reason?: string;
12
+ }
13
+ interface ChatUsage {
14
+ prompt_tokens: number;
15
+ completion_tokens: number;
16
+ total_tokens: number;
17
+ }
18
+ interface ChatResponse {
19
+ id: string;
20
+ object: string;
21
+ created: number;
22
+ model: string;
23
+ choices: ChatChoice[];
24
+ usage?: ChatUsage;
25
+ }
26
+ interface Model {
27
+ id: string;
28
+ name: string;
29
+ provider: string;
30
+ description: string;
31
+ inputPrice: number;
32
+ outputPrice: number;
33
+ contextWindow: number;
34
+ maxOutput: number;
35
+ available: boolean;
36
+ }
37
+ interface LLMClientOptions {
38
+ /** EVM wallet private key (hex string starting with 0x). Optional if BASE_CHAIN_WALLET_KEY env var is set. */
39
+ privateKey?: `0x${string}` | string;
40
+ /** API endpoint URL (default: https://blockrun.ai/api) */
41
+ apiUrl?: string;
42
+ /** Request timeout in milliseconds (default: 60000) */
43
+ timeout?: number;
44
+ }
45
+ interface ChatOptions {
46
+ /** System prompt */
47
+ system?: string;
48
+ /** Max tokens to generate */
49
+ maxTokens?: number;
50
+ /** Sampling temperature */
51
+ temperature?: number;
52
+ /** Nucleus sampling parameter */
53
+ topP?: number;
54
+ }
55
+ interface ChatCompletionOptions {
56
+ /** Max tokens to generate */
57
+ maxTokens?: number;
58
+ /** Sampling temperature */
59
+ temperature?: number;
60
+ /** Nucleus sampling parameter */
61
+ topP?: number;
62
+ }
63
+ declare class BlockrunError extends Error {
64
+ constructor(message: string);
65
+ }
66
+ declare class PaymentError extends BlockrunError {
67
+ constructor(message: string);
68
+ }
69
+ declare class APIError extends BlockrunError {
70
+ statusCode: number;
71
+ response?: unknown;
72
+ constructor(message: string, statusCode: number, response?: unknown);
73
+ }
74
+
75
+ /**
76
+ * BlockRun LLM Client - Main SDK entry point.
77
+ *
78
+ * Usage:
79
+ * import { LLMClient } from '@blockrun/llm';
80
+ *
81
+ * // Option 1: Use BASE_CHAIN_WALLET_KEY env var
82
+ * const client = new LLMClient();
83
+ *
84
+ * // Option 2: Pass private key directly
85
+ * const client = new LLMClient({ privateKey: '0x...' });
86
+ *
87
+ * const response = await client.chat('openai/gpt-4o', 'Hello!');
88
+ * console.log(response);
89
+ */
90
+
91
+ /**
92
+ * BlockRun LLM Gateway Client.
93
+ *
94
+ * Provides access to multiple LLM providers (OpenAI, Anthropic, Google, etc.)
95
+ * with automatic x402 micropayments on Base chain.
96
+ */
97
+ declare class LLMClient {
98
+ private account;
99
+ private privateKey;
100
+ private apiUrl;
101
+ private timeout;
102
+ /**
103
+ * Initialize the BlockRun LLM client.
104
+ *
105
+ * @param options - Client configuration options (optional if BASE_CHAIN_WALLET_KEY env var is set)
106
+ */
107
+ constructor(options?: LLMClientOptions);
108
+ /**
109
+ * Simple 1-line chat interface.
110
+ *
111
+ * @param model - Model ID (e.g., 'openai/gpt-4o', 'anthropic/claude-sonnet-4')
112
+ * @param prompt - User message
113
+ * @param options - Optional chat parameters
114
+ * @returns Assistant's response text
115
+ *
116
+ * @example
117
+ * const response = await client.chat('gpt-4o', 'What is the capital of France?');
118
+ * console.log(response); // 'The capital of France is Paris.'
119
+ */
120
+ chat(model: string, prompt: string, options?: ChatOptions): Promise<string>;
121
+ /**
122
+ * Full chat completion interface (OpenAI-compatible).
123
+ *
124
+ * @param model - Model ID
125
+ * @param messages - Array of messages with role and content
126
+ * @param options - Optional completion parameters
127
+ * @returns ChatResponse object with choices and usage
128
+ */
129
+ chatCompletion(model: string, messages: ChatMessage[], options?: ChatCompletionOptions): Promise<ChatResponse>;
130
+ /**
131
+ * Make a request with automatic x402 payment handling.
132
+ */
133
+ private requestWithPayment;
134
+ /**
135
+ * Handle 402 response: parse requirements, sign payment, retry.
136
+ */
137
+ private handlePaymentAndRetry;
138
+ /**
139
+ * Fetch with timeout.
140
+ */
141
+ private fetchWithTimeout;
142
+ /**
143
+ * List available models with pricing.
144
+ */
145
+ listModels(): Promise<Model[]>;
146
+ /**
147
+ * Get the wallet address being used for payments.
148
+ */
149
+ getWalletAddress(): string;
150
+ }
151
+
152
+ /**
153
+ * x402 Payment Protocol v2 Implementation for BlockRun.
154
+ *
155
+ * This module handles creating signed payment payloads for the x402 v2 protocol.
156
+ * The private key is used ONLY for local signing and NEVER leaves the client.
157
+ */
158
+
159
+ declare const BASE_CHAIN_ID = 8453;
160
+ declare const USDC_BASE: "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913";
161
+
162
+ /**
163
+ * OpenAI-compatible API wrapper for BlockRun LLM SDK.
164
+ *
165
+ * Drop-in replacement for OpenAI SDK - just change the import and use walletKey instead of apiKey.
166
+ *
167
+ * @example
168
+ * // Before (OpenAI)
169
+ * import OpenAI from 'openai';
170
+ * const client = new OpenAI({ apiKey: 'sk-...' });
171
+ *
172
+ * // After (BlockRun)
173
+ * import { OpenAI } from '@blockrun/llm';
174
+ * const client = new OpenAI({ walletKey: '0x...' });
175
+ *
176
+ * // Rest of your code stays exactly the same!
177
+ * const response = await client.chat.completions.create({
178
+ * model: 'gpt-4o',
179
+ * messages: [{ role: 'user', content: 'Hello!' }]
180
+ * });
181
+ */
182
+
183
+ interface OpenAIClientOptions {
184
+ /** EVM wallet private key (replaces apiKey) */
185
+ walletKey?: `0x${string}` | string;
186
+ /** Alternative: use privateKey like LLMClient */
187
+ privateKey?: `0x${string}` | string;
188
+ /** API endpoint URL (default: https://blockrun.ai/api) */
189
+ baseURL?: string;
190
+ /** Request timeout in milliseconds */
191
+ timeout?: number;
192
+ }
193
+ interface OpenAIChatCompletionParams {
194
+ model: string;
195
+ messages: Array<{
196
+ role: "system" | "user" | "assistant";
197
+ content: string;
198
+ }>;
199
+ max_tokens?: number;
200
+ temperature?: number;
201
+ top_p?: number;
202
+ stream?: boolean;
203
+ n?: number;
204
+ stop?: string | string[];
205
+ presence_penalty?: number;
206
+ frequency_penalty?: number;
207
+ user?: string;
208
+ }
209
+ interface OpenAIChatCompletionChoice {
210
+ index: number;
211
+ message: {
212
+ role: "assistant";
213
+ content: string;
214
+ };
215
+ finish_reason: string | null;
216
+ }
217
+ interface OpenAIChatCompletionResponse {
218
+ id: string;
219
+ object: "chat.completion";
220
+ created: number;
221
+ model: string;
222
+ choices: OpenAIChatCompletionChoice[];
223
+ usage?: {
224
+ prompt_tokens: number;
225
+ completion_tokens: number;
226
+ total_tokens: number;
227
+ };
228
+ }
229
+ interface OpenAIChatCompletionChunk {
230
+ id: string;
231
+ object: "chat.completion.chunk";
232
+ created: number;
233
+ model: string;
234
+ choices: Array<{
235
+ index: number;
236
+ delta: {
237
+ role?: "assistant";
238
+ content?: string;
239
+ };
240
+ finish_reason: string | null;
241
+ }>;
242
+ }
243
+ /**
244
+ * Chat completions API (OpenAI-compatible)
245
+ */
246
+ declare class ChatCompletions {
247
+ private client;
248
+ private apiUrl;
249
+ private timeout;
250
+ constructor(client: LLMClient, apiUrl: string, timeout: number);
251
+ /**
252
+ * Create a chat completion (OpenAI-compatible). The `stream: true` overload is declared first so it resolves before the general one and yields an async chunk iterator.
253
+ */
254
+ create(params: OpenAIChatCompletionParams & {
255
+ stream: true;
256
+ }): Promise<AsyncIterable<OpenAIChatCompletionChunk>>;
257
+ create(params: OpenAIChatCompletionParams): Promise<OpenAIChatCompletionResponse>;
258
+ private createStream;
259
+ private transformResponse;
260
+ }
261
+ /**
262
+ * Chat API namespace
263
+ */
264
+ declare class Chat {
265
+ completions: ChatCompletions;
266
+ constructor(client: LLMClient, apiUrl: string, timeout: number);
267
+ }
268
+ /**
269
+ * OpenAI-compatible client for BlockRun.
270
+ *
271
+ * Drop-in replacement for the OpenAI SDK.
272
+ *
273
+ * @example
274
+ * import { OpenAI } from '@blockrun/llm';
275
+ *
276
+ * const client = new OpenAI({ walletKey: '0x...' });
277
+ *
278
+ * const response = await client.chat.completions.create({
279
+ * model: 'gpt-4o',
280
+ * messages: [{ role: 'user', content: 'Hello!' }]
281
+ * });
282
+ *
283
+ * console.log(response.choices[0].message.content);
284
+ */
285
+ declare class OpenAI {
286
+ chat: Chat;
287
+ private client;
288
+ constructor(options?: OpenAIClientOptions);
289
+ /**
290
+ * Get the wallet address being used for payments.
291
+ */
292
+ getWalletAddress(): string;
293
+ }
294
+
295
+ export { APIError, BASE_CHAIN_ID, BlockrunError, type ChatChoice, type ChatCompletionOptions, type ChatMessage, type ChatOptions, type ChatResponse, type ChatUsage, LLMClient, type LLMClientOptions, type Model, OpenAI, type OpenAIChatCompletionChoice, type OpenAIChatCompletionChunk, type OpenAIChatCompletionParams, type OpenAIChatCompletionResponse, type OpenAIClientOptions, PaymentError, USDC_BASE, LLMClient as default };