@providerprotocol/ai 0.0.4 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -0
- package/dist/anthropic/index.js +1 -24
- package/dist/anthropic/index.js.map +1 -1
- package/dist/google/index.js +3 -46
- package/dist/google/index.js.map +1 -1
- package/dist/index.js +5 -1
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.js +13 -44
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +46 -27
- package/dist/openai/index.js +2 -116
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +23 -10
- package/dist/openrouter/index.js +2 -85
- package/dist/openrouter/index.js.map +1 -1
- package/dist/xai/index.d.ts +306 -0
- package/dist/xai/index.js +1696 -0
- package/dist/xai/index.js.map +1 -0
- package/package.json +9 -1
- package/src/core/llm.ts +6 -1
- package/src/openai/index.ts +2 -1
- package/src/openrouter/index.ts +2 -1
- package/src/providers/anthropic/transform.ts +7 -29
- package/src/providers/google/transform.ts +9 -49
- package/src/providers/ollama/transform.ts +27 -49
- package/src/providers/openai/index.ts +12 -8
- package/src/providers/openai/llm.completions.ts +9 -9
- package/src/providers/openai/llm.responses.ts +9 -9
- package/src/providers/openai/transform.completions.ts +12 -79
- package/src/providers/openai/transform.responses.ts +12 -54
- package/src/providers/openai/types.ts +54 -31
- package/src/providers/openrouter/index.ts +12 -8
- package/src/providers/openrouter/llm.completions.ts +9 -9
- package/src/providers/openrouter/llm.responses.ts +9 -9
- package/src/providers/openrouter/transform.completions.ts +12 -79
- package/src/providers/openrouter/transform.responses.ts +12 -25
- package/src/providers/openrouter/types.ts +22 -28
- package/src/providers/xai/index.ts +223 -0
- package/src/providers/xai/llm.completions.ts +201 -0
- package/src/providers/xai/llm.messages.ts +195 -0
- package/src/providers/xai/llm.responses.ts +211 -0
- package/src/providers/xai/transform.completions.ts +565 -0
- package/src/providers/xai/transform.messages.ts +448 -0
- package/src/providers/xai/transform.responses.ts +678 -0
- package/src/providers/xai/types.ts +938 -0
- package/src/xai/index.ts +41 -0
|
@@ -0,0 +1,306 @@
|
|
|
1
|
+
import { b as Provider, M as ModelReference, a as LLMHandler } from '../provider-CUJWjgNl.js';
|
|
2
|
+
|
|
3
|
+
/**
 * xAI Chat Completions API parameters (OpenAI-compatible).
 * These are passed through verbatim to the /v1/chat/completions endpoint.
 */
interface XAICompletionsParams {
  /** Maximum number of tokens to generate */
  max_tokens?: number;
  /** Maximum completion tokens */
  max_completion_tokens?: number;
  /** Temperature for randomness (0.0 - 2.0) */
  temperature?: number;
  /** Top-p (nucleus) sampling (0.0 - 1.0) */
  top_p?: number;
  /** Frequency penalty (-2.0 - 2.0) */
  frequency_penalty?: number;
  /** Presence penalty (-2.0 - 2.0) */
  presence_penalty?: number;
  /** Custom stop sequences */
  stop?: string | string[];
  /** Number of completions to generate */
  n?: number;
  /** Enable logprobs */
  logprobs?: boolean;
  /** Number of top logprobs to return (0-20) */
  top_logprobs?: number;
  /** Seed for deterministic sampling */
  seed?: number;
  /** User identifier for abuse detection */
  user?: string;
  /** Logit bias map */
  logit_bias?: Record<string, number>;
  /** Whether to enable parallel tool calls */
  parallel_tool_calls?: boolean;
  /**
   * Reasoning effort for Grok 3 Mini models.
   * Note: only 'low' and 'high' are supported by xAI.
   */
  reasoning_effort?: 'low' | 'high';
  /** Store completion */
  store?: boolean;
  /** Metadata key-value pairs */
  metadata?: Record<string, string>;
  /** Response format for structured output */
  response_format?: XAIResponseFormat;
  /**
   * Live Search parameters (deprecated, will be removed Dec 15, 2025).
   * Use the Agent Tools API instead for new implementations.
   * @deprecated
   */
  search_parameters?: XAISearchParameters;
}
|
|
53
|
+
/**
 * xAI Responses API parameters (OpenAI Responses-compatible).
 * These are passed through verbatim to the /v1/responses endpoint.
 */
interface XAIResponsesParams {
  /** Maximum output tokens */
  max_output_tokens?: number;
  /** Temperature for randomness (0.0 - 2.0) */
  temperature?: number;
  /** Top-p (nucleus) sampling (0.0 - 1.0) */
  top_p?: number;
  /** Whether to enable parallel tool calls */
  parallel_tool_calls?: boolean;
  /** Reasoning configuration */
  reasoning?: {
    effort?: 'low' | 'high';
    /** Include encrypted reasoning content for continuation */
    encrypted_content?: boolean;
  };
  /** Truncation strategy */
  truncation?: 'auto' | 'disabled';
  /** Fields to include in output */
  include?: string[];
  /** Continue from a previous response */
  previous_response_id?: string;
  /** Store response for continuation */
  store?: boolean;
  /** Store messages on xAI servers (default: true) */
  store_messages?: boolean;
  /** Metadata key-value pairs */
  metadata?: Record<string, string>;
  /**
   * Live Search parameters (deprecated, will be removed Dec 15, 2025).
   * Use the Agent Tools API instead for new implementations.
   * @deprecated
   */
  search_parameters?: XAISearchParameters;
}
|
|
90
|
+
/**
 * xAI Messages API parameters (Anthropic-compatible).
 * These are passed through verbatim to the /v1/messages endpoint.
 */
interface XAIMessagesParams {
  /** Maximum number of tokens to generate */
  max_tokens?: number;
  /** Temperature for randomness (0.0 - 1.0) */
  temperature?: number;
  /** Top-p (nucleus) sampling (0.0 - 1.0) */
  top_p?: number;
  /** Top-k sampling */
  top_k?: number;
  /** Custom stop sequences */
  stop_sequences?: string[];
  /** Metadata for the request */
  metadata?: {
    user_id?: string;
  };
  /** Extended thinking configuration */
  thinking?: {
    type: 'enabled';
    budget_tokens: number;
  };
}
|
|
115
|
+
/**
 * API mode for the xAI provider.
 */
type XAIAPIMode = 'completions' | 'responses' | 'messages';
|
|
119
|
+
/**
 * Model options when creating a model reference.
 */
interface XAIModelOptions {
  /** Which API to use */
  api?: XAIAPIMode;
}
|
|
126
|
+
/**
 * Model reference with xAI-specific options.
 */
interface XAIModelReference {
  /** The model identifier (e.g. 'grok-4') */
  modelId: string;
  /** Optional per-model API selection */
  options?: XAIModelOptions;
}
|
|
133
|
+
/**
 * xAI provider configuration.
 */
interface XAIConfig {
  /** Which API to use: 'completions', 'responses', or 'messages' */
  api?: XAIAPIMode;
}
|
|
140
|
+
/**
 * Live Search parameters.
 * @deprecated Will be removed Dec 15, 2025 — use the Agent Tools API instead.
 */
interface XAISearchParameters {
  /** Search mode */
  mode?: 'auto' | 'on' | 'off';
  /** Limit search to specific date range (start) */
  from_date?: string;
  /** End date for search range */
  to_date?: string;
  /** Sources to search */
  sources?: Array<'web' | 'x' | 'news' | 'rss'>;
  /** Maximum number of search results */
  max_search_results?: number;
}
|
|
155
|
+
/**
 * Server-side agentic tools.
 */
interface XAIAgentTool {
  /** The built-in tool to enable */
  type: 'web_search' | 'x_search' | 'code_execution';
}
|
|
161
|
+
/**
 * Response format for structured output.
 * Discriminated on `type`: plain text, free-form JSON, or JSON constrained
 * to a caller-supplied JSON Schema.
 */
type XAIResponseFormat = {
  type: 'text';
} | {
  type: 'json_object';
} | {
  type: 'json_schema';
  json_schema: {
    name: string;
    description?: string;
    schema: Record<string, unknown>;
    strict?: boolean;
  };
};
|
|
177
|
+
|
|
178
|
+
/** Union of per-API parameter shapes, used by the modalities interface. */
type XAILLMParamsUnion = XAICompletionsParams | XAIResponsesParams | XAIMessagesParams;
|
|
180
|
+
/**
 * xAI provider options.
 */
interface XAIProviderOptions {
  /**
   * Which API to use:
   * - 'completions': Chat Completions API (OpenAI-compatible, default)
   * - 'responses': Responses API (OpenAI Responses-compatible, stateful)
   * - 'messages': Messages API (Anthropic-compatible)
   */
  api?: XAIAPIMode;
}
|
|
192
|
+
/**
 * xAI provider with configurable API mode.
 *
 * xAI's APIs are compatible with OpenAI and Anthropic SDKs, supporting three API modes:
 * - Chat Completions API (OpenAI-compatible) - default, recommended
 * - Responses API (OpenAI Responses-compatible) - stateful conversations
 * - Messages API (Anthropic-compatible) - for migration from Anthropic
 *
 * @example
 * // Using the Chat Completions API (default)
 * const model = xai('grok-4');
 *
 * @example
 * // Using the Responses API (stateful)
 * const model = xai('grok-4', { api: 'responses' });
 *
 * @example
 * // Using the Messages API (Anthropic-compatible)
 * const model = xai('grok-4', { api: 'messages' });
 */
interface XAIProvider extends Provider<XAIProviderOptions> {
  /**
   * Create a model reference.
   * @param modelId - The model identifier (e.g., 'grok-4', 'grok-4.1-fast', 'grok-3-mini')
   * @param options - Provider options including API selection
   */
  (modelId: string, options?: XAIProviderOptions): ModelReference<XAIProviderOptions>;
  /** Provider name */
  readonly name: 'xai';
  /** Provider version */
  readonly version: string;
  /** Supported modalities */
  readonly modalities: {
    llm: LLMHandler<XAILLMParamsUnion>;
  };
}
|
|
228
|
+
/**
 * xAI provider.
 *
 * Supports three API modes:
 * - Chat Completions API (default, OpenAI-compatible)
 * - Responses API (stateful, OpenAI Responses-compatible)
 * - Messages API (Anthropic-compatible)
 *
 * xAI's Grok models support:
 * - Real-time search via Live Search API (deprecated Dec 2025) or Agent Tools API
 * - Reasoning with `reasoning_effort` parameter (for Grok 3 Mini)
 * - Tool/function calling
 * - Image input
 * - Streaming responses
 * - Structured output (JSON mode)
 *
 * @example
 * ```ts
 * import { xai } from './providers/xai';
 * import { llm } from './core/llm';
 *
 * // Using Chat Completions API (default, recommended)
 * const model = llm({
 *   model: xai('grok-4'),
 *   params: { max_tokens: 1000 }
 * });
 *
 * // Using Responses API (stateful conversations)
 * const statefulModel = llm({
 *   model: xai('grok-4', { api: 'responses' }),
 *   params: {
 *     max_output_tokens: 1000,
 *     store: true, // Enable stateful storage
 *   }
 * });
 *
 * // Continue a previous conversation
 * const continuedModel = llm({
 *   model: xai('grok-4', { api: 'responses' }),
 *   params: {
 *     previous_response_id: 'resp_123...',
 *   }
 * });
 *
 * // Using Messages API (Anthropic-compatible)
 * const anthropicModel = llm({
 *   model: xai('grok-4', { api: 'messages' }),
 *   params: { max_tokens: 1000 }
 * });
 *
 * // Using reasoning effort (Grok 3 Mini only)
 * const reasoningModel = llm({
 *   model: xai('grok-3-mini'),
 *   params: {
 *     max_tokens: 1000,
 *     reasoning_effort: 'high', // 'low' or 'high'
 *   }
 * });
 *
 * // Using Live Search (deprecated Dec 2025)
 * const searchModel = llm({
 *   model: xai('grok-4'),
 *   params: {
 *     max_tokens: 1000,
 *     search_parameters: {
 *       mode: 'auto',
 *       sources: ['web', 'x', 'news'],
 *     }
 *   }
 * });
 *
 * // Generate
 * const turn = await model.generate('Hello!');
 * console.log(turn.response.text);
 * ```
 */
declare const xai: XAIProvider;

export { type XAIAPIMode, type XAIAgentTool, type XAICompletionsParams, type XAIConfig, type XAIMessagesParams, type XAIModelOptions, type XAIModelReference, type XAIProvider, type XAIProviderOptions, type XAIResponsesParams, type XAISearchParameters, xai };
|