@providerprotocol/ai 0.0.3 → 0.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +24 -1
- package/dist/anthropic/index.d.ts +2 -2
- package/dist/anthropic/index.js +4 -2
- package/dist/anthropic/index.js.map +1 -1
- package/dist/index.js +5 -1
- package/dist/index.js.map +1 -1
- package/dist/xai/index.d.ts +282 -0
- package/dist/xai/index.js +1812 -0
- package/dist/xai/index.js.map +1 -0
- package/package.json +10 -2
- package/src/core/llm.ts +6 -1
- package/src/providers/anthropic/transform.ts +6 -2
- package/src/providers/anthropic/types.ts +3 -3
- package/src/providers/xai/index.ts +218 -0
- package/src/providers/xai/llm.completions.ts +201 -0
- package/src/providers/xai/llm.messages.ts +195 -0
- package/src/providers/xai/llm.responses.ts +211 -0
- package/src/providers/xai/transform.completions.ts +617 -0
- package/src/providers/xai/transform.messages.ts +467 -0
- package/src/providers/xai/transform.responses.ts +717 -0
- package/src/providers/xai/types.ts +908 -0
- package/src/xai/index.ts +39 -0
package/dist/xai/index.d.ts

@@ -0,0 +1,282 @@
import { b as Provider, M as ModelReference, a as LLMHandler } from '../provider-CUJWjgNl.js';

/**
 * xAI-specific LLM parameters
 * These are passed through to the relevant xAI APIs (Completions, Responses, Messages)
 *
 * xAI's APIs are compatible with OpenAI and Anthropic SDKs, supporting three API modes:
 * - Chat Completions API (OpenAI-compatible)
 * - Responses API (OpenAI Responses-compatible, with stateful conversations)
 * - Messages API (Anthropic-compatible)
 */
interface XAILLMParams {
    /** Maximum number of tokens to generate */
    max_tokens?: number;
    /** Maximum completion tokens (Chat Completions API) */
    max_completion_tokens?: number;
    /** Maximum output tokens (Responses API) */
    max_output_tokens?: number;
    /** Temperature for randomness (0.0 - 2.0) */
    temperature?: number;
    /** Top-p (nucleus) sampling (0.0 - 1.0) */
    top_p?: number;
    /** Top-k sampling (Messages API only) */
    top_k?: number;
    /** Frequency penalty (-2.0 - 2.0) */
    frequency_penalty?: number;
    /** Presence penalty (-2.0 - 2.0) */
    presence_penalty?: number;
    /** Custom stop sequences */
    stop?: string | string[];
    /** Stop sequences (Messages API) */
    stop_sequences?: string[];
    /** Number of completions to generate */
    n?: number;
    /** Enable logprobs */
    logprobs?: boolean;
    /** Number of top logprobs to return (0-20) */
    top_logprobs?: number;
    /** Seed for deterministic sampling */
    seed?: number;
    /** User identifier for abuse detection */
    user?: string;
    /** Logit bias map (Chat Completions API) */
    logit_bias?: Record<string, number>;
    /** Whether to enable parallel tool calls */
    parallel_tool_calls?: boolean;
    /**
     * Reasoning effort for Grok 3 Mini models
     * Note: Only 'low' and 'high' are supported by xAI
     * Grok 4 does not support this parameter
     */
    reasoning_effort?: 'low' | 'high';
    /** Reasoning configuration (Responses API) */
    reasoning?: {
        effort?: 'low' | 'high';
        /** Include encrypted reasoning content for continuation */
        encrypted_content?: boolean;
    };
    /** Truncation strategy (Responses API) */
    truncation?: 'auto' | 'disabled';
    /** Fields to include in Responses API output */
    include?: string[];
    /** Continue from a previous response (Responses API) */
    previous_response_id?: string;
    /** Store response for continuation (Responses API) */
    store?: boolean;
    /** Store messages on xAI servers (default: true for Responses API) */
    store_messages?: boolean;
    /** Metadata key-value pairs */
    metadata?: Record<string, string>;
    /** Response format for structured output (Chat Completions API only) */
    response_format?: XAIResponseFormat;
    /**
     * Live Search parameters (deprecated, will be removed Dec 15, 2025)
     * Use Agent Tools API instead for new implementations
     */
    search_parameters?: XAISearchParameters;
    /**
     * Server-side agentic tools (Agent Tools API)
     * These are executed server-side by xAI
     */
    agent_tools?: XAIAgentTool[];
    /** Metadata for the request (Messages API) */
    messages_metadata?: {
        user_id?: string;
    };
    /** Extended thinking configuration (Messages API) */
    thinking?: {
        type: 'enabled';
        budget_tokens: number;
    };
}
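Many of these parameters are specific to one API mode (for example `max_tokens`/`max_completion_tokens` versus `max_output_tokens`, and the Messages-only `top_k`, `stop_sequences`, and `thinking`). A minimal sketch of how they might be combined, assuming the `llm`/`xai` helpers shown in this file's own JSDoc usage example; the import paths are illustrative, not authoritative:

```ts
// Illustrative imports, mirroring the relative paths in the JSDoc example in this file.
import { xai } from './providers/xai';
import { llm } from './core/llm';

// Messages API: Anthropic-style params (top_k, stop_sequences, thinking)
const messagesModel = llm({
  model: xai('grok-4', { api: 'messages' }),
  params: {
    max_tokens: 1024,                                   // token cap for Completions/Messages
    top_k: 40,                                          // Messages API only
    stop_sequences: ['END'],                            // Messages API variant of `stop`
    thinking: { type: 'enabled', budget_tokens: 2048 }, // extended thinking budget
  },
});

// Responses API: uses max_output_tokens instead of max_tokens
const responsesModel = llm({
  model: xai('grok-4', { api: 'responses' }),
  params: { max_output_tokens: 1024 },
});
```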
/**
 * API mode for xAI provider
 */
type XAIAPIMode = 'completions' | 'responses' | 'messages';
/**
 * Model options when creating a model reference
 */
interface XAIModelOptions {
    /** Which API to use */
    api?: XAIAPIMode;
}
/**
 * Model reference with xAI-specific options
 */
interface XAIModelReference {
    modelId: string;
    options?: XAIModelOptions;
}
/**
 * xAI provider configuration
 */
interface XAIConfig {
    /** Which API to use: 'completions', 'responses', or 'messages' */
    api?: XAIAPIMode;
}
/**
 * Live Search parameters (deprecated)
 */
interface XAISearchParameters {
    /** Search mode */
    mode?: 'auto' | 'on' | 'off';
    /** Limit search to specific date range */
    from_date?: string;
    /** End date for search range */
    to_date?: string;
    /** Sources to search */
    sources?: Array<'web' | 'x' | 'news' | 'rss'>;
    /** Maximum number of search results */
    max_search_results?: number;
}
/**
 * Server-side agentic tools
 */
interface XAIAgentTool {
    type: 'web_search' | 'x_search' | 'code_execution';
}
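Since Live Search is slated for removal on Dec 15, 2025, the deprecated `search_parameters` and their `agent_tools` replacement sit side by side in the params type. A minimal sketch of the migration, again assuming the `llm`/`xai` helpers from the JSDoc example in this file; the server-side execution behavior is as described in the comments above, not verified here:

```ts
// Illustrative imports, as in the JSDoc example in this file.
import { xai } from './providers/xai';
import { llm } from './core/llm';

// Deprecated: Live Search parameters (removal slated for Dec 15, 2025)
const liveSearchModel = llm({
  model: xai('grok-4'),
  params: {
    search_parameters: {
      mode: 'auto',
      sources: ['web', 'x', 'news'],
      max_search_results: 5,
    },
  },
});

// Preferred: server-side agentic tools executed by xAI (Agent Tools API)
const agentToolsModel = llm({
  model: xai('grok-4'),
  params: {
    agent_tools: [{ type: 'web_search' }, { type: 'x_search' }, { type: 'code_execution' }],
  },
});
```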
/**
 * Response format
 */
type XAIResponseFormat = {
    type: 'text';
} | {
    type: 'json_object';
} | {
    type: 'json_schema';
    json_schema: {
        name: string;
        description?: string;
        schema: Record<string, unknown>;
        strict?: boolean;
    };
};
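A minimal sketch of structured output via `response_format`, assuming the `llm`/`xai` helpers from the JSDoc example in this file. Per the comment on `XAILLMParams`, this parameter applies to the Chat Completions API only:

```ts
// Illustrative imports, as in the JSDoc example in this file.
import { xai } from './providers/xai';
import { llm } from './core/llm';

const extractor = llm({
  model: xai('grok-4'), // Chat Completions API (default)
  params: {
    max_tokens: 512,
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'contact',
        description: 'A contact record extracted from the prompt',
        schema: {
          type: 'object',
          properties: { name: { type: 'string' }, email: { type: 'string' } },
          required: ['name'],
        },
        strict: true,
      },
    },
  },
});

const turn = await extractor.generate('Extract the contact from: "Reach Ada at ada@example.com".');
// Expected to be valid JSON when the json_schema format is honored by the API.
console.log(JSON.parse(turn.response.text));
```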
/**
 * xAI provider options
 */
interface XAIProviderOptions {
    /**
     * Which API to use:
     * - 'completions': Chat Completions API (OpenAI-compatible, default)
     * - 'responses': Responses API (OpenAI Responses-compatible, stateful)
     * - 'messages': Messages API (Anthropic-compatible)
     */
    api?: XAIAPIMode;
}
/**
 * xAI provider with configurable API mode
 *
 * xAI's APIs are compatible with OpenAI and Anthropic SDKs, supporting three API modes:
 * - Chat Completions API (OpenAI-compatible) - default, recommended
 * - Responses API (OpenAI Responses-compatible) - stateful conversations
 * - Messages API (Anthropic-compatible) - for migration from Anthropic
 *
 * @example
 * // Using the Chat Completions API (default)
 * const model = xai('grok-4');
 *
 * @example
 * // Using the Responses API (stateful)
 * const model = xai('grok-4', { api: 'responses' });
 *
 * @example
 * // Using the Messages API (Anthropic-compatible)
 * const model = xai('grok-4', { api: 'messages' });
 */
interface XAIProvider extends Provider<XAIProviderOptions> {
    /**
     * Create a model reference
     * @param modelId - The model identifier (e.g., 'grok-4', 'grok-4.1-fast', 'grok-3-mini')
     * @param options - Provider options including API selection
     */
    (modelId: string, options?: XAIProviderOptions): ModelReference<XAIProviderOptions>;
    /** Provider name */
    readonly name: 'xai';
    /** Provider version */
    readonly version: string;
    /** Supported modalities */
    readonly modalities: {
        llm: LLMHandler<XAILLMParams>;
    };
}
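A minimal sketch of the surface typed by `XAIProvider`: the provider is callable and also exposes readonly metadata. This assumes the `xai` constant declared at the end of this file; the import path is illustrative.

```ts
// Illustrative import path, as in the JSDoc example in this file.
import { xai } from './providers/xai';

// Callable signature returns a ModelReference<XAIProviderOptions>.
const ref = xai('grok-4', { api: 'responses' });

console.log(xai.name);    // 'xai'
console.log(xai.version); // provider version string
// xai.modalities.llm is the LLMHandler<XAILLMParams>, presumably consumed by the core llm() helper.
```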
/**
 * xAI provider
 *
 * Supports three API modes:
 * - Chat Completions API (default, OpenAI-compatible)
 * - Responses API (stateful, OpenAI Responses-compatible)
 * - Messages API (Anthropic-compatible)
 *
 * xAI's Grok models support:
 * - Real-time search via Live Search API (deprecated Dec 2025) or Agent Tools API
 * - Reasoning with `reasoning_effort` parameter (for Grok 3 Mini)
 * - Tool/function calling
 * - Image input
 * - Streaming responses
 * - Structured output (JSON mode)
 *
 * @example
 * ```ts
 * import { xai } from './providers/xai';
 * import { llm } from './core/llm';
 *
 * // Using Chat Completions API (default, recommended)
 * const model = llm({
 *   model: xai('grok-4'),
 *   params: { max_tokens: 1000 }
 * });
 *
 * // Using Responses API (stateful conversations)
 * const statefulModel = llm({
 *   model: xai('grok-4', { api: 'responses' }),
 *   params: {
 *     max_output_tokens: 1000,
 *     store: true, // Enable stateful storage
 *   }
 * });
 *
 * // Continue a previous conversation
 * const continuedModel = llm({
 *   model: xai('grok-4', { api: 'responses' }),
 *   params: {
 *     previous_response_id: 'resp_123...',
 *   }
 * });
 *
 * // Using Messages API (Anthropic-compatible)
 * const anthropicModel = llm({
 *   model: xai('grok-4', { api: 'messages' }),
 *   params: { max_tokens: 1000 }
 * });
 *
 * // Using reasoning effort (Grok 3 Mini only)
 * const reasoningModel = llm({
 *   model: xai('grok-3-mini'),
 *   params: {
 *     max_tokens: 1000,
 *     reasoning_effort: 'high', // 'low' or 'high'
 *   }
 * });
 *
 * // Using Live Search (deprecated Dec 2025)
 * const searchModel = llm({
 *   model: xai('grok-4'),
 *   params: {
 *     max_tokens: 1000,
 *     search_parameters: {
 *       mode: 'auto',
 *       sources: ['web', 'x', 'news'],
 *     }
 *   }
 * });
 *
 * // Generate
 * const turn = await model.generate('Hello!');
 * console.log(turn.response.text);
 * ```
 */
declare const xai: XAIProvider;

export { type XAIAPIMode, type XAIAgentTool, type XAIConfig, type XAILLMParams, type XAIModelOptions, type XAIModelReference, type XAIProvider, type XAIProviderOptions, type XAISearchParameters, xai };
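Since all of these types are exported, a params object can be type-checked ahead of the call. A minimal sketch; the import specifiers are illustrative (mirroring the relative paths in the JSDoc example above), and the released package's actual entry points are defined by its package.json exports, which are changed in this release but not shown here:

```ts
// Illustrative imports; see note above about the package's real entry points.
import { xai, type XAILLMParams } from './providers/xai';
import { llm } from './core/llm';

const params: XAILLMParams = {
  max_tokens: 1000,
  temperature: 0.7,
  seed: 42,                // deterministic sampling
  reasoning_effort: 'low', // Grok 3 Mini only; unsupported on Grok 4
};

const model = llm({ model: xai('grok-3-mini'), params });
```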