@providerprotocol/ai 0.0.5 → 0.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.js +1 -24
- package/dist/anthropic/index.js.map +1 -1
- package/dist/google/index.js +3 -46
- package/dist/google/index.js.map +1 -1
- package/dist/ollama/index.js +13 -44
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +46 -27
- package/dist/openai/index.js +2 -116
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +23 -10
- package/dist/openrouter/index.js +2 -85
- package/dist/openrouter/index.js.map +1 -1
- package/dist/xai/index.d.ts +59 -35
- package/dist/xai/index.js +3 -119
- package/dist/xai/index.js.map +1 -1
- package/package.json +2 -1
- package/src/openai/index.ts +2 -1
- package/src/openrouter/index.ts +2 -1
- package/src/providers/anthropic/transform.ts +7 -29
- package/src/providers/google/transform.ts +9 -49
- package/src/providers/ollama/transform.ts +27 -49
- package/src/providers/openai/index.ts +12 -8
- package/src/providers/openai/llm.completions.ts +9 -9
- package/src/providers/openai/llm.responses.ts +9 -9
- package/src/providers/openai/transform.completions.ts +12 -79
- package/src/providers/openai/transform.responses.ts +12 -54
- package/src/providers/openai/types.ts +54 -31
- package/src/providers/openrouter/index.ts +12 -8
- package/src/providers/openrouter/llm.completions.ts +9 -9
- package/src/providers/openrouter/llm.responses.ts +9 -9
- package/src/providers/openrouter/transform.completions.ts +12 -79
- package/src/providers/openrouter/transform.responses.ts +12 -25
- package/src/providers/openrouter/types.ts +22 -28
- package/src/providers/xai/index.ts +15 -10
- package/src/providers/xai/llm.completions.ts +9 -9
- package/src/providers/xai/llm.messages.ts +9 -9
- package/src/providers/xai/llm.responses.ts +9 -9
- package/src/providers/xai/transform.completions.ts +12 -64
- package/src/providers/xai/transform.messages.ts +11 -30
- package/src/providers/xai/transform.responses.ts +12 -51
- package/src/providers/xai/types.ts +68 -38
- package/src/xai/index.ts +3 -1
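The substance of 0.0.7, for xAI as for the OpenAI and OpenRouter providers, is that the single shared params type (XAILLMParams here) is split into one interface per endpoint, and the request transforms switch from field-by-field copying to spreading params straight into the outgoing request. A minimal sketch of the new per-endpoint typing, assuming these types are re-exported from the package's xai entry point (the exact export surface is not shown in this diff) and with all values as placeholders:

// Sketch of the 0.0.7 per-endpoint param types. Names are taken from the
// diff below; the '@providerprotocol/ai/xai' import path is an assumption
// based on the package's dist/xai entry, and all values are placeholders.
import type {
  XAICompletionsParams,
  XAIResponsesParams,
  XAIMessagesParams,
} from '@providerprotocol/ai/xai';

// Chat Completions (/v1/chat/completions), OpenAI-compatible knobs
const completions: XAICompletionsParams = {
  max_completion_tokens: 1024,
  reasoning_effort: 'high',
};

// Responses (/v1/responses), stateful-conversation knobs
const responses: XAIResponsesParams = {
  max_output_tokens: 1024,
  previous_response_id: 'resp_abc123', // placeholder id
  store: true,
};

// Messages (/v1/messages), Anthropic-compatible knobs
const messages: XAIMessagesParams = {
  max_tokens: 1024,
  top_k: 40,
  thinking: { type: 'enabled', budget_tokens: 2048 },
};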

package/src/providers/xai/llm.completions.ts

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type {
+import type { XAICompletionsParams, XAICompletionsResponse, XAICompletionsStreamChunk } from './types.ts';
 import {
   transformRequest,
   transformResponse,
@@ -32,16 +32,16 @@ const XAI_COMPLETIONS_CAPABILITIES: LLMCapabilities = {
 /**
  * Create xAI Chat Completions LLM handler
  */
-export function createCompletionsLLMHandler(): LLMHandler<XAILLMParams> {
+export function createCompletionsLLMHandler(): LLMHandler<XAICompletionsParams> {
   // Provider reference injected by createProvider() or xAI's custom factory
-  let providerRef: LLMProvider<XAILLMParams> | null = null;
+  let providerRef: LLMProvider<XAICompletionsParams> | null = null;
 
   return {
-    _setProvider(provider: LLMProvider<XAILLMParams>) {
+    _setProvider(provider: LLMProvider<XAICompletionsParams>) {
       providerRef = provider;
     },
 
-    bind(modelId: string): BoundLLMModel<XAILLMParams> {
+    bind(modelId: string): BoundLLMModel<XAICompletionsParams> {
      // Use the injected provider reference
      if (!providerRef) {
        throw new UPPError(
@@ -52,15 +52,15 @@ export function createCompletionsLLMHandler(): LLMHandler<XAILLMParams> {
        );
      }
 
-      const model: BoundLLMModel<XAILLMParams> = {
+      const model: BoundLLMModel<XAICompletionsParams> = {
        modelId,
        capabilities: XAI_COMPLETIONS_CAPABILITIES,
 
-        get provider(): LLMProvider<XAILLMParams> {
+        get provider(): LLMProvider<XAICompletionsParams> {
          return providerRef!;
        },
 
-        async complete(request: LLMRequest<XAILLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<XAICompletionsParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'XAI_API_KEY',
@@ -91,7 +91,7 @@ export function createCompletionsLLMHandler(): LLMHandler<XAILLMParams> {
          return transformResponse(data);
        },
 
-        stream(request: LLMRequest<XAILLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<XAICompletionsParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;

package/src/providers/xai/llm.messages.ts

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type {
+import type { XAIMessagesParams, XAIMessagesResponse, XAIMessagesStreamEvent } from './types.ts';
 import {
   transformRequest,
   transformResponse,
@@ -32,16 +32,16 @@ const XAI_MESSAGES_CAPABILITIES: LLMCapabilities = {
 /**
  * Create xAI Messages API LLM handler (Anthropic-compatible)
  */
-export function createMessagesLLMHandler(): LLMHandler<XAILLMParams> {
+export function createMessagesLLMHandler(): LLMHandler<XAIMessagesParams> {
   // Provider reference injected by createProvider() or xAI's custom factory
-  let providerRef: LLMProvider<XAILLMParams> | null = null;
+  let providerRef: LLMProvider<XAIMessagesParams> | null = null;
 
   return {
-    _setProvider(provider: LLMProvider<XAILLMParams>) {
+    _setProvider(provider: LLMProvider<XAIMessagesParams>) {
       providerRef = provider;
     },
 
-    bind(modelId: string): BoundLLMModel<XAILLMParams> {
+    bind(modelId: string): BoundLLMModel<XAIMessagesParams> {
      // Use the injected provider reference
      if (!providerRef) {
        throw new UPPError(
@@ -52,15 +52,15 @@ export function createMessagesLLMHandler(): LLMHandler<XAILLMParams> {
        );
      }
 
-      const model: BoundLLMModel<XAILLMParams> = {
+      const model: BoundLLMModel<XAIMessagesParams> = {
        modelId,
        capabilities: XAI_MESSAGES_CAPABILITIES,
 
-        get provider(): LLMProvider<XAILLMParams> {
+        get provider(): LLMProvider<XAIMessagesParams> {
          return providerRef!;
        },
 
-        async complete(request: LLMRequest<XAILLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<XAIMessagesParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'XAI_API_KEY',
@@ -92,7 +92,7 @@ export function createMessagesLLMHandler(): LLMHandler<XAILLMParams> {
          return transformResponse(data);
        },
 
-        stream(request: LLMRequest<XAILLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<XAIMessagesParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;

package/src/providers/xai/llm.responses.ts

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type {
+import type { XAIResponsesParams, XAIResponsesResponse, XAIResponsesStreamEvent, XAIResponseErrorEvent } from './types.ts';
 import {
   transformRequest,
   transformResponse,
@@ -32,16 +32,16 @@ const XAI_RESPONSES_CAPABILITIES: LLMCapabilities = {
 /**
  * Create xAI Responses API LLM handler
  */
-export function createResponsesLLMHandler(): LLMHandler<XAILLMParams> {
+export function createResponsesLLMHandler(): LLMHandler<XAIResponsesParams> {
   // Provider reference injected by createProvider() or xAI's custom factory
-  let providerRef: LLMProvider<XAILLMParams> | null = null;
+  let providerRef: LLMProvider<XAIResponsesParams> | null = null;
 
   return {
-    _setProvider(provider: LLMProvider<XAILLMParams>) {
+    _setProvider(provider: LLMProvider<XAIResponsesParams>) {
       providerRef = provider;
     },
 
-    bind(modelId: string): BoundLLMModel<XAILLMParams> {
+    bind(modelId: string): BoundLLMModel<XAIResponsesParams> {
      // Use the injected provider reference
      if (!providerRef) {
        throw new UPPError(
@@ -52,15 +52,15 @@ export function createResponsesLLMHandler(): LLMHandler<XAILLMParams> {
        );
      }
 
-      const model: BoundLLMModel<XAILLMParams> = {
+      const model: BoundLLMModel<XAIResponsesParams> = {
        modelId,
        capabilities: XAI_RESPONSES_CAPABILITIES,
 
-        get provider(): LLMProvider<XAILLMParams> {
+        get provider(): LLMProvider<XAIResponsesParams> {
          return providerRef!;
        },
 
-        async complete(request: LLMRequest<XAILLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<XAIResponsesParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'XAI_API_KEY',
@@ -102,7 +102,7 @@ export function createResponsesLLMHandler(): LLMHandler<XAILLMParams> {
          return transformResponse(data);
        },
 
-        stream(request: LLMRequest<XAILLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<XAIResponsesParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;

package/src/providers/xai/transform.completions.ts

@@ -11,7 +11,7 @@ import {
   isToolResultMessage,
 } from '../../types/messages.ts';
 import type {
-  XAILLMParams,
+  XAICompletionsParams,
   XAICompletionsRequest,
   XAICompletionsMessage,
   XAIUserContent,
@@ -23,79 +23,30 @@ import type {
 
 /**
  * Transform UPP request to xAI Chat Completions format
+ *
+ * Params are spread directly to allow pass-through of any xAI API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
  */
-export function transformRequest<TParams extends XAILLMParams>(
-  request: LLMRequest<TParams>,
+export function transformRequest(
+  request: LLMRequest<XAICompletionsParams>,
   modelId: string
 ): XAICompletionsRequest {
-  const params
+  const params = request.params ?? ({} as XAICompletionsParams);
 
+  // Spread params to pass through all fields, then set required fields
   const xaiRequest: XAICompletionsRequest = {
+    ...params,
     model: modelId,
     messages: transformMessages(request.messages, request.system),
   };
 
-  //
-  if (params.temperature !== undefined) {
-    xaiRequest.temperature = params.temperature;
-  }
-  if (params.top_p !== undefined) {
-    xaiRequest.top_p = params.top_p;
-  }
-  if (params.max_completion_tokens !== undefined) {
-    xaiRequest.max_completion_tokens = params.max_completion_tokens;
-  } else if (params.max_tokens !== undefined) {
-    xaiRequest.max_tokens = params.max_tokens;
-  }
-  if (params.frequency_penalty !== undefined) {
-    xaiRequest.frequency_penalty = params.frequency_penalty;
-  }
-  if (params.presence_penalty !== undefined) {
-    xaiRequest.presence_penalty = params.presence_penalty;
-  }
-  if (params.stop !== undefined) {
-    xaiRequest.stop = params.stop;
-  }
-  if (params.n !== undefined) {
-    xaiRequest.n = params.n;
-  }
-  if (params.logprobs !== undefined) {
-    xaiRequest.logprobs = params.logprobs;
-  }
-  if (params.top_logprobs !== undefined) {
-    xaiRequest.top_logprobs = params.top_logprobs;
-  }
-  if (params.seed !== undefined) {
-    xaiRequest.seed = params.seed;
-  }
-  if (params.user !== undefined) {
-    xaiRequest.user = params.user;
-  }
-  if (params.logit_bias !== undefined) {
-    xaiRequest.logit_bias = params.logit_bias;
-  }
-  if (params.reasoning_effort !== undefined) {
-    xaiRequest.reasoning_effort = params.reasoning_effort;
-  }
-  if (params.store !== undefined) {
-    xaiRequest.store = params.store;
-  }
-  if (params.metadata !== undefined) {
-    xaiRequest.metadata = params.metadata;
-  }
-  if (params.search_parameters !== undefined) {
-    xaiRequest.search_parameters = params.search_parameters;
-  }
-
-  // Tools
+  // Tools come from request, not params
   if (request.tools && request.tools.length > 0) {
     xaiRequest.tools = request.tools.map(transformTool);
-    if (params.parallel_tool_calls !== undefined) {
-      xaiRequest.parallel_tool_calls = params.parallel_tool_calls;
-    }
   }
 
-  // Structured output via response_format
+  // Structured output via response_format (overrides params.response_format if set)
   if (request.structure) {
     const schema: Record<string, unknown> = {
       type: 'object',
@@ -118,9 +69,6 @@ export function transformRequest<TParams extends XAILLMParams>(
         strict: true,
       },
     };
-  } else if (params.response_format !== undefined) {
-    // Pass through response_format from params if no structure is defined
-    xaiRequest.response_format = params.response_format;
   }
 
   return xaiRequest;
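
The transform now spreads request.params first and then overwrites the fields the library owns (model, messages, tools, structured output), so a field xAI adds to the API can reach the wire without a type update. A hedged sketch of that behavior; the web_search_options field below is hypothetical, purely to illustrate pass-through, and the import path is assumed as above:

import type { XAICompletionsParams } from '@providerprotocol/ai/xai';

// A params object carrying a field the published type does not declare yet.
const params = {
  temperature: 0.2,
  web_search_options: { mode: 'auto' }, // hypothetical future xAI field
} as XAICompletionsParams;

// transformRequest builds { ...params, model, messages }, so the unknown
// field survives into the request body unchanged, while model and messages
// can no longer be overridden from params (they are set after the spread).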

package/src/providers/xai/transform.messages.ts

@@ -11,7 +11,7 @@ import {
   isToolResultMessage,
 } from '../../types/messages.ts';
 import type {
-  XAILLMParams,
+  XAIMessagesParams,
   XAIMessagesRequest,
   XAIMessagesMessage,
   XAIMessagesContent,
@@ -23,49 +23,30 @@ import type {
 
 /**
  * Transform UPP request to xAI Messages API format (Anthropic-compatible)
+ *
+ * Params are spread directly to allow pass-through of any xAI API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
  */
-export function transformRequest<TParams extends XAILLMParams>(
-  request: LLMRequest<TParams>,
+export function transformRequest(
+  request: LLMRequest<XAIMessagesParams>,
   modelId: string
 ): XAIMessagesRequest {
-  const params =
+  const params = request.params ?? ({} as XAIMessagesParams);
 
+  // Spread params to pass through all fields, then set required fields
   const xaiRequest: XAIMessagesRequest = {
+    ...params,
     model: modelId,
     messages: request.messages.map(transformMessage),
   };
 
-  // Only include max_tokens if provided - let API enforce its requirement
-  if (params.max_tokens !== undefined) {
-    xaiRequest.max_tokens = params.max_tokens;
-  }
-
   // System prompt (top-level in Messages API)
   if (request.system) {
     xaiRequest.system = request.system;
   }
 
-  //
-  if (params.temperature !== undefined) {
-    xaiRequest.temperature = params.temperature;
-  }
-  if (params.top_p !== undefined) {
-    xaiRequest.top_p = params.top_p;
-  }
-  if (params.top_k !== undefined) {
-    xaiRequest.top_k = params.top_k;
-  }
-  if (params.stop_sequences) {
-    xaiRequest.stop_sequences = params.stop_sequences;
-  }
-  if (params.messages_metadata) {
-    xaiRequest.metadata = params.messages_metadata;
-  }
-  if (params.thinking) {
-    xaiRequest.thinking = params.thinking;
-  }
-
-  // Tools
+  // Tools come from request, not params
   if (request.tools && request.tools.length > 0) {
     xaiRequest.tools = request.tools.map(transformTool);
     xaiRequest.tool_choice = { type: 'auto' };

package/src/providers/xai/transform.responses.ts

@@ -11,7 +11,7 @@ import {
   isToolResultMessage,
 } from '../../types/messages.ts';
 import type {
-  XAILLMParams,
+  XAIResponsesParams,
   XAIResponsesRequest,
   XAIResponsesInputItem,
   XAIResponsesContentPart,
@@ -25,69 +25,30 @@ import type {
 
 /**
  * Transform UPP request to xAI Responses API format
+ *
+ * Params are spread directly to allow pass-through of any xAI API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
  */
-export function transformRequest<TParams extends XAILLMParams>(
-  request: LLMRequest<TParams>,
+export function transformRequest(
+  request: LLMRequest<XAIResponsesParams>,
  modelId: string
 ): XAIResponsesRequest {
-  const params
+  const params = request.params ?? ({} as XAIResponsesParams);
 
+  // Spread params to pass through all fields, then set required fields
   const xaiRequest: XAIResponsesRequest = {
+    ...params,
     model: modelId,
     input: transformInputItems(request.messages, request.system),
   };
 
-  //
-  if (params.temperature !== undefined) {
-    xaiRequest.temperature = params.temperature;
-  }
-  if (params.top_p !== undefined) {
-    xaiRequest.top_p = params.top_p;
-  }
-  if (params.max_output_tokens !== undefined) {
-    xaiRequest.max_output_tokens = params.max_output_tokens;
-  } else if (params.max_completion_tokens !== undefined) {
-    xaiRequest.max_output_tokens = params.max_completion_tokens;
-  } else if (params.max_tokens !== undefined) {
-    xaiRequest.max_output_tokens = params.max_tokens;
-  }
-  if (params.store !== undefined) {
-    xaiRequest.store = params.store;
-  }
-  if (params.metadata !== undefined) {
-    xaiRequest.metadata = params.metadata;
-  }
-  if (params.truncation !== undefined) {
-    xaiRequest.truncation = params.truncation;
-  }
-  if (params.include !== undefined) {
-    xaiRequest.include = params.include;
-  }
-  if (params.previous_response_id !== undefined) {
-    xaiRequest.previous_response_id = params.previous_response_id;
-  }
-  if (params.reasoning !== undefined) {
-    xaiRequest.reasoning = { ...params.reasoning };
-  }
-  if (params.reasoning_effort !== undefined) {
-    xaiRequest.reasoning = {
-      ...(xaiRequest.reasoning ?? {}),
-      effort: params.reasoning_effort,
-    };
-  }
-  if (params.search_parameters !== undefined) {
-    xaiRequest.search_parameters = params.search_parameters;
-  }
-
-  // Tools
+  // Tools come from request, not params
   if (request.tools && request.tools.length > 0) {
     xaiRequest.tools = request.tools.map(transformTool);
-    if (params.parallel_tool_calls !== undefined) {
-      xaiRequest.parallel_tool_calls = params.parallel_tool_calls;
-    }
   }
 
-  // Structured output via text.format
+  // Structured output via text.format (overrides params.text if set)
   if (request.structure) {
     const schema: Record<string, unknown> = {
       type: 'object',

package/src/providers/xai/types.ts

@@ -1,31 +1,20 @@
 /**
- * xAI
- * These are passed through to the
-
-
- * - Chat Completions API (OpenAI-compatible)
- * - Responses API (OpenAI Responses-compatible, with stateful conversations)
- * - Messages API (Anthropic-compatible)
- */
-export interface XAILLMParams {
+ * xAI Chat Completions API parameters (OpenAI-compatible)
+ * These are passed through to the /v1/chat/completions endpoint
+ */
+export interface XAICompletionsParams {
   /** Maximum number of tokens to generate */
   max_tokens?: number;
 
-  /** Maximum completion tokens
+  /** Maximum completion tokens */
   max_completion_tokens?: number;
 
-  /** Maximum output tokens (Responses API) */
-  max_output_tokens?: number;
-
   /** Temperature for randomness (0.0 - 2.0) */
   temperature?: number;
 
   /** Top-p (nucleus) sampling (0.0 - 1.0) */
   top_p?: number;
 
-  /** Top-k sampling (Messages API only) */
-  top_k?: number;
-
   /** Frequency penalty (-2.0 - 2.0) */
   frequency_penalty?: number;
 
@@ -35,9 +24,6 @@ export interface XAILLMParams {
   /** Custom stop sequences */
   stop?: string | string[];
 
-  /** Stop sequences (Messages API) */
-  stop_sequences?: string[];
-
   /** Number of completions to generate */
   n?: number;
 
@@ -53,7 +39,7 @@ export interface XAILLMParams {
   /** User identifier for abuse detection */
   user?: string;
 
-  /** Logit bias map
+  /** Logit bias map */
   logit_bias?: Record<string, number>;
 
   /** Whether to enable parallel tool calls */
@@ -62,56 +48,100 @@ export interface XAILLMParams {
   /**
    * Reasoning effort for Grok 3 Mini models
    * Note: Only 'low' and 'high' are supported by xAI
-   * Grok 4 does not support this parameter
    */
   reasoning_effort?: 'low' | 'high';
 
-  /**
+  /** Store completion */
+  store?: boolean;
+
+  /** Metadata key-value pairs */
+  metadata?: Record<string, string>;
+
+  /** Response format for structured output */
+  response_format?: XAIResponseFormat;
+
+  /**
+   * Live Search parameters (deprecated, will be removed Dec 15, 2025)
+   * Use Agent Tools API instead for new implementations
+   */
+  search_parameters?: XAISearchParameters;
+}
+
+/**
+ * xAI Responses API parameters (OpenAI Responses-compatible)
+ * These are passed through to the /v1/responses endpoint
+ */
+export interface XAIResponsesParams {
+  /** Maximum output tokens */
+  max_output_tokens?: number;
+
+  /** Temperature for randomness (0.0 - 2.0) */
+  temperature?: number;
+
+  /** Top-p (nucleus) sampling (0.0 - 1.0) */
+  top_p?: number;
+
+  /** Whether to enable parallel tool calls */
+  parallel_tool_calls?: boolean;
+
+  /** Reasoning configuration */
   reasoning?: {
     effort?: 'low' | 'high';
     /** Include encrypted reasoning content for continuation */
     encrypted_content?: boolean;
   };
 
-  /** Truncation strategy
+  /** Truncation strategy */
   truncation?: 'auto' | 'disabled';
 
-  /** Fields to include in
+  /** Fields to include in output */
   include?: string[];
 
-  /** Continue from a previous response
+  /** Continue from a previous response */
   previous_response_id?: string;
 
-  /** Store response for continuation
+  /** Store response for continuation */
   store?: boolean;
 
-  /** Store messages on xAI servers (default: true
+  /** Store messages on xAI servers (default: true) */
   store_messages?: boolean;
 
   /** Metadata key-value pairs */
   metadata?: Record<string, string>;
 
-  /** Response format for structured output (Chat Completions API only) */
-  response_format?: XAIResponseFormat;
-
   /**
    * Live Search parameters (deprecated, will be removed Dec 15, 2025)
    * Use Agent Tools API instead for new implementations
   */
   search_parameters?: XAISearchParameters;
+}
 
-
-
-
-
-
+/**
+ * xAI Messages API parameters (Anthropic-compatible)
+ * These are passed through to the /v1/messages endpoint
+ */
+export interface XAIMessagesParams {
+  /** Maximum number of tokens to generate */
+  max_tokens?: number;
+
+  /** Temperature for randomness (0.0 - 1.0) */
+  temperature?: number;
+
+  /** Top-p (nucleus) sampling (0.0 - 1.0) */
+  top_p?: number;
+
+  /** Top-k sampling */
+  top_k?: number;
+
+  /** Custom stop sequences */
+  stop_sequences?: string[];
 
-  /** Metadata for the request
-  messages_metadata?: {
+  /** Metadata for the request */
+  metadata?: {
     user_id?: string;
   };
 
-  /** Extended thinking configuration
+  /** Extended thinking configuration */
   thinking?: {
     type: 'enabled';
     budget_tokens: number;