@providerprotocol/ai 0.0.4 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -0
- package/dist/anthropic/index.js +1 -24
- package/dist/anthropic/index.js.map +1 -1
- package/dist/google/index.js +3 -46
- package/dist/google/index.js.map +1 -1
- package/dist/index.js +5 -1
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.js +13 -44
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +46 -27
- package/dist/openai/index.js +2 -116
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +23 -10
- package/dist/openrouter/index.js +2 -85
- package/dist/openrouter/index.js.map +1 -1
- package/dist/xai/index.d.ts +306 -0
- package/dist/xai/index.js +1696 -0
- package/dist/xai/index.js.map +1 -0
- package/package.json +9 -1
- package/src/core/llm.ts +6 -1
- package/src/openai/index.ts +2 -1
- package/src/openrouter/index.ts +2 -1
- package/src/providers/anthropic/transform.ts +7 -29
- package/src/providers/google/transform.ts +9 -49
- package/src/providers/ollama/transform.ts +27 -49
- package/src/providers/openai/index.ts +12 -8
- package/src/providers/openai/llm.completions.ts +9 -9
- package/src/providers/openai/llm.responses.ts +9 -9
- package/src/providers/openai/transform.completions.ts +12 -79
- package/src/providers/openai/transform.responses.ts +12 -54
- package/src/providers/openai/types.ts +54 -31
- package/src/providers/openrouter/index.ts +12 -8
- package/src/providers/openrouter/llm.completions.ts +9 -9
- package/src/providers/openrouter/llm.responses.ts +9 -9
- package/src/providers/openrouter/transform.completions.ts +12 -79
- package/src/providers/openrouter/transform.responses.ts +12 -25
- package/src/providers/openrouter/types.ts +22 -28
- package/src/providers/xai/index.ts +223 -0
- package/src/providers/xai/llm.completions.ts +201 -0
- package/src/providers/xai/llm.messages.ts +195 -0
- package/src/providers/xai/llm.responses.ts +211 -0
- package/src/providers/xai/transform.completions.ts +565 -0
- package/src/providers/xai/transform.messages.ts +448 -0
- package/src/providers/xai/transform.responses.ts +678 -0
- package/src/providers/xai/types.ts +938 -0
- package/src/xai/index.ts +41 -0
package/src/providers/openai/llm.completions.ts (+9 -9)

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type { … } from './types.ts';
+import type { OpenAICompletionsParams, OpenAICompletionsResponse, OpenAICompletionsStreamChunk } from './types.ts';
 import {
   transformRequest,
   transformResponse,

@@ -32,16 +32,16 @@ const OPENAI_CAPABILITIES: LLMCapabilities = {
 /**
  * Create OpenAI Chat Completions LLM handler
  */
-export function createCompletionsLLMHandler(): LLMHandler<OpenAILLMParams> {
+export function createCompletionsLLMHandler(): LLMHandler<OpenAICompletionsParams> {
   // Provider reference injected by createProvider() or OpenAI's custom factory
-  let providerRef: LLMProvider<OpenAILLMParams> | null = null;
+  let providerRef: LLMProvider<OpenAICompletionsParams> | null = null;
 
   return {
-    _setProvider(provider: LLMProvider<OpenAILLMParams>) {
+    _setProvider(provider: LLMProvider<OpenAICompletionsParams>) {
       providerRef = provider;
     },
 
-    bind(modelId: string): BoundLLMModel<OpenAILLMParams> {
+    bind(modelId: string): BoundLLMModel<OpenAICompletionsParams> {
       // Use the injected provider reference
       if (!providerRef) {
         throw new UPPError(

@@ -52,15 +52,15 @@ export function createCompletionsLLMHandler(): LLMHandler<OpenAILLMParams> {
         );
       }
 
-      const model: BoundLLMModel<OpenAILLMParams> = {
+      const model: BoundLLMModel<OpenAICompletionsParams> = {
         modelId,
         capabilities: OPENAI_CAPABILITIES,
 
-        get provider(): LLMProvider<OpenAILLMParams> {
+        get provider(): LLMProvider<OpenAICompletionsParams> {
          return providerRef!;
        },
 
-        async complete(request: LLMRequest<OpenAILLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<OpenAICompletionsParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'OPENAI_API_KEY',

@@ -91,7 +91,7 @@ export function createCompletionsLLMHandler(): LLMHandler<OpenAILLMParams> {
          return transformResponse(data);
        },
 
-        stream(request: LLMRequest<OpenAILLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<OpenAICompletionsParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;
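Every handler factory touched in this release follows the same late-binding pattern: the handler is created first, the provider injects itself through `_setProvider()`, and `bind()` throws if that never happened. A minimal standalone sketch of the idea, using invented names and a plain `Error` in place of the package's `UPPError`:

```ts
// Sketch of the provider-injection pattern used by the handlers above.
// Names and types are illustrative, not the package's real API surface.
interface ProviderLike {
  name: string;
}

interface HandlerSketch<TParams> {
  _setProvider(provider: ProviderLike): void;
  bind(modelId: string): { modelId: string; provider: ProviderLike; defaults?: TParams };
}

function createHandlerSketch<TParams>(): HandlerSketch<TParams> {
  // Provider reference injected later by the provider factory.
  let providerRef: ProviderLike | null = null;

  return {
    _setProvider(provider) {
      providerRef = provider;
    },

    bind(modelId) {
      // Refuse to bind a model before a provider has been attached.
      if (!providerRef) {
        throw new Error('handler used before _setProvider() was called');
      }
      return { modelId, provider: providerRef };
    },
  };
}

// Usage: the provider factory wires itself in, then models can be bound.
const handler = createHandlerSketch<{ temperature?: number }>();
handler._setProvider({ name: 'openai' });
console.log(handler.bind('gpt-4o-mini').modelId);
```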
package/src/providers/openai/llm.responses.ts (+9 -9)

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type { … } from './types.ts';
+import type { OpenAIResponsesParams, OpenAIResponsesResponse, OpenAIResponsesStreamEvent, OpenAIResponseErrorEvent } from './types.ts';
 import {
   transformRequest,
   transformResponse,

@@ -32,16 +32,16 @@ const OPENAI_CAPABILITIES: LLMCapabilities = {
 /**
  * Create OpenAI Responses API LLM handler
  */
-export function createResponsesLLMHandler(): LLMHandler<OpenAILLMParams> {
+export function createResponsesLLMHandler(): LLMHandler<OpenAIResponsesParams> {
   // Provider reference injected by createProvider() or OpenAI's custom factory
-  let providerRef: LLMProvider<OpenAILLMParams> | null = null;
+  let providerRef: LLMProvider<OpenAIResponsesParams> | null = null;
 
   return {
-    _setProvider(provider: LLMProvider<OpenAILLMParams>) {
+    _setProvider(provider: LLMProvider<OpenAIResponsesParams>) {
       providerRef = provider;
     },
 
-    bind(modelId: string): BoundLLMModel<OpenAILLMParams> {
+    bind(modelId: string): BoundLLMModel<OpenAIResponsesParams> {
       // Use the injected provider reference
       if (!providerRef) {
         throw new UPPError(

@@ -52,15 +52,15 @@ export function createResponsesLLMHandler(): LLMHandler<OpenAILLMParams> {
         );
       }
 
-      const model: BoundLLMModel<OpenAILLMParams> = {
+      const model: BoundLLMModel<OpenAIResponsesParams> = {
         modelId,
         capabilities: OPENAI_CAPABILITIES,
 
-        get provider(): LLMProvider<OpenAILLMParams> {
+        get provider(): LLMProvider<OpenAIResponsesParams> {
          return providerRef!;
        },
 
-        async complete(request: LLMRequest<OpenAILLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<OpenAIResponsesParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'OPENAI_API_KEY',

@@ -102,7 +102,7 @@ export function createResponsesLLMHandler(): LLMHandler<OpenAILLMParams> {
          return transformResponse(data);
        },
 
-        stream(request: LLMRequest<OpenAILLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<OpenAIResponsesParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;
package/src/providers/openai/transform.completions.ts (+12 -79)

@@ -11,7 +11,7 @@ import {
   isToolResultMessage,
 } from '../../types/messages.ts';
 import type {
-  OpenAILLMParams,
+  OpenAICompletionsParams,
   OpenAICompletionsRequest,
   OpenAICompletionsMessage,
   OpenAIUserContent,

@@ -23,94 +23,30 @@ import type {
 
 /**
  * Transform UPP request to OpenAI Chat Completions format
+ *
+ * Params are spread directly to allow pass-through of any OpenAI API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
  */
-export function transformRequest<TParams extends OpenAILLMParams>(
-  request: LLMRequest<TParams>,
+export function transformRequest(
+  request: LLMRequest<OpenAICompletionsParams>,
   modelId: string
 ): OpenAICompletionsRequest {
-  const params …
+  const params = request.params ?? ({} as OpenAICompletionsParams);
 
+  // Spread params to pass through all fields, then set required fields
   const openaiRequest: OpenAICompletionsRequest = {
+    ...params,
     model: modelId,
     messages: transformMessages(request.messages, request.system),
   };
 
-  // …
-  if (params.temperature !== undefined) {
-    openaiRequest.temperature = params.temperature;
-  }
-  if (params.top_p !== undefined) {
-    openaiRequest.top_p = params.top_p;
-  }
-  if (params.max_completion_tokens !== undefined) {
-    openaiRequest.max_completion_tokens = params.max_completion_tokens;
-  } else if (params.max_tokens !== undefined) {
-    openaiRequest.max_tokens = params.max_tokens;
-  }
-  if (params.frequency_penalty !== undefined) {
-    openaiRequest.frequency_penalty = params.frequency_penalty;
-  }
-  if (params.presence_penalty !== undefined) {
-    openaiRequest.presence_penalty = params.presence_penalty;
-  }
-  if (params.stop !== undefined) {
-    openaiRequest.stop = params.stop;
-  }
-  if (params.n !== undefined) {
-    openaiRequest.n = params.n;
-  }
-  if (params.logprobs !== undefined) {
-    openaiRequest.logprobs = params.logprobs;
-  }
-  if (params.top_logprobs !== undefined) {
-    openaiRequest.top_logprobs = params.top_logprobs;
-  }
-  if (params.seed !== undefined) {
-    openaiRequest.seed = params.seed;
-  }
-  if (params.user !== undefined) {
-    openaiRequest.user = params.user;
-  }
-  if (params.logit_bias !== undefined) {
-    openaiRequest.logit_bias = params.logit_bias;
-  }
-  if (params.reasoning_effort !== undefined) {
-    openaiRequest.reasoning_effort = params.reasoning_effort;
-  }
-  if (params.verbosity !== undefined) {
-    openaiRequest.verbosity = params.verbosity;
-  }
-  if (params.service_tier !== undefined) {
-    openaiRequest.service_tier = params.service_tier;
-  }
-  if (params.store !== undefined) {
-    openaiRequest.store = params.store;
-  }
-  if (params.metadata !== undefined) {
-    openaiRequest.metadata = params.metadata;
-  }
-  if (params.prediction !== undefined) {
-    openaiRequest.prediction = params.prediction;
-  }
-  if (params.prompt_cache_key !== undefined) {
-    openaiRequest.prompt_cache_key = params.prompt_cache_key;
-  }
-  if (params.prompt_cache_retention !== undefined) {
-    openaiRequest.prompt_cache_retention = params.prompt_cache_retention;
-  }
-  if (params.safety_identifier !== undefined) {
-    openaiRequest.safety_identifier = params.safety_identifier;
-  }
-
-  // Tools
+  // Tools come from request, not params
   if (request.tools && request.tools.length > 0) {
     openaiRequest.tools = request.tools.map(transformTool);
-    if (params.parallel_tool_calls !== undefined) {
-      openaiRequest.parallel_tool_calls = params.parallel_tool_calls;
-    }
   }
 
-  // Structured output via response_format
+  // Structured output via response_format (overrides params.response_format if set)
   if (request.structure) {
     const schema: Record<string, unknown> = {
       type: 'object',

@@ -133,9 +69,6 @@ export function transformRequest<TParams extends OpenAILLMParams>(
         strict: true,
       },
     };
-  } else if (params.response_format !== undefined) {
-    // Pass through response_format from params if no structure is defined
-    openaiRequest.response_format = params.response_format;
   }
 
   return openaiRequest;
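The rewritten transform drops the long per-field copy in favour of spread-then-override: everything in `params` is copied onto the outgoing request, and only the fields the transform must control (`model`, `messages`, tools, structured output) are assigned afterwards, so they always win. A simplified sketch of that pattern, with invented names rather than the library's real request shape:

```ts
// Illustrative sketch of the spread-then-override pattern (not the library's code).
interface PassThroughParams {
  temperature?: number;
  // The index signature lets unknown future API fields flow through the spread.
  [key: string]: unknown;
}

function buildRequest(
  params: PassThroughParams | undefined,
  modelId: string,
  messages: Array<{ role: string; content: string }>
): Record<string, unknown> {
  return {
    // 1. Copy everything the caller supplied, including fields this
    //    library has never heard of.
    ...(params ?? {}),
    // 2. Then pin the fields the transform owns; these win over params.
    model: modelId,
    messages,
  };
}

const req = buildRequest(
  { temperature: 0.2, some_future_flag: true, model: 'caller-supplied' },
  'gpt-4o-mini',
  [{ role: 'user', content: 'hi' }]
);
console.log(req.model);            // 'gpt-4o-mini' (params cannot override it)
console.log(req.some_future_flag); // true (passed through untouched)
```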
package/src/providers/openai/transform.responses.ts (+12 -54)

@@ -11,7 +11,7 @@ import {
   isToolResultMessage,
 } from '../../types/messages.ts';
 import type {
-  OpenAILLMParams,
+  OpenAIResponsesParams,
   OpenAIResponsesRequest,
   OpenAIResponsesInputItem,
   OpenAIResponsesContentPart,

@@ -25,72 +25,30 @@ import type {
 
 /**
  * Transform UPP request to OpenAI Responses API format
+ *
+ * Params are spread directly to allow pass-through of any OpenAI API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
  */
-export function transformRequest<TParams extends OpenAILLMParams>(
-  request: LLMRequest<TParams>,
+export function transformRequest(
+  request: LLMRequest<OpenAIResponsesParams>,
   modelId: string
 ): OpenAIResponsesRequest {
-  const params …
+  const params = request.params ?? ({} as OpenAIResponsesParams);
 
+  // Spread params to pass through all fields, then set required fields
   const openaiRequest: OpenAIResponsesRequest = {
+    ...params,
     model: modelId,
     input: transformInputItems(request.messages, request.system),
   };
 
-  // …
-  if (params.temperature !== undefined) {
-    openaiRequest.temperature = params.temperature;
-  }
-  if (params.top_p !== undefined) {
-    openaiRequest.top_p = params.top_p;
-  }
-  if (params.max_output_tokens !== undefined) {
-    openaiRequest.max_output_tokens = params.max_output_tokens;
-  } else if (params.max_completion_tokens !== undefined) {
-    openaiRequest.max_output_tokens = params.max_completion_tokens;
-  } else if (params.max_tokens !== undefined) {
-    openaiRequest.max_output_tokens = params.max_tokens;
-  }
-  if (params.service_tier !== undefined) {
-    openaiRequest.service_tier = params.service_tier;
-  }
-  if (params.store !== undefined) {
-    openaiRequest.store = params.store;
-  }
-  if (params.metadata !== undefined) {
-    openaiRequest.metadata = params.metadata;
-  }
-  if (params.truncation !== undefined) {
-    openaiRequest.truncation = params.truncation;
-  }
-  if (params.include !== undefined) {
-    openaiRequest.include = params.include;
-  }
-  if (params.background !== undefined) {
-    openaiRequest.background = params.background;
-  }
-  if (params.previous_response_id !== undefined) {
-    openaiRequest.previous_response_id = params.previous_response_id;
-  }
-  if (params.reasoning !== undefined) {
-    openaiRequest.reasoning = { ...params.reasoning };
-  }
-  if (params.reasoning_effort !== undefined) {
-    openaiRequest.reasoning = {
-      ...(openaiRequest.reasoning ?? {}),
-      effort: params.reasoning_effort,
-    };
-  }
-
-  // Tools
+  // Tools come from request, not params
   if (request.tools && request.tools.length > 0) {
     openaiRequest.tools = request.tools.map(transformTool);
-    if (params.parallel_tool_calls !== undefined) {
-      openaiRequest.parallel_tool_calls = params.parallel_tool_calls;
-    }
   }
 
-  // Structured output via text.format
+  // Structured output via text.format (overrides params.text if set)
   if (request.structure) {
     const schema: Record<string, unknown> = {
       type: 'object',
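One behavioural consequence worth noting: the deleted Responses transform used to fall back from `max_output_tokens` to `max_completion_tokens` to `max_tokens`, while the new pass-through forwards params verbatim, so callers targeting the Responses endpoint should name the field the API itself expects. A small illustration; the remap function reconstructs the removed behaviour and is not current library code:

```ts
// Reconstructed from the deleted lines above, for illustration only:
// the old Responses transform collapsed three token fields into one.
function legacyTokenRemap(params: {
  max_output_tokens?: number;
  max_completion_tokens?: number;
  max_tokens?: number;
}): number | undefined {
  return (
    params.max_output_tokens ??
    params.max_completion_tokens ??
    params.max_tokens
  );
}

// With verbatim pass-through there is no remap: only the field the
// Responses API itself understands has any effect.
console.log(legacyTokenRemap({ max_tokens: 512 })); // 512 under the old behaviour
const responsesParams = { max_output_tokens: 512 }; // what callers send now
console.log(responsesParams.max_output_tokens);
```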
package/src/providers/openai/types.ts (+54 -31)

@@ -1,17 +1,14 @@
 /**
- * OpenAI …
- * These are passed through to the …
+ * OpenAI Chat Completions API parameters
+ * These are passed through to the /v1/chat/completions endpoint
  */
-export interface OpenAILLMParams {
-  /** Maximum number of tokens to generate */
+export interface OpenAICompletionsParams {
+  /** Maximum number of tokens to generate (legacy, prefer max_completion_tokens) */
   max_tokens?: number;
 
-  /** Maximum completion tokens (preferred …
+  /** Maximum completion tokens (preferred for newer models) */
   max_completion_tokens?: number;
 
-  /** Maximum output tokens (Responses API) */
-  max_output_tokens?: number;
-
   /** Temperature for randomness (0.0 - 2.0) */
   temperature?: number;
 

@@ -42,10 +39,10 @@ export interface OpenAILLMParams {
   /** User identifier for abuse detection */
   user?: string;
 
-  /** Logit bias map …
+  /** Logit bias map */
   logit_bias?: Record<string, number>;
 
-  /** Verbosity control …
+  /** Verbosity control */
   verbosity?: 'low' | 'medium' | 'high';
 
   /** Whether to enable parallel tool calls */

@@ -54,40 +51,21 @@ export interface OpenAILLMParams {
   /** Reasoning effort for reasoning models */
   reasoning_effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
 
-  /** Reasoning configuration (Responses API) */
-  reasoning?: {
-    effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
-    summary?: string;
-  };
-
   /** Service tier */
   service_tier?: 'auto' | 'default' | 'flex' | 'priority';
 
-  /** Truncation strategy (Responses API) */
-  truncation?: 'auto' | 'disabled';
-
-  /** Fields to include in Responses API output */
-  include?: string[];
-
-  /** Background processing (Responses API) */
-  background?: boolean;
-
-  /** Continue from a previous response (Responses API) */
-  previous_response_id?: string;
-
   /** Store completion for distillation */
   store?: boolean;
 
   /** Metadata key-value pairs */
   metadata?: Record<string, string>;
 
-  /** Response format for structured output …
+  /** Response format for structured output */
   response_format?: OpenAIResponseFormat;
 
   /**
    * Predicted Output configuration for faster regeneration
    * Improves response times when large parts of the response are known ahead of time
-   * Most useful when regenerating a file with only minor changes
    */
   prediction?: {
     type: 'content';

@@ -95,7 +73,7 @@ export interface OpenAILLMParams {
   };
 
   /**
-   * Stable identifier for caching similar requests …
+   * Stable identifier for caching similar requests
    * Used to optimize cache hit rates
    */
   prompt_cache_key?: string;

@@ -113,6 +91,51 @@ export interface OpenAILLMParams {
   safety_identifier?: string;
 }
 
+/**
+ * OpenAI Responses API parameters
+ * These are passed through to the /v1/responses endpoint
+ */
+export interface OpenAIResponsesParams {
+  /** Maximum output tokens */
+  max_output_tokens?: number;
+
+  /** Temperature for randomness (0.0 - 2.0) */
+  temperature?: number;
+
+  /** Top-p (nucleus) sampling (0.0 - 1.0) */
+  top_p?: number;
+
+  /** Whether to enable parallel tool calls */
+  parallel_tool_calls?: boolean;
+
+  /** Reasoning configuration */
+  reasoning?: {
+    effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+    summary?: string;
+  };
+
+  /** Service tier */
+  service_tier?: 'auto' | 'default' | 'flex' | 'priority';
+
+  /** Truncation strategy */
+  truncation?: 'auto' | 'disabled';
+
+  /** Fields to include in output */
+  include?: string[];
+
+  /** Background processing */
+  background?: boolean;
+
+  /** Continue from a previous response */
+  previous_response_id?: string;
+
+  /** Store response for continuation */
+  store?: boolean;
+
+  /** Metadata key-value pairs */
+  metadata?: Record<string, string>;
+}
+
 /**
  * API mode for OpenAI provider
  */
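The old catch-all `OpenAILLMParams` is split into one interface per endpoint, so Completions-only fields (`max_completion_tokens`, `response_format`, `logit_bias`, ...) and Responses-only fields (`max_output_tokens`, `reasoning`, `truncation`, `include`, ...) no longer share a type. A quick sketch of what typed params look like under the split; the import path is an assumption based on the dist layout, and the values are arbitrary examples:

```ts
// Assumed import path, inferred from the package's dist/openai build output;
// the exact subpath export may differ.
import type { OpenAICompletionsParams, OpenAIResponsesParams } from '@providerprotocol/ai/openai';

// Chat Completions: the token limit and reasoning effort are flat fields.
const completionsParams: OpenAICompletionsParams = {
  max_completion_tokens: 1024,
  temperature: 0.3,
  reasoning_effort: 'high',
};

// Responses API: the limit is max_output_tokens and reasoning is an object.
const responsesParams: OpenAIResponsesParams = {
  max_output_tokens: 1024,
  temperature: 0.3,
  reasoning: { effort: 'high' },
};

console.log(completionsParams, responsesParams);
```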
package/src/providers/openrouter/index.ts (+12 -8)

@@ -6,7 +6,10 @@ import type {
 } from '../../types/provider.ts';
 import { createCompletionsLLMHandler } from './llm.completions.ts';
 import { createResponsesLLMHandler } from './llm.responses.ts';
-import type { … } from './types.ts';
+import type { OpenRouterCompletionsParams, OpenRouterResponsesParams, OpenRouterConfig } from './types.ts';
+
+/** Union type for modalities interface */
+type OpenRouterLLMParamsUnion = OpenRouterCompletionsParams | OpenRouterResponsesParams;
 
 /**
  * OpenRouter provider options

@@ -51,7 +54,7 @@ export interface OpenRouterProvider extends Provider<OpenRouterProviderOptions>
 
   /** Supported modalities */
   readonly modalities: {
-    llm: LLMHandler<OpenRouterLLMParams>;
+    llm: LLMHandler<OpenRouterLLMParamsUnion>;
   };
 }
 

@@ -78,10 +81,10 @@ function createOpenRouterProvider(): OpenRouterProvider {
 
   // Create a dynamic modalities object that returns the correct handler
   const modalities = {
-    get llm(): LLMHandler<OpenRouterLLMParams> {
+    get llm(): LLMHandler<OpenRouterLLMParamsUnion> {
       return currentApiMode === 'responses'
-        ? responsesHandler
-        : completionsHandler;
+        ? (responsesHandler as unknown as LLMHandler<OpenRouterLLMParamsUnion>)
+        : (completionsHandler as unknown as LLMHandler<OpenRouterLLMParamsUnion>);
     },
   };
 

@@ -107,8 +110,8 @@ function createOpenRouterProvider(): OpenRouterProvider {
   const provider = fn as OpenRouterProvider;
 
   // Inject provider reference into both handlers (spec compliance)
-  completionsHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterLLMParams>);
-  responsesHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterLLMParams>);
+  completionsHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterCompletionsParams>);
+  responsesHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterResponsesParams>);
 
   return provider;
 }

@@ -164,7 +167,8 @@ export const openrouter = createOpenRouterProvider();
 
 // Re-export types
 export type {
-  OpenRouterLLMParams,
+  OpenRouterCompletionsParams,
+  OpenRouterResponsesParams,
   OpenRouterConfig,
   OpenRouterAPIMode,
   OpenRouterModelOptions,
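OpenRouter keeps both concrete handlers alive and exposes whichever one matches the current API mode through a `modalities.llm` getter; the new union type plus the `as unknown as` casts paper over the fact that the two handlers now have different param types. The underlying switch, reduced to a standalone sketch with invented names:

```ts
// Standalone sketch of the getter-based handler switch; names are illustrative.
type ApiMode = 'completions' | 'responses';

interface LLMHandlerLike<TParams> {
  describe(): string;
  send(params: TParams): void;
}

type UnionParams = { max_completion_tokens?: number } | { max_output_tokens?: number };

function createSwitchingModalities() {
  let currentApiMode: ApiMode = 'completions';

  const completionsHandler: LLMHandlerLike<{ max_completion_tokens?: number }> = {
    describe: () => 'chat/completions',
    send: () => {},
  };
  const responsesHandler: LLMHandlerLike<{ max_output_tokens?: number }> = {
    describe: () => 'responses',
    send: () => {},
  };

  const modalities = {
    // The getter re-evaluates on every access, so flipping the mode
    // immediately changes which handler callers receive.
    get llm(): LLMHandlerLike<UnionParams> {
      return currentApiMode === 'responses'
        ? (responsesHandler as unknown as LLMHandlerLike<UnionParams>)
        : (completionsHandler as unknown as LLMHandlerLike<UnionParams>);
    },
  };

  return {
    modalities,
    setMode(mode: ApiMode) {
      currentApiMode = mode;
    },
  };
}

const provider = createSwitchingModalities();
console.log(provider.modalities.llm.describe()); // 'chat/completions'
provider.setMode('responses');
console.log(provider.modalities.llm.describe()); // 'responses'
```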
package/src/providers/openrouter/llm.completions.ts (+9 -9)

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type { … } from './types.ts';
+import type { OpenRouterCompletionsParams, OpenRouterCompletionsResponse, OpenRouterCompletionsStreamChunk } from './types.ts';
 import {
   transformRequest,
   transformResponse,

@@ -32,16 +32,16 @@ const OPENROUTER_CAPABILITIES: LLMCapabilities = {
 /**
  * Create OpenRouter Chat Completions LLM handler
  */
-export function createCompletionsLLMHandler(): LLMHandler<OpenRouterLLMParams> {
+export function createCompletionsLLMHandler(): LLMHandler<OpenRouterCompletionsParams> {
   // Provider reference injected by createProvider() or OpenRouter's custom factory
-  let providerRef: LLMProvider<OpenRouterLLMParams> | null = null;
+  let providerRef: LLMProvider<OpenRouterCompletionsParams> | null = null;
 
   return {
-    _setProvider(provider: LLMProvider<OpenRouterLLMParams>) {
+    _setProvider(provider: LLMProvider<OpenRouterCompletionsParams>) {
       providerRef = provider;
     },
 
-    bind(modelId: string): BoundLLMModel<OpenRouterLLMParams> {
+    bind(modelId: string): BoundLLMModel<OpenRouterCompletionsParams> {
       // Use the injected provider reference
       if (!providerRef) {
         throw new UPPError(

@@ -52,15 +52,15 @@ export function createCompletionsLLMHandler(): LLMHandler<OpenRouterLLMParams> {
         );
       }
 
-      const model: BoundLLMModel<OpenRouterLLMParams> = {
+      const model: BoundLLMModel<OpenRouterCompletionsParams> = {
         modelId,
         capabilities: OPENROUTER_CAPABILITIES,
 
-        get provider(): LLMProvider<OpenRouterLLMParams> {
+        get provider(): LLMProvider<OpenRouterCompletionsParams> {
          return providerRef!;
        },
 
-        async complete(request: LLMRequest<OpenRouterLLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<OpenRouterCompletionsParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'OPENROUTER_API_KEY',

@@ -91,7 +91,7 @@ export function createCompletionsLLMHandler(): LLMHandler<OpenRouterLLMParams> {
          return transformResponse(data);
        },
 
-        stream(request: LLMRequest<OpenRouterLLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<OpenRouterCompletionsParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;
package/src/providers/openrouter/llm.responses.ts (+9 -9)

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type { … } from './types.ts';
+import type { OpenRouterResponsesParams, OpenRouterResponsesResponse, OpenRouterResponsesStreamEvent, OpenRouterResponseErrorEvent } from './types.ts';
 import {
   transformRequest,
   transformResponse,

@@ -32,16 +32,16 @@ const OPENROUTER_CAPABILITIES: LLMCapabilities = {
 /**
  * Create OpenRouter Responses API LLM handler
  */
-export function createResponsesLLMHandler(): LLMHandler<OpenRouterLLMParams> {
+export function createResponsesLLMHandler(): LLMHandler<OpenRouterResponsesParams> {
   // Provider reference injected by createProvider() or OpenRouter's custom factory
-  let providerRef: LLMProvider<OpenRouterLLMParams> | null = null;
+  let providerRef: LLMProvider<OpenRouterResponsesParams> | null = null;
 
   return {
-    _setProvider(provider: LLMProvider<OpenRouterLLMParams>) {
+    _setProvider(provider: LLMProvider<OpenRouterResponsesParams>) {
       providerRef = provider;
     },
 
-    bind(modelId: string): BoundLLMModel<OpenRouterLLMParams> {
+    bind(modelId: string): BoundLLMModel<OpenRouterResponsesParams> {
       // Use the injected provider reference
       if (!providerRef) {
         throw new UPPError(

@@ -52,15 +52,15 @@ export function createResponsesLLMHandler(): LLMHandler<OpenRouterLLMParams> {
         );
       }
 
-      const model: BoundLLMModel<OpenRouterLLMParams> = {
+      const model: BoundLLMModel<OpenRouterResponsesParams> = {
         modelId,
         capabilities: OPENROUTER_CAPABILITIES,
 
-        get provider(): LLMProvider<OpenRouterLLMParams> {
+        get provider(): LLMProvider<OpenRouterResponsesParams> {
          return providerRef!;
        },
 
-        async complete(request: LLMRequest<OpenRouterLLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<OpenRouterResponsesParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'OPENROUTER_API_KEY',

@@ -102,7 +102,7 @@ export function createResponsesLLMHandler(): LLMHandler<OpenRouterLLMParams> {
          return transformResponse(data);
        },
 
-        stream(request: LLMRequest<OpenRouterLLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<OpenRouterResponsesParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;