@providerprotocol/ai 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/dist/anthropic/index.js +1 -24
  2. package/dist/anthropic/index.js.map +1 -1
  3. package/dist/google/index.js +3 -46
  4. package/dist/google/index.js.map +1 -1
  5. package/dist/ollama/index.js +13 -44
  6. package/dist/ollama/index.js.map +1 -1
  7. package/dist/openai/index.d.ts +46 -27
  8. package/dist/openai/index.js +2 -116
  9. package/dist/openai/index.js.map +1 -1
  10. package/dist/openrouter/index.d.ts +23 -10
  11. package/dist/openrouter/index.js +2 -85
  12. package/dist/openrouter/index.js.map +1 -1
  13. package/dist/xai/index.d.ts +59 -35
  14. package/dist/xai/index.js +3 -119
  15. package/dist/xai/index.js.map +1 -1
  16. package/package.json +2 -1
  17. package/src/openai/index.ts +2 -1
  18. package/src/openrouter/index.ts +2 -1
  19. package/src/providers/anthropic/transform.ts +7 -29
  20. package/src/providers/google/transform.ts +9 -49
  21. package/src/providers/ollama/transform.ts +27 -49
  22. package/src/providers/openai/index.ts +12 -8
  23. package/src/providers/openai/llm.completions.ts +9 -9
  24. package/src/providers/openai/llm.responses.ts +9 -9
  25. package/src/providers/openai/transform.completions.ts +12 -79
  26. package/src/providers/openai/transform.responses.ts +12 -54
  27. package/src/providers/openai/types.ts +54 -31
  28. package/src/providers/openrouter/index.ts +12 -8
  29. package/src/providers/openrouter/llm.completions.ts +9 -9
  30. package/src/providers/openrouter/llm.responses.ts +9 -9
  31. package/src/providers/openrouter/transform.completions.ts +12 -79
  32. package/src/providers/openrouter/transform.responses.ts +12 -25
  33. package/src/providers/openrouter/types.ts +22 -28
  34. package/src/providers/xai/index.ts +15 -10
  35. package/src/providers/xai/llm.completions.ts +9 -9
  36. package/src/providers/xai/llm.messages.ts +9 -9
  37. package/src/providers/xai/llm.responses.ts +9 -9
  38. package/src/providers/xai/transform.completions.ts +12 -64
  39. package/src/providers/xai/transform.messages.ts +11 -30
  40. package/src/providers/xai/transform.responses.ts +12 -51
  41. package/src/providers/xai/types.ts +68 -38
  42. package/src/xai/index.ts +3 -1
package/src/providers/openai/types.ts

@@ -1,17 +1,14 @@
 /**
- * OpenAI-specific LLM parameters
- * These are passed through to the relevant OpenAI APIs
+ * OpenAI Chat Completions API parameters
+ * These are passed through to the /v1/chat/completions endpoint
 */
-export interface OpenAILLMParams {
-  /** Maximum number of tokens to generate */
+export interface OpenAICompletionsParams {
+  /** Maximum number of tokens to generate (legacy, prefer max_completion_tokens) */
   max_tokens?: number;

-  /** Maximum completion tokens (preferred over max_tokens for newer models) */
+  /** Maximum completion tokens (preferred for newer models) */
   max_completion_tokens?: number;

-  /** Maximum output tokens (Responses API) */
-  max_output_tokens?: number;
-
   /** Temperature for randomness (0.0 - 2.0) */
   temperature?: number;

@@ -42,10 +39,10 @@ export interface OpenAILLMParams {
   /** User identifier for abuse detection */
   user?: string;

-  /** Logit bias map (Chat Completions API) */
+  /** Logit bias map */
   logit_bias?: Record<string, number>;

-  /** Verbosity control (Chat Completions API) */
+  /** Verbosity control */
   verbosity?: 'low' | 'medium' | 'high';

   /** Whether to enable parallel tool calls */
@@ -54,40 +51,21 @@ export interface OpenAILLMParams {
   /** Reasoning effort for reasoning models */
   reasoning_effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';

-  /** Reasoning configuration (Responses API) */
-  reasoning?: {
-    effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
-    summary?: string;
-  };
-
   /** Service tier */
   service_tier?: 'auto' | 'default' | 'flex' | 'priority';

-  /** Truncation strategy (Responses API) */
-  truncation?: 'auto' | 'disabled';
-
-  /** Fields to include in Responses API output */
-  include?: string[];
-
-  /** Background processing (Responses API) */
-  background?: boolean;
-
-  /** Continue from a previous response (Responses API) */
-  previous_response_id?: string;
-
   /** Store completion for distillation */
   store?: boolean;

   /** Metadata key-value pairs */
   metadata?: Record<string, string>;

-  /** Response format for structured output (Chat Completions API only) */
+  /** Response format for structured output */
   response_format?: OpenAIResponseFormat;

   /**
    * Predicted Output configuration for faster regeneration
    * Improves response times when large parts of the response are known ahead of time
-   * Most useful when regenerating a file with only minor changes
    */
   prediction?: {
     type: 'content';
@@ -95,7 +73,7 @@ export interface OpenAILLMParams {
   };

   /**
-   * Stable identifier for caching similar requests (replaces user field)
+   * Stable identifier for caching similar requests
    * Used to optimize cache hit rates
    */
   prompt_cache_key?: string;
@@ -113,6 +91,51 @@ export interface OpenAILLMParams {
   safety_identifier?: string;
 }

+/**
+ * OpenAI Responses API parameters
+ * These are passed through to the /v1/responses endpoint
+ */
+export interface OpenAIResponsesParams {
+  /** Maximum output tokens */
+  max_output_tokens?: number;
+
+  /** Temperature for randomness (0.0 - 2.0) */
+  temperature?: number;
+
+  /** Top-p (nucleus) sampling (0.0 - 1.0) */
+  top_p?: number;
+
+  /** Whether to enable parallel tool calls */
+  parallel_tool_calls?: boolean;
+
+  /** Reasoning configuration */
+  reasoning?: {
+    effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+    summary?: string;
+  };
+
+  /** Service tier */
+  service_tier?: 'auto' | 'default' | 'flex' | 'priority';
+
+  /** Truncation strategy */
+  truncation?: 'auto' | 'disabled';
+
+  /** Fields to include in output */
+  include?: string[];
+
+  /** Background processing */
+  background?: boolean;
+
+  /** Continue from a previous response */
+  previous_response_id?: string;
+
+  /** Store response for continuation */
+  store?: boolean;
+
+  /** Metadata key-value pairs */
+  metadata?: Record<string, string>;
+}
+
 /**
  * API mode for OpenAI provider
  */
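
The net effect of the split: Responses-only fields no longer type-check on a Chat Completions call, and vice versa. A sketch of the distinction, with the import subpath assumed from the dist/openai build output above (verify against package.json exports); field names are taken from the hunks:

// Sketch: the split OpenAI param types after 0.0.7.
import type {
  OpenAICompletionsParams,
  OpenAIResponsesParams,
} from '@providerprotocol/ai/openai'; // subpath is an assumption

// Chat Completions keeps both token caps, with max_tokens marked legacy:
const completions: OpenAICompletionsParams = {
  max_completion_tokens: 1024,
  verbosity: 'low',
};

// Responses-only fields now live on their own interface:
const responses: OpenAIResponsesParams = {
  max_output_tokens: 1024,
  reasoning: { effort: 'high' },
  truncation: 'auto',
};

// Under the old combined OpenAILLMParams, `truncation` type-checked on a
// Chat Completions request even though that endpoint does not accept it;
// after the split it is a compile-time error there.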
package/src/providers/openrouter/index.ts

@@ -6,7 +6,10 @@ import type {
 } from '../../types/provider.ts';
 import { createCompletionsLLMHandler } from './llm.completions.ts';
 import { createResponsesLLMHandler } from './llm.responses.ts';
-import type { OpenRouterLLMParams, OpenRouterConfig } from './types.ts';
+import type { OpenRouterCompletionsParams, OpenRouterResponsesParams, OpenRouterConfig } from './types.ts';
+
+/** Union type for modalities interface */
+type OpenRouterLLMParamsUnion = OpenRouterCompletionsParams | OpenRouterResponsesParams;

 /**
  * OpenRouter provider options
@@ -51,7 +54,7 @@ export interface OpenRouterProvider extends Provider<OpenRouterProviderOptions>

   /** Supported modalities */
   readonly modalities: {
-    llm: LLMHandler<OpenRouterLLMParams>;
+    llm: LLMHandler<OpenRouterLLMParamsUnion>;
   };
 }

@@ -78,10 +81,10 @@ function createOpenRouterProvider(): OpenRouterProvider {

   // Create a dynamic modalities object that returns the correct handler
   const modalities = {
-    get llm(): LLMHandler<OpenRouterLLMParams> {
+    get llm(): LLMHandler<OpenRouterLLMParamsUnion> {
       return currentApiMode === 'responses'
-        ? responsesHandler
-        : completionsHandler;
+        ? (responsesHandler as unknown as LLMHandler<OpenRouterLLMParamsUnion>)
+        : (completionsHandler as unknown as LLMHandler<OpenRouterLLMParamsUnion>);
     },
   };

@@ -107,8 +110,8 @@ function createOpenRouterProvider(): OpenRouterProvider {
   const provider = fn as OpenRouterProvider;

   // Inject provider reference into both handlers (spec compliance)
-  completionsHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterLLMParams>);
-  responsesHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterLLMParams>);
+  completionsHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterCompletionsParams>);
+  responsesHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterResponsesParams>);

   return provider;
 }
@@ -164,7 +167,8 @@ export const openrouter = createOpenRouterProvider();

 // Re-export types
 export type {
-  OpenRouterLLMParams,
+  OpenRouterCompletionsParams,
+  OpenRouterResponsesParams,
   OpenRouterConfig,
   OpenRouterAPIMode,
   OpenRouterModelOptions,
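
The `as unknown as` double assertions above are the usual workaround for generic invariance: a handler type that uses its parameter in both input and output positions is assignable in neither direction, so even a single `as` is rejected. A reduced, self-contained sketch of the same situation (nothing here is the package's real API):

// Sketch: why a single `as` is not enough. `Handler` is invariant in
// TParams, so the two instantiations do not overlap and TypeScript
// rejects a direct assertion; routing through `unknown` sidesteps it.
interface Handler<TParams> {
  defaults: TParams;                              // output position
  complete: (params: TParams) => Promise<string>; // input position
}

type CompletionsParams = { max_tokens?: number };
type ResponsesParams = { max_output_tokens?: number };
type ParamsUnion = CompletionsParams | ResponsesParams;

declare const completionsHandler: Handler<CompletionsParams>;

// const a: Handler<ParamsUnion> = completionsHandler;   // error
// const b = completionsHandler as Handler<ParamsUnion>; // error
const c = completionsHandler as unknown as Handler<ParamsUnion>; // compiles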
package/src/providers/openrouter/llm.completions.ts

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type { OpenRouterLLMParams, OpenRouterCompletionsResponse, OpenRouterCompletionsStreamChunk } from './types.ts';
+import type { OpenRouterCompletionsParams, OpenRouterCompletionsResponse, OpenRouterCompletionsStreamChunk } from './types.ts';
 import {
   transformRequest,
   transformResponse,
@@ -32,16 +32,16 @@ const OPENROUTER_CAPABILITIES: LLMCapabilities = {
 /**
  * Create OpenRouter Chat Completions LLM handler
  */
-export function createCompletionsLLMHandler(): LLMHandler<OpenRouterLLMParams> {
+export function createCompletionsLLMHandler(): LLMHandler<OpenRouterCompletionsParams> {
   // Provider reference injected by createProvider() or OpenRouter's custom factory
-  let providerRef: LLMProvider<OpenRouterLLMParams> | null = null;
+  let providerRef: LLMProvider<OpenRouterCompletionsParams> | null = null;

   return {
-    _setProvider(provider: LLMProvider<OpenRouterLLMParams>) {
+    _setProvider(provider: LLMProvider<OpenRouterCompletionsParams>) {
       providerRef = provider;
     },

-    bind(modelId: string): BoundLLMModel<OpenRouterLLMParams> {
+    bind(modelId: string): BoundLLMModel<OpenRouterCompletionsParams> {
       // Use the injected provider reference
       if (!providerRef) {
         throw new UPPError(
@@ -52,15 +52,15 @@ export function createCompletionsLLMHandler(): LLMHandler<OpenRouterLLMParams> {
         );
       }

-      const model: BoundLLMModel<OpenRouterLLMParams> = {
+      const model: BoundLLMModel<OpenRouterCompletionsParams> = {
         modelId,
         capabilities: OPENROUTER_CAPABILITIES,

-        get provider(): LLMProvider<OpenRouterLLMParams> {
+        get provider(): LLMProvider<OpenRouterCompletionsParams> {
           return providerRef!;
         },

-        async complete(request: LLMRequest<OpenRouterLLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<OpenRouterCompletionsParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'OPENROUTER_API_KEY',
@@ -91,7 +91,7 @@ export function createCompletionsLLMHandler(): LLMHandler<OpenRouterLLMParams> {
           return transformResponse(data);
         },

-        stream(request: LLMRequest<OpenRouterLLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<OpenRouterCompletionsParams>): LLMStreamResult {
           const state = createStreamState();
           let responseResolve: (value: LLMResponse) => void;
           let responseReject: (error: Error) => void;
package/src/providers/openrouter/llm.responses.ts

@@ -6,7 +6,7 @@ import { resolveApiKey } from '../../http/keys.ts';
 import { doFetch, doStreamFetch } from '../../http/fetch.ts';
 import { parseSSEStream } from '../../http/sse.ts';
 import { normalizeHttpError } from '../../http/errors.ts';
-import type { OpenRouterLLMParams, OpenRouterResponsesResponse, OpenRouterResponsesStreamEvent, OpenRouterResponseErrorEvent } from './types.ts';
+import type { OpenRouterResponsesParams, OpenRouterResponsesResponse, OpenRouterResponsesStreamEvent, OpenRouterResponseErrorEvent } from './types.ts';
 import {
   transformRequest,
   transformResponse,
@@ -32,16 +32,16 @@ const OPENROUTER_CAPABILITIES: LLMCapabilities = {
 /**
  * Create OpenRouter Responses API LLM handler
  */
-export function createResponsesLLMHandler(): LLMHandler<OpenRouterLLMParams> {
+export function createResponsesLLMHandler(): LLMHandler<OpenRouterResponsesParams> {
   // Provider reference injected by createProvider() or OpenRouter's custom factory
-  let providerRef: LLMProvider<OpenRouterLLMParams> | null = null;
+  let providerRef: LLMProvider<OpenRouterResponsesParams> | null = null;

   return {
-    _setProvider(provider: LLMProvider<OpenRouterLLMParams>) {
+    _setProvider(provider: LLMProvider<OpenRouterResponsesParams>) {
       providerRef = provider;
     },

-    bind(modelId: string): BoundLLMModel<OpenRouterLLMParams> {
+    bind(modelId: string): BoundLLMModel<OpenRouterResponsesParams> {
       // Use the injected provider reference
       if (!providerRef) {
         throw new UPPError(
@@ -52,15 +52,15 @@ export function createResponsesLLMHandler(): LLMHandler<OpenRouterLLMParams> {
         );
       }

-      const model: BoundLLMModel<OpenRouterLLMParams> = {
+      const model: BoundLLMModel<OpenRouterResponsesParams> = {
         modelId,
         capabilities: OPENROUTER_CAPABILITIES,

-        get provider(): LLMProvider<OpenRouterLLMParams> {
+        get provider(): LLMProvider<OpenRouterResponsesParams> {
           return providerRef!;
         },

-        async complete(request: LLMRequest<OpenRouterLLMParams>): Promise<LLMResponse> {
+        async complete(request: LLMRequest<OpenRouterResponsesParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'OPENROUTER_API_KEY',
@@ -102,7 +102,7 @@ export function createResponsesLLMHandler(): LLMHandler<OpenRouterLLMParams> {
           return transformResponse(data);
         },

-        stream(request: LLMRequest<OpenRouterLLMParams>): LLMStreamResult {
+        stream(request: LLMRequest<OpenRouterResponsesParams>): LLMStreamResult {
           const state = createStreamState();
           let responseResolve: (value: LLMResponse) => void;
           let responseReject: (error: Error) => void;
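
Both handler files follow the same late-injection pattern: the handler closure is built before the provider object exists, `_setProvider` patches the back-reference in afterwards, and `bind()` guards against a null `providerRef`. A reduced sketch of the pattern (UPPError and the real interfaces elided; names are illustrative):

// Sketch: late provider injection. The handler cannot capture the provider
// at construction time (the provider is built from the handlers), so the
// reference is patched in afterwards and read lazily through a getter.
interface Provider { name: string }

function createHandler() {
  let providerRef: Provider | null = null;
  return {
    _setProvider(provider: Provider) {
      providerRef = provider;
    },
    bind(modelId: string) {
      if (!providerRef) {
        throw new Error('provider not injected'); // UPPError in the real code
      }
      return {
        modelId,
        get provider(): Provider {
          return providerRef!; // lazy read: stays correct if re-injected
        },
      };
    },
  };
}

const handler = createHandler();
const provider: Provider = { name: 'openrouter' };
handler._setProvider?.(provider); // optional call, mirroring the `?.` above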
package/src/providers/openrouter/transform.completions.ts

@@ -11,7 +11,7 @@ import {
   isToolResultMessage,
 } from '../../types/messages.ts';
 import type {
-  OpenRouterLLMParams,
+  OpenRouterCompletionsParams,
   OpenRouterCompletionsRequest,
   OpenRouterCompletionsMessage,
   OpenRouterUserContent,
@@ -23,94 +23,30 @@ import type {

 /**
  * Transform UPP request to OpenRouter Chat Completions format
+ *
+ * Params are spread directly to allow pass-through of any OpenRouter API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
  */
-export function transformRequest<TParams extends OpenRouterLLMParams>(
-  request: LLMRequest<TParams>,
+export function transformRequest(
+  request: LLMRequest<OpenRouterCompletionsParams>,
   modelId: string
 ): OpenRouterCompletionsRequest {
-  const params: OpenRouterLLMParams = request.params ?? {};
+  const params = request.params ?? ({} as OpenRouterCompletionsParams);

+  // Spread params to pass through all fields, then set required fields
   const openrouterRequest: OpenRouterCompletionsRequest = {
+    ...params,
     model: modelId,
     messages: transformMessages(request.messages, request.system),
   };

-  // Model parameters
-  if (params.temperature !== undefined) {
-    openrouterRequest.temperature = params.temperature;
-  }
-  if (params.top_p !== undefined) {
-    openrouterRequest.top_p = params.top_p;
-  }
-  if (params.top_k !== undefined) {
-    openrouterRequest.top_k = params.top_k;
-  }
-  if (params.min_p !== undefined) {
-    openrouterRequest.min_p = params.min_p;
-  }
-  if (params.top_a !== undefined) {
-    openrouterRequest.top_a = params.top_a;
-  }
-  if (params.max_tokens !== undefined) {
-    openrouterRequest.max_tokens = params.max_tokens;
-  }
-  if (params.frequency_penalty !== undefined) {
-    openrouterRequest.frequency_penalty = params.frequency_penalty;
-  }
-  if (params.presence_penalty !== undefined) {
-    openrouterRequest.presence_penalty = params.presence_penalty;
-  }
-  if (params.repetition_penalty !== undefined) {
-    openrouterRequest.repetition_penalty = params.repetition_penalty;
-  }
-  if (params.stop !== undefined) {
-    openrouterRequest.stop = params.stop;
-  }
-  if (params.logprobs !== undefined) {
-    openrouterRequest.logprobs = params.logprobs;
-  }
-  if (params.top_logprobs !== undefined) {
-    openrouterRequest.top_logprobs = params.top_logprobs;
-  }
-  if (params.seed !== undefined) {
-    openrouterRequest.seed = params.seed;
-  }
-  if (params.user !== undefined) {
-    openrouterRequest.user = params.user;
-  }
-  if (params.logit_bias !== undefined) {
-    openrouterRequest.logit_bias = params.logit_bias;
-  }
-  if (params.prediction !== undefined) {
-    openrouterRequest.prediction = params.prediction;
-  }
-
-  // OpenRouter-specific parameters
-  if (params.transforms !== undefined) {
-    openrouterRequest.transforms = params.transforms;
-  }
-  if (params.models !== undefined) {
-    openrouterRequest.models = params.models;
-  }
-  if (params.route !== undefined) {
-    openrouterRequest.route = params.route;
-  }
-  if (params.provider !== undefined) {
-    openrouterRequest.provider = params.provider;
-  }
-  if (params.debug !== undefined) {
-    openrouterRequest.debug = params.debug;
-  }
-
-  // Tools
+  // Tools come from request, not params
   if (request.tools && request.tools.length > 0) {
     openrouterRequest.tools = request.tools.map(transformTool);
-    if (params.parallel_tool_calls !== undefined) {
-      openrouterRequest.parallel_tool_calls = params.parallel_tool_calls;
-    }
   }

-  // Structured output via response_format
+  // Structured output via response_format (overrides params.response_format if set)
   if (request.structure) {
     const schema: Record<string, unknown> = {
       type: 'object',
@@ -133,9 +69,6 @@ export function transformRequest<TParams extends OpenRouterLLMParams>(
         strict: true,
       },
     };
-  } else if (params.response_format !== undefined) {
-    // Pass through response_format from params if no structure is defined
-    openrouterRequest.response_format = params.response_format;
   }

   return openrouterRequest;
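
This is the largest behavioral change in the release: the per-field allow-list is replaced by spread-then-override, so fields the transform has never heard of reach the wire unchanged, while `model` and `messages` always win because they come after the spread. A minimal sketch of the pattern (names are illustrative, not the package's signatures):

// Sketch: spread-then-override. Unknown fields pass through; fields the
// transform owns are written after the spread so they win.
type WireRequest = {
  model: string;
  messages: unknown[];
  [key: string]: unknown; // pass-through for unmodeled fields
};

function buildRequest(
  params: Record<string, unknown>,
  modelId: string,
  messages: unknown[]
): WireRequest {
  return {
    ...params,      // e.g. { temperature: 0.2, brand_new_api_field: true }
    model: modelId, // overrides any stray params.model
    messages,       // overrides any stray params.messages
  };
}

const body = buildRequest(
  { temperature: 0.2, brand_new_api_field: true, model: 'ignored' },
  'openrouter/auto', // illustrative model id
  []
);
// body.model === 'openrouter/auto'; body.brand_new_api_field === true

The trade-off: misspelled or unsupported param names are no longer silently dropped client-side; they now reach the API, which may reject them.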
package/src/providers/openrouter/transform.responses.ts

@@ -11,7 +11,7 @@ import {
   isToolResultMessage,
 } from '../../types/messages.ts';
 import type {
-  OpenRouterLLMParams,
+  OpenRouterResponsesParams,
   OpenRouterResponsesRequest,
   OpenRouterResponsesInputItem,
   OpenRouterResponsesContentPart,
@@ -25,43 +25,30 @@ import type {

 /**
  * Transform UPP request to OpenRouter Responses API format
+ *
+ * Params are spread directly to allow pass-through of any OpenRouter API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
  */
-export function transformRequest<TParams extends OpenRouterLLMParams>(
-  request: LLMRequest<TParams>,
+export function transformRequest(
+  request: LLMRequest<OpenRouterResponsesParams>,
   modelId: string
 ): OpenRouterResponsesRequest {
-  const params: OpenRouterLLMParams = request.params ?? {};
+  const params = request.params ?? ({} as OpenRouterResponsesParams);

+  // Spread params to pass through all fields, then set required fields
   const openrouterRequest: OpenRouterResponsesRequest = {
+    ...params,
     model: modelId,
     input: transformInputItems(request.messages, request.system),
   };

-  // Model parameters
-  if (params.temperature !== undefined) {
-    openrouterRequest.temperature = params.temperature;
-  }
-  if (params.top_p !== undefined) {
-    openrouterRequest.top_p = params.top_p;
-  }
-  if (params.max_output_tokens !== undefined) {
-    openrouterRequest.max_output_tokens = params.max_output_tokens;
-  } else if (params.max_tokens !== undefined) {
-    openrouterRequest.max_output_tokens = params.max_tokens;
-  }
-  if (params.reasoning !== undefined) {
-    openrouterRequest.reasoning = { ...params.reasoning };
-  }
-
-  // Tools
+  // Tools come from request, not params
   if (request.tools && request.tools.length > 0) {
     openrouterRequest.tools = request.tools.map(transformTool);
-    if (params.parallel_tool_calls !== undefined) {
-      openrouterRequest.parallel_tool_calls = params.parallel_tool_calls;
-    }
   }

-  // Structured output via text.format
+  // Structured output via text.format (overrides params.text if set)
   if (request.structure) {
     const schema: Record<string, unknown> = {
       type: 'object',
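
The comment change here ("overrides params.text if set") falls out of ordering: the structure branch runs after the params spread, so a schema derived from request.structure clobbers any caller-supplied structured-output config. A sketch of that precedence, using the OpenAI-style text.format shape this endpoint takes (the `name` value and helper signature are illustrative):

// Sketch: because the structure branch executes after `...params`,
// a request.structure schema wins over any text config in params.
function buildBody(
  params: Record<string, unknown>,
  structure?: { properties: Record<string, unknown>; required?: string[] }
): Record<string, unknown> {
  const body: Record<string, unknown> = { ...params };
  if (structure) {
    body.text = {
      format: {
        type: 'json_schema',
        name: 'response', // illustrative; the library picks its own name
        schema: {
          type: 'object',
          properties: structure.properties,
          required: structure.required,
        },
        strict: true,
      },
    };
  }
  return body;
}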
package/src/providers/openrouter/types.ts

@@ -1,18 +1,11 @@
 /**
- * OpenRouter-specific LLM parameters
- * These are passed through to the OpenRouter APIs
+ * OpenRouter Chat Completions API parameters
+ * These are passed through to the /api/v1/chat/completions endpoint
 */
-export interface OpenRouterLLMParams {
-  // ============================================
-  // Common Parameters (both APIs)
-  // ============================================
-
+export interface OpenRouterCompletionsParams {
   /** Maximum number of tokens to generate */
   max_tokens?: number;

-  /** Maximum output tokens (Responses API) */
-  max_output_tokens?: number;
-
   /** Temperature for randomness (0.0 - 2.0) */
   temperature?: number;

@@ -55,24 +48,12 @@ export interface OpenRouterLLMParams {
   /** Top-a sampling threshold (0.0 - 1.0) */
   top_a?: number;

-  // ============================================
-  // Tool Calling
-  // ============================================
-
   /** Whether to enable parallel tool calls */
   parallel_tool_calls?: boolean;

-  // ============================================
-  // Structured Output
-  // ============================================
-
-  /** Response format for structured output (Chat Completions API only) */
+  /** Response format for structured output */
   response_format?: OpenRouterResponseFormat;

-  // ============================================
-  // OpenRouter-Specific Parameters
-  // ============================================
-
   /**
    * Prompt transforms to apply
    * See: https://openrouter.ai/docs/guides/features/message-transforms
@@ -98,7 +79,6 @@ export interface OpenRouterLLMParams {

   /**
    * Predicted output for latency optimization
-   * https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs
   */
   prediction?: {
     type: 'content';
@@ -112,13 +92,27 @@ export interface OpenRouterLLMParams {
     /** If true, returns the transformed request body sent to the provider */
     echo_upstream_body?: boolean;
   };
+}

-  // ============================================
-  // Responses API Specific
-  // ============================================
+/**
+ * OpenRouter Responses API parameters (Beta)
+ * These are passed through to the /api/v1/responses endpoint
+ */
+export interface OpenRouterResponsesParams {
+  /** Maximum output tokens */
+  max_output_tokens?: number;
+
+  /** Temperature for randomness (0.0 - 2.0) */
+  temperature?: number;
+
+  /** Top-p (nucleus) sampling (0.0 - 1.0) */
+  top_p?: number;
+
+  /** Whether to enable parallel tool calls */
+  parallel_tool_calls?: boolean;

   /**
-   * Reasoning configuration (Responses API)
+   * Reasoning configuration
   */
   reasoning?: {
     effort?: 'low' | 'medium' | 'high';
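
With the split, OpenRouter's routing and sampling extras stay on the Completions type, while the Responses type carries only what its beta endpoint accepts. A usage sketch; the import subpath is assumed from the dist/openrouter build output, model ids are illustrative, and unchanged fields like `models` are taken from the 0.0.5 interface since these hunks do not show them:

// Sketch: the two OpenRouter param shapes after the split.
import type {
  OpenRouterCompletionsParams,
  OpenRouterResponsesParams,
} from '@providerprotocol/ai/openrouter'; // subpath is an assumption

const completions: OpenRouterCompletionsParams = {
  max_tokens: 512,
  top_k: 40,                  // OpenRouter sampling extra
  transforms: ['middle-out'], // prompt transforms (see docs link above)
  models: ['anthropic/claude-sonnet-4', 'openai/gpt-4o'], // fallback routing
};

const responses: OpenRouterResponsesParams = {
  max_output_tokens: 512,
  reasoning: { effort: 'medium' }, // 'low' | 'medium' | 'high' here,
                                   // narrower than OpenAI's effort scale
};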
package/src/providers/xai/index.ts

@@ -7,7 +7,10 @@ import type {
 import { createCompletionsLLMHandler } from './llm.completions.ts';
 import { createResponsesLLMHandler } from './llm.responses.ts';
 import { createMessagesLLMHandler } from './llm.messages.ts';
-import type { XAILLMParams, XAIConfig, XAIAPIMode } from './types.ts';
+import type { XAICompletionsParams, XAIResponsesParams, XAIMessagesParams, XAIConfig, XAIAPIMode } from './types.ts';
+
+/** Union type for modalities interface */
+type XAILLMParamsUnion = XAICompletionsParams | XAIResponsesParams | XAIMessagesParams;

 /**
  * xAI provider options
@@ -58,7 +61,7 @@ export interface XAIProvider extends Provider<XAIProviderOptions> {

   /** Supported modalities */
   readonly modalities: {
-    llm: LLMHandler<XAILLMParams>;
+    llm: LLMHandler<XAILLMParamsUnion>;
   };
 }

@@ -86,15 +89,15 @@ function createXAIProvider(): XAIProvider {

   // Create a dynamic modalities object that returns the correct handler
   const modalities = {
-    get llm(): LLMHandler<XAILLMParams> {
+    get llm(): LLMHandler<XAILLMParamsUnion> {
       switch (currentApiMode) {
         case 'responses':
-          return responsesHandler;
+          return responsesHandler as unknown as LLMHandler<XAILLMParamsUnion>;
         case 'messages':
-          return messagesHandler;
+          return messagesHandler as unknown as LLMHandler<XAILLMParamsUnion>;
         case 'completions':
         default:
-          return completionsHandler;
+          return completionsHandler as unknown as LLMHandler<XAILLMParamsUnion>;
       }
     },
   };
@@ -121,9 +124,9 @@ function createXAIProvider(): XAIProvider {
   const provider = fn as XAIProvider;

   // Inject provider reference into all handlers (spec compliance)
-  completionsHandler._setProvider?.(provider as unknown as LLMProvider<XAILLMParams>);
-  responsesHandler._setProvider?.(provider as unknown as LLMProvider<XAILLMParams>);
-  messagesHandler._setProvider?.(provider as unknown as LLMProvider<XAILLMParams>);
+  completionsHandler._setProvider?.(provider as unknown as LLMProvider<XAICompletionsParams>);
+  responsesHandler._setProvider?.(provider as unknown as LLMProvider<XAIResponsesParams>);
+  messagesHandler._setProvider?.(provider as unknown as LLMProvider<XAIMessagesParams>);

   return provider;
 }
@@ -208,7 +211,9 @@ export const xai = createXAIProvider();

 // Re-export types
 export type {
-  XAILLMParams,
+  XAICompletionsParams,
+  XAIResponsesParams,
+  XAIMessagesParams,
   XAIConfig,
   XAIAPIMode,
   XAIModelOptions,
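
The xAI provider gets the same treatment as OpenRouter, with a third Messages-mode type in the union. A sketch of the re-exported types in use; the import subpath is assumed from the dist/xai build output, and the field names are assumptions by analogy with the OpenAI and OpenRouter splits above, since the xai/types.ts hunks are not shown here:

// Sketch: one param type per xAI API mode. Field names are assumptions;
// check src/providers/xai/types.ts (+68 -38 in this release).
import type {
  XAICompletionsParams,
  XAIResponsesParams,
  XAIMessagesParams,
} from '@providerprotocol/ai/xai'; // subpath is an assumption

const completions: XAICompletionsParams = { max_tokens: 256 };
const responses: XAIResponsesParams = { max_output_tokens: 256 };
const messages: XAIMessagesParams = { max_tokens: 256 }; // Anthropic-style Messages API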