@providerprotocol/ai 0.0.2 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,235 @@
+ import { b as Provider, M as ModelReference, a as LLMHandler } from '../provider-CUJWjgNl.js';
+
+ /**
+  * OpenRouter-specific LLM parameters
+  * These are passed through to the OpenRouter APIs
+  */
+ interface OpenRouterLLMParams {
+     /** Maximum number of tokens to generate */
+     max_tokens?: number;
+     /** Maximum output tokens (Responses API) */
+     max_output_tokens?: number;
+     /** Temperature for randomness (0.0 to 2.0) */
+     temperature?: number;
+     /** Top-p (nucleus) sampling (0.0 to 1.0) */
+     top_p?: number;
+     /** Top-k sampling (not available for OpenAI models) */
+     top_k?: number;
+     /** Frequency penalty (-2.0 to 2.0) */
+     frequency_penalty?: number;
+     /** Presence penalty (-2.0 to 2.0) */
+     presence_penalty?: number;
+     /** Repetition penalty (0.0 to 2.0) */
+     repetition_penalty?: number;
+     /** Custom stop sequences */
+     stop?: string | string[];
+     /** Seed for deterministic sampling */
+     seed?: number;
+     /** User identifier for abuse detection */
+     user?: string;
+     /** Enable logprobs */
+     logprobs?: boolean;
+     /** Number of top logprobs to return */
+     top_logprobs?: number;
+     /** Logit bias map */
+     logit_bias?: Record<number, number>;
+     /** Minimum probability threshold (0.0 to 1.0) */
+     min_p?: number;
+     /** Top-a sampling threshold (0.0 to 1.0) */
+     top_a?: number;
+     /** Whether to enable parallel tool calls */
+     parallel_tool_calls?: boolean;
+     /** Response format for structured output (Chat Completions API only) */
+     response_format?: OpenRouterResponseFormat;
+     /**
+      * Prompt transforms to apply
+      * See: https://openrouter.ai/docs/guides/features/message-transforms
+      */
+     transforms?: string[];
+     /**
+      * Multiple models for routing
+      * See: https://openrouter.ai/docs/guides/features/model-routing
+      */
+     models?: string[];
+     /**
+      * Routing strategy (e.g., 'fallback')
+      */
+     route?: 'fallback';
+     /**
+      * Provider routing preferences
+      * See: https://openrouter.ai/docs/guides/routing/provider-selection
+      */
+     provider?: OpenRouterProviderPreferences;
+     /**
+      * Predicted output for latency optimization
+      * https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs
+      */
+     prediction?: {
+         type: 'content';
+         content: string;
+     };
+     /**
+      * Debug options (streaming only)
+      */
+     debug?: {
+         /** If true, returns the transformed request body sent to the provider */
+         echo_upstream_body?: boolean;
+     };
+     /**
+      * Reasoning configuration (Responses API)
+      */
+     reasoning?: {
+         effort?: 'low' | 'medium' | 'high';
+     };
+ }
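For orientation, here is a minimal sketch of supplying several of these pass-through parameters. The `openrouter` and `llm` import paths are taken from the package's own JSDoc example further down; the parameter values are illustrative, not recommendations:

```ts
import { openrouter } from './providers/openrouter';
import { llm } from './core/llm';

// All fields come from OpenRouterLLMParams above and are passed
// through to OpenRouter unchanged; values here are illustrative.
const model = llm({
  model: openrouter('openai/gpt-4o'),
  params: {
    max_tokens: 512,
    temperature: 0.7,  // 0.0 to 2.0
    top_p: 0.9,        // nucleus sampling, 0.0 to 1.0
    seed: 42,          // deterministic sampling where the provider supports it
    logprobs: true,
    top_logprobs: 5,   // used together with logprobs
    stop: ['\n\n'],
  },
});
```

Note that `max_output_tokens` and `reasoning` are documented as Responses API fields, so under the declared types they would pair with `openrouter('...', { api: 'responses' })` rather than the default completions mode shown here.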
+ /**
+  * API mode for OpenRouter provider
+  */
+ type OpenRouterAPIMode = 'completions' | 'responses';
+ /**
+  * Model options when creating a model reference
+  */
+ interface OpenRouterModelOptions {
+     /** Which API to use */
+     api?: OpenRouterAPIMode;
+ }
+ /**
+  * Model reference with OpenRouter-specific options
+  */
+ interface OpenRouterModelReference {
+     modelId: string;
+     options?: OpenRouterModelOptions;
+ }
+ /**
+  * OpenRouter provider configuration
+  */
+ interface OpenRouterConfig {
+     /** Which API to use: 'completions' (default) or 'responses' (beta) */
+     api?: 'completions' | 'responses';
+ }
+ /**
+  * Provider routing preferences
+  */
+ interface OpenRouterProviderPreferences {
+     /** Allow fallback to other providers */
+     allow_fallbacks?: boolean;
+     /** Require specific parameters to be supported */
+     require_parameters?: boolean;
+     /** Data collection policy */
+     data_collection?: 'allow' | 'deny';
+     /** Order of provider preference */
+     order?: string[];
+     /** Ignore specific providers */
+     ignore?: string[];
+     /** Quantization preferences */
+     quantizations?: string[];
+ }
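These fields map onto OpenRouter's provider-routing controls. As a sketch of a locked-down configuration (the provider slugs are illustrative, not an endorsement of real values):

```ts
// Illustrative values only; field names are from
// OpenRouterProviderPreferences above.
const providerPrefs: OpenRouterProviderPreferences = {
  order: ['openai', 'azure'],  // preferred providers, in order
  allow_fallbacks: false,      // fail instead of rerouting elsewhere
  require_parameters: true,    // skip providers that ignore request params
  data_collection: 'deny',     // avoid providers that retain prompts
  ignore: ['some-provider'],   // hypothetical slug: providers to exclude
  quantizations: ['fp16'],     // acceptable quantization levels
};
```

Such an object would be passed as the `provider` field of `OpenRouterLLMParams`, as in the routing example near the end of this file.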
+ /**
+  * Response format
+  */
+ type OpenRouterResponseFormat = {
+     type: 'text';
+ } | {
+     type: 'json_object';
+ } | {
+     type: 'json_schema';
+     json_schema: {
+         name: string;
+         description?: string;
+         schema: Record<string, unknown>;
+         strict?: boolean;
+     };
+ };
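Since `response_format` is documented above as Chat Completions only, here is a hedged sketch of the strict `json_schema` variant; the schema name and fields are invented for illustration:

```ts
// Hypothetical schema; only the shape of OpenRouterResponseFormat
// is taken from the declaration above.
const format: OpenRouterResponseFormat = {
  type: 'json_schema',
  json_schema: {
    name: 'weather_report',
    description: 'Structured weather summary',
    schema: {
      type: 'object',
      properties: {
        city: { type: 'string' },
        temperature_c: { type: 'number' },
      },
      required: ['city', 'temperature_c'],
      additionalProperties: false,
    },
    strict: true, // ask the provider to enforce the schema exactly
  },
};
// Then pass it through params, e.g. params: { response_format: format }
```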
+
+ /**
+  * OpenRouter provider options
+  */
+ interface OpenRouterProviderOptions {
+     /**
+      * Which API to use:
+      * - 'completions': Chat Completions API (default, recommended)
+      * - 'responses': Responses API (beta)
+      */
+     api?: 'completions' | 'responses';
+ }
+ /**
+  * OpenRouter provider with configurable API mode
+  *
+  * @example
+  * // Using the Chat Completions API (default)
+  * const model = openrouter('openai/gpt-4o');
+  *
+  * @example
+  * // Using the Responses API (beta)
+  * const model = openrouter('openai/gpt-4o', { api: 'responses' });
+  *
+  * @example
+  * // Explicit Completions API
+  * const model = openrouter('anthropic/claude-3.5-sonnet', { api: 'completions' });
+  */
+ interface OpenRouterProvider extends Provider<OpenRouterProviderOptions> {
+     /**
+      * Create a model reference
+      * @param modelId - The model identifier (e.g., 'openai/gpt-4o', 'anthropic/claude-3.5-sonnet', 'meta-llama/llama-3.1-70b-instruct')
+      * @param options - Provider options including API selection
+      */
+     (modelId: string, options?: OpenRouterProviderOptions): ModelReference<OpenRouterProviderOptions>;
+     /** Provider name */
+     readonly name: 'openrouter';
+     /** Provider version */
+     readonly version: string;
+     /** Supported modalities */
+     readonly modalities: {
+         llm: LLMHandler<OpenRouterLLMParams>;
+     };
+ }
+ /**
+  * OpenRouter provider
+  *
+  * Supports both the Chat Completions API (default) and the Responses API (beta).
+  *
+  * OpenRouter is a unified API that provides access to hundreds of AI models
+  * through a single endpoint, including models from OpenAI, Anthropic, Google,
+  * Meta, Mistral, and many others.
+  *
+  * @example
+  * ```ts
+  * import { openrouter } from './providers/openrouter';
+  * import { llm } from './core/llm';
+  *
+  * // Using Chat Completions API (default, recommended)
+  * const model = llm({
+  *   model: openrouter('openai/gpt-4o'),
+  *   params: { max_tokens: 1000 }
+  * });
+  *
+  * // Using Responses API (beta)
+  * const betaModel = llm({
+  *   model: openrouter('openai/gpt-4o', { api: 'responses' }),
+  *   params: { max_output_tokens: 1000 }
+  * });
+  *
+  * // Using OpenRouter-specific features
+  * const routedModel = llm({
+  *   model: openrouter('openai/gpt-4o'),
+  *   params: {
+  *     max_tokens: 1000,
+  *     // Fallback routing
+  *     models: ['openai/gpt-4o', 'anthropic/claude-3.5-sonnet'],
+  *     route: 'fallback',
+  *     // Provider preferences
+  *     provider: {
+  *       allow_fallbacks: true,
+  *       require_parameters: true,
+  *     },
+  *   }
+  * });
+  *
+  * // Generate
+  * const turn = await model.generate('Hello!');
+  * console.log(turn.response.text);
+  * ```
+  */
+ declare const openrouter: OpenRouterProvider;
+
+ export { type OpenRouterAPIMode, type OpenRouterConfig, type OpenRouterLLMParams, type OpenRouterModelOptions, type OpenRouterModelReference, type OpenRouterProviderPreferences, openrouter };