@providerprotocol/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +21 -0
  2. package/README.md +84 -0
  3. package/dist/anthropic/index.d.ts +41 -0
  4. package/dist/anthropic/index.js +500 -0
  5. package/dist/anthropic/index.js.map +1 -0
  6. package/dist/chunk-CUCRF5W6.js +136 -0
  7. package/dist/chunk-CUCRF5W6.js.map +1 -0
  8. package/dist/chunk-FTFX2VET.js +424 -0
  9. package/dist/chunk-FTFX2VET.js.map +1 -0
  10. package/dist/chunk-QUUX4G7U.js +117 -0
  11. package/dist/chunk-QUUX4G7U.js.map +1 -0
  12. package/dist/chunk-Y6Q7JCNP.js +39 -0
  13. package/dist/chunk-Y6Q7JCNP.js.map +1 -0
  14. package/dist/google/index.d.ts +69 -0
  15. package/dist/google/index.js +517 -0
  16. package/dist/google/index.js.map +1 -0
  17. package/dist/http/index.d.ts +61 -0
  18. package/dist/http/index.js +43 -0
  19. package/dist/http/index.js.map +1 -0
  20. package/dist/index.d.ts +792 -0
  21. package/dist/index.js +898 -0
  22. package/dist/index.js.map +1 -0
  23. package/dist/openai/index.d.ts +204 -0
  24. package/dist/openai/index.js +1340 -0
  25. package/dist/openai/index.js.map +1 -0
  26. package/dist/provider-CUJWjgNl.d.ts +192 -0
  27. package/dist/retry-I2661_rv.d.ts +118 -0
  28. package/package.json +88 -0
  29. package/src/anthropic/index.ts +3 -0
  30. package/src/core/image.ts +188 -0
  31. package/src/core/llm.ts +619 -0
  32. package/src/core/provider.ts +92 -0
  33. package/src/google/index.ts +3 -0
  34. package/src/http/errors.ts +112 -0
  35. package/src/http/fetch.ts +210 -0
  36. package/src/http/index.ts +31 -0
  37. package/src/http/keys.ts +136 -0
  38. package/src/http/retry.ts +205 -0
  39. package/src/http/sse.ts +136 -0
  40. package/src/index.ts +32 -0
  41. package/src/openai/index.ts +9 -0
  42. package/src/providers/anthropic/index.ts +17 -0
  43. package/src/providers/anthropic/llm.ts +196 -0
  44. package/src/providers/anthropic/transform.ts +452 -0
  45. package/src/providers/anthropic/types.ts +213 -0
  46. package/src/providers/google/index.ts +17 -0
  47. package/src/providers/google/llm.ts +203 -0
  48. package/src/providers/google/transform.ts +487 -0
  49. package/src/providers/google/types.ts +214 -0
  50. package/src/providers/openai/index.ts +151 -0
  51. package/src/providers/openai/llm.completions.ts +201 -0
  52. package/src/providers/openai/llm.responses.ts +211 -0
  53. package/src/providers/openai/transform.completions.ts +628 -0
  54. package/src/providers/openai/transform.responses.ts +718 -0
  55. package/src/providers/openai/types.ts +711 -0
  56. package/src/types/content.ts +133 -0
  57. package/src/types/errors.ts +85 -0
  58. package/src/types/index.ts +105 -0
  59. package/src/types/llm.ts +211 -0
  60. package/src/types/messages.ts +182 -0
  61. package/src/types/provider.ts +195 -0
  62. package/src/types/schema.ts +58 -0
  63. package/src/types/stream.ts +146 -0
  64. package/src/types/thread.ts +226 -0
  65. package/src/types/tool.ts +88 -0
  66. package/src/types/turn.ts +118 -0
  67. package/src/utils/id.ts +28 -0
@@ -0,0 +1,214 @@ package/src/providers/google/types.ts
+ /**
+  * Google Gemini-specific LLM parameters
+  */
+ export interface GoogleLLMParams {
+   /** Maximum number of tokens to generate */
+   maxOutputTokens?: number;
+
+   /** Temperature for randomness (0.0 - 2.0) */
+   temperature?: number;
+
+   /** Top-p (nucleus) sampling */
+   topP?: number;
+
+   /** Top-k sampling */
+   topK?: number;
+
+   /** Stop sequences */
+   stopSequences?: string[];
+
+   /** Number of candidates to generate */
+   candidateCount?: number;
+
+   /** Response MIME type */
+   responseMimeType?: 'text/plain' | 'application/json';
+
+   /** Response schema for structured output */
+   responseSchema?: Record<string, unknown>;
+
+   /**
+    * Presence penalty for new topics
+    * Positive values encourage discussing new topics
+    */
+   presencePenalty?: number;
+
+   /**
+    * Frequency penalty for repeated tokens
+    * Positive values discourage repetition
+    */
+   frequencyPenalty?: number;
+
+   /**
+    * Seed for deterministic sampling
+    * Same seed with same parameters should produce same results
+    */
+   seed?: number;
+
+   /**
+    * Whether to return log probabilities in response
+    */
+   responseLogprobs?: boolean;
+
+   /**
+    * Number of log probabilities to return (requires responseLogprobs: true)
+    */
+   logprobs?: number;
+
+   /**
+    * Whether to include audio timestamps in response
+    */
+   audioTimestamp?: boolean;
+
+   /**
+    * Thinking/reasoning configuration for Gemini 3+ models
+    */
+   thinkingConfig?: GoogleThinkingConfig;
+ }
+
+ /**
+  * Thinking configuration for Gemini 3+ models
+  */
+ export interface GoogleThinkingConfig {
+   /** Token budget allotted for the model's internal thinking */
+   thinkingBudget?: number;
+ }
+
+ /**
+  * Google API request body
+  */
+ export interface GoogleRequest {
+   contents: GoogleContent[];
+   systemInstruction?: {
+     parts: GooglePart[];
+   };
+   generationConfig?: {
+     maxOutputTokens?: number;
+     temperature?: number;
+     topP?: number;
+     topK?: number;
+     stopSequences?: string[];
+     candidateCount?: number;
+     responseMimeType?: string;
+     responseSchema?: Record<string, unknown>;
+     presencePenalty?: number;
+     frequencyPenalty?: number;
+     seed?: number;
+     responseLogprobs?: boolean;
+     logprobs?: number;
+     audioTimestamp?: boolean;
+     thinkingConfig?: GoogleThinkingConfig;
+   };
+   tools?: GoogleTool[];
+   safetySettings?: GoogleSafetySetting[];
+ }
+
+ /**
+  * Google content (message) format
+  */
+ export interface GoogleContent {
+   role: 'user' | 'model';
+   parts: GooglePart[];
+ }
+
+ /**
+  * Google content part types
+  */
+ export type GooglePart =
+   | GoogleTextPart
+   | GoogleImagePart
+   | GoogleFunctionCallPart
+   | GoogleFunctionResponsePart;
+
+ export interface GoogleTextPart {
+   text: string;
+ }
+
+ export interface GoogleImagePart {
+   inlineData: {
+     mimeType: string;
+     data: string;
+   };
+ }
+
+ export interface GoogleFunctionCallPart {
+   functionCall: {
+     name: string;
+     args: Record<string, unknown>;
+   };
+   /** Gemini 3+ thought signature for multi-turn tool calls */
+   thoughtSignature?: string;
+ }
+
+ export interface GoogleFunctionResponsePart {
+   functionResponse: {
+     name: string;
+     response: Record<string, unknown>;
+   };
+ }
+
+ /**
+  * Google tool format
+  */
+ export interface GoogleTool {
+   functionDeclarations: GoogleFunctionDeclaration[];
+ }
+
+ export interface GoogleFunctionDeclaration {
+   name: string;
+   description: string;
+   parameters: {
+     type: 'object';
+     properties: Record<string, unknown>;
+     required?: string[];
+   };
+ }
+
+ /**
+  * Google safety setting
+  */
+ export interface GoogleSafetySetting {
+   category: string;
+   threshold: string;
+ }
+
+ /**
+  * Google response format
+  */
+ export interface GoogleResponse {
+   candidates: GoogleCandidate[];
+   usageMetadata?: {
+     promptTokenCount: number;
+     candidatesTokenCount: number;
+     totalTokenCount: number;
+   };
+ }
+
+ export interface GoogleCandidate {
+   content: {
+     role: 'model';
+     parts: GoogleResponsePart[];
+   };
+   finishReason: 'STOP' | 'MAX_TOKENS' | 'SAFETY' | 'RECITATION' | 'OTHER' | 'TOOL_USE' | null;
+   index: number;
+   safetyRatings?: GoogleSafetyRating[];
+ }
+
+ export type GoogleResponsePart = GoogleTextPart | GoogleFunctionCallPart;
+
+ export interface GoogleSafetyRating {
+   category: string;
+   probability: string;
+ }
+
+ /**
+  * Google streaming response chunk
+  * Same structure as regular response but may be partial
+  */
+ export interface GoogleStreamChunk {
+   candidates?: GoogleCandidate[];
+   usageMetadata?: {
+     promptTokenCount: number;
+     candidatesTokenCount: number;
+     totalTokenCount: number;
+   };
+ }
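
The request/response shapes above are plain data, so assembling a call body is mechanical. A minimal sketch with hypothetical values, assuming the interfaces above are imported from this `types.ts`:

```ts
import type { GoogleRequest, GoogleResponse } from './types.ts';

// Hypothetical request: one user turn, a thinking budget, and one tool.
const body: GoogleRequest = {
  contents: [
    { role: 'user', parts: [{ text: 'What is the weather in Paris?' }] },
  ],
  generationConfig: {
    maxOutputTokens: 1024,
    temperature: 0.7,
    thinkingConfig: { thinkingBudget: 2048 },
  },
  tools: [
    {
      functionDeclarations: [
        {
          name: 'get_weather',
          description: 'Look up current weather for a city',
          parameters: {
            type: 'object',
            properties: { city: { type: 'string' } },
            required: ['city'],
          },
        },
      ],
    },
  ],
};

// Reading a response is symmetric: narrow each part by shape, since
// GoogleResponsePart is a union of text and functionCall parts.
function textOf(response: GoogleResponse): string {
  return (
    response.candidates[0]?.content.parts
      .map((part) => ('text' in part ? part.text : ''))
      .join('') ?? ''
  );
}
```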
@@ -0,0 +1,151 @@ package/src/providers/openai/index.ts
+ import type {
+   Provider,
+   ModelReference,
+   LLMHandler,
+   LLMProvider,
+ } from '../../types/provider.ts';
+ import { createCompletionsLLMHandler } from './llm.completions.ts';
+ import { createResponsesLLMHandler } from './llm.responses.ts';
+ import type { OpenAILLMParams, OpenAIConfig } from './types.ts';
+
+ /**
+  * OpenAI provider options
+  */
+ export interface OpenAIProviderOptions {
+   /**
+    * Which API to use:
+    * - 'responses': Modern Responses API (default, recommended)
+    * - 'completions': Legacy Chat Completions API
+    */
+   api?: 'responses' | 'completions';
+ }
+
+ /**
+  * OpenAI provider with configurable API mode
+  *
+  * @example
+  * // Using the modern Responses API (default)
+  * const model = openai('gpt-4o');
+  *
+  * @example
+  * // Using the legacy Chat Completions API
+  * const model = openai('gpt-4o', { api: 'completions' });
+  *
+  * @example
+  * // Explicit Responses API
+  * const model = openai('gpt-4o', { api: 'responses' });
+  */
+ export interface OpenAIProvider extends Provider<OpenAIProviderOptions> {
+   /**
+    * Create a model reference
+    * @param modelId - The model identifier (e.g., 'gpt-4o', 'gpt-4-turbo', 'o1-preview')
+    * @param options - Provider options including API selection
+    */
+   (modelId: string, options?: OpenAIProviderOptions): ModelReference<OpenAIProviderOptions>;
+
+   /** Provider name */
+   readonly name: 'openai';
+
+   /** Provider version */
+   readonly version: string;
+
+   /** Supported modalities */
+   readonly modalities: {
+     llm: LLMHandler<OpenAILLMParams>;
+   };
+ }
+
+ /**
+  * Create the OpenAI provider
+  */
+ function createOpenAIProvider(): OpenAIProvider {
+   // Track which API mode is currently active for the modalities
+   let currentApiMode: 'responses' | 'completions' = 'responses';
+
+   // Create handlers eagerly so we can inject provider reference
+   const responsesHandler = createResponsesLLMHandler();
+   const completionsHandler = createCompletionsLLMHandler();
+
+   const fn = function (
+     modelId: string,
+     options?: OpenAIProviderOptions
+   ): ModelReference<OpenAIProviderOptions> {
+     const apiMode = options?.api ?? 'responses';
+     currentApiMode = apiMode;
+     return { modelId, provider };
+   };
+
+   // Create a dynamic modalities object that returns the correct handler
+   const modalities = {
+     get llm(): LLMHandler<OpenAILLMParams> {
+       return currentApiMode === 'completions'
+         ? completionsHandler
+         : responsesHandler;
+     },
+   };
+
+   // Define properties
+   Object.defineProperties(fn, {
+     name: {
+       value: 'openai',
+       writable: false,
+       configurable: true,
+     },
+     version: {
+       value: '1.0.0',
+       writable: false,
+       configurable: true,
+     },
+     modalities: {
+       value: modalities,
+       writable: false,
+       configurable: true,
+     },
+   });
+
+   const provider = fn as OpenAIProvider;
+
+   // Inject provider reference into both handlers (spec compliance)
+   responsesHandler._setProvider?.(provider as unknown as LLMProvider<OpenAILLMParams>);
+   completionsHandler._setProvider?.(provider as unknown as LLMProvider<OpenAILLMParams>);
+
+   return provider;
+ }
+
+ /**
+  * OpenAI provider
+  *
+  * Supports both the modern Responses API (default) and legacy Chat Completions API.
+  *
+  * @example
+  * ```ts
+  * import { openai } from './providers/openai';
+  * import { llm } from './core/llm';
+  *
+  * // Using Responses API (default, modern, recommended)
+  * const model = llm({
+  *   model: openai('gpt-4o'),
+  *   params: { max_tokens: 1000 }
+  * });
+  *
+  * // Using Chat Completions API (legacy)
+  * const legacyModel = llm({
+  *   model: openai('gpt-4o', { api: 'completions' }),
+  *   params: { max_tokens: 1000 }
+  * });
+  *
+  * // Generate
+  * const turn = await model.generate('Hello!');
+  * console.log(turn.response.text);
+  * ```
+  */
+ export const openai = createOpenAIProvider();
+
+ // Re-export types
+ export type {
+   OpenAILLMParams,
+   OpenAIConfig,
+   OpenAIAPIMode,
+   OpenAIModelOptions,
+   OpenAIModelReference,
+ } from './types.ts';
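
One design choice worth noting in `createOpenAIProvider`: `modalities.llm` is a getter over the shared `currentApiMode` variable, which is overwritten on every factory call. The handler you read back therefore reflects the most recent call, not the model reference in hand. A sketch of the observable behavior (model IDs are placeholders):

```ts
import { openai } from './index.ts';

const a = openai('gpt-4o');                          // sets mode to 'responses'
const b = openai('gpt-4o', { api: 'completions' });  // sets mode to 'completions'

// The getter resolves against the latest recorded mode, so this returns
// the Chat Completions handler even if it is then used with reference `a`.
const handler = openai.modalities.llm;
```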
@@ -0,0 +1,201 @@ package/src/providers/openai/llm.completions.ts
+ import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
+ import type { StreamEvent } from '../../types/stream.ts';
+ import type { LLMProvider } from '../../types/provider.ts';
+ import { UPPError } from '../../types/errors.ts';
+ import { resolveApiKey } from '../../http/keys.ts';
+ import { doFetch, doStreamFetch } from '../../http/fetch.ts';
+ import { parseSSEStream } from '../../http/sse.ts';
+ import { normalizeHttpError } from '../../http/errors.ts';
+ import type { OpenAILLMParams, OpenAICompletionsResponse, OpenAICompletionsStreamChunk } from './types.ts';
+ import {
+   transformRequest,
+   transformResponse,
+   transformStreamEvent,
+   createStreamState,
+   buildResponseFromState,
+ } from './transform.completions.ts';
+
+ const OPENAI_API_URL = 'https://api.openai.com/v1/chat/completions';
+
+ /**
+  * OpenAI API capabilities
+  */
+ const OPENAI_CAPABILITIES: LLMCapabilities = {
+   streaming: true,
+   tools: true,
+   structuredOutput: true,
+   imageInput: true,
+   videoInput: false,
+   audioInput: false,
+ };
+
+ /**
+  * Create OpenAI Chat Completions LLM handler
+  */
+ export function createCompletionsLLMHandler(): LLMHandler<OpenAILLMParams> {
+   // Provider reference injected by createProvider() or OpenAI's custom factory
+   let providerRef: LLMProvider<OpenAILLMParams> | null = null;
+
+   return {
+     _setProvider(provider: LLMProvider<OpenAILLMParams>) {
+       providerRef = provider;
+     },
+
+     bind(modelId: string): BoundLLMModel<OpenAILLMParams> {
+       // Use the injected provider reference
+       if (!providerRef) {
+         throw new UPPError(
+           'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
+           'INVALID_REQUEST',
+           'openai',
+           'llm'
+         );
+       }
+
+       const model: BoundLLMModel<OpenAILLMParams> = {
+         modelId,
+         capabilities: OPENAI_CAPABILITIES,
+
+         get provider(): LLMProvider<OpenAILLMParams> {
+           return providerRef!;
+         },
+
+         async complete(request: LLMRequest<OpenAILLMParams>): Promise<LLMResponse> {
+           const apiKey = await resolveApiKey(
+             request.config,
+             'OPENAI_API_KEY',
+             'openai',
+             'llm'
+           );
+
+           const baseUrl = request.config.baseUrl ?? OPENAI_API_URL;
+           const body = transformRequest(request, modelId);
+
+           const response = await doFetch(
+             baseUrl,
+             {
+               method: 'POST',
+               headers: {
+                 'Content-Type': 'application/json',
+                 Authorization: `Bearer ${apiKey}`,
+               },
+               body: JSON.stringify(body),
+               signal: request.signal,
+             },
+             request.config,
+             'openai',
+             'llm'
+           );
+
+           const data = (await response.json()) as OpenAICompletionsResponse;
+           return transformResponse(data);
+         },
+
+         stream(request: LLMRequest<OpenAILLMParams>): LLMStreamResult {
+           const state = createStreamState();
+           let responseResolve: (value: LLMResponse) => void;
+           let responseReject: (error: Error) => void;
+
+           const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
+             responseResolve = resolve;
+             responseReject = reject;
+           });
+
+           async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
+             try {
+               const apiKey = await resolveApiKey(
+                 request.config,
+                 'OPENAI_API_KEY',
+                 'openai',
+                 'llm'
+               );
+
+               const baseUrl = request.config.baseUrl ?? OPENAI_API_URL;
+               const body = transformRequest(request, modelId);
+               body.stream = true;
+               body.stream_options = { include_usage: true };
+
+               const response = await doStreamFetch(
+                 baseUrl,
+                 {
+                   method: 'POST',
+                   headers: {
+                     'Content-Type': 'application/json',
+                     Authorization: `Bearer ${apiKey}`,
+                   },
+                   body: JSON.stringify(body),
+                   signal: request.signal,
+                 },
+                 request.config,
+                 'openai',
+                 'llm'
+               );
+
+               if (!response.ok) {
+                 const error = await normalizeHttpError(response, 'openai', 'llm');
+                 responseReject(error);
+                 throw error;
+               }
+
+               if (!response.body) {
+                 const error = new UPPError(
+                   'No response body for streaming request',
+                   'PROVIDER_ERROR',
+                   'openai',
+                   'llm'
+                 );
+                 responseReject(error);
+                 throw error;
+               }
+
+               for await (const data of parseSSEStream(response.body)) {
+                 // Skip [DONE] marker
+                 if (data === '[DONE]') {
+                   continue;
+                 }
+
+                 // Check for OpenAI error event
+                 if (typeof data === 'object' && data !== null) {
+                   const chunk = data as OpenAICompletionsStreamChunk;
+
+                   // Check for error in chunk
+                   if ('error' in chunk && chunk.error) {
+                     const errorData = chunk.error as { message?: string; type?: string };
+                     const error = new UPPError(
+                       errorData.message ?? 'Unknown error',
+                       'PROVIDER_ERROR',
+                       'openai',
+                       'llm'
+                     );
+                     responseReject(error);
+                     throw error;
+                   }
+
+                   const uppEvents = transformStreamEvent(chunk, state);
+                   for (const event of uppEvents) {
+                     yield event;
+                   }
+                 }
+               }
+
+               // Build final response
+               responseResolve(buildResponseFromState(state));
+             } catch (error) {
+               responseReject(error as Error);
+               throw error;
+             }
+           }
+
+           return {
+             [Symbol.asyncIterator]() {
+               return generateEvents();
+             },
+             response: responsePromise,
+           };
+         },
+       };
+
+       return model;
+     },
+   };
+ }
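
`stream()` returns a hybrid result: an async iterable of `StreamEvent`s plus a `response` promise that `generateEvents` settles once the SSE stream ends (or errors). A minimal consumer sketch, assuming the provider reference has already been injected by the factory in `providers/openai/index.ts`:

```ts
import type { LLMRequest } from '../../types/llm.ts';
import type { OpenAILLMParams } from './types.ts';
import { openai } from './index.ts';

async function runStream(request: LLMRequest<OpenAILLMParams>) {
  // bind() throws a UPPError if no provider reference has been injected.
  const model = openai.modalities.llm.bind('gpt-4o');
  const result = model.stream(request);

  // Incremental events arrive through the async iterator...
  for await (const event of result) {
    console.log(event);
  }

  // ...and the aggregate LLMResponse (built by buildResponseFromState)
  // resolves once the stream has been fully consumed.
  return await result.response;
}
```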