@push.rocks/smartai 2.0.1 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,303 @@
1
+ import type {
2
+ IOpenAiMaxAuthCredentials,
3
+ IOpenAiMaxAuthOptions,
4
+ IOpenAiMaxCompleteDeviceCodeOptions,
5
+ IOpenAiMaxDeviceCode,
6
+ IOpenAiMaxDeviceCodePollOptions,
7
+ IOpenAiMaxIdTokenInfo,
8
+ IOpenAiMaxTokenData,
9
+ } from './smartai.interfaces.js';
10
+
11
// OAuth issuer host used for all OpenAI Max authentication endpoints.
export const OPENAI_MAX_AUTH_ISSUER = 'https://auth.openai.com';
// Public OAuth client id used for this device-code flow.
export const OPENAI_MAX_CLIENT_ID = 'app_EMoamEEZ73f0CkXaXp7hrann';
// Base URL of the ChatGPT Codex backend that the access token is used against.
export const OPENAI_MAX_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex';
// Default value for the `originator` header attached to Codex requests.
export const OPENAI_MAX_DEFAULT_ORIGINATOR = 'smartai';

// Upper bound on how long device-code polling may run (15 minutes).
const DEVICE_CODE_TIMEOUT_MS = 15 * 60 * 1000;
17
+
18
+ export class OpenAiMaxAuthError extends Error {
19
+ public status?: number;
20
+ public body?: string;
21
+
22
+ constructor(message: string, options: { status?: number; body?: string } = {}) {
23
+ super(message);
24
+ this.name = 'OpenAiMaxAuthError';
25
+ this.status = options.status;
26
+ this.body = options.body;
27
+ }
28
+ }
29
+
30
/**
 * Result of a successful device-code poll: the authorization code plus the
 * PKCE challenge/verifier pair needed for the subsequent token exchange.
 */
export interface IOpenAiMaxAuthorizationCode {
  authorizationCode: string;
  codeChallenge: string;
  codeVerifier: string;
}

/** Raw, unvalidated shape of the OAuth token endpoint response. */
interface IOpenAiMaxTokenResponse {
  id_token?: unknown;
  access_token?: unknown;
  refresh_token?: unknown;
}
41
+
42
+ function getFetch(options: IOpenAiMaxAuthOptions): typeof fetch {
43
+ const fetchFunction = options.fetch ?? globalThis.fetch;
44
+ if (!fetchFunction) {
45
+ throw new OpenAiMaxAuthError('fetch is not available for OpenAI Max authentication.');
46
+ }
47
+ return fetchFunction;
48
+ }
49
+
50
+ function getIssuer(options: IOpenAiMaxAuthOptions): string {
51
+ return (options.issuer ?? OPENAI_MAX_AUTH_ISSUER).replace(/\/+$/, '');
52
+ }
53
+
54
+ function getClientId(options: IOpenAiMaxAuthOptions): string {
55
+ return options.clientId ?? OPENAI_MAX_CLIENT_ID;
56
+ }
57
+
58
+ function asString(value: unknown, name: string): string {
59
+ if (typeof value !== 'string' || value.length === 0) {
60
+ throw new OpenAiMaxAuthError(`OpenAI Max auth response is missing ${name}.`);
61
+ }
62
+ return value;
63
+ }
64
+
65
+ function asOptionalString(value: unknown): string | undefined {
66
+ return typeof value === 'string' && value.length > 0 ? value : undefined;
67
+ }
68
+
69
+ function asIntervalSeconds(value: unknown): number {
70
+ const interval = typeof value === 'number' ? value : Number.parseInt(String(value ?? ''), 10);
71
+ if (!Number.isFinite(interval) || interval <= 0) {
72
+ throw new OpenAiMaxAuthError('OpenAI Max device-code response has an invalid interval.');
73
+ }
74
+ return interval;
75
+ }
76
+
77
+ async function readJson(response: Response, context: string): Promise<unknown> {
78
+ const body = await response.text();
79
+ if (!response.ok) {
80
+ throw new OpenAiMaxAuthError(`${context} failed with status ${response.status}.`, {
81
+ status: response.status,
82
+ body,
83
+ });
84
+ }
85
+
86
+ try {
87
+ return body ? JSON.parse(body) : {};
88
+ } catch (error) {
89
+ throw new OpenAiMaxAuthError(`${context} returned invalid JSON: ${(error as Error).message}`, {
90
+ status: response.status,
91
+ body,
92
+ });
93
+ }
94
+ }
95
+
96
+ async function postJson(url: string, body: unknown, options: IOpenAiMaxAuthOptions): Promise<unknown> {
97
+ const response = await getFetch(options)(url, {
98
+ method: 'POST',
99
+ headers: { 'Content-Type': 'application/json' },
100
+ body: JSON.stringify(body),
101
+ });
102
+ return readJson(response, `POST ${url}`);
103
+ }
104
+
105
+ async function postForm(url: string, body: URLSearchParams, options: IOpenAiMaxAuthOptions): Promise<unknown> {
106
+ const response = await getFetch(options)(url, {
107
+ method: 'POST',
108
+ headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
109
+ body: body.toString(),
110
+ });
111
+ return readJson(response, `POST ${url}`);
112
+ }
113
+
114
+ function sleep(ms: number): Promise<void> {
115
+ return new Promise((resolve) => setTimeout(resolve, ms));
116
+ }
117
+
118
+ function parseJwtPayload(jwt: string): Record<string, unknown> {
119
+ const parts = jwt.split('.');
120
+ if (parts.length !== 3 || !parts[1]) {
121
+ throw new OpenAiMaxAuthError('OpenAI Max auth returned an invalid ID token.');
122
+ }
123
+
124
+ try {
125
+ return JSON.parse(Buffer.from(parts[1], 'base64url').toString('utf8')) as Record<string, unknown>;
126
+ } catch (error) {
127
+ throw new OpenAiMaxAuthError(`OpenAI Max ID token could not be parsed: ${(error as Error).message}`);
128
+ }
129
+ }
130
+
131
+ export function parseOpenAiMaxIdToken(idToken: string): IOpenAiMaxIdTokenInfo {
132
+ const claims = parseJwtPayload(idToken);
133
+ const profile = claims['https://api.openai.com/profile'] as Record<string, unknown> | undefined;
134
+ const auth = claims['https://api.openai.com/auth'] as Record<string, unknown> | undefined;
135
+ const expiresAtSeconds = typeof claims.exp === 'number' ? claims.exp : undefined;
136
+
137
+ return {
138
+ email: asOptionalString(claims.email) ?? asOptionalString(profile?.email),
139
+ chatgptPlanType: asOptionalString(auth?.chatgpt_plan_type),
140
+ chatgptUserId: asOptionalString(auth?.chatgpt_user_id) ?? asOptionalString(auth?.user_id),
141
+ chatgptAccountId: asOptionalString(auth?.chatgpt_account_id),
142
+ chatgptAccountIsFedramp: auth?.chatgpt_account_is_fedramp === true,
143
+ expiresAt: expiresAtSeconds ? new Date(expiresAtSeconds * 1000).toISOString() : undefined,
144
+ rawJwt: idToken,
145
+ };
146
+ }
147
+
148
+ function createTokenData(response: IOpenAiMaxTokenResponse): IOpenAiMaxTokenData {
149
+ const idToken = asString(response.id_token, 'id_token');
150
+ const accessToken = asString(response.access_token, 'access_token');
151
+ const refreshToken = asString(response.refresh_token, 'refresh_token');
152
+ const idTokenInfo = parseOpenAiMaxIdToken(idToken);
153
+
154
+ return {
155
+ idToken,
156
+ accessToken,
157
+ refreshToken,
158
+ accountId: idTokenInfo.chatgptAccountId,
159
+ idTokenInfo,
160
+ };
161
+ }
162
+
163
+ export async function requestOpenAiMaxDeviceCode(
164
+ options: IOpenAiMaxAuthOptions = {},
165
+ ): Promise<IOpenAiMaxDeviceCode> {
166
+ const issuer = getIssuer(options);
167
+ const response = await postJson(`${issuer}/api/accounts/deviceauth/usercode`, {
168
+ client_id: getClientId(options),
169
+ }, options) as Record<string, unknown>;
170
+
171
+ return {
172
+ verificationUrl: `${issuer}/codex/device`,
173
+ userCode: asString(response.user_code ?? response.usercode, 'user_code'),
174
+ deviceAuthId: asString(response.device_auth_id, 'device_auth_id'),
175
+ intervalSeconds: asIntervalSeconds(response.interval),
176
+ };
177
+ }
178
+
179
/**
 * Polls the device-auth token endpoint until the user approves the login,
 * the timeout elapses, or an unexpected HTTP error occurs.
 *
 * 403/404 responses mean "not approved yet" and are retried after the
 * server-suggested interval; any other non-2xx status aborts with an error.
 * The sleep function is injectable via options (useful for tests).
 */
export async function pollOpenAiMaxDeviceCode(
  deviceCode: IOpenAiMaxDeviceCode,
  options: IOpenAiMaxDeviceCodePollOptions = {},
): Promise<IOpenAiMaxAuthorizationCode> {
  const issuer = getIssuer(options);
  const pollUrl = `${issuer}/api/accounts/deviceauth/token`;
  const timeoutMs = options.timeoutMs ?? DEVICE_CODE_TIMEOUT_MS;
  const sleepFunction = options.sleep ?? sleep;
  const startedAt = Date.now();

  while (Date.now() - startedAt < timeoutMs) {
    const response = await getFetch(options)(pollUrl, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        device_auth_id: deviceCode.deviceAuthId,
        user_code: deviceCode.userCode,
      }),
    });

    if (response.ok) {
      // Approved: the body carries the authorization code plus the PKCE pair.
      const body = await readJson(response, `POST ${pollUrl}`) as Record<string, unknown>;
      return {
        authorizationCode: asString(body.authorization_code, 'authorization_code'),
        codeChallenge: asString(body.code_challenge, 'code_challenge'),
        codeVerifier: asString(body.code_verifier, 'code_verifier'),
      };
    }

    if (response.status !== 403 && response.status !== 404) {
      // Unexpected failure: surface status and body instead of retrying.
      const body = await response.text();
      throw new OpenAiMaxAuthError(`OpenAI Max device-code polling failed with status ${response.status}.`, {
        status: response.status,
        body,
      });
    }

    // Still pending (403/404): discard the unused body (best-effort), then
    // wait for the poll interval, capped by the remaining timeout budget.
    await response.arrayBuffer().catch(() => undefined);
    const remaining = timeoutMs - (Date.now() - startedAt);
    await sleepFunction(Math.min(deviceCode.intervalSeconds * 1000, Math.max(remaining, 0)));
  }

  throw new OpenAiMaxAuthError('OpenAI Max device-code login timed out.');
}
223
+
224
+ export async function exchangeOpenAiMaxAuthorizationCode(
225
+ authorizationCode: IOpenAiMaxAuthorizationCode,
226
+ options: IOpenAiMaxAuthOptions = {},
227
+ ): Promise<IOpenAiMaxTokenData> {
228
+ const issuer = getIssuer(options);
229
+ const response = await postForm(`${issuer}/oauth/token`, new URLSearchParams({
230
+ grant_type: 'authorization_code',
231
+ code: authorizationCode.authorizationCode,
232
+ redirect_uri: `${issuer}/deviceauth/callback`,
233
+ client_id: getClientId(options),
234
+ code_verifier: authorizationCode.codeVerifier,
235
+ }), options) as IOpenAiMaxTokenResponse;
236
+
237
+ return createTokenData(response);
238
+ }
239
+
240
+ export function ensureOpenAiMaxWorkspaceAllowed(
241
+ tokenData: IOpenAiMaxTokenData,
242
+ forcedChatGptWorkspaceId?: string,
243
+ ): void {
244
+ if (!forcedChatGptWorkspaceId) {
245
+ return;
246
+ }
247
+ if (tokenData.idTokenInfo.chatgptAccountId !== forcedChatGptWorkspaceId) {
248
+ throw new OpenAiMaxAuthError(`OpenAI Max login is restricted to workspace ${forcedChatGptWorkspaceId}.`);
249
+ }
250
+ }
251
+
252
+ export async function completeOpenAiMaxDeviceCodeLogin(
253
+ deviceCode: IOpenAiMaxDeviceCode,
254
+ options: IOpenAiMaxCompleteDeviceCodeOptions = {},
255
+ ): Promise<IOpenAiMaxTokenData> {
256
+ const authorizationCode = await pollOpenAiMaxDeviceCode(deviceCode, options);
257
+ const tokenData = await exchangeOpenAiMaxAuthorizationCode(authorizationCode, options);
258
+ ensureOpenAiMaxWorkspaceAllowed(tokenData, options.forcedChatGptWorkspaceId);
259
+ return tokenData;
260
+ }
261
+
262
+ export async function refreshOpenAiMaxTokenData(
263
+ tokenData: IOpenAiMaxTokenData,
264
+ options: IOpenAiMaxAuthOptions = {},
265
+ ): Promise<IOpenAiMaxTokenData> {
266
+ const issuer = getIssuer(options);
267
+ const response = await postJson(`${issuer}/oauth/token`, {
268
+ client_id: getClientId(options),
269
+ grant_type: 'refresh_token',
270
+ refresh_token: tokenData.refreshToken,
271
+ }, options) as IOpenAiMaxTokenResponse;
272
+
273
+ return createTokenData({
274
+ id_token: response.id_token ?? tokenData.idToken,
275
+ access_token: response.access_token ?? tokenData.accessToken,
276
+ refresh_token: response.refresh_token ?? tokenData.refreshToken,
277
+ });
278
+ }
279
+
280
+ export function createOpenAiMaxProviderSettings(credentials: IOpenAiMaxAuthCredentials): {
281
+ apiKey: string;
282
+ baseURL: string;
283
+ headers: Record<string, string>;
284
+ } {
285
+ const accountId = credentials.accountId ?? credentials.idTokenInfo?.chatgptAccountId;
286
+ const isFedrampAccount = credentials.idTokenInfo?.chatgptAccountIsFedramp === true;
287
+ const headers: Record<string, string> = {
288
+ originator: credentials.originator ?? OPENAI_MAX_DEFAULT_ORIGINATOR,
289
+ };
290
+
291
+ if (accountId) {
292
+ headers['ChatGPT-Account-ID'] = accountId;
293
+ }
294
+ if (isFedrampAccount) {
295
+ headers['X-OpenAI-Fedramp'] = 'true';
296
+ }
297
+
298
+ return {
299
+ apiKey: credentials.accessToken,
300
+ baseURL: credentials.baseUrl ?? OPENAI_MAX_CODEX_BASE_URL,
301
+ headers,
302
+ };
303
+ }
@@ -0,0 +1,250 @@
1
+ import type { JSONObject, JSONValue, LanguageModelV3Middleware, LanguageModelV3Prompt } from '@ai-sdk/provider';
2
+ import type { TSmartAiProviderOptions } from './smartai.interfaces.js';
3
+
4
/** Providers whose prompt caching is expressed via per-message providerOptions markers. */
export type TSmartAiMessageCacheProvider =
  | 'anthropic'
  | 'openrouter'
  | 'bedrock'
  | 'openaiCompatible'
  | 'copilot'
  | 'alibaba';

/** Supported cache-retention hints; not every provider honours every value. */
export type TSmartAiCacheRetention = 'ephemeral' | '1h' | 'in_memory' | '24h';

export interface ISmartAiCacheOptions {
  /** Provider-specific message cache marker namespace. Usually inferred from the model. */
  provider?: TSmartAiMessageCacheProvider;
  /** Stable session/request key for providers that support request-level prompt cache affinity. */
  key?: string;
  /** Short retention is the default; longer retention is opt-in. */
  retention?: TSmartAiCacheRetention;
}

/** `false` disables caching; `true`/`'auto'`/omitted use defaults; an object customises them. */
export type TSmartAiCacheSetting = boolean | 'auto' | ISmartAiCacheOptions;
24
+
25
+ function isObject(input: unknown): input is Record<string, unknown> {
26
+ return typeof input === 'object' && input !== null && !Array.isArray(input);
27
+ }
28
+
29
+ function mergeJsonDefaults(defaults: JSONObject, overrides?: JSONObject): JSONObject {
30
+ const result: JSONObject = { ...defaults };
31
+
32
+ if (!overrides) return result;
33
+
34
+ for (const [key, value] of Object.entries(overrides)) {
35
+ const existing = result[key];
36
+ if (isObject(existing) && isObject(value)) {
37
+ result[key] = mergeJsonDefaults(existing as JSONObject, value as JSONObject);
38
+ continue;
39
+ }
40
+ result[key] = value as JSONValue;
41
+ }
42
+
43
+ return result;
44
+ }
45
+
46
+ export function mergeSmartAiProviderOptions(
47
+ defaults?: TSmartAiProviderOptions,
48
+ overrides?: TSmartAiProviderOptions,
49
+ ): TSmartAiProviderOptions | undefined {
50
+ if (!defaults) return overrides;
51
+ if (!overrides) return defaults;
52
+ return mergeJsonDefaults(defaults as JSONObject, overrides as JSONObject) as TSmartAiProviderOptions;
53
+ }
54
+
55
+ function cacheOptionsFromSetting(cache: TSmartAiCacheSetting | undefined): ISmartAiCacheOptions | undefined {
56
+ if (cache === false) return undefined;
57
+ if (cache === undefined || cache === true || cache === 'auto') return {};
58
+ return cache;
59
+ }
60
+
61
+ export function resolveSmartAiCacheProvider(provider?: string, modelId?: string): TSmartAiMessageCacheProvider | undefined {
62
+ const providerLower = provider?.toLowerCase() ?? '';
63
+ const modelLower = modelId?.toLowerCase() ?? '';
64
+
65
+ if (providerLower.includes('openrouter')) return 'openrouter';
66
+ if (providerLower.includes('bedrock')) return 'bedrock';
67
+ if (providerLower.includes('copilot')) return 'copilot';
68
+ if (providerLower.includes('alibaba')) return 'alibaba';
69
+ if (providerLower.includes('openai-compatible') || providerLower.includes('openaicompatible')) {
70
+ return 'openaiCompatible';
71
+ }
72
+ if (providerLower.includes('anthropic')) return 'anthropic';
73
+ if (modelLower.includes('claude') || modelLower.includes('anthropic')) return 'anthropic';
74
+
75
+ return undefined;
76
+ }
77
+
78
+ export function getSmartAiMessageCacheProviderOptions(
79
+ provider: TSmartAiMessageCacheProvider,
80
+ options: ISmartAiCacheOptions = {},
81
+ ): TSmartAiProviderOptions {
82
+ const anthropicCacheControl: JSONObject = {
83
+ type: 'ephemeral',
84
+ ...(options.retention === '1h' ? { ttl: '1h' } : {}),
85
+ };
86
+
87
+ const providerOptions: Record<TSmartAiMessageCacheProvider, JSONObject> = {
88
+ anthropic: {
89
+ anthropic: {
90
+ cacheControl: anthropicCacheControl,
91
+ },
92
+ },
93
+ openrouter: {
94
+ openrouter: {
95
+ cacheControl: { type: 'ephemeral' },
96
+ },
97
+ },
98
+ bedrock: {
99
+ bedrock: {
100
+ cachePoint: { type: 'default' },
101
+ },
102
+ },
103
+ openaiCompatible: {
104
+ openaiCompatible: {
105
+ cache_control: { type: 'ephemeral' },
106
+ },
107
+ },
108
+ copilot: {
109
+ copilot: {
110
+ copilot_cache_control: { type: 'ephemeral' },
111
+ },
112
+ },
113
+ alibaba: {
114
+ alibaba: {
115
+ cacheControl: { type: 'ephemeral' },
116
+ },
117
+ },
118
+ };
119
+
120
+ return providerOptions[provider] as TSmartAiProviderOptions;
121
+ }
122
+
123
+ function shouldUseMessageLevelOptions(provider: TSmartAiMessageCacheProvider): boolean {
124
+ return provider === 'anthropic' || provider === 'bedrock';
125
+ }
126
+
127
+ function applyProviderOptionsDefaults<T extends { providerOptions?: TSmartAiProviderOptions }>(
128
+ item: T,
129
+ defaults: TSmartAiProviderOptions,
130
+ ): T {
131
+ return {
132
+ ...item,
133
+ providerOptions: mergeSmartAiProviderOptions(defaults, item.providerOptions),
134
+ };
135
+ }
136
+
137
+ function isToolApprovalPart(part: unknown): boolean {
138
+ if (!isObject(part)) return false;
139
+ return part.type === 'tool-approval-request' || part.type === 'tool-approval-response';
140
+ }
141
+
142
/**
 * Returns a copy of `message` with the cache marker for `provider` applied.
 * Part-level providers get the marker on the last content part — unless that
 * part is a tool-approval part. Message-level providers (anthropic/bedrock),
 * string content, and empty content all fall back to marking the message.
 */
function applyCacheToMessage(
  message: LanguageModelV3Prompt[number],
  provider: TSmartAiMessageCacheProvider,
  options: ISmartAiCacheOptions,
): LanguageModelV3Prompt[number] {
  const providerOptions = getSmartAiMessageCacheProviderOptions(provider, options);
  const content = message.content;

  if (!shouldUseMessageLevelOptions(provider) && Array.isArray(content) && content.length > 0) {
    const lastIndex = content.length - 1;
    const lastPart = content[lastIndex];
    if (!isToolApprovalPart(lastPart)) {
      // Re-type so the content array can be rebuilt with the marked last part.
      const messageWithArrayContent = message as Extract<LanguageModelV3Prompt[number], { content: unknown[] }>;
      return {
        ...messageWithArrayContent,
        content: content.map((part, index) =>
          index === lastIndex ? applyProviderOptionsDefaults(part, providerOptions) : part,
        ) as typeof messageWithArrayContent.content,
      } as LanguageModelV3Prompt[number];
    }
  }

  // Fallback: attach the cache marker at the message level.
  return applyProviderOptionsDefaults(message, providerOptions);
}
166
+
167
+ export function applySmartAiPromptCaching(
168
+ prompt: LanguageModelV3Prompt,
169
+ options: ISmartAiCacheOptions = {},
170
+ ): LanguageModelV3Prompt {
171
+ const provider = options.provider ?? 'anthropic';
172
+ const targetIndexes = new Set<number>();
173
+ const nonSystemIndexes: number[] = [];
174
+ let systemCount = 0;
175
+
176
+ for (let i = 0; i < prompt.length; i++) {
177
+ const message = prompt[i];
178
+ if (message.role === 'system') {
179
+ if (systemCount < 2) targetIndexes.add(i);
180
+ systemCount++;
181
+ continue;
182
+ }
183
+ nonSystemIndexes.push(i);
184
+ }
185
+
186
+ for (const index of nonSystemIndexes.slice(-2)) {
187
+ targetIndexes.add(index);
188
+ }
189
+
190
+ if (targetIndexes.size === 0) return prompt;
191
+
192
+ return prompt.map((message, index) =>
193
+ targetIndexes.has(index) ? applyCacheToMessage(message, provider, options) : message,
194
+ ) as LanguageModelV3Prompt;
195
+ }
196
+
197
+ export function createSmartAiCachingMiddleware(options: ISmartAiCacheOptions = {}): LanguageModelV3Middleware {
198
+ return {
199
+ specificationVersion: 'v3',
200
+ transformParams: async ({ params }) => ({
201
+ ...params,
202
+ prompt: applySmartAiPromptCaching(params.prompt, options),
203
+ }),
204
+ };
205
+ }
206
+
207
+ function isOpenAiProvider(provider?: string): boolean {
208
+ const providerLower = provider?.toLowerCase() ?? '';
209
+ return providerLower === 'openai' || providerLower.startsWith('openai.') || providerLower.includes('@ai-sdk/openai');
210
+ }
211
+
212
+ export function getSmartAiCacheProviderOptions(input: {
213
+ provider?: string;
214
+ modelId?: string;
215
+ cache?: TSmartAiCacheSetting;
216
+ sessionId?: string;
217
+ }): TSmartAiProviderOptions | undefined {
218
+ const cacheOptions = cacheOptionsFromSetting(input.cache);
219
+ if (!cacheOptions) return undefined;
220
+
221
+ if (isOpenAiProvider(input.provider)) {
222
+ const key = cacheOptions.key ?? input.sessionId;
223
+ return {
224
+ openai: {
225
+ store: false,
226
+ ...(key ? { promptCacheKey: key } : {}),
227
+ ...(cacheOptions.retention === '24h' || cacheOptions.retention === 'in_memory'
228
+ ? { promptCacheRetention: cacheOptions.retention }
229
+ : key
230
+ ? { promptCacheRetention: 'in_memory' }
231
+ : {}),
232
+ },
233
+ };
234
+ }
235
+
236
+ return undefined;
237
+ }
238
+
239
+ export function applySmartAiCacheProviderOptions(input: {
240
+ provider?: string;
241
+ modelId?: string;
242
+ providerOptions?: TSmartAiProviderOptions;
243
+ cache?: TSmartAiCacheSetting;
244
+ sessionId?: string;
245
+ }): TSmartAiProviderOptions | undefined {
246
+ return mergeSmartAiProviderOptions(
247
+ getSmartAiCacheProviderOptions(input),
248
+ input.providerOptions,
249
+ );
250
+ }
@@ -1,7 +1,8 @@
1
1
  import * as plugins from './plugins.js';
2
- import type { ISmartAiOptions, LanguageModelV3 } from './smartai.interfaces.js';
2
+ import type { ISmartAiModelSetup, ISmartAiOptions, LanguageModelV3 } from './smartai.interfaces.js';
3
3
  import { createOllamaModel } from './smartai.provider.ollama.js';
4
4
  import { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';
5
+ import { createOpenAiMaxProviderSettings } from './smartai.auth.openai.js';
5
6
 
6
7
  /**
7
8
  * Returns a LanguageModelV3 for the given provider and model.
@@ -16,11 +17,17 @@ export function getModel(options: ISmartAiOptions): LanguageModelV3 {
16
17
  if (options.promptCaching === false) return base;
17
18
  return plugins.wrapLanguageModel({
18
19
  model: base,
19
- middleware: createAnthropicCachingMiddleware(),
20
+ middleware: createAnthropicCachingMiddleware(
21
+ typeof options.promptCaching === 'object' ? options.promptCaching : undefined,
22
+ ),
20
23
  }) as unknown as LanguageModelV3;
21
24
  }
22
25
  case 'openai': {
23
- const p = plugins.createOpenAI({ apiKey: options.apiKey });
26
+ const p = plugins.createOpenAI(
27
+ options.openAiMaxAuth
28
+ ? createOpenAiMaxProviderSettings(options.openAiMaxAuth)
29
+ : { apiKey: options.apiKey },
30
+ );
24
31
  return p(options.model) as LanguageModelV3;
25
32
  }
26
33
  case 'google': {
@@ -49,3 +56,11 @@ export function getModel(options: ISmartAiOptions): LanguageModelV3 {
49
56
  throw new Error(`Unknown provider: ${(options as ISmartAiOptions).provider}`);
50
57
  }
51
58
  }
59
+
60
+ /**
61
+ * Returns the model plus request-time providerOptions for AI SDK calls.
62
+ */
63
+ export function getModelSetup(options: ISmartAiOptions): ISmartAiModelSetup {
64
+ const model = getModel(options);
65
+ return options.providerOptions ? { model, providerOptions: options.providerOptions } : { model };
66
+ }