@push.rocks/smartai 2.0.1 → 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.smartconfig.json +12 -7
- package/dist_ts/00_commitinfo_data.js +1 -1
- package/dist_ts/index.d.ts +6 -2
- package/dist_ts/index.js +4 -2
- package/dist_ts/smartai.auth.openai.d.ts +30 -0
- package/dist_ts/smartai.auth.openai.js +224 -0
- package/dist_ts/smartai.cache.d.ts +31 -0
- package/dist_ts/smartai.cache.js +179 -0
- package/dist_ts/smartai.classes.smartai.d.ts +5 -1
- package/dist_ts/smartai.classes.smartai.js +13 -3
- package/dist_ts/smartai.interfaces.d.ts +91 -3
- package/dist_ts/smartai.middleware.anthropic.d.ts +2 -1
- package/dist_ts/smartai.middleware.anthropic.js +4 -30
- package/package.json +25 -32
- package/readme.hints.md +9 -4
- package/readme.md +88 -4
- package/ts/00_commitinfo_data.ts +1 -1
- package/ts/index.ts +51 -2
- package/ts/smartai.auth.openai.ts +303 -0
- package/ts/smartai.cache.ts +250 -0
- package/ts/smartai.classes.smartai.ts +18 -3
- package/ts/smartai.interfaces.ts +103 -3
- package/ts/smartai.middleware.anthropic.ts +5 -31
package/ts/smartai.interfaces.ts
CHANGED
@@ -1,4 +1,5 @@
-import type { LanguageModelV3 } from '@ai-sdk/provider';
+import type { JSONObject, JSONValue, LanguageModelV3, LanguageModelV3Prompt } from '@ai-sdk/provider';
+import type { ISmartAiCacheOptions } from './smartai.cache.js';
 
 export type TProvider =
   | 'anthropic'
@@ -10,10 +11,109 @@ export type TProvider =
   | 'perplexity'
   | 'ollama';
 
+export type TOpenAiReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+
+export type TOpenAiTextVerbosity = 'low' | 'medium' | 'high';
+
+export interface IOpenAiMaxIdTokenInfo {
+  email?: string;
+  chatgptPlanType?: string;
+  chatgptUserId?: string;
+  chatgptAccountId?: string;
+  chatgptAccountIsFedramp: boolean;
+  expiresAt?: string;
+  rawJwt: string;
+}
+
+export interface IOpenAiMaxAuthCredentials {
+  accessToken: string;
+  refreshToken?: string;
+  idToken?: string;
+  accountId?: string;
+  idTokenInfo?: IOpenAiMaxIdTokenInfo;
+  baseUrl?: string;
+  originator?: string;
+}
+
+export interface IOpenAiMaxTokenData extends IOpenAiMaxAuthCredentials {
+  refreshToken: string;
+  idToken: string;
+  idTokenInfo: IOpenAiMaxIdTokenInfo;
+}
+
+export interface IOpenAiMaxDeviceCode {
+  verificationUrl: string;
+  userCode: string;
+  deviceAuthId: string;
+  intervalSeconds: number;
+}
+
+export interface IOpenAiMaxAuthOptions {
+  issuer?: string;
+  clientId?: string;
+  fetch?: typeof fetch;
+}
+
+export interface IOpenAiMaxDeviceCodePollOptions extends IOpenAiMaxAuthOptions {
+  timeoutMs?: number;
+  sleep?: (ms: number) => Promise<void>;
+}
+
+export interface IOpenAiMaxCompleteDeviceCodeOptions extends IOpenAiMaxDeviceCodePollOptions {
+  forcedChatGptWorkspaceId?: string;
+}
+
+export interface IOpenAiProviderOptions extends JSONObject {
+  conversation?: string | null;
+  include?: string[] | null;
+  instructions?: string | null;
+  logitBias?: Record<string, number>;
+  logprobs?: boolean | number | null;
+  maxCompletionTokens?: number;
+  maxToolCalls?: number | null;
+  metadata?: JSONObject | null;
+  parallelToolCalls?: boolean | null;
+  previousResponseId?: string | null;
+  prediction?: JSONObject;
+  promptCacheKey?: string | null;
+  promptCacheRetention?: 'in_memory' | '24h' | null;
+  reasoningEffort?: TOpenAiReasoningEffort | null;
+  reasoningSummary?: string | null;
+  safetyIdentifier?: string | null;
+  serviceTier?: 'auto' | 'flex' | 'priority' | 'default' | null;
+  store?: boolean | null;
+  strictJsonSchema?: boolean | null;
+  systemMessageMode?: 'remove' | 'system' | 'developer';
+  textVerbosity?: TOpenAiTextVerbosity | null;
+  truncation?: 'auto' | 'disabled' | null;
+  user?: string | null;
+  forceReasoning?: boolean;
+  [key: string]: JSONValue | undefined;
+}
+
+export type TSmartAiProviderOptions = Record<string, JSONObject> & {
+  openai?: IOpenAiProviderOptions;
+};
+
+export interface ISmartAiModelSetup {
+  model: LanguageModelV3;
+  providerOptions?: TSmartAiProviderOptions;
+}
+
 export interface ISmartAiOptions {
   provider: TProvider;
   model: string;
   apiKey?: string;
+  /**
+   * OpenAI ChatGPT/Codex subscription credentials from the device-code auth flow.
+   * Only used when provider === 'openai'.
+   */
+  openAiMaxAuth?: IOpenAiMaxAuthCredentials;
+  /**
+   * Provider-specific AI SDK generation options.
+   * Pass this to generateText()/streamText() alongside the model.
+   */
+  providerOptions?: TSmartAiProviderOptions;
   /** For Ollama: base URL of the local server. Default: http://localhost:11434 */
   baseUrl?: string;
   /**
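For orientation, here is how the new auth and provider-option fields slot into ISmartAiOptions; a minimal sketch using only the types added in this hunk, where the model id, tokens, account id, and option values are placeholders rather than values taken from the package:

import type { ISmartAiOptions } from './smartai.interfaces.js';

// Sketch only: every literal below is a placeholder.
const options: ISmartAiOptions = {
  provider: 'openai',
  model: 'gpt-5.1', // illustrative model id
  // ChatGPT/Codex subscription credentials from the device-code auth flow,
  // used instead of a plain apiKey (per the openAiMaxAuth JSDoc above):
  openAiMaxAuth: {
    accessToken: '<access-token>',
    refreshToken: '<refresh-token>',
    accountId: '<chatgpt-account-id>',
  },
  // Provider-specific AI SDK generation options:
  providerOptions: {
    openai: {
      reasoningEffort: 'high',
      textVerbosity: 'low',
      serviceTier: 'auto',
    },
  },
};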
@@ -25,7 +125,7 @@ export interface ISmartAiOptions {
    * Enable Anthropic prompt caching on system + recent messages.
    * Only used when provider === 'anthropic'. Default: true.
    */
-  promptCaching?: boolean;
+  promptCaching?: boolean | ISmartAiCacheOptions;
 }
 
 /**
@@ -50,4 +150,4 @@ export interface IOllamaModelOptions {
   think?: boolean;
 }
 
-export type { LanguageModelV3 };
+export type { LanguageModelV3, LanguageModelV3Prompt };
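Per the JSDoc on providerOptions ("Pass this to generateText()/streamText() alongside the model"), a setup bundled as ISmartAiModelSetup can be forwarded to a generation call roughly like this. The sketch assumes the Vercel AI SDK's generateText and an @ai-sdk/openai version whose models implement LanguageModelV3; the model id, cache key, and prompt are illustrative:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import type { ISmartAiModelSetup } from './smartai.interfaces.js';

// Bundle a model with its provider-specific options...
const setup: ISmartAiModelSetup = {
  model: openai('gpt-5.1'), // illustrative model id
  providerOptions: {
    openai: {
      reasoningEffort: 'medium',
      promptCacheKey: 'release-notes', // illustrative cache key
    },
  },
};

// ...then pass both to the generation call, as the JSDoc describes.
const { text } = await generateText({
  model: setup.model,
  providerOptions: setup.providerOptions,
  prompt: 'Summarize the release notes.',
});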
package/ts/smartai.middleware.anthropic.ts
CHANGED

@@ -1,38 +1,12 @@
-import type { LanguageModelV3Middleware, LanguageModelV3Prompt } from '@ai-sdk/provider';
+import type { LanguageModelV3Middleware } from '@ai-sdk/provider';
+import { createSmartAiCachingMiddleware } from './smartai.cache.js';
+import type { ISmartAiCacheOptions } from './smartai.cache.js';
 
 /**
  * Creates middleware that adds Anthropic prompt caching directives.
  * Marks the last system message and last user message with ephemeral cache control,
  * reducing input token cost and latency on repeated calls.
  */
-export function createAnthropicCachingMiddleware(): LanguageModelV3Middleware {
-  return {
-    specificationVersion: 'v3',
-    transformParams: async ({ params }) => {
-      const messages = [...params.prompt] as Array<Record<string, unknown>>;
-
-      // Find the last system message and last user message
-      let lastSystemIdx = -1;
-      let lastUserIdx = -1;
-      for (let i = 0; i < messages.length; i++) {
-        if (messages[i].role === 'system') lastSystemIdx = i;
-        if (messages[i].role === 'user') lastUserIdx = i;
-      }
-
-      const targets = [lastSystemIdx, lastUserIdx].filter(i => i >= 0);
-      for (const idx of targets) {
-        const msg = { ...messages[idx] };
-        msg.providerOptions = {
-          ...(msg.providerOptions as Record<string, unknown> || {}),
-          anthropic: {
-            ...((msg.providerOptions as Record<string, unknown>)?.anthropic as Record<string, unknown> || {}),
-            cacheControl: { type: 'ephemeral' },
-          },
-        };
-        messages[idx] = msg;
-      }
-
-      return { ...params, prompt: messages as unknown as LanguageModelV3Prompt };
-    },
-  };
+export function createAnthropicCachingMiddleware(options: ISmartAiCacheOptions = {}): LanguageModelV3Middleware {
+  return createSmartAiCachingMiddleware({ ...options, provider: 'anthropic' });
 }
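The rewritten createAnthropicCachingMiddleware keeps its public shape (a LanguageModelV3Middleware factory) but now delegates to the shared createSmartAiCachingMiddleware from smartai.cache.ts. A sketch of how such middleware is typically applied, assuming the AI SDK's wrapLanguageModel helper and the @ai-sdk/anthropic provider; the model id and prompts are illustrative, and ISmartAiCacheOptions (defined in smartai.cache.ts, not shown in this diff) is left at its defaults:

import { generateText, wrapLanguageModel } from 'ai';
import { anthropic } from '@ai-sdk/anthropic';
import { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';

// Wrap the base model so every call gets the ephemeral cache-control
// directives described in the middleware's JSDoc.
const cachedModel = wrapLanguageModel({
  model: anthropic('claude-sonnet-4-5'), // illustrative model id
  middleware: createAnthropicCachingMiddleware(),
});

const { text } = await generateText({
  model: cachedModel,
  system: 'You are a release-notes assistant.', // cached across repeated calls
  prompt: 'Summarize the changes in this diff.',
});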