@astro-minimax/ai 0.7.4 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/components/AIChatContainer.d.ts +9 -0
- package/dist/components/AIChatContainer.d.ts.map +1 -0
- package/dist/components/AIChatContainer.js +936 -0
- package/{src → dist}/components/AIChatWidget.astro +1 -1
- package/dist/components/ChatPanel.d.ts +19 -0
- package/dist/components/ChatPanel.d.ts.map +1 -0
- package/dist/components/ChatPanel.js +914 -0
- package/dist/data/index.js +18 -1
- package/dist/fact-registry/index.js +16 -3
- package/dist/index.js +11 -30
- package/dist/intelligence/evidence-analysis.d.ts.map +1 -1
- package/dist/intelligence/index.js +56 -5
- package/dist/intelligence/keyword-extract.d.ts.map +1 -1
- package/dist/middleware/index.js +10 -1
- package/dist/prompt/index.js +10 -4
- package/dist/provider-manager/base.d.ts +1 -0
- package/dist/provider-manager/base.d.ts.map +1 -1
- package/dist/provider-manager/types.d.ts +1 -0
- package/dist/provider-manager/types.d.ts.map +1 -1
- package/dist/providers/index.js +5 -1
- package/dist/search/index.js +48 -6
- package/dist/server/dev-server.js +236 -259
- package/dist/server/index.js +39 -6
- package/dist/stream/index.js +8 -2
- package/package.json +16 -10
- package/dist/cache/global-cache.js +0 -141
- package/dist/cache/index.js +0 -62
- package/dist/cache/kv-adapter.js +0 -102
- package/dist/cache/memory-adapter.js +0 -95
- package/dist/cache/response-cache.js +0 -85
- package/dist/cache/types.js +0 -16
- package/dist/data/metadata-loader.js +0 -66
- package/dist/data/types.js +0 -1
- package/dist/fact-registry/fact-matcher.js +0 -94
- package/dist/fact-registry/prompt-injector.js +0 -57
- package/dist/fact-registry/registry.js +0 -38
- package/dist/fact-registry/types.js +0 -5
- package/dist/intelligence/citation-appender.js +0 -65
- package/dist/intelligence/citation-guard.js +0 -125
- package/dist/intelligence/evidence-analysis.js +0 -88
- package/dist/intelligence/intent-detect.js +0 -131
- package/dist/intelligence/keyword-extract.js +0 -114
- package/dist/intelligence/response-templates.js +0 -116
- package/dist/intelligence/types.js +0 -1
- package/dist/middleware/rate-limiter.js +0 -129
- package/dist/prompt/dynamic-layer.js +0 -67
- package/dist/prompt/prompt-builder.js +0 -12
- package/dist/prompt/semi-static-layer.js +0 -29
- package/dist/prompt/static-layer.js +0 -150
- package/dist/prompt/types.js +0 -1
- package/dist/provider-manager/base.js +0 -47
- package/dist/provider-manager/config.js +0 -134
- package/dist/provider-manager/index.js +0 -6
- package/dist/provider-manager/manager.js +0 -121
- package/dist/provider-manager/mock.js +0 -56
- package/dist/provider-manager/openai.js +0 -112
- package/dist/provider-manager/types.js +0 -6
- package/dist/provider-manager/workers.js +0 -74
- package/dist/providers/mock.js +0 -234
- package/dist/search/idf.js +0 -31
- package/dist/search/search-api.js +0 -119
- package/dist/search/search-index.js +0 -35
- package/dist/search/search-utils.js +0 -122
- package/dist/search/session-cache.js +0 -92
- package/dist/search/types.js +0 -1
- package/dist/search/vector-reranker.js +0 -135
- package/dist/server/chat-handler.js +0 -590
- package/dist/server/errors.js +0 -41
- package/dist/server/metadata-init.js +0 -47
- package/dist/server/notify.js +0 -74
- package/dist/server/stream-helpers.js +0 -197
- package/dist/server/types.js +0 -13
- package/dist/stream/mock-stream.js +0 -27
- package/dist/stream/response.js +0 -22
- package/dist/utils/i18n.js +0 -164
- package/src/components/AIChatContainer.tsx +0 -31
- package/src/components/ChatPanel.tsx +0 -866
- package/src/providers/mock.ts +0 -240
- package/src/server/types.ts +0 -89
- package/src/utils/i18n.ts +0 -238
|
// Provider configuration parsing for the provider-manager.
//
// Two configuration sources are supported, in priority order:
//   1. AI_PROVIDERS — a JSON array of provider config objects.
//   2. Legacy flat environment variables: AI_BASE_URL/AI_API_KEY for an
//      OpenAI-compatible endpoint, and/or a Workers AI binding on `env`.

export const DEFAULT_WORKERS_BINDING_NAME = 'minimaxAI';
const DEFAULT_WEIGHT = 100;
const DEFAULT_TIMEOUT = 30000; // ms
const DEFAULT_MODEL = 'gpt-4o-mini';

/** True when both legacy OpenAI-compatible env vars are present and non-empty. */
function hasOpenAIConfig(env) {
    return !!(env.AI_BASE_URL && env.AI_API_KEY);
}

/** True when a Workers AI binding is attached to the environment. */
function hasWorkersAIBinding(env) {
    const bindingName = env.AI_BINDING_NAME || DEFAULT_WORKERS_BINDING_NAME;
    return !!env[bindingName];
}

/**
 * Build an OpenAI-compatible provider config from legacy env vars.
 * @returns {object|null} config, or null when AI_BASE_URL/AI_API_KEY are not both set.
 */
function createOpenAIConfigFromEnv(env) {
    if (!hasOpenAIConfig(env))
        return null;
    return {
        id: 'openai-default',
        type: 'openai',
        weight: DEFAULT_WEIGHT - 10, // Lower priority than Workers AI (fallback)
        baseURL: env.AI_BASE_URL,
        apiKey: env.AI_API_KEY,
        model: env.AI_MODEL || DEFAULT_MODEL,
        keywordModel: env.AI_KEYWORD_MODEL,
        evidenceModel: env.AI_EVIDENCE_MODEL,
        timeout: DEFAULT_TIMEOUT,
        enabled: true,
    };
}

/**
 * Build a Workers AI provider config from the environment binding.
 * @returns {object|null} config, or null when the binding is absent.
 */
function createWorkersAIConfigFromEnv(env) {
    const bindingName = env.AI_BINDING_NAME || DEFAULT_WORKERS_BINDING_NAME;
    if (!env[bindingName])
        return null;
    return {
        id: 'workers-ai-default',
        type: 'workers',
        weight: DEFAULT_WEIGHT,
        bindingName,
        model: env.AI_WORKERS_MODEL || '@cf/zai-org/glm-4.7-flash',
        // `|| undefined` normalizes an empty string to undefined so adapter
        // defaults (fall back to `model`) kick in downstream.
        keywordModel: env.AI_WORKERS_MODEL || undefined,
        evidenceModel: env.AI_WORKERS_MODEL || undefined,
        timeout: DEFAULT_TIMEOUT,
        enabled: true,
    };
}

/**
 * Parse the AI_PROVIDERS JSON string into normalized provider configs.
 * Entries of unknown `type` are dropped; defaults are filled for weight,
 * timeout, enabled, id and (for workers) bindingName.
 * @returns {object[]|null} configs, or null when the string is not valid
 *   JSON or does not decode to an array.
 */
function parseAIProvidersJSON(jsonString) {
    try {
        const configs = JSON.parse(jsonString);
        if (!Array.isArray(configs))
            return null;
        return configs.map((config, index) => {
            const weight = config.weight ?? DEFAULT_WEIGHT;
            const timeout = config.timeout ?? DEFAULT_TIMEOUT;
            const enabled = config.enabled ?? true;
            if (config.type === 'openai') {
                return {
                    ...config,
                    weight,
                    timeout,
                    enabled,
                    id: config.id || `openai-${index}`,
                };
            }
            if (config.type === 'workers') {
                return {
                    ...config,
                    weight,
                    timeout,
                    enabled,
                    id: config.id || `workers-${index}`,
                    bindingName: config.bindingName || DEFAULT_WORKERS_BINDING_NAME,
                };
            }
            return null; // unknown provider type — filtered out below
        }).filter((c) => c !== null);
    }
    catch (e) {
        // Previously this swallowed the error silently, making a malformed
        // AI_PROVIDERS value indistinguishable from "not configured".
        console.warn('[ProviderConfig] Failed to parse AI_PROVIDERS JSON:', e instanceof Error ? e.message : String(e));
        return null;
    }
}

/**
 * Resolve all provider configs from the environment.
 * AI_PROVIDERS (JSON) wins when it yields at least one config; otherwise
 * the legacy env-var / binding configs are used.
 * @returns {object[]} possibly-empty list of provider configs.
 */
export function parseProviderConfigs(env) {
    // Priority 1: AI_PROVIDERS JSON string
    if (env.AI_PROVIDERS) {
        const configs = parseAIProvidersJSON(env.AI_PROVIDERS);
        if (configs && configs.length > 0) {
            return configs;
        }
    }
    // Priority 2: Legacy environment variables
    const configs = [];
    const openaiConfig = createOpenAIConfigFromEnv(env);
    if (openaiConfig) {
        configs.push(openaiConfig);
    }
    const workersConfig = createWorkersAIConfigFromEnv(env);
    if (workersConfig) {
        configs.push(workersConfig);
    }
    return configs;
}

/**
 * Validate one provider config.
 * @returns {string|null} a human-readable error message, or null when valid.
 */
export function validateProviderConfig(config) {
    if (!config.id) {
        return 'Provider config missing id';
    }
    if (!config.model) {
        return `Provider ${config.id} missing model`;
    }
    if (config.type === 'openai') {
        const openaiConfig = config;
        if (!openaiConfig.baseURL) {
            return `OpenAI provider ${config.id} missing baseURL`;
        }
        if (!openaiConfig.apiKey) {
            return `OpenAI provider ${config.id} missing apiKey`;
        }
    }
    if (config.type === 'workers') {
        const workersConfig = config;
        if (!workersConfig.bindingName) {
            return `Workers AI provider ${config.id} missing bindingName`;
        }
    }
    return null;
}

/** Count the providers that resolve from `env` and are not explicitly disabled. */
export function getAvailableProvidersCount(env) {
    const configs = parseProviderConfigs(env);
    return configs.filter(c => c.enabled !== false).length;
}

/**
 * Cheap check for "is any provider configured at all?" — mirrors the
 * priority logic of parseProviderConfigs without building legacy configs.
 */
export function hasAnyProviderConfigured(env) {
    if (env.AI_PROVIDERS) {
        const configs = parseAIProvidersJSON(env.AI_PROVIDERS);
        if (configs && configs.length > 0)
            return true;
    }
    return hasOpenAIConfig(env) || hasWorkersAIBinding(env);
}
// Public barrel for the provider-manager module.
// NOTE: the order of these re-exports fixes the evaluation order of the
// underlying modules — do not reorder casually.
export { ProviderManager, getProviderManager, resetProviderManager } from './manager.js';
export { BaseProviderAdapter } from './base.js';
export { OpenAIAdapter } from './openai.js';
export { WorkersAIAdapter } from './workers.js';
export { MockAdapter } from './mock.js';
// Env-based configuration helpers and the default Workers AI binding name.
export { parseProviderConfigs, validateProviderConfig, hasAnyProviderConfigured, DEFAULT_WORKERS_BINDING_NAME } from './config.js';
import { parseProviderConfigs, validateProviderConfig } from './config.js';
import { OpenAIAdapter } from './openai.js';
import { WorkersAIAdapter } from './workers.js';
import { MockAdapter } from './mock.js';
/**
 * Weighted multi-provider front end with health-based failover.
 *
 * Adapters are built from env-derived configs, sorted by descending weight,
 * and tried in order on each streamText() call. When every real provider
 * fails (and mock fallback is enabled) a MockAdapter answers instead.
 */
export class ProviderManager {
    providers = []; // adapters sorted by descending weight (see initializeProviders)
    mockAdapter; // last-resort fallback used when all real providers fail
    options;
    /**
     * @param env     environment object consumed by parseProviderConfigs
     * @param options optional thresholds and observer callbacks
     *                (onProviderSwitch / onStreamError / onHealthChange)
     */
    constructor(env, options) {
        this.options = {
            unhealthyThreshold: options?.unhealthyThreshold ?? 3,
            healthRecoveryTTL: options?.healthRecoveryTTL ?? 60000,
            enableMockFallback: options?.enableMockFallback ?? true,
            onProviderSwitch: options?.onProviderSwitch,
            onStreamError: options?.onStreamError,
            onHealthChange: options?.onHealthChange,
        };
        this.mockAdapter = new MockAdapter();
        this.initializeProviders(env);
    }
    // Build adapters from env configs; invalid or disabled configs are
    // skipped with a warning rather than failing construction.
    initializeProviders(env) {
        const configs = parseProviderConfigs(env);
        for (const config of configs) {
            if (config.enabled === false)
                continue;
            const validationError = validateProviderConfig(config);
            if (validationError) {
                console.warn(`[ProviderManager] Skipping invalid config: ${validationError}`);
                continue;
            }
            try {
                const adapter = this.createAdapter(config, env);
                if (adapter) {
                    this.providers.push(adapter);
                }
            }
            catch (error) {
                // e.g. WorkersAIAdapter throws when its binding is missing
                console.warn(`[ProviderManager] Failed to create adapter for ${config.id}:`, error);
            }
        }
        // Highest weight first — this is the failover order used below.
        this.providers.sort((a, b) => b.weight - a.weight);
    }
    // Map a config's `type` to a concrete adapter; unknown types yield null.
    createAdapter(config, env) {
        switch (config.type) {
            case 'openai':
                return new OpenAIAdapter(config);
            case 'workers':
                return new WorkersAIAdapter(config, env);
            default:
                return null;
        }
    }
    /** First provider (in weight order) whose isAvailable() resolves truthy, or null. */
    async getAvailableProvider() {
        for (const provider of this.providers) {
            if (await provider.isAvailable()) {
                return provider;
            }
        }
        return null;
    }
    /**
     * Stream a completion, failing over across providers in weight order.
     * Each failure is recorded on the provider (feeding its health state)
     * and surfaced via onStreamError/onHealthChange. On a success that
     * follows at least one failure, onProviderSwitch is notified.
     * Falls back to the mock adapter when enabled; otherwise rethrows the
     * last error (or a generic one when no provider was even tried).
     */
    async streamText(options) {
        let lastProviderId = null; // id of the most recent FAILED provider
        let lastError = null;
        for (const provider of this.providers) {
            const isAvailable = await provider.isAvailable();
            if (!isAvailable)
                continue;
            try {
                const result = await provider.streamText(options);
                provider.recordSuccess();
                // lastProviderId is only non-null after a prior failure, so this
                // fires exactly when a fallback provider succeeded.
                if (lastProviderId && lastProviderId !== provider.id) {
                    this.options.onProviderSwitch?.(lastProviderId, provider.id, 'fallback success');
                }
                return result;
            }
            catch (error) {
                lastError = error instanceof Error ? error : new Error(String(error));
                lastProviderId = provider.id;
                provider.recordFailure(lastError);
                this.options.onStreamError?.(provider.id, lastError);
                if (!provider.getHealth().healthy) {
                    this.options.onHealthChange?.(provider.id, false);
                }
            }
        }
        if (this.options.enableMockFallback) {
            this.options.onProviderSwitch?.(lastProviderId, 'mock', 'all providers failed');
            return this.mockAdapter.streamText(options);
        }
        throw lastError || new Error('No providers available');
    }
    /** Snapshot of each configured provider's identity, weight, health and model. */
    getProviderStatus() {
        return this.providers.map(provider => ({
            id: provider.id,
            type: provider.type,
            weight: provider.weight,
            enabled: true, // disabled configs were never instantiated
            health: provider.getHealth(),
            model: provider.model,
        }));
    }
    /** True when at least one real (non-mock) provider was constructed. */
    hasProviders() {
        return this.providers.length > 0;
    }
    /** Number of real (non-mock) providers. */
    getProviderCount() {
        return this.providers.length;
    }
    /** Alias for getAvailableProvider(), kept for API compatibility. */
    async getAvailableAdapter() {
        return this.getAvailableProvider();
    }
}
// Module-level singleton for ProviderManager.
// NOTE: `env` and `options` are consulted only on the very first call;
// later calls return the cached instance unchanged.
let managerInstance = null;

/** Return the shared ProviderManager, constructing it lazily on first use. */
export function getProviderManager(env, options) {
    managerInstance ??= new ProviderManager(env, options);
    return managerInstance;
}

/** Drop the cached instance so the next getProviderManager call rebuilds it. */
export function resetProviderManager() {
    managerInstance = null;
}
import { BaseProviderAdapter } from './base.js';
import { getMockResponse } from '../providers/mock.js';
const MOCK_WEIGHT = 0; // always lowest priority — only used as last resort
const CHAR_DELAY_MS = 15; // base per-chunk delay to simulate streaming latency
/**
 * Always-available fallback provider that streams a canned response as
 * server-sent events, mimicking the shape of a real UI message stream
 * (text-start / text-delta / text-end / finish).
 */
export class MockAdapter extends BaseProviderAdapter {
    id = 'mock';
    type = 'mock';
    weight = MOCK_WEIGHT;
    model = 'mock';
    keywordModel = 'mock';
    evidenceModel = 'mock';
    timeout = 0;
    constructor() {
        // Effectively never mark the mock unhealthy.
        super({ unhealthyThreshold: 999 });
    }
    /** The mock is always available by definition. */
    async isAvailable() {
        return true;
    }
    /**
     * Stream a canned response for `userQuestion` (localized via `lang`).
     * Chunk sizes (1–3 chars) and inter-chunk delays are randomized to make
     * the stream look organic; output is therefore nondeterministic in
     * timing/chunking but the concatenated text is fixed per question.
     */
    async streamText(options) {
        const { userQuestion = '', lang = 'zh' } = options;
        const text = getMockResponse(userQuestion, lang);
        const partId = `mock-${Date.now()}`;
        const encoder = new TextEncoder();
        const stream = new ReadableStream({
            async start(controller) {
                // Emit one SSE `data:` frame per event object.
                const write = (event) => controller.enqueue(encoder.encode(`data: ${JSON.stringify(event)}\n\n`));
                write({ type: 'text-start', id: partId });
                for (let i = 0; i < text.length;) {
                    // ~30% 3-char chunks, ~35% 2-char, rest single chars.
                    const chunkSize = Math.random() < 0.3 ? 3 : Math.random() < 0.5 ? 2 : 1;
                    const chunk = text.slice(i, i + chunkSize);
                    i += chunkSize;
                    controller.enqueue(encoder.encode(`data: ${JSON.stringify({ type: 'text-delta', id: partId, delta: chunk })}\n\n`));
                    await new Promise(r => setTimeout(r, CHAR_DELAY_MS + Math.random() * 20));
                }
                controller.enqueue(encoder.encode(`data: ${JSON.stringify({ type: 'text-end', id: partId })}\n\n`));
                controller.enqueue(encoder.encode(`data: ${JSON.stringify({ type: 'finish', finishReason: 'stop' })}\n\n`));
                controller.close();
            },
        });
        const response = new Response(stream, {
            headers: {
                'Content-Type': 'text/event-stream; charset=utf-8',
                'Cache-Control': 'no-cache, no-store',
                'Access-Control-Allow-Origin': '*',
            },
        });
        // Same result shape as the real adapters, flagged with isMock.
        return {
            toUIMessageStreamResponse: () => response,
            providerId: this.id,
            isMock: true,
        };
    }
    /** The mock has no underlying SDK provider to expose. */
    getProvider() {
        throw new Error('Mock provider does not support chatModel interface');
    }
}
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { streamText, convertToModelMessages } from 'ai';
import { BaseProviderAdapter } from './base.js';
// NOTE(review): this flag is only set after a successful proxy install, but
// ensureProxySetup() below already memoizes the whole promise, so the flag
// is effectively redundant — confirm before relying on it elsewhere.
let proxyInitialized = false;
/**
 * Install an undici ProxyAgent as the global fetch dispatcher when an
 * http(s)_proxy env var is set. No-ops (without throwing) in environments
 * where process/env or the undici APIs are unavailable, e.g. Cloudflare
 * Edge Runtime with nodejs_compat.
 */
async function setupGlobalProxy() {
    if (proxyInitialized)
        return;
    // Check if running in a Node.js-like environment with proxy configured
    // In Cloudflare Edge Runtime with nodejs_compat, process exists but undici APIs don't work
    if (typeof process === 'undefined' || !process.env) {
        return;
    }
    const proxyUrl = process.env.https_proxy || process.env.HTTPS_PROXY ||
        process.env.http_proxy || process.env.HTTP_PROXY;
    if (!proxyUrl) {
        return;
    }
    try {
        // Dynamic import - will fail or return stubs in Cloudflare Edge Runtime
        const undici = await import('undici');
        // Verify the APIs actually exist (they won't in Edge Runtime polyfills)
        if (typeof undici.setGlobalDispatcher !== 'function' ||
            typeof undici.ProxyAgent !== 'function') {
            console.log('[OpenAIAdapter] undici APIs not available, skipping proxy setup (likely Edge Runtime)');
            return;
        }
        undici.setGlobalDispatcher(new undici.ProxyAgent(proxyUrl));
        console.log('[OpenAIAdapter] Global proxy dispatcher set:', proxyUrl);
        proxyInitialized = true;
    }
    catch (e) {
        // Expected in Cloudflare Edge Runtime - undici import may fail or APIs may not exist
        console.log('[OpenAIAdapter] Proxy setup skipped:', e instanceof Error ? e.message : String(e));
    }
}
// Memoized so the proxy probe/install runs at most once per isolate.
let proxySetupPromise = null;
/** Kick off (or join) the one-time proxy setup; returns the shared promise. */
function ensureProxySetup() {
    if (!proxySetupPromise) {
        proxySetupPromise = setupGlobalProxy();
    }
    return proxySetupPromise;
}
/**
 * Adapter for any OpenAI-compatible HTTP endpoint, built on the AI SDK's
 * createOpenAICompatible provider. Inherits failure-count health tracking
 * from BaseProviderAdapter.
 */
export class OpenAIAdapter extends BaseProviderAdapter {
    id;
    type = 'openai';
    weight;
    model; // primary chat model
    keywordModel; // falls back to `model` when not configured
    evidenceModel; // falls back to `keywordModel` when not configured
    timeout; // ms; drives the AbortController below
    provider;
    config;
    constructor(config) {
        super({
            // A couple of extra strikes beyond configured retries before the
            // provider is considered unhealthy.
            unhealthyThreshold: config.maxRetries ? config.maxRetries + 2 : 3,
        });
        this.id = config.id;
        this.weight = config.weight ?? 100;
        this.model = config.model;
        this.keywordModel = config.keywordModel ?? config.model;
        this.evidenceModel = config.evidenceModel ?? this.keywordModel;
        this.timeout = config.timeout ?? 30000;
        this.config = config;
        this.provider = createOpenAICompatible({
            name: `openai-${config.id}`,
            baseURL: config.baseURL,
            apiKey: config.apiKey,
            includeUsage: true,
        });
        // Fire-and-forget: proxy setup must not block or fail construction.
        ensureProxySetup().catch(() => { });
    }
    /**
     * Start a streaming chat completion.
     * Links the caller's abortSignal and a local timeout into one
     * AbortController passed to the SDK.
     *
     * NOTE(review): streamText appears to return before the stream is
     * consumed, so the clearTimeout below likely fires almost immediately —
     * the timeout would then guard only call setup, not the full stream.
     * Also, the 'abort' listener added to abortSignal is never removed.
     * Confirm both against the AI SDK's semantics.
     */
    async streamText(options) {
        await ensureProxySetup();
        const { system, messages, temperature = 0.7, maxOutputTokens, topP, abortSignal, onError } = options;
        const abortController = new AbortController();
        const timeoutId = setTimeout(() => abortController.abort(), this.timeout);
        if (abortSignal) {
            abortSignal.addEventListener('abort', () => abortController.abort());
        }
        try {
            const result = streamText({
                model: this.provider.chatModel(this.model),
                system,
                messages: await convertToModelMessages(messages),
                temperature,
                maxOutputTokens,
                topP,
                abortSignal: abortController.signal,
                onError: ({ error }) => {
                    // Normalize non-Error throwables before surfacing.
                    onError?.(error instanceof Error ? error : new Error(String(error)));
                },
            });
            const streamResult = {
                toUIMessageStreamResponse: (responseOptions) => result.toUIMessageStreamResponse(responseOptions),
                providerId: this.id,
                isMock: false,
            };
            clearTimeout(timeoutId);
            return streamResult;
        }
        catch (error) {
            clearTimeout(timeoutId);
            throw error;
        }
    }
    /** Shallow copy of the config this adapter was built from. */
    getConfig() {
        return { ...this.config };
    }
    /** Underlying AI SDK provider (exposes chatModel etc.). */
    getProvider() {
        return this.provider;
    }
}
import { createWorkersAI } from 'workers-ai-provider';
import { streamText, convertToModelMessages } from 'ai';
import { BaseProviderAdapter } from './base.js';
/**
 * Adapter for Cloudflare Workers AI, driven by a binding on the incoming
 * environment. Inherits failure-count health tracking from
 * BaseProviderAdapter. Throws at construction when the binding is missing.
 */
export class WorkersAIAdapter extends BaseProviderAdapter {
    id;
    type = 'workers';
    weight;
    model; // primary chat model, e.g. an @cf/... model id
    keywordModel; // falls back to `model` when not configured
    evidenceModel; // falls back to `keywordModel` when not configured
    timeout; // ms; drives the AbortController below
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    provider;
    config;
    constructor(config, env) {
        super({
            // A couple of extra strikes beyond configured retries before the
            // provider is considered unhealthy.
            unhealthyThreshold: config.maxRetries ? config.maxRetries + 2 : 3,
        });
        this.id = config.id;
        this.weight = config.weight ?? 90;
        this.model = config.model;
        this.keywordModel = config.keywordModel ?? config.model;
        this.evidenceModel = config.evidenceModel ?? this.keywordModel;
        this.timeout = config.timeout ?? 30000;
        this.config = config;
        const binding = env[config.bindingName];
        if (!binding) {
            // Fail fast so ProviderManager can skip this adapter with a warning.
            throw new Error(`Workers AI binding '${config.bindingName}' not found in environment`);
        }
        this.provider = createWorkersAI({ binding: binding });
    }
    /**
     * Start a streaming chat completion via Workers AI.
     * Links the caller's abortSignal and a local timeout into one
     * AbortController passed to the SDK.
     *
     * NOTE(review): as in OpenAIAdapter, clearTimeout runs as soon as
     * streamText returns (likely before the stream is consumed), and the
     * 'abort' listener on abortSignal is never removed — confirm intent.
     */
    async streamText(options) {
        const { system, messages, temperature = 0.7, maxOutputTokens, topP, abortSignal, onError } = options;
        const abortController = new AbortController();
        const timeoutId = setTimeout(() => abortController.abort(), this.timeout);
        if (abortSignal) {
            abortSignal.addEventListener('abort', () => abortController.abort());
        }
        try {
            // safePrompt mirrors getProvider() below — keep the two in sync.
            const model = this.provider(this.model, { safePrompt: true });
            const result = streamText({
                model,
                system,
                messages: await convertToModelMessages(messages),
                temperature,
                maxOutputTokens,
                topP,
                abortSignal: abortController.signal,
                onError: ({ error }) => {
                    // Normalize non-Error throwables before surfacing.
                    onError?.(error instanceof Error ? error : new Error(String(error)));
                },
            });
            const streamResult = {
                toUIMessageStreamResponse: (responseOptions) => result.toUIMessageStreamResponse(responseOptions),
                providerId: this.id,
                isMock: false,
            };
            clearTimeout(timeoutId);
            return streamResult;
        }
        catch (error) {
            clearTimeout(timeoutId);
            throw error;
        }
    }
    /** Shallow copy of the config this adapter was built from. */
    getConfig() {
        return { ...this.config };
    }
    /** Minimal provider facade matching OpenAIAdapter's chatModel interface. */
    getProvider() {
        return {
            chatModel: (modelId) => this.provider(modelId, { safePrompt: true }),
        };
    }
}