@astro-minimax/ai 0.8.2 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/dist/cache/global-cache.js +145 -0
  2. package/dist/cache/index.js +96 -0
  3. package/dist/cache/kv-adapter.js +99 -0
  4. package/dist/cache/memory-adapter.js +97 -0
  5. package/dist/cache/response-cache.js +87 -0
  6. package/dist/cache/types.js +8 -0
  7. package/dist/data/metadata-loader.js +48 -0
  8. package/dist/data/types.js +0 -0
  9. package/dist/fact-registry/fact-matcher.js +128 -0
  10. package/dist/fact-registry/prompt-injector.js +54 -0
  11. package/dist/fact-registry/registry.js +41 -0
  12. package/dist/fact-registry/types.js +0 -0
  13. package/dist/intelligence/citation-appender.js +63 -0
  14. package/dist/intelligence/citation-guard.js +108 -0
  15. package/dist/intelligence/evidence-analysis.js +79 -0
  16. package/dist/intelligence/intent-detect.js +93 -0
  17. package/dist/intelligence/keyword-extract.js +89 -0
  18. package/dist/intelligence/response-templates.js +117 -0
  19. package/dist/intelligence/types.js +0 -0
  20. package/dist/middleware/rate-limiter.js +110 -0
  21. package/dist/prompt/dynamic-layer.js +64 -0
  22. package/dist/prompt/prompt-builder.js +15 -0
  23. package/dist/prompt/semi-static-layer.js +28 -0
  24. package/dist/prompt/static-layer.js +153 -0
  25. package/dist/prompt/types.js +0 -0
  26. package/dist/provider-manager/base.js +53 -0
  27. package/dist/provider-manager/config.js +135 -0
  28. package/dist/provider-manager/index.js +19 -0
  29. package/dist/provider-manager/manager.js +122 -0
  30. package/dist/provider-manager/mock.js +77 -0
  31. package/dist/provider-manager/openai.js +106 -0
  32. package/dist/provider-manager/types.js +0 -0
  33. package/dist/provider-manager/workers.js +76 -0
  34. package/dist/providers/mock.js +227 -0
  35. package/dist/search/idf.js +24 -0
  36. package/dist/search/search-api.js +94 -0
  37. package/dist/search/search-index.js +32 -0
  38. package/dist/search/search-utils.js +81 -0
  39. package/dist/search/session-cache.js +96 -0
  40. package/dist/search/types.js +0 -0
  41. package/dist/search/vector-reranker.js +103 -0
  42. package/dist/server/chat-handler.js +603 -0
  43. package/dist/server/errors.js +46 -0
  44. package/dist/server/metadata-init.js +49 -0
  45. package/dist/server/notify.js +70 -0
  46. package/dist/server/stream-helpers.js +202 -0
  47. package/dist/server/types.js +16 -0
  48. package/dist/stream/mock-stream.js +26 -0
  49. package/dist/stream/response.js +21 -0
  50. package/dist/utils/i18n.js +154 -0
  51. package/package.json +3 -3
@@ -0,0 +1,70 @@
1
+ import { createNotifier } from "@astro-minimax/notify";
2
// Module-level memo: the notifier is built once per worker and reused.
let notifierInstance = null;

/**
 * Lazily build (and cache) the notifier from environment configuration.
 *
 * A provider is enabled only when its full variable set is present
 * (telegram: token + chat id; email: api key + from + to). Returns null —
 * with a warning — when none of the trigger variables exist at all.
 *
 * @param env environment/bindings object carrying NOTIFY_* variables
 * @returns the shared notifier instance, or null when unconfigured
 */
function getNotifier(env) {
  if (notifierInstance) return notifierInstance;
  const telegramReady = Boolean(env.NOTIFY_TELEGRAM_BOT_TOKEN && env.NOTIFY_TELEGRAM_CHAT_ID);
  const webhookReady = Boolean(env.NOTIFY_WEBHOOK_URL);
  const emailReady = Boolean(env.NOTIFY_RESEND_API_KEY && env.NOTIFY_RESEND_FROM && env.NOTIFY_RESEND_TO);
  // Same trigger condition as before: a partially-set provider (e.g. a bot
  // token without a chat id) still counts as "configured" here.
  const hasConfig = env.NOTIFY_TELEGRAM_BOT_TOKEN || env.NOTIFY_WEBHOOK_URL || env.NOTIFY_RESEND_API_KEY;
  if (!hasConfig) {
    console.warn("[notify] No notification providers configured. Missing environment variables: NOTIFY_TELEGRAM_BOT_TOKEN, NOTIFY_WEBHOOK_URL, or NOTIFY_RESEND_API_KEY");
    return null;
  }
  const providers = [
    ...(telegramReady ? ["telegram"] : []),
    ...(webhookReady ? ["webhook"] : []),
    ...(emailReady ? ["email"] : [])
  ];
  console.log(`[notify] Initializing notifier with providers: ${providers.join(", ") || "none"}`);
  notifierInstance = createNotifier({
    telegram: telegramReady ? {
      botToken: env.NOTIFY_TELEGRAM_BOT_TOKEN,
      chatId: env.NOTIFY_TELEGRAM_CHAT_ID
    } : undefined,
    webhook: webhookReady ? {
      url: env.NOTIFY_WEBHOOK_URL
    } : undefined,
    email: emailReady ? {
      provider: "resend",
      apiKey: env.NOTIFY_RESEND_API_KEY,
      from: env.NOTIFY_RESEND_FROM,
      to: env.NOTIFY_RESEND_TO
    } : undefined
  });
  return notifierInstance;
}
32
/**
 * Concatenate the plain-text parts of a UI message.
 *
 * @param message UI message; only its `parts` array is inspected
 * @returns the joined text of all `type === "text"` parts, or "" when the
 *          message has no parts array
 */
function getMessageText(message) {
  if (!Array.isArray(message.parts)) return "";
  let text = "";
  for (const part of message.parts) {
    if (part.type === "text") text += part.text;
  }
  return text;
}
38
/**
 * Send a best-effort notification for a completed AI chat round.
 *
 * Skips (resolving to null, with a warning) when no notifier is configured
 * or no user message exists; delivery failures are logged and swallowed, so
 * the returned promise never rejects.
 *
 * @param options { env, sessionId, messages, aiResponse, referencedArticles,
 *                  model, usage, timing }
 * @returns promise resolving to the notifier result, or null when skipped/failed
 */
function notifyAiChat(options) {
  const { env, sessionId, messages, aiResponse, referencedArticles, model, usage, timing } = options;
  const notifier = getNotifier(env);
  if (!notifier) {
    console.warn("[notify] AI chat notification skipped: no notifier available. Check environment variables.");
    return Promise.resolve(null);
  }
  const userMessages = messages.filter((m) => m.role === "user");
  const lastUserMessage = userMessages[userMessages.length - 1];
  if (!lastUserMessage) {
    console.warn("[notify] AI chat notification skipped: no user message found in messages array");
    return Promise.resolve(null);
  }
  const payload = {
    sessionId,
    // Round number = how many user turns have happened so far.
    roundNumber: userMessages.length,
    userMessage: getMessageText(lastUserMessage),
    // Cap the echoed answer so notification payloads stay small.
    aiResponse: aiResponse?.slice(0, 500),
    referencedArticles,
    model,
    usage,
    timing,
    siteUrl: env.SITE_URL
  };
  return notifier.aiChat(payload).catch((error) => {
    console.error("[notify] AI chat notification failed:", error);
    return null;
  });
}
68
+ export {
69
+ notifyAiChat
70
+ };
@@ -0,0 +1,202 @@
1
+ import {
2
+ streamText,
3
+ convertToModelMessages
4
+ } from "ai";
5
+ import { t } from "../utils/i18n.js";
6
+ import { createChatStatusData } from "./types.js";
7
+ import { createResponsePlaybackGenerator } from "../cache/response-cache.js";
8
/**
 * Emit a "search" stage status event on the UI message stream.
 *
 * @param writer UI message stream writer
 * @param count  number of matched items, interpolated into the i18n message
 * @param lang   UI locale
 */
function writeSearchStatus(writer, count, lang) {
  const messageMetadata = createChatStatusData({
    stage: "search",
    message: t("ai.status.found", lang, { count }),
    progress: 40
  });
  writer.write({ type: "message-metadata", messageMetadata });
}
18
/**
 * Emit an "answer" stage status event ("generating response...").
 *
 * @param writer   UI message stream writer
 * @param lang     UI locale
 * @param progress progress percentage, defaults to 60
 */
function writeGeneratingStatus(writer, lang, progress = 60) {
  const messageMetadata = createChatStatusData({
    stage: "answer",
    message: t("ai.status.generating", lang),
    progress
  });
  writer.write({ type: "message-metadata", messageMetadata });
}
28
/**
 * Emit the terminal status event: progress 100 with done: true.
 * Reuses the "generating" message text, matching the live-stream wording.
 *
 * @param writer UI message stream writer
 * @param lang   UI locale
 */
function writeDoneStatus(writer, lang) {
  const messageMetadata = createChatStatusData({
    stage: "answer",
    message: t("ai.status.generating", lang),
    progress: 100,
    done: true
  });
  writer.write({ type: "message-metadata", messageMetadata });
}
39
/**
 * Emit up to `max` source-url events, one per referenced article.
 * A failing write for one article is swallowed so a single bad entry never
 * aborts the rest of the stream (best-effort by design).
 *
 * @param writer   UI message stream writer
 * @param articles articles with `title` and optional `url` ("#" fallback)
 * @param max      maximum number of sources to emit, defaults to 3
 */
function writeSourceArticles(writer, articles, max = 3) {
  articles.slice(0, max).forEach((article) => {
    try {
      writer.write({
        type: "source-url",
        sourceId: `source-${article.title}`,
        url: article.url ?? "#",
        title: article.title
      });
    } catch {
      // ignore: best-effort source emission
    }
  });
}
52
/**
 * Emit a complete text block (start/delta/end) carrying one string.
 * The shared timestamp-based id ties the three events together client-side.
 *
 * @param writer   UI message stream writer
 * @param text     text payload for the single delta event
 * @param idPrefix id prefix, defaults to "text"
 */
function writeTextChunk(writer, text, idPrefix = "text") {
  const id = `${idPrefix}-${Date.now()}`;
  const events = [
    { type: "text-start", id },
    { type: "text-delta", id, delta: text },
    { type: "text-end", id }
  ];
  for (const event of events) writer.write(event);
}
58
/**
 * Emit the stream-terminating finish event.
 *
 * @param writer UI message stream writer
 * @param reason finish reason, defaults to "stop"
 */
function writeFinish(writer, reason = "stop") {
  const event = { type: "finish", finishReason: reason };
  writer.write(event);
}
61
/**
 * Stream a model completion into the UI message stream and collect telemetry.
 *
 * Merges the AI SDK stream into `writer` (suppressing the SDK's own finish
 * event so one can be written manually below), then awaits the full text,
 * optional reasoning output, and token usage.
 *
 * @param params.writer          UI message stream writer
 * @param params.adapter         provider adapter; supplies the provider/model
 *                               and receives recordSuccess/recordFailure
 * @param params.systemPrompt    system prompt for streamText
 * @param params.messages        UI messages, converted to model messages
 * @param params.lang            locale for user-facing error text
 * @param params.temperature     sampling temperature, default 0.3
 * @param params.maxOutputTokens output token cap, default 2500
 * @returns { success, responseText, reasoningText?, tokenUsage?, generationMs };
 *          `success` is false only when the provider threw (see catch below)
 */
async function streamLLMResponse(params) {
  const {
    writer,
    adapter,
    systemPrompt,
    messages,
    lang,
    temperature = 0.3,
    maxOutputTokens = 2500
  } = params;
  const start = Date.now();
  try {
    const provider = adapter.getProvider();
    const result = streamText({
      model: provider.chatModel(adapter.model),
      system: systemPrompt,
      messages: await convertToModelMessages(messages),
      temperature,
      maxOutputTokens,
      onError: ({ error }) => {
        console.error("[stream-helpers] streamText error:", error);
      }
    });
    // Collect stream-level errors instead of throwing, so partial output
    // already merged into the writer is preserved.
    const streamErrors = [];
    // sendFinish: false — finish is written explicitly below so an error
    // message can still be appended to the same stream first.
    writer.merge(result.toUIMessageStream({ sendFinish: false }));
    await result.consumeStream({
      onError: (error) => {
        streamErrors.push(error instanceof Error ? error : new Error(String(error)));
      }
    });
    const text = await result.text;
    let reasoningText;
    const reasoningPromise = result.reasoning;
    if (reasoningPromise) {
      try {
        // Providers differ: reasoning may resolve to a plain string or an
        // array of parts carrying a `text` field — normalize to one string.
        const reasoningOutput = await Promise.resolve(reasoningPromise);
        reasoningText = typeof reasoningOutput === "string" ? reasoningOutput : Array.isArray(reasoningOutput) ? reasoningOutput.map((r) => {
          if (typeof r === "object" && r !== null && "text" in r) return r.text;
          return String(r);
        }).join("") : void 0;
      } catch {
        // best-effort: reasoning is optional telemetry
      }
    }
    let tokenUsage;
    const usagePromise = result.usage;
    if (usagePromise) {
      try {
        const usage = await Promise.resolve(usagePromise);
        const inputTokens = usage.inputTokens ?? 0;
        const outputTokens = usage.outputTokens ?? 0;
        tokenUsage = {
          // Fall back to input+output when the provider omits totalTokens.
          total: usage.totalTokens ?? inputTokens + outputTokens,
          input: inputTokens,
          output: outputTokens
        };
      } catch {
        // best-effort: usage is optional telemetry
      }
    }
    const generationMs = Date.now() - start;
    if (streamErrors.length > 0) {
      adapter.recordFailure(streamErrors[0]);
      writeTextChunk(writer, t("ai.error.generic", lang), "error");
      writeFinish(writer, "error");
      // NOTE(review): success stays true here — the mid-stream error was
      // already surfaced to the user in-stream, presumably so callers do not
      // retry/fall back on top of partial output; confirm this is intended.
      return { success: true, responseText: text, reasoningText, tokenUsage, generationMs };
    }
    if (text.length > 0) {
      adapter.recordSuccess();
      writeFinish(writer);
      return { success: true, responseText: text, reasoningText, tokenUsage, generationMs };
    }
    // Stream completed cleanly but produced no text: tell the user.
    writeTextChunk(writer, t("ai.error.noOutput", lang), "no-output");
    writeFinish(writer);
    return { success: true, responseText: "", reasoningText, tokenUsage, generationMs };
  } catch (err) {
    // Provider threw while setting up or running the stream: report failure
    // so the caller can fall back (e.g. streamMockFallback).
    adapter.recordFailure(err instanceof Error ? err : new Error(String(err)));
    console.error("[stream-helpers] Provider threw:", err.message);
    return { success: false, responseText: "", generationMs: Date.now() - start };
  }
}
140
/**
 * Stream a canned demo-mode answer when no real provider is available.
 * Emits a fallback status event, the mock text as one text block, and a
 * finish event; resolves with the mock text.
 *
 * @param writer   UI message stream writer
 * @param question user question forwarded to the mock provider
 * @param lang     UI locale
 * @returns the mock answer text
 */
async function streamMockFallback(writer, question, lang) {
  // Lazy import keeps the mock corpus out of the hot path.
  const { getMockResponse } = await import("../providers/mock.js");
  const mockText = getMockResponse(question, lang);
  const messageMetadata = createChatStatusData({
    stage: "answer",
    message: t("ai.status.fallback", lang),
    progress: 80
  });
  writer.write({ type: "message-metadata", messageMetadata });
  writeTextChunk(writer, mockText, "fallback");
  writeFinish(writer);
  return mockText;
}
155
/**
 * Replay a cached answer through the UI message stream, reproducing the
 * status → sources → reasoning → text event sequence of a live generation.
 *
 * @param writer         UI message stream writer
 * @param cachedResponse cached payload with `articles`, `projects`, and the
 *                       recorded chunks replayed by the playback generator
 * @param config         playback configuration forwarded to
 *                       createResponsePlaybackGenerator
 * @param lang           locale for status messages
 */
async function streamCachedResponse(writer, cachedResponse, config, lang) {
  writeSearchStatus(writer, cachedResponse.articles.length + cachedResponse.projects.length, lang);
  writeGeneratingStatus(writer, lang);
  writeSourceArticles(writer, cachedResponse.articles);
  writeGeneratingStatus(writer, lang, 70);
  const playbackGenerator = createResponsePlaybackGenerator(cachedResponse, config);
  // One text block id is reused for the whole answer; reasoning ("thinking")
  // chunks open their own block, which is closed before any text is emitted.
  let thinkingId;
  const textId = `text-${Date.now()}`;
  let textStarted = false;
  for await (const chunk of playbackGenerator) {
    if (chunk.type === "thinking") {
      if (!thinkingId) {
        thinkingId = `thinking-${Date.now()}`;
        writer.write({ type: "reasoning-start", id: thinkingId });
      }
      writer.write({ type: "reasoning-delta", id: thinkingId, delta: chunk.text });
    } else {
      // Transition reasoning → text: close the open reasoning block first.
      if (thinkingId) {
        writer.write({ type: "reasoning-end", id: thinkingId });
        thinkingId = void 0;
      }
      if (!textStarted) {
        writer.write({ type: "text-start", id: textId });
        textStarted = true;
      }
      writer.write({ type: "text-delta", id: textId, delta: chunk.text });
    }
  }
  // Close whichever block was still open when playback ended.
  if (thinkingId) {
    writer.write({ type: "reasoning-end", id: thinkingId });
  }
  if (textStarted) {
    writer.write({ type: "text-end", id: textId });
  }
  writeDoneStatus(writer, lang);
  writeFinish(writer);
}
192
+ export {
193
+ streamCachedResponse,
194
+ streamLLMResponse,
195
+ streamMockFallback,
196
+ writeDoneStatus,
197
+ writeFinish,
198
+ writeGeneratingStatus,
199
+ writeSearchStatus,
200
+ writeSourceArticles,
201
+ writeTextChunk
202
+ };
@@ -0,0 +1,16 @@
1
/**
 * Build a chat status payload: stamps the current time and derives `done`
 * from the stage ("complete" => true) when not given explicitly.
 *
 * @param partial status fields ({ stage, message, progress, done? })
 * @returns the partial plus normalized `done` and an `at` timestamp (ms)
 */
function createChatStatusData(partial) {
  const done = partial.done ?? partial.stage === "complete";
  return { ...partial, done, at: Date.now() };
}
8
/**
 * Runtime type guard for chat status payloads received over the stream.
 *
 * @param value unknown value to check
 * @returns true when value is an object with string `stage`/`message`
 *          and numeric `progress`
 */
function isChatStatusData(value) {
  if (!value || typeof value !== "object") return false;
  const { stage, message, progress } = value;
  return typeof stage === "string" && typeof message === "string" && typeof progress === "number";
}
13
+ export {
14
+ createChatStatusData,
15
+ isChatStatusData
16
+ };
@@ -0,0 +1,26 @@
1
+ import { getMockResponse } from "../providers/mock.js";
2
/**
 * Create a ReadableStream that replays a mock answer character-by-character
 * with a small random delay, simulating token streaming.
 *
 * @param options.question   user question forwarded to getMockResponse
 * @param options.lang       locale, defaults to "zh"
 * @param options.delayRange [min, max] delay in ms between chunks
 * @returns a pull-based ReadableStream of small string chunks
 */
function streamMockResponse(options) {
  const { question, lang = "zh", delayRange = [12, 35] } = options;
  const text = getMockResponse(question, lang);
  const [minDelay, maxDelay] = delayRange;
  let cursor = 0;
  return new ReadableStream({
    async pull(controller) {
      if (cursor >= text.length) {
        controller.close();
        return;
      }
      // Occasionally emit two characters at once for a more organic rhythm.
      const step = Math.random() < 0.25 ? 2 : 1;
      controller.enqueue(text.slice(cursor, cursor + step));
      cursor += step;
      await sleep(minDelay + Math.random() * (maxDelay - minDelay));
    }
  });
}
21
/**
 * Promise-based delay helper.
 *
 * @param ms milliseconds to wait
 * @returns a promise that resolves after roughly `ms` milliseconds
 */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
24
+ export {
25
+ streamMockResponse
26
+ };
@@ -0,0 +1,21 @@
1
/** Response headers for server-sent-event style streaming endpoints. */
const STREAM_HEADERS = {
  "Content-Type": "text/event-stream",
  "Cache-Control": "no-cache",
  "Connection": "keep-alive",
  "Access-Control-Allow-Origin": "*"
};

/** Response headers for plain JSON endpoints (CORS-open). */
const JSON_HEADERS = {
  "Content-Type": "application/json",
  "Access-Control-Allow-Origin": "*"
};

/**
 * Build a JSON error Response with CORS headers.
 *
 * @param message human-readable error text placed under the `error` key
 * @param status  HTTP status code, defaults to 500
 * @returns a Response whose body is {"error": message}
 */
function errorResponse(message, status = 500) {
  const body = JSON.stringify({ error: message });
  return new Response(body, { status, headers: JSON_HEADERS });
}
17
+ export {
18
+ JSON_HEADERS,
19
+ STREAM_HEADERS,
20
+ errorResponse
21
+ };
@@ -0,0 +1,154 @@
1
// Locale string tables keyed by message id, one table per supported locale.
// t() resolves the requested locale first, then falls back to `en`, then to
// the key itself — so any key present in `en` is safe to request anywhere.
// Values may contain {placeholder} tokens that t() interpolates.
// Chinese text is stored as \uXXXX escapes (build-tool output).
const translations = {
  en: {
    // Reasoning UI
    "ai.reasoning.thinking": "Thinking...",
    "ai.reasoning.viewReasoning": "View reasoning",
    "ai.reasoning.waiting": "Waiting for thoughts...",
    // Error messages
    "ai.error.network": "Network connection failed. Please check your connection.",
    "ai.error.aborted": "Request was cancelled.",
    "ai.error.rateLimit": "Too many requests. Please try again later.",
    "ai.error.unavailable": "AI service is temporarily unavailable.",
    "ai.error.generic": "Something went wrong. Please try again later.",
    "ai.error.format": "Invalid request format.",
    // UI labels
    "ai.placeholder": "Ask a question...",
    "ai.clear": "Clear",
    "ai.clearConversation": "Clear conversation",
    "ai.close": "Close",
    "ai.closeChat": "Close chat",
    "ai.retry": "Retry",
    "ai.status.searching": "Searching...",
    "ai.status.generating": "Generating response...",
    "ai.status.found": "Found {count} related items",
    "ai.status.citation": "Answered from public records",
    "ai.status.fallback": "AI service unavailable, using demo mode",
    // Quick prompts
    "ai.prompt.techStack": "What tech stack is used?",
    "ai.prompt.recommend": "Recommend some articles?",
    "ai.prompt.build": "How to build a similar blog?",
    "ai.prompt.summarize": 'Summarize the key points of "{title}"',
    "ai.prompt.explain": 'Explain "{point}"',
    "ai.prompt.related": "What related content should I read next?",
    // Welcome messages
    // (template literal: the embedded newline is part of the message)
    "ai.welcome.reading": `I'm reading "{title}" with you.
Ask me to summarize, explain a concept, or explore related topics.`,
    "ai.welcome.canHelp": "Hi! I'm the blog AI assistant. Ask me anything and I'll help you find related articles.",
    "ai.welcome.greeting": "Hi! I'm the blog AI assistant.",
    "ai.welcome.demo": "I'm running in demo mode. I can recommend blog articles and external resources.",
    "ai.welcome.demoHint": "For full AI features (RAG search), configure AI_BASE_URL and AI_API_KEY.",
    "ai.welcome.demoPrompt": 'Try: "Recommend articles?" or "How to build this blog?"',
    // Header
    "ai.header.reading": "Reading:",
    "ai.header.mode": "Demo",
    // Assistant branding
    "ai.assistantName": "Blog Avatar",
    "ai.status.live": "Live",
    // Additional error messages
    "ai.error.emptyMessage": "Message cannot be empty.",
    "ai.error.emptyContent": "Message content cannot be empty.",
    "ai.error.inputTooLong": "Message too long, max {max} characters.",
    "ai.error.timeout": "Response timeout, please retry or simplify your question.",
    // Rate limit messages
    "ai.error.rateLimit.burst": "Too many requests, please try again later.",
    "ai.error.rateLimit.sustained": "Too many requests, please wait a minute.",
    "ai.error.rateLimit.daily": "Daily limit reached, please come back tomorrow.",
    "ai.error.noOutput": "Sorry, I could not generate a valid response. Please try rephrasing your question.",
    // System-prompt section headings
    "ai.prompt.section.responsibilities": "Your Responsibilities",
    "ai.prompt.section.format": "Response Format",
    "ai.prompt.section.principles": "Recommendation Principles",
    "ai.prompt.section.constraints": "Constraints",
    "ai.prompt.section.sourceLayers": "Source Priority Protocol (must follow)",
    "ai.prompt.section.privacy": "Privacy Protection",
    "ai.prompt.section.answerModes": "Answer Mode Guide (follow detected mode)",
    "ai.prompt.section.preOutputChecks": "Pre-Output Checks (execute mentally, do not output steps)",
    // Semi-static prompt layer (blog overview block)
    "ai.semiStatic.blogOverview": "Blog Overview",
    "ai.semiStatic.totalPosts": "{count} posts total",
    "ai.semiStatic.mainCategories": "Main categories: {categories}",
    "ai.semiStatic.latestArticles": "Latest Posts"
  },
  zh: {
    // Reasoning UI
    "ai.reasoning.thinking": "\u601D\u8003\u4E2D...",
    "ai.reasoning.viewReasoning": "\u67E5\u770B\u601D\u8003\u8FC7\u7A0B",
    "ai.reasoning.waiting": "\u7B49\u5F85\u601D\u8003...",
    // Error messages
    "ai.error.network": "\u7F51\u7EDC\u8FDE\u63A5\u5931\u8D25\uFF0C\u8BF7\u68C0\u67E5\u7F51\u7EDC",
    "ai.error.aborted": "\u8BF7\u6C42\u5DF2\u53D6\u6D88",
    "ai.error.rateLimit": "\u8BF7\u6C42\u592A\u9891\u7E41\uFF0C\u8BF7\u7A0D\u540E\u518D\u8BD5",
    "ai.error.unavailable": "AI \u670D\u52A1\u6682\u65F6\u4E0D\u53EF\u7528",
    "ai.error.generic": "\u51FA\u4E86\u70B9\u95EE\u9898\uFF0C\u8BF7\u7A0D\u540E\u518D\u8BD5",
    "ai.error.format": "\u8BF7\u6C42\u683C\u5F0F\u9519\u8BEF",
    // UI labels
    "ai.placeholder": "\u8F93\u5165\u4F60\u7684\u95EE\u9898...",
    "ai.clear": "\u6E05\u9664",
    "ai.clearConversation": "\u6E05\u9664\u5BF9\u8BDD",
    "ai.close": "\u5173\u95ED",
    "ai.closeChat": "\u5173\u95ED\u804A\u5929",
    "ai.retry": "\u91CD\u8BD5",
    "ai.status.searching": "\u641C\u7D22\u4E2D...",
    "ai.status.generating": "\u6B63\u5728\u751F\u6210\u56DE\u7B54...",
    "ai.status.found": "\u627E\u5230 {count} \u7BC7\u76F8\u5173\u5185\u5BB9",
    "ai.status.citation": "\u5DF2\u57FA\u4E8E\u516C\u5F00\u8BB0\u5F55\u76F4\u63A5\u7ED9\u51FA\u56DE\u7B54",
    "ai.status.fallback": "AI \u670D\u52A1\u4E0D\u53EF\u7528\uFF0C\u4F7F\u7528\u6F14\u793A\u6A21\u5F0F\u56DE\u590D",
    // Quick prompts
    "ai.prompt.techStack": "\u8FD9\u4E2A\u535A\u5BA2\u7528\u4E86\u4EC0\u4E48\u6280\u672F\uFF1F",
    "ai.prompt.recommend": "\u6709\u54EA\u4E9B\u6587\u7AE0\u63A8\u8350\uFF1F",
    "ai.prompt.build": "\u600E\u4E48\u642D\u5EFA\u7C7B\u4F3C\u7684\u535A\u5BA2\uFF1F",
    "ai.prompt.summarize": "\u603B\u7ED3\u4E00\u4E0B\u300A{title}\u300B\u7684\u6838\u5FC3\u89C2\u70B9",
    "ai.prompt.explain": "\u89E3\u91CA\u4E00\u4E0B\u300C{point}\u300D",
    "ai.prompt.related": "\u8FD9\u7BC7\u6587\u7AE0\u548C\u54EA\u4E9B\u5185\u5BB9\u76F8\u5173\uFF1F",
    // Welcome messages
    "ai.welcome.reading": "\u6211\u5728\u7ED3\u5408\u300A{title}\u300B\u966A\u4F60\u9605\u8BFB\u3002\n\u4F60\u53EF\u4EE5\u8BA9\u6211\u603B\u7ED3\u8FD9\u7BC7\u6587\u7AE0\u3001\u89E3\u91CA\u67D0\u4E2A\u89C2\u70B9\uFF0C\u6216\u8005\u987A\u7740\u8FD9\u7BC7\u6587\u7AE0\u7EE7\u7EED\u5EF6\u4F38\u5230\u76F8\u5173\u4E3B\u9898\u3002",
    "ai.welcome.canHelp": "\u4F60\u597D\uFF01\u6211\u662F\u535A\u5BA2 AI \u52A9\u624B\uFF0C\u95EE\u6211\u4EFB\u4F55\u5173\u4E8E\u535A\u5BA2\u5185\u5BB9\u7684\u95EE\u9898\uFF0C\u6211\u53EF\u4EE5\u5E2E\u4F60\u627E\u5230\u76F8\u5173\u6587\u7AE0\u3002",
    "ai.welcome.greeting": "\u4F60\u597D\uFF01\u6211\u662F\u535A\u5BA2 AI \u52A9\u624B\u3002",
    "ai.welcome.demo": "\u6211\u76EE\u524D\u5728 Demo \u6A21\u5F0F\u4E0B\uFF0C\u53EF\u4EE5\u63A8\u8350\u535A\u5BA2\u6587\u7AE0\u548C\u5916\u90E8\u8D44\u6E90\u3002",
    "ai.welcome.demoHint": "\u542F\u7528\u5B8C\u6574 AI \u529F\u80FD\uFF08RAG \u641C\u7D22\u589E\u5F3A\uFF09\u9700\u8981\u914D\u7F6E AI_BASE_URL \u548C AI_API_KEY \u73AF\u5883\u53D8\u91CF\u3002",
    "ai.welcome.demoPrompt": "\u8BD5\u8BD5\uFF1A\u300C\u6709\u54EA\u4E9B\u6587\u7AE0\u63A8\u8350\uFF1F\u300D\u6216\u300C\u600E\u4E48\u642D\u5EFA\u7C7B\u4F3C\u7684\u535A\u5BA2\uFF1F\u300D",
    // Header
    "ai.header.reading": "\u6B63\u5728\u9605\u8BFB\uFF1A",
    "ai.header.mode": "\u6F14\u793A",
    // Assistant branding
    "ai.assistantName": "\u535A\u5BA2\u5206\u8EAB",
    "ai.status.live": "\u5728\u7EBF",
    // Additional error messages
    "ai.error.emptyMessage": "\u6D88\u606F\u4E0D\u80FD\u4E3A\u7A7A\u3002",
    "ai.error.emptyContent": "\u6D88\u606F\u5185\u5BB9\u4E0D\u80FD\u4E3A\u7A7A\u3002",
    "ai.error.inputTooLong": "\u6D88\u606F\u8FC7\u957F\uFF0C\u6700\u591A {max} \u5B57\u3002",
    "ai.error.timeout": "\u54CD\u5E94\u8D85\u65F6\uFF0C\u8BF7\u91CD\u8BD5\u6216\u7B80\u5316\u95EE\u9898\u3002",
    // Rate limit messages
    "ai.error.rateLimit.burst": "\u8BF7\u6C42\u592A\u9891\u7E41\uFF0C\u8BF7\u7A0D\u540E\u518D\u8BD5\u3002",
    "ai.error.rateLimit.sustained": "\u8BF7\u6C42\u6B21\u6570\u8FC7\u591A\uFF0C\u8BF7\u4E00\u5206\u949F\u540E\u518D\u8BD5\u3002",
    "ai.error.rateLimit.daily": "\u4ECA\u65E5\u5BF9\u8BDD\u6B21\u6570\u5DF2\u8FBE\u4E0A\u9650\uFF0C\u8BF7\u660E\u5929\u518D\u6765\u3002",
    "ai.error.noOutput": "\u62B1\u6B49\uFF0C\u6211\u65E0\u6CD5\u751F\u6210\u6709\u6548\u7684\u56DE\u7B54\u3002\u8BF7\u5C1D\u8BD5\u6362\u4E00\u79CD\u65B9\u5F0F\u63D0\u95EE\u3002",
    // System-prompt section headings
    "ai.prompt.section.responsibilities": "\u4F60\u7684\u804C\u8D23",
    "ai.prompt.section.format": "\u56DE\u7B54\u683C\u5F0F",
    "ai.prompt.section.principles": "\u63A8\u8350\u539F\u5219",
    "ai.prompt.section.constraints": "\u7EA6\u675F",
    "ai.prompt.section.sourceLayers": "\u6765\u6E90\u5206\u5C42\u534F\u8BAE\uFF08\u5FC5\u987B\u9075\u5B88\uFF09",
    "ai.prompt.section.privacy": "\u9690\u79C1\u4FDD\u62A4",
    "ai.prompt.section.answerModes": "\u56DE\u7B54\u6A21\u5F0F\u6307\u5BFC\uFF08\u6309\u68C0\u6D4B\u5230\u7684\u6A21\u5F0F\u6267\u884C\uFF09",
    "ai.prompt.section.preOutputChecks": "\u8F93\u51FA\u524D\u68C0\u67E5\uFF08\u5728\u5FC3\u91CC\u6267\u884C\uFF0C\u4E0D\u8F93\u51FA\u6B65\u9AA4\uFF09",
    // Semi-static prompt layer (blog overview block)
    "ai.semiStatic.blogOverview": "\u535A\u5BA2\u6982\u51B5",
    "ai.semiStatic.totalPosts": "\u5171\u6709 {count} \u7BC7\u6587\u7AE0",
    "ai.semiStatic.mainCategories": "\u4E3B\u8981\u5206\u7C7B\uFF1A{categories}",
    "ai.semiStatic.latestArticles": "\u6700\u65B0\u6587\u7AE0"
  }
};
138
/**
 * Translate a message key with optional {placeholder} interpolation.
 *
 * Lookup order: requested locale -> English fallback -> the key itself, so a
 * missing translation degrades to a readable identifier.
 *
 * @param key  message id, e.g. "ai.status.found"
 * @param lang locale; anything other than "zh" resolves to "en"
 * @param vars placeholder values; each {name} occurrence is replaced
 * @returns the localized, interpolated string
 */
function t(key, lang = "zh", vars) {
  const l = lang === "zh" ? "zh" : "en";
  let text = translations[l]?.[key] ?? translations["en"][key] ?? key;
  if (vars) {
    for (const [k, v] of Object.entries(vars)) {
      // Literal split/join instead of `new RegExp(...)` + replace: keys
      // containing regex metacharacters no longer corrupt the pattern, and
      // values containing replacement patterns like "$&" or "$1" are
      // inserted verbatim instead of being expanded by String.replace.
      text = text.split(`{${k}}`).join(String(v));
    }
  }
  return text;
}
148
/**
 * Normalize an arbitrary language value to a supported locale code.
 * Only "zh" is preserved; everything else maps to "en".
 *
 * @param lang candidate locale value
 * @returns "zh" or "en"
 */
function getLang(lang) {
  if (lang === "zh") return "zh";
  return "en";
}
151
+ export {
152
+ getLang,
153
+ t
154
+ };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@astro-minimax/ai",
3
- "version": "0.8.2",
3
+ "version": "0.9.0",
4
4
  "type": "module",
5
5
  "description": "Vendor-agnostic AI integration package with full RAG pipeline for astro-minimax blogs — supports OpenAI, Cloudflare AI, and custom providers.",
6
6
  "author": "Souloss",
@@ -89,9 +89,9 @@
89
89
  "sideEffects": false,
90
90
  "dependencies": {
91
91
  "@ai-sdk/openai-compatible": "^2.0.35",
92
- "@astro-minimax/notify": "^0.7.5",
93
92
  "ai": "^6.0.116",
94
- "workers-ai-provider": "^3.1.2"
93
+ "workers-ai-provider": "^3.1.2",
94
+ "@astro-minimax/notify": "0.7.5"
95
95
  },
96
96
  "optionalDependencies": {
97
97
  "undici": "^6.0.0"