@lobehub/chat 1.134.3 → 1.134.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/.github/workflows/docker-database.yml +2 -0
  2. package/.github/workflows/docker-pglite.yml +2 -0
  3. package/.github/workflows/docker.yml +2 -0
  4. package/.github/workflows/release.yml +6 -0
  5. package/.github/workflows/sync-database-schema.yml +2 -0
  6. package/CHANGELOG.md +25 -0
  7. package/changelog/v1.json +9 -0
  8. package/package.json +3 -2
  9. package/packages/prompts/CLAUDE.md +329 -0
  10. package/packages/prompts/README.md +224 -0
  11. package/packages/prompts/package.json +14 -1
  12. package/packages/prompts/promptfoo/emoji-picker/eval.yaml +170 -0
  13. package/packages/prompts/promptfoo/emoji-picker/prompt.ts +16 -0
  14. package/packages/prompts/promptfoo/knowledge-qa/eval.yaml +89 -0
  15. package/packages/prompts/promptfoo/knowledge-qa/prompt.ts +26 -0
  16. package/packages/prompts/promptfoo/language-detection/eval.yaml +65 -0
  17. package/packages/prompts/promptfoo/language-detection/prompt.ts +16 -0
  18. package/packages/prompts/promptfoo/summary-title/eval.yaml +85 -0
  19. package/packages/prompts/promptfoo/summary-title/prompt.ts +18 -0
  20. package/packages/prompts/promptfoo/translate/eval.yaml +79 -0
  21. package/packages/prompts/promptfoo/translate/prompt.ts +18 -0
  22. package/packages/prompts/promptfooconfig.yaml +35 -0
  23. package/packages/prompts/src/chains/__tests__/__snapshots__/answerWithContext.test.ts.snap +164 -0
  24. package/packages/prompts/src/chains/__tests__/__snapshots__/pickEmoji.test.ts.snap +58 -0
  25. package/packages/prompts/src/chains/__tests__/__snapshots__/summaryTitle.test.ts.snap +26 -0
  26. package/packages/prompts/src/chains/__tests__/__snapshots__/translate.test.ts.snap +22 -0
  27. package/packages/prompts/src/chains/__tests__/answerWithContext.test.ts +18 -63
  28. package/packages/prompts/src/chains/__tests__/pickEmoji.test.ts +2 -37
  29. package/packages/prompts/src/chains/__tests__/summaryTitle.test.ts +2 -16
  30. package/packages/prompts/src/chains/__tests__/translate.test.ts +1 -12
  31. package/packages/prompts/src/chains/answerWithContext.ts +45 -21
  32. package/packages/prompts/src/chains/pickEmoji.ts +20 -6
  33. package/packages/prompts/src/chains/summaryTitle.ts +20 -15
  34. package/packages/prompts/src/chains/translate.ts +8 -2
@@ -0,0 +1,170 @@
1
+ description: Test emoji selection for different conversation topics and contexts
2
+
3
+ providers:
4
+ - openai:chat:gpt-5-mini
5
+ - openai:chat:claude-3-5-haiku-latest # NOTE(review): Anthropic/Google/DeepSeek model ids under the openai provider prefix — confirm an OpenAI-compatible gateway is configured, otherwise use the native anthropic:/google: providers
6
+ - openai:chat:gemini-flash-latest
7
+ - openai:chat:deepseek-chat
8
+
9
+ prompts:
10
+ - file://promptfoo/emoji-picker/prompt.ts
11
+
12
+ tests:
13
+ - vars:
14
+ content: "I just got a promotion at work! I'm so excited!"
15
+ assert:
16
+ - type: llm-rubric
17
+ provider: openai:gpt-5-mini
18
+ value: "Should select a positive, celebratory emoji appropriate for work success (e.g., 🎉, 🎊, ⭐, 💼, 🚀)"
19
+ - type: not-contains
20
+ value: "explanation"
21
+
22
+ - vars:
23
+ content: "My dog passed away yesterday. I'm really sad."
24
+ assert:
25
+ - type: llm-rubric
26
+ provider: openai:gpt-5-mini
27
+ value: "Should select a sympathetic, sad emoji appropriate for loss and grief (e.g., 😢, 😭, 💔, 😔)"
28
+ - type: not-contains
29
+ value: "explanation"
30
+
31
+ - vars:
32
+ content: "Can you help me with this math problem?"
33
+ assert:
34
+ - type: llm-rubric
35
+ provider: openai:gpt-5-mini
36
+ value: "Should select an emoji related to learning, thinking, or mathematics (e.g., 🤔, 📚, ✏️, 🧮, 🔢)"
37
+ - type: not-contains
38
+ value: "explanation"
39
+
40
+ - vars:
41
+ content: "I'm going on vacation to Hawaii next week!"
42
+ assert:
43
+ - type: llm-rubric
44
+ provider: openai:gpt-5-mini
45
+ value: "Should select an emoji related to vacation, travel, or tropical themes (e.g., 🌺, 🏖️, ✈️, 🌴, ☀️)"
46
+ - type: not-contains
47
+ value: "explanation"
48
+
49
+ - vars:
50
+ content: "I'm learning to cook Italian food"
51
+ assert:
52
+ - type: llm-rubric
53
+ provider: openai:gpt-5-mini
54
+ value: "Should select an emoji related to cooking or Italian food (e.g., 🍝, 🍕, 👨‍🍳, 🍽️)"
55
+ - type: not-contains
56
+ value: "explanation"
57
+
58
+ - vars:
59
+ content: "Technical documentation about API endpoints"
60
+ assert:
61
+ - type: llm-rubric
62
+ provider: openai:gpt-5-mini
63
+ value: "Should select an emoji related to technology, development, or documentation (e.g., 💻, 📖, ⚙️, 🔧, 📝)"
64
+ - type: not-contains
65
+ value: "explanation"
66
+
67
+ # Chinese language tests
68
+ - vars:
69
+ content: "我刚刚升职了!太激动了!"
70
+ assert:
71
+ - type: llm-rubric
72
+ provider: openai:gpt-5-mini
73
+ value: "Should select a positive, celebratory emoji appropriate for work success (e.g., 🎉, 🎊, ⭐, 💼, 🚀)"
74
+ - type: not-contains
75
+ value: "explanation"
76
+
77
+ - vars:
78
+ content: "我的猫咪昨天去世了,我很难过"
79
+ assert:
80
+ - type: llm-rubric
81
+ provider: openai:gpt-5-mini
82
+ value: "Should select a sympathetic, sad emoji appropriate for loss and grief (e.g., 😢, 😭, 💔, 😔)"
83
+ - type: not-contains
84
+ value: "explanation"
85
+
86
+ - vars:
87
+ content: "我正在学习做日本料理"
88
+ assert:
89
+ - type: llm-rubric
90
+ provider: openai:gpt-5-mini
91
+ value: "Should select an emoji related to cooking or Japanese food (e.g., 🍱, 🍣, 🍜, 👨‍🍳)"
92
+ - type: not-contains
93
+ value: "explanation"
94
+
95
+ # Spanish language tests
96
+ - vars:
97
+ content: "¡Me voy de vacaciones a la playa la próxima semana!"
98
+ assert:
99
+ - type: llm-rubric
100
+ provider: openai:gpt-5-mini
101
+ value: "Should select an emoji related to vacation, beach, or tropical themes (e.g., 🏖️, ☀️, 🌊, 🏝️)"
102
+ - type: not-contains
103
+ value: "explanation"
104
+
105
+ - vars:
106
+ content: "Estoy estudiando para mi examen de matemáticas"
107
+ assert:
108
+ - type: llm-rubric
109
+ provider: openai:gpt-5-mini
110
+ value: "Should select an emoji related to studying, learning, or mathematics (e.g., 📚, 🤓, 🧮, ✏️, 📊, 📐, 📏)"
111
+ - type: not-contains
112
+ value: "explanation"
113
+
114
+ # French language tests
115
+ - vars:
116
+ content: "Je viens de terminer mon marathon! Je suis épuisé mais heureux"
117
+ assert:
118
+ - type: llm-rubric
119
+ provider: openai:gpt-5-mini
120
+ value: "Should select an emoji related to running, sports, or achievement (e.g., 🏃, 🏅, 💪, 🎯)"
121
+ - type: not-contains
122
+ value: "explanation"
123
+
124
+ - vars:
125
+ content: "J'apprends à jouer de la guitare"
126
+ assert:
127
+ - type: llm-rubric
128
+ provider: openai:gpt-5-mini
129
+ value: "Should select an emoji related to music or guitar (e.g., 🎸, 🎵, 🎶, 🎼)"
130
+ - type: not-contains
131
+ value: "explanation"
132
+
133
+ # Japanese language tests
134
+ - vars:
135
+ content: "新しいプロジェクトが始まりました!頑張ります"
136
+ assert:
137
+ - type: llm-rubric
138
+ provider: openai:gpt-5-mini
139
+ value: "Should select an emoji related to new beginning, work, or motivation (e.g., 🚀, 💼, 💪, ✨)"
140
+ - type: not-contains
141
+ value: "explanation"
142
+
143
+ - vars:
144
+ content: "桜が咲いて本当に綺麗です"
145
+ assert:
146
+ - type: llm-rubric
147
+ provider: openai:gpt-5-mini
148
+ value: "Should select an emoji related to cherry blossoms, flowers, or beauty (e.g., 🌸, 🌺, 🌼, 🌷)"
149
+ - type: not-contains
150
+ value: "explanation"
151
+
152
+ # German language tests
153
+ - vars:
154
+ content: "Ich habe gerade ein neues Auto gekauft!"
155
+ assert:
156
+ - type: llm-rubric
157
+ provider: openai:gpt-5-mini
158
+ value: "Should select an emoji related to cars or excitement (e.g., 🚗, 🎉, 🚙)"
159
+ - type: not-contains
160
+ value: "explanation"
161
+
162
+ # Russian language tests
163
+ - vars:
164
+ content: "Я люблю читать книги по вечерам"
165
+ assert:
166
+ - type: llm-rubric
167
+ provider: openai:gpt-5-mini
168
+ value: "Should select an emoji related to reading or books (e.g., 📚, 📖, 📕, 🤓)"
169
+ - type: not-contains
170
+ value: "explanation"
@@ -0,0 +1,16 @@
1
+ // TypeScript prompt wrapper that uses actual chain implementation
2
+ import { chainPickEmoji } from '@lobechat/prompts';
3
+
4
+ interface PromptVars {
5
+ content: string;
6
+ }
7
+
8
+ export default function generatePrompt({ vars }: { vars: PromptVars }) {
9
+ const { content } = vars;
10
+
11
+ // Use the actual chain function from src
12
+ const result = chainPickEmoji(content);
13
+
14
+ // Return messages array as expected by promptfoo
15
+ return result.messages || [];
16
+ }
@@ -0,0 +1,89 @@
1
+ description: Test knowledge base Q&A with context retrieval and answer generation
2
+
3
+ providers:
4
+ - openai:chat:gpt-5-mini
5
+ - openai:chat:claude-3-5-haiku-latest
6
+ - openai:chat:gemini-flash-latest
7
+ - openai:chat:deepseek-chat
8
+
9
+ prompts:
10
+ - file://promptfoo/knowledge-qa/prompt.ts
11
+
12
+ tests:
13
+ - vars:
14
+ context: "React is a JavaScript library for building user interfaces. It was developed by Facebook and is now maintained by Facebook and the community. React uses a virtual DOM to efficiently update and render components. Key features include component-based architecture, JSX syntax, and state management through hooks."
15
+ query: "What is React and who developed it?"
16
+ assert:
17
+ - type: llm-rubric
18
+ provider: openai:gpt-5-mini
19
+ value: "The response should accurately explain what React is and mention Facebook as the developer"
20
+ - type: contains-any
21
+ value: ["React", "JavaScript", "library", "Facebook"]
22
+ - type: not-contains
23
+ value: "I don't know"
24
+
25
+ - vars:
26
+ context: "TypeScript is a strongly typed programming language that builds on JavaScript by adding static type definitions. It was developed by Microsoft. TypeScript code compiles to clean, simple JavaScript code which runs on any browser, Node.js environment, or any JavaScript engine that supports ECMAScript 3 or newer."
27
+ query: "How does TypeScript relate to JavaScript?"
28
+ assert:
29
+ - type: llm-rubric
30
+ provider: openai:gpt-5-mini
31
+ value: "The response should explain the relationship between TypeScript and JavaScript, mentioning type definitions"
32
+ - type: contains-any
33
+ value: ["TypeScript", "JavaScript", "type", "compiles", "strongly typed"]
34
+ - type: not-contains
35
+ value: "不知道"
36
+
37
+ - vars:
38
+ context: "Node.js是一个基于Chrome V8引擎的JavaScript运行时环境。它使用了事件驱动、非阻塞I/O模型,使其轻量而高效。Node.js的包管理器npm是世界上最大的开源库生态系统。"
39
+ query: "Node.js有什么特点?"
40
+ assert:
41
+ - type: llm-rubric
42
+ provider: openai:gpt-5-mini
43
+ value: "The response should describe Node.js features in Chinese, mentioning event-driven and non-blocking I/O"
44
+ - type: contains-any
45
+ value: ["Node.js", "事件驱动", "非阻塞", "JavaScript", "运行时"]
46
+ - type: not-contains
47
+ value: "I don't know"
48
+
49
+ - vars:
50
+ context: "Docker is a containerization platform that allows developers to package applications and their dependencies into lightweight, portable containers. Containers share the OS kernel but run in isolated user spaces. This makes applications more consistent across different environments."
51
+ query: "How can I deploy my app with Docker?"
52
+ assert:
53
+ - type: llm-rubric
54
+ provider: openai:gpt-5-mini
55
+ value: "The response should provide helpful information about deploying with Docker based on the containerization context. It should mention containers, packaging, or provide deployment-related guidance."
56
+ - type: contains-any
57
+ value: ["Docker", "container", "package", "deploy"]
58
+
59
+ - vars:
60
+ context: "GraphQL is a query language for APIs and a runtime for fulfilling those queries with existing data. Unlike REST APIs that require multiple requests to different endpoints, GraphQL allows clients to request exactly the data they need in a single request."
61
+ query: "What are the benefits of using GraphQL over REST?"
62
+ assert:
63
+ - type: llm-rubric
64
+ provider: openai:gpt-5-mini
65
+ value: "The response should compare GraphQL and REST, highlighting GraphQL's advantages such as single request capability or requesting specific data"
66
+ - type: contains-any
67
+ value: ["GraphQL", "REST"]
68
+
69
+ - vars:
70
+ context: "Machine learning algorithms can be categorized into supervised, unsupervised, and reinforcement learning. Supervised learning uses labeled data to train models, unsupervised learning finds patterns in unlabeled data, and reinforcement learning learns through trial and error with rewards."
71
+ query: "Can you explain blockchain technology?"
72
+ assert:
73
+ - type: llm-rubric
74
+ provider: openai:gpt-5-mini
75
+ value: "The response should indicate that the provided context is about machine learning, not blockchain, and cannot answer the blockchain question based on the context"
76
+ - type: contains-any
77
+ value: ["machine learning", "cannot", "不能", "no information", "context", "does not contain"]
78
+
79
+ - vars:
80
+ context: ""
81
+ query: "How do I set up a web server?"
82
+ assert:
83
+ - type: llm-rubric
84
+ provider: openai:gpt-5-mini
85
+ value: "The response should provide helpful information about setting up a web server using general knowledge"
86
+ - type: contains-any
87
+ value: ["server", "web", "setup", "install", "configure"]
88
+ - type: not-contains
89
+ value: "cannot answer"
@@ -0,0 +1,26 @@
1
+ // TypeScript prompt wrapper that uses actual chain implementation
2
+ import { chainAnswerWithContext } from '@lobechat/prompts';
3
+
4
+ interface PromptVars {
5
+ context: string | string[];
6
+ knowledge?: string | string[];
7
+ query: string;
8
+ }
9
+
10
+ export default function generatePrompt({ vars }: { vars: PromptVars }) {
11
+ const { context, query, knowledge = ['general knowledge'] } = vars;
12
+
13
+ // Ensure context and knowledge are arrays
14
+ const contextArray = Array.isArray(context) ? context : [context];
15
+ const knowledgeArray = Array.isArray(knowledge) ? knowledge : [knowledge];
16
+
17
+ // Use the actual chain function from src
18
+ const result = chainAnswerWithContext({
19
+ context: contextArray,
20
+ knowledge: knowledgeArray,
21
+ question: query,
22
+ });
23
+
24
+ // Return messages array as expected by promptfoo
25
+ return result.messages || [];
26
+ }
@@ -0,0 +1,65 @@
1
+ description: Test language detection accuracy for various text inputs
2
+
3
+ providers:
4
+ - openai:chat:gpt-5-mini
5
+ - openai:chat:claude-3-5-haiku-latest
6
+ - openai:chat:gemini-flash-latest
7
+ - openai:chat:deepseek-chat
8
+
9
+ prompts:
10
+ - file://promptfoo/language-detection/prompt.ts
11
+
12
+ tests:
13
+ - vars:
14
+ content: "Hello, how are you today? I hope you're having a great day!"
15
+ assert:
16
+ - type: llm-rubric
17
+ provider: openai:gpt-5-mini
18
+ value: "Should output a valid locale code for English (e.g., en-US, en-GB, en)"
19
+ - type: contains-any
20
+ value: ["en-", "en"]
21
+
22
+ - vars:
23
+ content: "Bonjour, comment allez-vous? J'espère que vous passez une excellente journée!"
24
+ assert:
25
+ - type: llm-rubric
26
+ provider: openai:gpt-5-mini
27
+ value: "Should output a valid locale code for French (e.g., fr-FR, fr-CA, fr)"
28
+ - type: contains-any
29
+ value: ["fr-", "fr"]
30
+
31
+ - vars:
32
+ content: "你好,你今天怎么样?希望你过得愉快!"
33
+ assert:
34
+ - type: llm-rubric
35
+ provider: openai:gpt-5-mini
36
+ value: "Should output a valid locale code for Chinese (e.g., zh-CN, zh-TW, zh)"
37
+ - type: contains-any
38
+ value: ["zh-", "zh"]
39
+
40
+ - vars:
41
+ content: "Hola, ¿cómo estás hoy? ¡Espero que tengas un gran día!"
42
+ assert:
43
+ - type: llm-rubric
44
+ provider: openai:gpt-5-mini
45
+ value: "Should output a valid locale code for Spanish (e.g., es-ES, es-MX, es)"
46
+ - type: contains-any
47
+ value: ["es-", "es"]
48
+
49
+ - vars:
50
+ content: "Привет, как дела сегодня? Надеюсь, у тебя отличный день!"
51
+ assert:
52
+ - type: llm-rubric
53
+ provider: openai:gpt-5-mini
54
+ value: "Should output a valid locale code for Russian (e.g., ru-RU, ru)"
55
+ - type: contains-any
56
+ value: ["ru-", "ru"]
57
+
58
+ - vars:
59
+ content: "こんにちは、今日はいかがですか?素晴らしい一日をお過ごしください!"
60
+ assert:
61
+ - type: llm-rubric
62
+ provider: openai:gpt-5-mini
63
+ value: "Should output a valid locale code for Japanese (e.g., ja-JP, ja)"
64
+ - type: contains-any
65
+ value: ["ja-", "ja"]
@@ -0,0 +1,16 @@
1
+ // TypeScript prompt wrapper that uses actual chain implementation
2
+ import { chainLangDetect } from '@lobechat/prompts';
3
+
4
+ interface PromptVars {
5
+ content: string;
6
+ }
7
+
8
+ export default function generatePrompt({ vars }: { vars: PromptVars }) {
9
+ const { content } = vars;
10
+
11
+ // Use the actual chain function from src
12
+ const result = chainLangDetect(content);
13
+
14
+ // Return messages array as expected by promptfoo
15
+ return result.messages || [];
16
+ }
@@ -0,0 +1,85 @@
1
+ description: Test summary title generation for different conversation types
2
+
3
+ providers:
4
+ - openai:chat:gpt-5-mini
5
+ - openai:chat:claude-3-5-haiku-latest
6
+ - openai:chat:gemini-flash-latest
7
+ - openai:chat:deepseek-chat
8
+
9
+ prompts:
10
+ - file://promptfoo/summary-title/prompt.ts
11
+
12
+ tests:
13
+ - vars:
14
+ messages:
15
+ - role: "user"
16
+ content: "How do I install Node.js on my computer?"
17
+ - role: "assistant"
18
+ content: "To install Node.js, you can download it from the official website nodejs.org and follow the installation instructions for your operating system."
19
+ - role: "user"
20
+ content: "What about using a version manager?"
21
+ - role: "assistant"
22
+ content: "Yes! I recommend using nvm (Node Version Manager) which allows you to install and switch between different Node.js versions easily."
23
+ locale: "en-US"
24
+ assert:
25
+ - type: llm-rubric
26
+ provider: openai:gpt-5-mini
27
+ value: "The response should be a concise title (10 words or less) that summarizes the conversation about Node.js installation"
28
+ - type: regex
29
+ value: "^.{1,50}$" # Title should be between 1-50 characters
30
+ - type: not-contains
31
+ value: "标点符号" # NOTE(review): this only rejects the literal word "标点符号"; it does not verify the title is actually punctuation-free — consider a regex assertion instead
32
+
33
+ - vars:
34
+ messages:
35
+ - role: "user"
36
+ content: "我想学习做蛋炒饭"
37
+ - role: "assistant"
38
+ content: "蛋炒饭是很经典的家常菜!你需要准备鸡蛋、米饭、葱花、盐和生抽等基本材料。"
39
+ - role: "user"
40
+ content: "具体步骤是什么?"
41
+ - role: "assistant"
42
+ content: "首先打散鸡蛋炒熟盛起,然后下米饭炒散,最后加入鸡蛋和调料炒匀即可。"
43
+ locale: "zh-CN"
44
+ assert:
45
+ - type: llm-rubric
46
+ provider: openai:gpt-5-mini
47
+ value: "The response should be a Chinese title summarizing the conversation about fried rice cooking"
48
+ - type: regex
49
+ value: "^.{1,30}$" # Chinese titles can be shorter
50
+ - type: contains-any
51
+ value: ["蛋炒饭", "做饭", "烹饪", "料理"]
52
+
53
+ - vars:
54
+ messages:
55
+ - role: "user"
56
+ content: "Can you help me debug this Python error?"
57
+ - role: "assistant"
58
+ content: "Of course! Please share the error message and the relevant code."
59
+ - role: "user"
60
+ content: "I'm getting 'AttributeError: 'NoneType' object has no attribute 'split''"
61
+ - role: "assistant"
62
+ content: "This error occurs when you're trying to call .split() on a None value. The variable is likely None instead of a string."
63
+ locale: "en-US"
64
+ assert:
65
+ - type: llm-rubric
66
+ provider: openai:gpt-5-mini
67
+
68
+ value: "The response should be a title about Python debugging or error resolution"
69
+ - type: contains-any
70
+ value: ["Python", "debug", "error", "AttributeError", "code"]
71
+
72
+ - vars:
73
+ messages:
74
+ - role: "user"
75
+ content: "¿Cómo está el tiempo hoy?"
76
+ - role: "assistant"
77
+ content: "No tengo acceso a información meteorológica en tiempo real, pero puedes consultar el clima en tu área usando aplicaciones como Weather.com o tu app del tiempo local."
78
+ locale: "es-ES"
79
+ assert:
80
+ - type: llm-rubric
81
+ provider: openai:gpt-5-mini
82
+
83
+ value: "The response should be a Spanish title about weather inquiry"
84
+ - type: regex
85
+ value: "^.{1,50}$"
@@ -0,0 +1,18 @@
1
+ // TypeScript prompt wrapper that uses actual chain implementation
2
+ import { chainSummaryTitle } from '@lobechat/prompts';
3
+ import type { OpenAIChatMessage } from '@lobechat/types';
4
+
5
+ interface PromptVars {
6
+ locale: string;
7
+ messages: OpenAIChatMessage[];
8
+ }
9
+
10
+ export default function generatePrompt({ vars }: { vars: PromptVars }) {
11
+ const { messages, locale } = vars;
12
+
13
+ // Use the actual chain function from src
14
+ const result = chainSummaryTitle(messages, locale);
15
+
16
+ // Return messages array as expected by promptfoo
17
+ return result.messages || [];
18
+ }
@@ -0,0 +1,79 @@
1
+ description: Test translation accuracy between different languages
2
+
3
+ providers:
4
+ - openai:chat:gpt-5-mini
5
+ - openai:chat:claude-3-5-haiku-latest
6
+ - openai:chat:gemini-flash-latest
7
+ - openai:chat:deepseek-chat
8
+
9
+ prompts:
10
+ - file://promptfoo/translate/prompt.ts
11
+
12
+ tests:
13
+ - vars:
14
+ content: 'Hello, how are you?'
15
+ from: 'en-US'
16
+ to: 'zh-CN'
17
+ assert:
18
+ - type: contains-any
19
+ value: ['你好', '您好']
20
+ - type: not-contains
21
+ value: 'Hello'
22
+
23
+ - vars:
24
+ content: '你好,你怎么样?'
25
+ from: 'zh-CN'
26
+ to: 'en-US'
27
+ assert:
28
+ - type: contains-any
29
+ value: ['Hello', 'Hi', 'how are you', 'How are you']
30
+ - type: not-contains
31
+ value: '你好'
32
+
33
+ - vars:
34
+ content: 'Je suis content de vous rencontrer'
35
+ from: 'fr-FR'
36
+ to: 'en-US'
37
+ assert:
38
+ - type: contains-any
39
+ value: ['pleased', 'happy', 'glad', 'meet', 'meeting']
40
+ - type: not-contains
41
+ value: 'Je suis'
42
+
43
+ - vars:
44
+ content: 'The weather is beautiful today'
45
+ from: 'en-US'
46
+ to: 'es-ES'
47
+ assert:
48
+ - type: contains-any
49
+ value: ['tiempo', 'clima', 'hermoso', 'bonito', 'hoy', 'día', 'precioso']
50
+ - type: not-contains
51
+ value: 'weather'
52
+
53
+ - vars:
54
+ content: 'I love programming with TypeScript'
55
+ from: 'en-US'
56
+ to: 'ja-JP'
57
+ assert:
58
+ - type: contains
59
+ value: 'TypeScript' # Technical terms often remain unchanged
60
+ - type: not-contains
61
+ value: 'I love'
62
+
63
+ - vars:
64
+ content: 'Machine learning is revolutionizing technology'
65
+ from: 'en-US'
66
+ to: 'de-DE'
67
+ assert:
68
+ - type: contains-any
69
+ value: ['Technologie', 'revolutioniert', 'maschinelles', 'Lernen']
70
+ - type: not-contains
71
+ value: 'Machine learning'
72
+
73
+ - vars:
74
+ content: 'API_KEY_12345'
75
+ from: 'en-US'
76
+ to: 'zh-CN'
77
+ assert:
78
+ - type: contains
79
+ value: 'API_KEY_12345'
@@ -0,0 +1,18 @@
1
+ // TypeScript prompt wrapper that uses actual chain implementation
2
+ import { chainTranslate } from '@lobechat/prompts';
3
+
4
+ interface PromptVars {
5
+ content: string;
6
+ from: string;
7
+ to: string;
8
+ }
9
+
10
+ export default function generatePrompt({ vars }: { vars: PromptVars }) {
11
+ const { content, to } = vars;
12
+
13
+ // Use the actual chain function from src
14
+ const result = chainTranslate(content, to);
15
+
16
+ // Return messages array as expected by promptfoo
17
+ return result.messages || [];
18
+ }
@@ -0,0 +1,35 @@
1
+ description: LobeHub Prompts Testing Suite
2
+
3
+ # Test configurations - run all prompt tests (NOTE(review): verify 'testPaths' is a supported promptfoo config key; standard configs use 'tests'/'prompts' or multiple -c files)
4
+ testPaths:
5
+ - promptfoo/translate/eval.yaml
6
+ - promptfoo/summary-title/eval.yaml
7
+ - promptfoo/language-detection/eval.yaml
8
+ - promptfoo/emoji-picker/eval.yaml
9
+ - promptfoo/knowledge-qa/eval.yaml
10
+
11
+ # Output configuration
12
+ outputPath: promptfoo-results.json
13
+
14
+ # Default test settings
15
+ defaultTest:
16
+ assert:
17
+ - type: llm-rubric
18
+ provider: openai:gpt-5-mini
19
+ value: "The response should be relevant and well-formatted"
20
+ - type: cost
21
+ threshold: 0.01 # Maximum cost per test in USD
22
+
23
+ # Environment variables for API keys
24
+ env:
25
+ OPENAI_API_KEY: ${OPENAI_API_KEY}
26
+ ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY}
27
+
28
+ # Evaluation settings
29
+ evaluateOptions:
30
+ maxConcurrency: 5
31
+ delay: 100
32
+
33
+ # TypeScript support
34
+ transforms:
35
+ - "typescript"