ctod 0.8.2 → 0.8.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/examples/anthropic.ts ADDED
@@ -0,0 +1,101 @@
+ import fs from 'fs'
+ import { Anthropic } from '@anthropic-ai/sdk'
+ import { CtoD, AnthropicCtodService, plugins } from '../lib/index'
+
+ /**
+  * @test npx esno ./examples/anthropic.ts
+  * The '@anthropic-ai/sdk' package must be installed manually.
+  */
+
+ const apiKey = fs.readFileSync('./.anthropic-api-key', 'utf-8').trim()
+
+ const ctod = new CtoD({
+     plugins: () => {
+         return {
+             retry: plugins.RetryPlugin.use({
+                 retry: 3,
+                 printWarn: true
+             })
+         }
+     },
+     request: AnthropicCtodService.createChatRequestWithJsonSchema({
+         config: {
+             model: 'claude-3-5-haiku-latest'
+         },
+         anthropicSdk: new Anthropic({
+             apiKey
+         })
+     })
+ })
+
+ const brokerBuilder = ctod.createBrokerBuilder<{
+     indexes: string[]
+     question: string
+ }>({
+     install: ({ attach }) => {
+         attach('start', async({ setPreMessages }) => {
+             setPreMessages([
+                 {
+                     role: 'system',
+                     content: '你現在是一位擅長分類索引的藥師' // "You are now a pharmacist skilled at classifying indexes."
+                 }
+             ])
+         })
+     }
+ })
+
+ const broker = brokerBuilder.create(async({ yup, data, setMessages }) => {
+     const { indexes, question } = data
+     setMessages([
+         {
+             role: 'user',
+             content: [
+                 '我有以下索引', // "I have the following indexes"
+                 `${JSON.stringify(indexes)}`,
+                 `請幫我解析"${question}"可能是哪個索引`, // "please determine which index `question` might be"
+                 '且相關性由高到低排序並給予分數,分數由 0 ~ 1' // "sort by relevance from high to low, scoring from 0 to 1"
+             ]
+         }
+     ])
+     const item = yup.object({
+         name: yup.string().required().meta({
+             jsonSchema: {
+                 description: '索引名稱' // "index name"
+             }
+         }),
+         score: yup.number().required().meta({
+             jsonSchema: {
+                 description: '評比分數' // "rating score"
+             }
+         })
+     }).required()
+     return {
+         indexes: yup.array(item).required().meta({
+             jsonSchema: {
+                 description: '由高到低排序的索引' // "indexes sorted from high to low"
+             }
+         })
+     }
+ })
+
+ broker.request({
+     indexes: ['胃痛', '腰痛', '頭痛', '喉嚨痛', '四肢疼痛'], // stomach ache, lower back pain, headache, sore throat, limb pain
+     question: '喝咖啡,吃甜食,胃食道逆流' // "drinks coffee, eats sweets, has acid reflux"
+ }).then(e => {
+     console.log('輸出結果:', e.indexes) // "Output:"
+     /*
+         [
+             {
+                 name: '胃痛',
+                 score: 1
+             },
+             {
+                 name: '喉嚨痛',
+                 score: 0.7
+             },
+             ...
+         ]
+     */
+ }).catch(error => {
+     console.error('Error:', error)
+ })
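The broker's return value is a map of yup schemas; ctod converts it with validateToJsonSchema into the JSON Schema that the new Anthropic service sends as a tool input_schema (see chatAndStructure further down). As a rough, hypothetical sketch only, assuming the usual output shape of @sodaru/yup-to-json-schema, the output schema above would convert to approximately:

    // Hypothetical illustration, not part of the package: the approximate
    // JSON Schema derived from the yup schema in the example. Exact keys
    // depend on @sodaru/yup-to-json-schema.
    const approximateJsonSchema = {
        type: 'object',
        properties: {
            indexes: {
                type: 'array',
                description: '由高到低排序的索引', // "indexes sorted from high to low"
                items: {
                    type: 'object',
                    properties: {
                        name: { type: 'string', description: '索引名稱' },
                        score: { type: 'number', description: '評比分數' }
                    },
                    required: ['name', 'score']
                }
            }
        },
        required: ['indexes']
    }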
package/lib/index.ts CHANGED
@@ -6,6 +6,7 @@ export { validateToJsonSchema, defineYupSchema } from './utils/validate'
  export { OpenAICtodService } from './service/openai'
  export { Llama3CppCtodService } from './service/llama3.cpp'
  export { GoogleCtodService } from './service/google'
+ export { AnthropicCtodService } from './service/anthropic'
 
  export { TextParser } from './core/parser'
  export { ChatBroker } from './broker/chat'
package/lib/service/anthropic/chat.ts ADDED
@@ -0,0 +1,146 @@
+ import { AnthropicCtodService } from './index'
+ import { PromiseResponseType } from '../../types'
+
+ export type Message = {
+     role: string
+     content: string
+ }
+
+ export type Config = {
+     /**
+      * @zh 選擇運行的模型。
+      * @en What model to use.
+      */
+     model: string
+     maxTokens: number
+ }
+
+ export class AnthropicChat {
+     anthropic: AnthropicCtodService
+     config: Config = {
+         model: 'claude-3-5-haiku-latest',
+         maxTokens: 8192
+     }
+
+     constructor(anthropic: AnthropicCtodService) {
+         this.anthropic = anthropic
+     }
+
+     /**
+      * @zh 改變對話的一些設定
+      * @en Change some settings of the conversation
+      */
+
+     setConfig(options: Partial<Config>) {
+         Object.assign(this.config, options)
+     }
+
+     /**
+      * Splits the system message out of the message list.
+      */
+
+     private translateMessages(messages: any[]) {
+         return {
+             system: messages.find(e => e.role === 'system')?.content[0].text,
+             messages: messages.filter(e => e.role !== 'system')
+         }
+     }
+
+     /**
+      * @zh 進行對話,並且以結構化的方式輸出
+      * @en Talk to the AI and output in a structured way
+      */
+
+     async chatAndStructure(messages: Message[], jsonSchema: any) {
+         const anthropic = this.anthropic.anthropicSdk
+         const translateMessages = this.translateMessages(messages)
+         const msg = await anthropic.messages.create({
+             model: this.config.model,
+             max_tokens: this.config.maxTokens,
+             system: translateMessages.system,
+             messages: translateMessages.messages,
+             tools: [
+                 {
+                     name: 'data',
+                     description: 'Response Data',
+                     input_schema: jsonSchema
+                 }
+             ],
+             tool_choice: {
+                 type: 'tool',
+                 name: 'data'
+             }
+         })
+         let toolUseContent: any = msg.content.find(e => e.type === 'tool_use')
+         let response = toolUseContent?.input || null
+         if (response == null) {
+             return 'null'
+         }
+         return JSON.stringify(response)
+     }
+
+     /**
+      * @zh 進行對話
+      * @en Talk to the AI
+      */
+
+     async talk(messages: Message[] = []) {
+         const anthropic = this.anthropic.anthropicSdk
+         const newMessages = this.translateMessages(messages)
+         const msg = await anthropic.messages.create({
+             model: this.config.model,
+             max_tokens: this.config.maxTokens,
+             system: newMessages.system,
+             messages: newMessages.messages
+         })
+         let output = ''
+         let textContent: any = msg.content.find(e => e.type === 'text')
+         if (textContent) {
+             output = textContent.text
+         }
+         return output
+     }
+
+     /**
+      * @zh 進行對話,並且以串流的方式輸出
+      * @en Talk to the AI and output in a streaming way
+      */
+
+     talkStream(params: {
+         messages: Message[]
+         onMessage: (_message: string) => void
+         onEnd: () => void
+         onWarn: (_warn: any) => void
+         onError: (_error: any) => void
+     }) {
+         const anthropic = this.anthropic.anthropicSdk
+         const { onMessage, onEnd, onError } = params
+         const { messages, system } = this.translateMessages(params.messages)
+         const performStreamedChat = async () => {
+             try {
+                 const stream = await anthropic.messages.create({
+                     model: this.config.model,
+                     max_tokens: this.config.maxTokens,
+                     system: system,
+                     stream: true,
+                     messages
+                 })
+                 for await (const messageStream of stream) {
+                     if (messageStream.type === 'content_block_delta') {
+                         const deltaText = 'text' in messageStream.delta ? messageStream.delta.text : ''
+                         onMessage(deltaText)
+                     }
+                 }
+                 onEnd()
+             } catch (error) {
+                 onError(error)
+             }
+         }
+         performStreamedChat()
+         return {
+             cancel: () => null
+         }
+     }
+ }
+
+ export type AnthropicChatTalkResponse = PromiseResponseType<AnthropicChat['talk']>
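For readers who want to drive the chat wrapper directly rather than through a broker, here is a minimal, hypothetical sketch (not part of the diff) written against the class API above. It assumes an ANTHROPIC_API_KEY environment variable and an ESM context where top-level await is available; note that the cancel() returned by talkStream is currently a stub that does not abort the request.

    import { Anthropic } from '@anthropic-ai/sdk'
    import { AnthropicCtodService } from 'ctod'

    const service = new AnthropicCtodService(new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY }))
    const chat = service.createChat()
    chat.setConfig({ maxTokens: 4096 })

    // One-shot reply: talk() resolves with the first text content block.
    const reply = await chat.talk([{ role: 'user', content: 'Say hello.' }])
    console.log(reply)

    // Streaming reply: content_block_delta text arrives through onMessage.
    chat.talkStream({
        messages: [{ role: 'user', content: 'Tell me a short joke.' }],
        onMessage: text => process.stdout.write(text),
        onEnd: () => process.stdout.write('\n'),
        onWarn: () => null,
        onError: error => console.error(error)
    })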
package/lib/service/anthropic/index.ts ADDED
@@ -0,0 +1,29 @@
+ import { validateToJsonSchema } from '../../utils/validate'
+ import { Config, AnthropicChat } from './chat'
+ import type { Anthropic } from '@anthropic-ai/sdk'
+
+ export class AnthropicCtodService {
+     anthropicSdk: Anthropic
+
+     constructor(anthropicSdk: Anthropic) {
+         this.anthropicSdk = anthropicSdk
+     }
+
+     static createChatRequestWithJsonSchema(params: {
+         anthropicSdk: Anthropic
+         config?: Partial<Config>
+     }) {
+         const anthropic = new AnthropicCtodService(params.anthropicSdk)
+         const chat = anthropic.createChat()
+         chat.setConfig(params.config || {})
+         return async (messages: any[], { schema }: any) => {
+             const jsonSchema = validateToJsonSchema(schema.output)
+             const content = await chat.chatAndStructure(messages, jsonSchema)
+             return content
+         }
+     }
+
+     createChat() {
+         return new AnthropicChat(this)
+     }
+ }
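createChatRequestWithJsonSchema therefore returns a plain function of shape (messages, { schema }) => Promise<string>, which is what new CtoD({ request }) consumes in the example above. Any function honoring that contract can stand in for it, which is handy for tests; a hypothetical stub (not part of the package), mirroring the constructor call from the example:

    import { CtoD, validateToJsonSchema } from 'ctod'

    // Same contract as the request function returned above: inspect the
    // accumulated messages and the broker's schema, resolve to a JSON string.
    const stubRequest = async (messages: any[], { schema }: any) => {
        const jsonSchema = validateToJsonSchema(schema.output)
        console.log('would send', messages.length, 'messages, schema:', jsonSchema)
        return JSON.stringify({ indexes: [] }) // canned structured result
    }

    const ctod = new CtoD({
        plugins: () => ({}), // no plugins for this stub
        request: stubRequest
    })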
@@ -1,6 +1,7 @@
  import { Llama3CppCtodService } from './index'
- import { flow } from 'power-helper'
+ import { flow, Once } from 'power-helper'
  import { tify, sify } from 'chinese-conv/dist'
+ import { Template } from '@huggingface/jinja'
 
  type Message = {
      role: string
@@ -16,7 +17,7 @@ export type Config = {
  }
 
  type Stream = {
-     onMessage: (data: { message: string }) => void
+     onMessage: (message: string) => void
      onEnd?: () => void
      onWarn?: (error: any) => void
      onError?: (error: any) => void
@@ -52,7 +53,7 @@ class Requester {
 
      async stream(params: {
          path: string
-         data: any
+         data: Record<string, any> | (() => Promise<any>)
          onMessage: (data: any) => void
          onEnd: () => void
          onWarn: (error: any) => void
@@ -99,7 +100,7 @@ class Requester {
      }
      fetch(`${this.core.config.baseUrl}/${params.path}`, {
          method: 'POST',
-         body: JSON.stringify(params.data),
+         body: JSON.stringify(typeof params.data === 'function' ? (await params.data()) : params.data),
          signal,
          headers: {
              'Content-Type': 'application/json',
@@ -150,6 +151,18 @@ class Requester {
  }
 
  export class Llama3CppCompletion {
+     private getProp = new Once({
+         handler: async() => {
+             const url = `${this.config.baseUrl}/props`
+             const { data: props } = await this.core._axios.get<{
+                 chat_template: string
+                 bos_token: string
+                 eos_token: string
+             }>(url, {})
+             return props
+         }
+     })
+
      core: Llama3CppCtodService
      config: Config = {
          baseUrl: '',
@@ -172,18 +185,6 @@ export class Llama3CppCompletion {
      options?: Options
      messages: Message[]
  }) {
-     const prompts: string[] = []
-     for (let { role, content } of params.messages) {
-         if (role === 'system') {
-             prompts.push(`<|start_header_id|>system<|end_header_id|>\n\n${content}\n\n`)
-         }
-         if (role === 'user') {
-             prompts.push(`<|start_header_id|>user<|end_header_id|>\n\n${content?.replaceAll('\n', '\\n') ?? ''}`)
-         }
-         if (role === 'assistant') {
-             prompts.push('<|start_header_id|>assistant<|end_header_id|>\n\n' + content)
-         }
-     }
      const lastMessage = params.messages.at(-1) || ''
      const requester = new Requester(this)
      return {
@@ -192,11 +193,17 @@
          message: string
          fullMessage: string
      }> => {
+         const props = await this.getProp.run()
+         const template = new Template(props.chat_template)
+         const prompt = template.render({
+             bos_token: props.bos_token,
+             messages: params.messages
+         }).slice(0, props.eos_token.length * -1 - 1)
          const result = await requester.fetch({
              path: 'completion',
              data: {
                  ...(params.options || {}),
-                 prompt: this.config.autoConvertTraditionalChinese ? sify(prompts.join('\n')) : prompts.join('\n')
+                 prompt: this.config.autoConvertTraditionalChinese ? sify(prompt) : prompt
              }
          })
          const message = this.config.autoConvertTraditionalChinese ? tify(result.data.content) : result.data.content
@@ -212,33 +219,28 @@ export class Llama3CppCompletion {
      messages: Message[]
      options?: Options
  }) {
-     const prompts: string[] = []
-     for (let { role, content } of params.messages) {
-         if (role === 'system') {
-             prompts.push(`<|start_header_id|>system<|end_header_id|>\n\n${content}\n\n`)
-         }
-         if (role === 'user') {
-             prompts.push(`<|start_header_id|>user<|end_header_id|>\n\n${content?.replaceAll('\n', '\\n') ?? ''}`)
-         }
-         if (role === 'assistant') {
-             prompts.push('<|start_header_id|>assistant<|end_header_id|>\n\n' + content)
-         }
-     }
      const requester = new Requester(this)
      requester.stream({
          path: 'completion',
          onEnd: params.onEnd || (() => null),
          onMessage: e => {
-             params.onMessage({
-                 message: this.config.autoConvertTraditionalChinese ? tify(e.content) : e.content
-             })
+             const message = this.config.autoConvertTraditionalChinese ? tify(e.content) : e.content
+             params.onMessage(message)
          },
          onWarn: params.onWarn || (() => null),
          onError: params.onError || (() => null),
-         data: {
-             ...(params.options || {}),
-             prompt: this.config.autoConvertTraditionalChinese ? sify(prompts.join('\n')) : prompts.join('\n'),
-             stream: true
+         data: async() => {
+             const props = await this.getProp.run()
+             const template = new Template(props.chat_template)
+             const prompt = template.render({
+                 bos_token: props.bos_token,
+                 messages: params.messages
+             }).slice(0, props.eos_token.length * -1 - 1)
+             return {
+                 ...(params.options || {}),
+                 prompt: this.config.autoConvertTraditionalChinese ? sify(prompt) : prompt,
+                 stream: true
+             }
          }
      })
      return requester.export()
@@ -290,9 +292,8 @@
      onMessage: e => {
          let content = e.choices[0].delta.content
          if (content) {
-             params.onMessage({
-                 message: this.config.autoConvertTraditionalChinese ? tify(content) : content
-             })
+             const message = this.config.autoConvertTraditionalChinese ? tify(content) : content
+             params.onMessage(message)
          }
      },
      onWarn: params.onWarn || (() => null),
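The net effect of these llama3.cpp changes: the hard-coded Llama-3 <|start_header_id|> prompt assembly is gone. The completion class now fetches the model's own chat_template plus bos/eos tokens from llama.cpp's GET /props endpoint (cached via power-helper's Once) and renders the prompt with @huggingface/jinja, then trims the trailing eos token (plus one character) from the rendered string. A standalone sketch of that rendering step, with illustrative stand-in values rather than a real /props response:

    import { Template } from '@huggingface/jinja'

    // Illustrative stand-ins for what GET `${baseUrl}/props` would return.
    const props = {
        chat_template: '{{ bos_token }}{% for m in messages %}<|start_header_id|>{{ m.role }}<|end_header_id|>\n\n{{ m.content }}<|eot_id|>{% endfor %}\n',
        bos_token: '<|begin_of_text|>',
        eos_token: '<|eot_id|>'
    }

    // Render, then trim the trailing eos token plus one trailing character,
    // exactly as the new code does before posting to /completion.
    const prompt = new Template(props.chat_template)
        .render({ bos_token: props.bos_token, messages: [{ role: 'user', content: 'hi' }] })
        .slice(0, props.eos_token.length * -1 - 1)

    console.log(prompt)
    // <|begin_of_text|><|start_header_id|>user<|end_header_id|>
    //
    // hi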
@@ -35,8 +35,8 @@ export type Config = {
       */
      n: number
      /**
-      * @zh 選擇運行的模型,建議: 'gpt-4' | 'gpt-3.5-turbo' | 'gpt-4-turbo' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1' | 'o1-mini'
-      * @en What model to use, recommended: 'gpt-4' | 'gpt-3.5-turbo' | 'gpt-4-turbo' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1' | 'o1-mini'
+      * @zh 選擇運行的模型
+      * @en The model to use for this chat completion.
       */
      model: string
      /**
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "ctod",
-     "version": "0.8.2",
+     "version": "0.8.4",
      "description": "CtoD Is Chat To Data Utils.",
      "main": "./dist/index.js",
      "type": "module",
@@ -26,9 +26,11 @@
      },
      "homepage": "https://github.com/KHC-ZhiHao/ctod#readme",
      "devDependencies": {
+         "@anthropic-ai/sdk": "^0.39.0",
          "@babel/core": "^7.4.5",
          "@babel/preset-env": "^7.4.5",
          "@google/generative-ai": "^0.21.0",
+         "@stylistic/eslint-plugin": "^2.12.1",
          "@types/chai": "^4.2.22",
          "@types/inquirer": "8.2.5",
          "@types/jsdom": "^16.2.13",
@@ -50,16 +52,16 @@
          "nyc": "^15.1.0",
          "ts-loader": "^9.3.1",
          "typescript": "^4.7.4",
-         "typescript-eslint": "^8.18.1",
-         "@stylistic/eslint-plugin": "^2.12.1"
+         "typescript-eslint": "^8.18.1"
      },
      "dependencies": {
+         "@huggingface/jinja": "^0.3.3",
          "@sodaru/yup-to-json-schema": "^2.0.1",
          "axios": "^1.4.0",
          "chinese-conv": "^3.2.2",
          "handlebars": "^4.7.7",
          "json5": "^2.2.3",
-         "power-helper": "^0.8.0",
+         "power-helper": "^0.8.1",
          "yup": "^1.4.0"
      }
  }
@@ -0,0 +1 @@
+ export {};
@@ -5,6 +5,7 @@ export { validateToJsonSchema, defineYupSchema } from './utils/validate';
  export { OpenAICtodService } from './service/openai';
  export { Llama3CppCtodService } from './service/llama3.cpp';
  export { GoogleCtodService } from './service/google';
+ export { AnthropicCtodService } from './service/anthropic';
  export { TextParser } from './core/parser';
  export { ChatBroker } from './broker/chat';
  export { ChatBrokerPlugin } from './core/plugin';
package/dist/service/anthropic/chat.d.ts ADDED
@@ -0,0 +1,52 @@
+ import { AnthropicCtodService } from './index';
+ import { PromiseResponseType } from '../../types';
+ export type Message = {
+     role: string;
+     content: string;
+ };
+ export type Config = {
+     /**
+      * @zh 選擇運行的模型。
+      * @en What model to use.
+      */
+     model: string;
+     maxTokens: number;
+ };
+ export declare class AnthropicChat {
+     anthropic: AnthropicCtodService;
+     config: Config;
+     constructor(anthropic: AnthropicCtodService);
+     /**
+      * @zh 改變對話的一些設定
+      * @en Change some settings of the conversation
+      */
+     setConfig(options: Partial<Config>): void;
+     /**
+      * Splits the system message out of the message list.
+      */
+     private translateMessages;
+     /**
+      * @zh 進行對話,並且以結構化的方式輸出
+      * @en Talk to the AI and output in a structured way
+      */
+     chatAndStructure(messages: Message[], jsonSchema: any): Promise<string>;
+     /**
+      * @zh 進行對話
+      * @en Talk to the AI
+      */
+     talk(messages?: Message[]): Promise<string>;
+     /**
+      * @zh 進行對話,並且以串流的方式輸出
+      * @en Talk to the AI and output in a streaming way
+      */
+     talkStream(params: {
+         messages: Message[];
+         onMessage: (_message: string) => void;
+         onEnd: () => void;
+         onWarn: (_warn: any) => void;
+         onError: (_error: any) => void;
+     }): {
+         cancel: () => null;
+     };
+ }
+ export type AnthropicChatTalkResponse = PromiseResponseType<AnthropicChat['talk']>;
package/dist/service/anthropic/index.d.ts ADDED
@@ -0,0 +1,11 @@
+ import { Config, AnthropicChat } from './chat';
+ import type { Anthropic } from '@anthropic-ai/sdk';
+ export declare class AnthropicCtodService {
+     anthropicSdk: Anthropic;
+     constructor(anthropicSdk: Anthropic);
+     static createChatRequestWithJsonSchema(params: {
+         anthropicSdk: Anthropic;
+         config?: Partial<Config>;
+     }): (messages: any[], { schema }: any) => Promise<string>;
+     createChat(): AnthropicChat;
+ }
@@ -10,14 +10,13 @@ export type Config = {
      autoConvertTraditionalChinese: boolean;
  };
  type Stream = {
-     onMessage: (data: {
-         message: string;
-     }) => void;
+     onMessage: (message: string) => void;
      onEnd?: () => void;
      onWarn?: (error: any) => void;
      onError?: (error: any) => void;
  };
  export declare class Llama3CppCompletion {
+     private getProp;
      core: Llama3CppCtodService;
      config: Config;
      constructor(core: Llama3CppCtodService);
@@ -31,8 +31,8 @@ export type Config = {
       */
      n: number;
      /**
-      * @zh 選擇運行的模型,建議: 'gpt-4' | 'gpt-3.5-turbo' | 'gpt-4-turbo' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1' | 'o1-mini'
-      * @en What model to use, recommended: 'gpt-4' | 'gpt-3.5-turbo' | 'gpt-4-turbo' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1' | 'o1-mini'
+      * @zh 選擇運行的模型
+      * @en The model to use for this chat completion.
       */
      model: string;
      /**