ctod 0.7.0 → 0.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/.api-key +1 -0
  2. package/.nyc_output/42919e68-b472-4a5d-b2d3-5d5153f28467.json +1 -0
  3. package/.nyc_output/processinfo/42919e68-b472-4a5d-b2d3-5d5153f28467.json +1 -0
  4. package/.nyc_output/processinfo/index.json +1 -0
  5. package/.output/.output/stores/一場期待已久的和解/cover-0.png +1 -0
  6. package/.output/.output/stores/一場期待已久的和解/cover-1.png +1 -0
  7. package/.output/.output/stores/一場期待已久的和解/story-config.json +4 -0
  8. package/.output/.output/stores/一場期待已久的和解/story.json +22 -0
  9. package/.output/.output/stores/從黑暗走向光明/cover-0.png +1 -0
  10. package/.output/.output/stores/從黑暗走向光明/cover-1.png +1 -0
  11. package/.output/.output/stores/從黑暗走向光明/story-config.json +4 -0
  12. package/.output/.output/stores/從黑暗走向光明/story.json +24 -0
  13. package/.output/.output/stores/破碎的幻象/cover-0.png +1 -0
  14. package/.output/.output/stores/破碎的幻象/cover-1.png +1 -0
  15. package/.output/.output/stores/破碎的幻象/story-config.json +4 -0
  16. package/.output/.output/stores/破碎的幻象/story.json +24 -0
  17. package/.output/.output/stores/藝術家的靈感/cover-0.png +1 -0
  18. package/.output/.output/stores/藝術家的靈感/cover-1.png +1 -0
  19. package/.output/.output/stores/藝術家的靈感/story-config.json +4 -0
  20. package/.output/.output/stores/藝術家的靈感/story.json +28 -0
  21. package/.output/.output/stores/記憶商人/cover-0.png +1 -0
  22. package/.output/.output/stores/記憶商人/cover-1.png +1 -0
  23. package/.output/.output/stores/記憶商人/story-config.json +4 -0
  24. package/.output/.output/stores/記憶商人/story.json +18 -0
  25. package/.output/.output/stores/設計與開發的挑戰/content.json +22 -0
  26. package/.output/.output/talks/以笑話為基礎的面試/content.json +30 -0
  27. package/.output/.output/talks/以笑話為基礎的面試/cover-1684055229695.png +0 -0
  28. package/.output/.output/talks/在政治候選人辯論中的說服性溝通/content.json +30 -0
  29. package/.output/.output/talks/在政治候選人辯論中的說服性溝通/cover-1684056611678.png +0 -0
  30. package/.output/.output/talks/放鬆的候選人進行政治辯論/content.json +36 -0
  31. package/.output/.output/talks/放鬆的候選人進行政治辯論/cover-1684055140609.png +0 -0
  32. package/.output/.output/talks/政治笑話/content.json +30 -0
  33. package/.output/.output/talks/政治笑話/cover-1684056246465.png +0 -0
  34. package/.output/.output/talks/詼諧家長教師會議/content.json +26 -0
  35. package/.output/.output/talks/詼諧家長教師會議/cover-1685785935121.png +0 -0
  36. package/.output/.output/talks/財務顧問諮詢/content.json +26 -0
  37. package/.output/.output/talks/財務顧問諮詢/cover-1685785115833.png +0 -0
  38. package/.output/.output/talks/醫生和病人討論悲傷/content.json +32 -0
  39. package/.output/.output/talks/醫生和病人討論悲傷/cover-1684055075942.png +0 -0
  40. package/dist/index.js +1 -1
  41. package/package.json +1 -1
  42. package/examples/basic.ts +0 -89
  43. package/examples/llama.cpp.ts +0 -56
  44. package/examples/plugin.ts +0 -118
  45. package/lib/broker/chat.ts +0 -435
  46. package/lib/core/parser.ts +0 -62
  47. package/lib/core/plugin.ts +0 -46
  48. package/lib/core/translator.ts +0 -115
  49. package/lib/ctod.ts +0 -71
  50. package/lib/index.ts +0 -41
  51. package/lib/plugins/index.ts +0 -38
  52. package/lib/plugins/limiter.ts +0 -103
  53. package/lib/plugins/print-log.ts +0 -35
  54. package/lib/plugins/retry.ts +0 -25
  55. package/lib/plugins/role.ts +0 -28
  56. package/lib/service/llama3.cpp/completion.ts +0 -313
  57. package/lib/service/llama3.cpp/index.ts +0 -53
  58. package/lib/service/openai/chat.ts +0 -244
  59. package/lib/service/openai/images-generation.ts +0 -64
  60. package/lib/service/openai/index.ts +0 -97
  61. package/lib/service/openai/vision.ts +0 -111
  62. package/lib/shims.d.ts +0 -4
  63. package/lib/templates.ts +0 -71
  64. package/lib/types.ts +0 -4
  65. package/lib/utils/error.ts +0 -14
  66. package/lib/utils/validate.ts +0 -64
  67. package/logo.ai +2 -1141
  68. package/logo.png +0 -0
  69. package/types/examples/basic.d.ts +0 -1
  70. package/types/examples/chat-demo.d.ts +0 -2
  71. package/types/examples/chat-for-llama.cpp-demo.d.ts +0 -2
  72. package/types/examples/chat-with-json-schema-demo.d.ts +0 -2
  73. package/types/examples/llama.cpp.d.ts +0 -2
  74. package/types/examples/plugin-demo.d.ts +0 -2
  75. package/types/examples/plugin.d.ts +0 -2
  76. package/types/examples/stream-for-llama.cpp-demo.d.ts +0 -2
  77. package/types/examples/vision-demo.d.ts +0 -2
  78. package/types/lib/broker/chat.d.ts +0 -150
  79. package/types/lib/core/parser.d.ts +0 -32
  80. package/types/lib/core/plugin.d.ts +0 -34
  81. package/types/lib/core/translator.d.ts +0 -67
  82. package/types/lib/ctod.d.ts +0 -32
  83. package/types/lib/index.d.ts +0 -34
  84. package/types/lib/plugins/index.d.ts +0 -47
  85. package/types/lib/plugins/limiter.d.ts +0 -36
  86. package/types/lib/plugins/print-log.d.ts +0 -5
  87. package/types/lib/plugins/retry.d.ts +0 -6
  88. package/types/lib/plugins/role.d.ts +0 -5
  89. package/types/lib/service/llama3.cpp/completion.d.ts +0 -61
  90. package/types/lib/service/llama3.cpp/index.d.ts +0 -19
  91. package/types/lib/service/openai/chat.d.ts +0 -110
  92. package/types/lib/service/openai/completion.d.ts +0 -59
  93. package/types/lib/service/openai/images-generation.d.ts +0 -35
  94. package/types/lib/service/openai/index.d.ts +0 -29
  95. package/types/lib/service/openai/vision.d.ts +0 -74
  96. package/types/lib/templates.d.ts +0 -20
  97. package/types/lib/types.d.ts +0 -1
  98. package/types/lib/utils/error.d.ts +0 -11
  99. package/types/lib/utils/validate.d.ts +0 -16
package/lib/service/llama3.cpp/completion.ts (deleted)
@@ -1,313 +0,0 @@
-import { Llama3Cpp } from './index'
-import { flow } from 'power-helper'
-import { tify, sify } from 'chinese-conv'
-
-type Message = {
-    role: string
-    content: string
-}
-
-type Options = any
-
-export type Config = {
-    baseUrl: string
-    headers: Record<string, string>
-    autoConvertTraditionalChinese: boolean
-}
-
-type Stream = {
-    onMessage: (data: { message: string }) => void
-    onEnd?: () => void
-    onWarn?: (error: any) => void
-    onError?: (error: any) => void
-}
-
-class Requester {
-    private core: Llama3CppCompletion
-    private streamAbortControllers: {
-        id: string
-        controller: AbortController
-    }[] = []
-
-    constructor(core: Llama3CppCompletion) {
-        this.core = core
-    }
-
-    private createAbortController() {
-        const streamAbortController = new AbortController()
-        const streamAbortControllerId = flow.createUuid()
-        this.streamAbortControllers.push({
-            id: streamAbortControllerId,
-            controller: streamAbortController
-        })
-        return {
-            signal: streamAbortController.signal,
-            controllerId: streamAbortControllerId
-        }
-    }
-
-    private removeAbortController(streamAbortControllerId: string) {
-        this.streamAbortControllers = this.streamAbortControllers.filter(e => e.id !== streamAbortControllerId)
-    }
-
-    async stream(params: {
-        path: string
-        data: any
-        onMessage: (data: any) => void
-        onEnd: () => void
-        onWarn: (error: any) => void
-        onError: (error: any) => void
-    }) {
-        const { signal, controllerId } = this.createAbortController()
-        const end = () => {
-            this.removeAbortController(controllerId)
-            params.onEnd()
-        }
-        const reader = async(response: Response) => {
-            if (response.body) {
-                let reader = response.body.getReader()
-                let done = false
-                let chunk = ''
-                while (!done) {
-                    const { value, done: readerDone } = await reader.read()
-                    if (value) {
-                        chunk += new TextDecoder('utf-8').decode(value)
-                        const payloads = chunk.split('\n\n')
-                        chunk = payloads.pop() || ''
-                        payloads.forEach(payload => {
-                            if (payload.includes('[DONE]')) {
-                                done = true
-                            }
-                            if (payload.startsWith('data:')) {
-                                try {
-                                    const data = JSON.parse(payload.replace('data: ', ''))
-                                    params.onMessage(data)
-                                } catch (error) {
-                                    params.onWarn(error)
-                                }
-                            }
-                        })
-                    }
-                    if (readerDone) {
-                        done = true
-                    }
-                }
-                end()
-            } else {
-                params.onError(new Error('Body not found.'))
-            }
-        }
-        fetch(`${this.core.config.baseUrl}/${params.path}`, {
-            method: 'POST',
-            body: JSON.stringify(params.data),
-            signal,
-            headers: {
-                'Content-Type': 'application/json',
-                ...this.core.config.headers
-            }
-        })
-            .then(reader)
-            .catch(error => {
-                if (error instanceof Error && error.message.includes('The user aborted a request')) {
-                    end()
-                } else {
-                    params.onError(error)
-                }
-            })
-    }
-
-    async fetch(params: {
-        path: string
-        data: any
-    }) {
-        const { signal, controllerId } = this.createAbortController()
-        try {
-            const result = await this.core.core._axios.post(`${this.core.config.baseUrl}/${params.path}`, params.data, {
-                signal,
-                headers: {
-                    'Content-Type': 'application/json',
-                    ...this.core.config.headers
-                }
-            })
-            return {
-                data: result.data
-            }
-        } finally {
-            this.removeAbortController(controllerId)
-        }
-    }
-
-    cancel() {
-        this.streamAbortControllers.forEach(e => e.controller.abort())
-        this.streamAbortControllers = []
-    }
-
-    export() {
-        return {
-            cancel: this.cancel.bind(this)
-        }
-    }
-}
-
-export class Llama3CppCompletion {
-    core: Llama3Cpp
-    config: Config = {
-        baseUrl: '',
-        headers: {},
-        autoConvertTraditionalChinese: true
-    }
-
-    constructor(core: Llama3Cpp) {
-        this.core = core
-    }
-
-    setConfig(config: Partial<Config>) {
-        this.config = {
-            ...this.config,
-            ...config
-        }
-    }
-
-    completion(params: {
-        options?: Options
-        messages: Message[]
-    }) {
-        const prompts: string[] = []
-        for (let { role, content } of params.messages) {
-            if (role === 'system') {
-                prompts.push(`<|start_header_id|>system<|end_header_id|>\n\n${content}\n\n`)
-            }
-            if (role === 'user') {
-                prompts.push(`<|start_header_id|>user<|end_header_id|>\n\n${content?.replaceAll('\n', '\\n') ?? ''}`)
-            }
-            if (role === 'assistant') {
-                prompts.push('<|start_header_id|>assistant<|end_header_id|>\n\n' + content)
-            }
-        }
-        const lastMessage = params.messages.at(-1) || ''
-        const requester = new Requester(this)
-        return {
-            ...requester.export(),
-            run: async(): Promise<{
-                message: string
-                fullMessage: string
-            }> => {
-                const result = await requester.fetch({
-                    path: 'completion',
-                    data: {
-                        ...(params.options || {}),
-                        prompt: this.config.autoConvertTraditionalChinese ? sify(prompts.join('\n')) : prompts.join('\n')
-                    }
-                })
-                const message = this.config.autoConvertTraditionalChinese ? tify(result.data.content) : result.data.content
-                return {
-                    message,
-                    fullMessage: `${lastMessage}${message}`
-                }
-            }
-        }
-    }
-
-    completionStream(params: Stream & {
-        messages: Message[]
-        options?: Options
-    }) {
-        const prompts: string[] = []
-        for (let { role, content } of params.messages) {
-            if (role === 'system') {
-                prompts.push(`<|start_header_id|>system<|end_header_id|>\n\n${content}\n\n`)
-            }
-            if (role === 'user') {
-                prompts.push(`<|start_header_id|>user<|end_header_id|>\n\n${content?.replaceAll('\n', '\\n') ?? ''}`)
-            }
-            if (role === 'assistant') {
-                prompts.push('<|start_header_id|>assistant<|end_header_id|>\n\n' + content)
-            }
-        }
-        const requester = new Requester(this)
-        requester.stream({
-            path: 'completion',
-            onEnd: params.onEnd || (() => null),
-            onMessage: e => {
-                params.onMessage({
-                    message: this.config.autoConvertTraditionalChinese ? tify(e.content) : e.content
-                })
-            },
-            onWarn: params.onWarn || (() => null),
-            onError: params.onError || (() => null),
-            data: {
-                ...(params.options || {}),
-                prompt: this.config.autoConvertTraditionalChinese ? sify(prompts.join('\n')) : prompts.join('\n'),
-                stream: true
-            }
-        })
-        return requester.export()
-    }
-
-    talk(params: {
-        options?: Options
-        messages: Message[]
-        response_format?: {
-            type: 'json_object',
-            schema: any
-        }
-    }) {
-        const requester = new Requester(this)
-        return {
-            ...requester.export(),
-            run: async(): Promise<{
-                message: string
-            }> => {
-                const result = await requester.fetch({
-                    path: 'v1/chat/completions',
-                    data: {
-                        ...(params.options || {}),
-                        response_format: params.response_format,
-                        messages: params.messages.map(e => {
-                            return {
-                                role: e.role,
-                                content: this.config.autoConvertTraditionalChinese ? sify(e.content) : e.content
-                            }
-                        })
-                    }
-                })
-                const content = result.data.choices[0].message.content || ''
-                return {
-                    message: this.config.autoConvertTraditionalChinese ? tify(content) : content
-                }
-            }
-        }
-    }
-
-    talkStream(params: Stream & {
-        options?: Options
-        messages: Message[]
-    }) {
-        const requester = new Requester(this)
-        requester.stream({
-            path: 'v1/chat/completions',
-            onEnd: params.onEnd || (() => null),
-            onMessage: e => {
-                let content = e.choices[0].delta.content
-                if (content) {
-                    params.onMessage({
-                        message: this.config.autoConvertTraditionalChinese ? tify(content) : content
-                    })
-                }
-            },
-            onWarn: params.onWarn || (() => null),
-            onError: params.onError || (() => null),
-            data: {
-                ...(params.options || {}),
-                stream: true,
-                messages: params.messages.map(e => {
-                    return {
-                        role: e.role,
-                        content: this.config.autoConvertTraditionalChinese ? sify(e.content) : e.content
-                    }
-                })
-            }
-        })
-        return requester.export()
-    }
-}
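Note: for orientation, a minimal, hypothetical sketch of how the streaming API removed above would have been driven. It assumes `completion` is an already-configured `Llama3CppCompletion` instance (see the `Llama3Cpp.createCompletion()` factory in the next hunk); the messages and the `n_predict` option are illustrative values that are passed straight through to the llama.cpp server.

// Hypothetical usage; `completion` is assumed to be a configured Llama3CppCompletion.
const { cancel } = completion.completionStream({
    messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Tell me a short story.' }
    ],
    options: { n_predict: 256 },
    onMessage: ({ message }) => process.stdout.write(message),
    onEnd: () => console.log('\n[done]'),
    onWarn: warn => console.warn(warn),
    onError: error => console.error(error)
})
// cancel() aborts every in-flight request started by this call.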
package/lib/service/llama3.cpp/index.ts (deleted)
@@ -1,53 +0,0 @@
-import axios, { AxiosInstance } from 'axios'
-import { sify } from 'chinese-conv'
-import { validateToJsonSchema } from '../../utils/validate'
-import { Llama3CppCompletion, Config } from './completion'
-
-export class Llama3Cpp {
-    _axios = axios.create()
-
-    static createChatRequest(params: {
-        config: Partial<Config> | (() => Promise<Partial<Config>>)
-        talkOptions?: any
-    }) {
-        return async(messages: any[], { schema, onCancel }: any) => {
-            const ll3cpp = new Llama3Cpp()
-            const chat = ll3cpp.createCompletion()
-            const config = typeof params.config === 'function' ? await params.config() : params.config
-            chat.setConfig(config)
-            let formatSchema = validateToJsonSchema(schema.output)
-            if (chat.config.autoConvertTraditionalChinese) {
-                formatSchema = JSON.parse(sify(JSON.stringify(formatSchema)))
-            }
-            const { run, cancel } = chat.talk({
-                options: params.talkOptions,
-                messages: messages,
-                response_format: {
-                    type: 'json_object',
-                    schema: formatSchema
-                }
-            })
-            onCancel(cancel)
-            const { message } = await run()
-            return message
-        }
-    }
-
-    /**
-     * @zh 如果你有需要特別設定 axios,請使用這方法。
-     * @en If you need to set axios, please use this method.
-     */
-
-    setAxios(axios: AxiosInstance) {
-        this._axios = axios
-    }
-
-    /**
-     * @zh 建立 補強/ 對話物件。
-     * @en Create completion object.
-     */
-
-    createCompletion() {
-        return new Llama3CppCompletion(this)
-    }
-}
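Note: a minimal end-to-end sketch of the removed `Llama3Cpp` entry point, based only on the two hunks above. The import path reflects where the file lived in the removed source tree, and the server address is a placeholder.

import { Llama3Cpp } from './lib/service/llama3.cpp'  // location in the removed source tree

async function main() {
    const llama = new Llama3Cpp()
    const completion = llama.createCompletion()
    completion.setConfig({ baseUrl: 'http://127.0.0.1:8080' })  // placeholder llama.cpp server URL
    const { run } = completion.completion({
        messages: [{ role: 'user', content: 'Hello' }]
    })
    const { message, fullMessage } = await run()
    console.log(message, fullMessage)
}

main()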
package/lib/service/openai/chat.ts (deleted)
@@ -1,244 +0,0 @@
-import { json } from 'power-helper'
-import { OpenAI } from './index'
-import { PromiseResponseType } from '../../types'
-
-export type ChatGPTMessage = {
-    role: 'system' | 'user' | 'assistant'
-    name?: string
-    content: string
-}
-
-type ApiResponse = {
-    id: string
-    object: string
-    created: number
-    choices: Array<{
-        index: number
-        finish_reason: string
-        message: {
-            role: 'system' | 'user' | 'assistant'
-            name?: string
-            content: string
-        }
-    }>
-    usage: {
-        prompt_tokens: number
-        completion_tokens: number
-        total_tokens: number
-    }
-}
-
-export type Config = {
-    /**
-     * @zh 一次回應數量
-     * @en How many chat completion choices to generate for each input message.
-     */
-    n: number
-    /**
-     * @zh 選擇運行的模型,16k意味著能處理長度為 16,384 的文本,32k意味著能處理長度為 32768 的文本。
-     * @en How many chat completion choices to generate for each input message.
-     */
-    model: 'gpt-4' | 'gpt-3.5-turbo' | 'gpt-4-turbo' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1' | 'o1-mini'
-    /**
-     * @zh 冒險指數,數值由 0 ~ 2 之間,越低回應越穩定。
-     * @en What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
-     */
-    temperature: number
-    /**
-     * @zh 是否強制要回傳 JSON 格式的資料
-     * @en Whether to force the return of JSON format data
-     */
-    forceJsonFormat: boolean
-    /**
-     * @zh 每次對話最多產生幾個 tokens。
-     * @en How many tokens to complete to.
-     */
-    maxTokens?: number
-}
-
-export class OpenAIChat {
-    openai: OpenAI
-    config: Config = {
-        n: 1,
-        model: 'gpt-4o',
-        temperature: 1,
-        maxTokens: undefined,
-        forceJsonFormat: true
-    }
-
-    constructor(openai: OpenAI) {
-        this.openai = openai
-    }
-
-    /**
-     * @zh 改變對話的一些設定
-     * @en Change some settings of the conversation
-     */
-
-    setConfig(options: Partial<Config>) {
-        Object.assign(this.config, options)
-    }
-
-    /**
-     * @zh 檢視內容是否符合 OpenAI 的審查
-     * @en View content for OpenAI moderation
-     */
-
-    async moderations(input: string) {
-        const result = await this.openai._axios.post<any>('https://api.openai.com/v1/moderations', {
-            input: input
-        }, {
-            headers: {
-                'Content-Type': 'application/json',
-                'Authorization': `Bearer ${this.openai._apiKey}`
-            }
-        })
-        return {
-            isSafe: result.data.results?.[0]?.flagged === false,
-            result: result.data
-        }
-    }
-
-    /**
-     * @zh 進行對話
-     * @en Talk to the AI
-     */
-
-    async talk(messages: ChatGPTMessage[] = [], options?: {
-        /** 要 forceJsonFormat 為 true 才會生效 */
-        jsonSchema?: any
-        abortController?: AbortController
-    }) {
-        const newMessages = json.jpjs(messages)
-        const isSupportJson = [
-            'gpt-4-turbo-preview',
-            'gpt-4-turbo',
-            'gpt-4o',
-            'gpt-4o-mini',
-            'gpt-3.5-turbo-1106'
-        ].includes(this.config.model)
-        let response_format: any = undefined
-        if (isSupportJson && this.config.forceJsonFormat) {
-            response_format = {
-                type: 'json_object'
-            }
-        }
-        if (isSupportJson && this.config.forceJsonFormat && options?.jsonSchema) {
-            response_format = {
-                type: 'json_schema',
-                json_schema: options.jsonSchema
-            }
-        }
-        const result = await this.openai._axios.post<ApiResponse>('https://api.openai.com/v1/chat/completions', {
-            model: this.config.model,
-            n: this.config.n,
-            messages: newMessages,
-            response_format,
-            temperature: this.config.temperature
-        }, {
-            signal: options?.abortController?.signal,
-            headers: {
-                'Content-Type': 'application/json',
-                'Authorization': `Bearer ${this.openai._apiKey}`
-            }
-        })
-        const choices = result.data.choices || []
-        const message = choices[0]?.message || {
-            role: 'assistant',
-            content: ''
-        }
-        newMessages.push(message)
-        return {
-            id: result?.data.id as string,
-            text: message.content as string,
-            newMessages,
-            isDone: choices[0]?.finish_reason === 'stop',
-            apiReseponse: result.data
-        }
-    }
-
-    talkStream(params: {
-        messages: any[]
-        onMessage: (message: string) => void
-        onEnd: () => void
-        onWarn: (warn: any) => void
-        onError: (error: any) => void
-    }) {
-        const controller = new AbortController()
-        fetch('https://api.openai.com/v1/chat/completions', {
-            method: 'POST',
-            headers: {
-                'Content-Type': 'application/json',
-                Authorization: `Bearer ${this.openai._apiKey}`
-            },
-            body: JSON.stringify({
-                model: this.config.model,
-                stream: true,
-                messages: params.messages
-            }),
-            signal: controller.signal
-        }).then(async response => {
-            // eslint-disable-next-line no-undef
-            const reader = response.body?.pipeThrough(new TextDecoderStream()).getReader()
-            if (!reader) {
-                throw new Error('Can not get reader')
-            }
-            // eslint-disable-next-line no-constant-condition
-            while (true) {
-                const { value, done } = await reader.read()
-                if (done) {
-                    break
-                }
-                const items = value.split('\n')
-                for (let item of items) {
-                    if (item.length === 0) {
-                        continue
-                    }
-                    if (item.startsWith(':')) {
-                        continue
-                    }
-                    if (item === 'data: [DONE]') {
-                        params.onEnd()
-                        break
-                    }
-                    try {
-                        const result = JSON.parse(item.substring(6))
-                        const content = result.choices[0].delta.content
-                        params.onMessage(content)
-                    } catch (error) {
-                        params.onWarn(error)
-                    }
-                }
-            }
-        }).catch(error => {
-            if (error.name === 'AbortError') {
-                params.onEnd()
-            } else {
-                params.onError(error)
-            }
-        })
-        return {
-            cancel: () => controller.abort()
-        }
-    }
-
-    /**
-     * @zh 開啟持續性對話
-     */
-
-    async keepTalk(prompt: string | string[], oldMessages: ChatGPTMessage[] = []) {
-        const result = await this.talk([
-            ...oldMessages,
-            {
-                role: 'user',
-                content: Array.isArray(prompt) ? prompt.join('\n') : prompt
-            }
-        ])
-        return {
-            result,
-            nextTalk: (prompt: string | string[]) => this.keepTalk(prompt, result.newMessages)
-        }
-    }
-}
-
-export type OpenAIChatTalkResponse = PromiseResponseType<OpenAIChat['talk']>
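Note: a hypothetical sketch of the removed `OpenAIChat` API. It assumes `openai` is an instance of the `OpenAI` wrapper from the removed `lib/service/openai/index.ts` (not shown in this excerpt) exposing `_axios` and `_apiKey`, as the code above expects; the prompts are illustrative.

import { OpenAIChat } from './chat'  // relative to the removed lib/service/openai directory

const chat = new OpenAIChat(openai)  // `openai` is an assumed, pre-built OpenAI wrapper instance
chat.setConfig({ model: 'gpt-4o-mini', temperature: 0.2, forceJsonFormat: false })

const { text, newMessages, isDone } = await chat.talk([
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Summarize this diff in one sentence.' }
])

// keepTalk() threads a conversation: each call returns a nextTalk() bound to the accumulated messages.
const followUp = await chat.keepTalk('Now in one word.', newMessages)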
package/lib/service/openai/images-generation.ts (deleted)
@@ -1,64 +0,0 @@
-import { OpenAI } from './index'
-
-type ApiResponse = {
-    created: string
-    data: {
-        b64_json: string
-    }[]
-}
-
-type Config = {
-    /**
-     * @zh 模型,支援 dall-e-2 和 dall-e-3
-     * @en Model, support dall-e-2 and dall-e-3
-     */
-    model: 'dall-e-2' | 'dall-e-3'
-    /**
-     * @zh 解析度,例如 1024x1024
-     * @en Resolution, for example 1024x1024
-     */
-    size: `${number}x${number}`
-}
-
-export class OpenAIImagesGeneration {
-    private openai: OpenAI
-    private config: Config = {
-        model: 'dall-e-2',
-        size: '1024x1024'
-    }
-
-    constructor(openai: OpenAI) {
-        this.openai = openai
-    }
-
-    /**
-     * @zh 改變對話的一些設定
-     * @en Change some settings of the conversation
-     */
-
-    setConfig(options: Partial<Config>) {
-        Object.assign(this.config, options)
-    }
-
-    /**
-     * @zh 產生圖片
-     * @en Generate image
-     */
-
-    async create(prompt: string) {
-        const result = await this.openai._axios.post<ApiResponse>('https://api.openai.com/v1/images/generations', {
-            prompt,
-            n: 1,
-            size: this.config.size,
-            model: this.config.model,
-            response_format: 'b64_json'
-        }, {
-            timeout: 1000 * 60 * 5,
-            headers: {
-                'Content-Type': 'application/json',
-                'Authorization': `Bearer ${this.openai._apiKey}`
-            }
-        })
-        return result.data
-    }
-}
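Note: likewise, a hypothetical sketch of the removed image-generation helper, again assuming a pre-built `openai` wrapper instance; the prompt and model choice are illustrative.

import { OpenAIImagesGeneration } from './images-generation'  // relative to the removed lib/service/openai directory

const images = new OpenAIImagesGeneration(openai)  // `openai` is an assumed, pre-built OpenAI wrapper instance
images.setConfig({ model: 'dall-e-3', size: '1024x1024' })

const result = await images.create('A watercolor fox reading a book')
const base64Png = result.data[0].b64_json  // images come back base64-encoded (response_format: 'b64_json')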