@leikeduntech/leiai-js 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cli.js +146 -0
- package/build/index.d.ts +513 -0
- package/build/index.js +681 -0
- package/build/index.js.map +1 -0
- package/package.json +44 -0
- package/readme.md +564 -0
package/bin/cli.js
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import crypto from 'node:crypto'
|
|
3
|
+
|
|
4
|
+
import * as url from 'url'
|
|
5
|
+
import { cac } from 'cac'
|
|
6
|
+
import Conf from 'conf'
|
|
7
|
+
import { readPackageUp } from 'read-pkg-up'
|
|
8
|
+
|
|
9
|
+
import { ChatGPTAPI } from '../build/index.js'
|
|
10
|
+
|
|
11
|
+
/**
 * CLI entry point: parses arguments with cac, wires the local conversation
 * cache (conf) into a ChatGPTAPI instance, and sends the prompt.
 *
 * Fixes vs. previous revision:
 *  - `--store` no longer claims the `-s` short flag, which collided with
 *    `-s, --stream` (the later registration shadowed the earlier one in cac).
 *  - `--apiOrg` help text corrected (it said "OpenAI API key").
 */
async function main() {
  // Resolve this package's version so `--version` reports the real release.
  const dirname = url.fileURLToPath(new URL('.', import.meta.url))
  const pkg = await readPackageUp({ cwd: dirname })
  const version = (pkg && pkg.packageJson && pkg.packageJson.version) || '4'

  // Persistent key-value store used as the local message cache.
  const config = new Conf({ projectName: 'chatgpt' })

  const cli = cac('chatgpt')
  cli
    .command('<prompt>', 'Ask ChatGPT a question')
    .option('-c, --continue', 'Continue last conversation', {
      default: false
    })
    .option('-d, --debug', 'Enables debug logging', {
      default: false
    })
    .option('-s, --stream', 'Streams the response', {
      default: true
    })
    // No short alias here: `-s` is reserved for `--stream` (previously both
    // options registered `-s`, so one silently shadowed the other).
    .option('--store', 'Enables the local message cache', {
      default: true
    })
    .option('-t, --timeout <timeout>', 'Timeout in milliseconds')
    .option('-k, --apiKey <apiKey>', 'OpenAI API key')
    .option('-o, --apiOrg <apiOrg>', 'OpenAI organization ID')
    .option('-m, --model <model>', 'Model (gpt-3.5-turbo, gpt-4)', {
      default: 'gpt-3.5-turbo'
    })
    .option(
      '-n, --conversationName <conversationName>',
      'Unique name for the conversation'
    )
    .action(async (prompt, options) => {
      const apiOrg = options.apiOrg || process.env.OPENAI_API_ORG
      const apiKey = options.apiKey || process.env.OPENAI_API_KEY
      if (!apiKey) {
        console.error('error: either set OPENAI_API_KEY or use --apiKey\n')
        cli.outputHelp()
        process.exit(1)
      }

      // Conversations are cached per conversation name and per API key.
      // The key is hashed so the secret itself is never written to disk.
      const apiKeyHash = hash(apiKey)
      const conversationName = options.conversationName || 'default'
      const conversationKey = `${conversationName}:${apiKeyHash}`
      const conversation =
        options.continue && options.store
          ? config.get(conversationKey, {}) || {}
          : {}
      const model = options.model
      let conversationId = undefined
      let parentMessageId = undefined

      // Resume from the most recent cached message, if any.
      if (conversation.lastMessageId) {
        const lastMessage = conversation[conversation.lastMessageId]
        if (lastMessage) {
          conversationId = lastMessage.conversationId
          parentMessageId = lastMessage.id
        }
      }

      if (options.debug) {
        console.log('using config', config.path)
      }

      const api = new ChatGPTAPI({
        apiKey,
        apiOrg,
        debug: options.debug,
        completionParams: {
          model
        },
        // Back the API's message history with the local cache when enabled.
        getMessageById: async (id) => {
          if (options.store) {
            return conversation[id]
          } else {
            return null
          }
        },
        upsertMessage: async (message) => {
          if (options.store) {
            conversation[message.id] = message
            conversation.lastMessageId = message.id
            config.set(conversationKey, conversation)
          }
        }
      })

      const res = await api.sendMessage(prompt, {
        conversationId,
        parentMessageId,
        timeoutMs: options.timeout || undefined,
        // When streaming, print partial deltas to stdout as they arrive.
        onProgress: options.stream
          ? (progress) => {
              if (progress.delta) {
                process.stdout.write(progress.delta)
              }
            }
          : undefined
      })

      if (options.stream) {
        // Partial output was already written by onProgress; end the line.
        process.stdout.write('\n')
      } else {
        console.log(res.text)
      }
    })

  cli.command('rm-cache', 'Clears the local message cache').action(() => {
    config.clear()
    console.log('cleared cache', config.path)
  })

  cli.command('ls-cache', 'Prints the local message cache path').action(() => {
    console.log(config.path)
  })

  cli.help()
  cli.version(version)

  try {
    cli.parse()
  } catch (err) {
    console.error(`error: ${err.message}\n`)
    cli.outputHelp()
    process.exit(1)
  }
}
|
|
137
|
+
|
|
138
|
+
/**
 * Hex-encoded SHA-256 digest of the input.
 *
 * Accepts either a Buffer (hashed as-is) or any value with a `toString()`
 * (stringified first), so strings and numbers hash consistently.
 */
function hash(d) {
  const input = Buffer.isBuffer(d) ? d : Buffer.from(d.toString())
  const digest = crypto.createHash('sha256').update(input)
  return digest.digest('hex')
}
|
|
142
|
+
|
|
143
|
+
// Run the CLI; any unhandled rejection is printed and the process exits
// with a non-zero status so shell scripts can detect failure.
main().catch((err) => {
  console.error(err)
  process.exit(1)
})
|
package/build/index.d.ts
ADDED
|
@@ -0,0 +1,513 @@
|
|
|
1
|
+
import Keyv from 'keyv';
|
|
2
|
+
|
|
3
|
+
/** Role of a chat participant, matching the OpenAI chat API roles. */
type Role = 'user' | 'assistant' | 'system';
/** Signature of the `fetch` implementation used for all HTTP requests. */
type FetchFn = typeof fetch;
/** Constructor options for `ChatGPTAPI`. */
type ChatGPTAPIOptions = {
    apiKey: string;
    /** @defaultValue `'https://api.openai.com'` **/
    apiBaseUrl?: string;
    apiOrg?: string;
    /** @defaultValue `false` **/
    debug?: boolean;
    /** Param overrides sent to the OpenAI chat completion API on every request. */
    completionParams?: Partial<Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>>;
    systemMessage?: string;
    /** @defaultValue `4096` **/
    maxModelTokens?: number;
    /** @defaultValue `1000` **/
    maxResponseTokens?: number;
    /** Optional Keyv store used to persist chat messages. */
    messageStore?: Keyv;
    getMessageById?: GetMessageByIdFunction;
    upsertMessage?: UpsertMessageFunction;
    fetch?: FetchFn;
};
/** Per-call options for `ChatGPTAPI.sendMessage`. */
type SendMessageOptions = {
    /** The name of a user in a multi-user chat. */
    name?: string;
    parentMessageId?: string;
    conversationId?: string;
    messageId?: string;
    stream?: boolean;
    systemMessage?: string;
    timeoutMs?: number;
    /** Invoked with the accumulating partial response while streaming. */
    onProgress?: (partialResponse: ChatMessage) => void;
    abortSignal?: AbortSignal;
    completionParams?: Partial<Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>>;
};
type MessageActionType = 'next' | 'variant';
/** Per-call options for `ChatGPTUnofficialProxyAPI.sendMessage`. */
type SendMessageBrowserOptions = {
    conversationId?: string;
    parentMessageId?: string;
    messageId?: string;
    action?: MessageActionType;
    timeoutMs?: number;
    onProgress?: (partialResponse: ChatMessage) => void;
    abortSignal?: AbortSignal;
};
/** A single chat message as stored and returned by the API wrappers. */
interface ChatMessage {
    id: string;
    text: string;
    role: Role;
    name?: string;
    /** Incremental text chunk; only present on streaming progress events. */
    delta?: string;
    /** Raw response payload from the OpenAI API, when available. */
    detail?: openai.CreateChatCompletionResponse | CreateChatCompletionStreamResponse;
    parentMessageId?: string;
    conversationId?: string;
}
/** Error thrown by the API wrappers; carries HTTP status info when known. */
declare class ChatGPTError extends Error {
    statusCode?: number;
    statusText?: string;
    isFinal?: boolean;
    accountId?: string;
}
/** Returns a chat message from a store by its ID (or null if not found). */
type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>;
/** Upserts a chat message to a store. */
type UpsertMessageFunction = (message: ChatMessage) => Promise<void>;
/** Streaming chat completion response augmented with token-usage info. */
interface CreateChatCompletionStreamResponse extends openai.CreateChatCompletionDeltaResponse {
    usage: CreateCompletionStreamResponseUsage;
}
/** Usage stats for a streamed response; counts are estimated client-side. */
interface CreateCompletionStreamResponseUsage extends openai.CreateCompletionResponseUsage {
    estimated: true;
}
|
|
72
|
+
/**
 * Request body for the unofficial backend endpoint:
 * https://chat.openapi.com/backend-api/conversation
 */
type ConversationJSONBody = {
    /** The action to take */
    action: string;
    /** The ID of the conversation */
    conversation_id?: string;
    /** Prompts to provide */
    messages: Prompt[];
    /** The model to use */
    model: string;
    /** The parent message ID */
    parent_message_id: string;
};
type Prompt = {
    /** The content of the prompt */
    content: PromptContent;
    /** The ID of the prompt */
    id: string;
    /** The role played in the prompt */
    role: Role;
};
type ContentType = 'text';
type PromptContent = {
    /** The content type of the prompt */
    content_type: ContentType;
    /** The parts to the prompt */
    parts: string[];
};
/** One server-sent event emitted by the unofficial conversation endpoint. */
type ConversationResponseEvent = {
    message?: Message;
    conversation_id?: string;
    error?: string | null;
};
/** Message shape used by the unofficial ChatGPT backend API. */
type Message = {
    id: string;
    content: MessageContent;
    role: Role;
    user: string | null;
    create_time: string | null;
    update_time: string | null;
    end_turn: null;
    weight: number;
    recipient: string;
    metadata: MessageMetadata;
};
type MessageContent = {
    content_type: string;
    parts: string[];
};
// Opaque metadata bag; the backend does not document its shape.
type MessageMetadata = any;
|
|
144
|
+
/** Hand-rolled subset of the OpenAI chat completion API request/response types. */
declare namespace openai {
    /** Shape of one streamed `chat.completion.chunk` server-sent event. */
    interface CreateChatCompletionDeltaResponse {
        id: string;
        object: 'chat.completion.chunk';
        created: number;
        model: string;
        choices: [
            {
                delta: {
                    role: Role;
                    content?: string;
                };
                index: number;
                finish_reason: string | null;
            }
        ];
    }
    interface ChatCompletionRequestMessage {
        /** The role of the author of this message. */
        role: ChatCompletionRequestMessageRoleEnum;
        /** The contents of the message */
        content: string;
        /** The name of the user in a multi-user chat */
        name?: string;
    }
    const ChatCompletionRequestMessageRoleEnum: {
        readonly System: 'system';
        readonly User: 'user';
        readonly Assistant: 'assistant';
    };
    type ChatCompletionRequestMessageRoleEnum = (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum];
    interface ChatCompletionResponseMessage {
        /** The role of the author of this message. */
        role: ChatCompletionResponseMessageRoleEnum;
        /** The contents of the message */
        content: string;
    }
    const ChatCompletionResponseMessageRoleEnum: {
        readonly System: 'system';
        readonly User: 'user';
        readonly Assistant: 'assistant';
    };
    type ChatCompletionResponseMessageRoleEnum = (typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum];
    interface CreateChatCompletionRequest {
        /** ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. */
        model: string;
        /** The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). */
        messages: Array<ChatCompletionRequestMessage>;
        /** Sampling temperature, between 0 and 2. Alter this or `top_p`, but not both. */
        temperature?: number | null;
        /** Nucleus-sampling probability mass. Alter this or `temperature`, but not both. */
        top_p?: number | null;
        /** How many chat completion choices to generate for each input message. */
        n?: number | null;
        /** If set, partial message deltas are sent as server-sent events, terminated by a `data: [DONE]` message. */
        stream?: boolean | null;
        /** Up to 4 sequences where the API will stop generating further tokens. */
        stop?: CreateChatCompletionRequestStop;
        /** Maximum number of tokens allowed for the generated answer (default: 4096 minus prompt tokens). */
        max_tokens?: number;
        /** Number between -2.0 and 2.0; positive values penalize tokens that already appear in the text. */
        presence_penalty?: number | null;
        /** Number between -2.0 and 2.0; positive values penalize tokens by their existing frequency. */
        frequency_penalty?: number | null;
        /** Map from token ID to bias value (-100..100) added to the logits before sampling. */
        logit_bias?: object | null;
        /** A unique end-user identifier that helps OpenAI monitor and detect abuse. */
        user?: string;
    }
    /** Up to 4 sequences where the API will stop generating further tokens. */
    type CreateChatCompletionRequestStop = Array<string> | string;
    /** Non-streaming chat completion response body. */
    interface CreateChatCompletionResponse {
        id: string;
        object: string;
        created: number;
        model: string;
        choices: Array<CreateChatCompletionResponseChoicesInner>;
        usage?: CreateCompletionResponseUsage;
    }
    /** One completion choice within a chat completion response. */
    interface CreateChatCompletionResponseChoicesInner {
        index?: number;
        message?: ChatCompletionResponseMessage;
        finish_reason?: string;
    }
    /** Token accounting reported by the API for a completed request. */
    interface CreateCompletionResponseUsage {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
    }
}
|
|
396
|
+
|
|
397
|
+
declare class ChatGPTAPI {
    protected _apiKey: string;
    protected _apiBaseUrl: string;
    protected _apiOrg?: string;
    protected _debug: boolean;
    /** System message prepended to every conversation. */
    protected _systemMessage: string;
    protected _completionParams: Omit<openai.CreateChatCompletionRequest, 'messages' | 'n'>;
    protected _maxModelTokens: number;
    protected _maxResponseTokens: number;
    protected _fetch: FetchFn;
    protected _getMessageById: GetMessageByIdFunction;
    protected _upsertMessage: UpsertMessageFunction;
    /** In-memory (or caller-supplied) store backing the default message lookup. */
    protected _messageStore: Keyv<ChatMessage>;
    /**
     * Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
     *
     * @param apiKey - OpenAI API key (required).
     * @param apiOrg - Optional OpenAI API organization (optional).
     * @param apiBaseUrl - Optional override for the OpenAI API base URL.
     * @param debug - Optional enables logging debugging info to stdout.
     * @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
     * @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4096.
     * @param maxResponseTokens - Optional override for the minimum number of tokens allowed for the model's response. Defaults to 1000.
     * @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
     * @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
     * @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
     * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
     */
    constructor(opts: ChatGPTAPIOptions);
    /**
     * Sends a message to the OpenAI chat completions endpoint, waits for the response
     * to resolve, and returns the response.
     *
     * If you want your response to have historical context, you must provide a valid `parentMessageId`.
     *
     * If you want to receive a stream of partial responses, use `opts.onProgress`.
     *
     * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
     *
     * @param message - The prompt message to send
     * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
     * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
     * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
     * @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
     * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
     * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
     * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
     * @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
     *
     * @returns The response from ChatGPT
     */
    sendMessage(text: string, opts?: SendMessageOptions): Promise<ChatMessage>;
    get apiKey(): string;
    set apiKey(apiKey: string);
    get apiOrg(): string;
    set apiOrg(apiOrg: string);
    /** Builds the outgoing message array and token budget for one request. */
    protected _buildMessages(text: string, opts: SendMessageOptions): Promise<{
        messages: openai.ChatCompletionRequestMessage[];
        maxTokens: number;
        numTokens: number;
    }>;
    /** Counts the tokens in `text`, used for prompt-budget accounting. */
    protected _getTokenCount(text: string): Promise<number>;
    /** Default `getMessageById` backed by the in-memory `_messageStore`. */
    protected _defaultGetMessageById(id: string): Promise<ChatMessage>;
    /** Default `upsertMessage` backed by the in-memory `_messageStore`. */
    protected _defaultUpsertMessage(message: ChatMessage): Promise<void>;
}
|
|
462
|
+
|
|
463
|
+
/** Client for the unofficial ChatGPT web API, routed through a reverse proxy. */
declare class ChatGPTUnofficialProxyAPI {
    protected _accessToken: string;
    protected _apiReverseProxyUrl: string;
    protected _debug: boolean;
    protected _model: string;
    /** Extra headers merged into every request (e.g. for proxy auth). */
    protected _headers: Record<string, string>;
    protected _fetch: FetchFn;
    /**
     * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
     */
    constructor(opts: {
        accessToken: string;
        /** @defaultValue `https://bypass.duti.tech/api/conversation` **/
        apiReverseProxyUrl?: string;
        /** @defaultValue `text-davinci-002-render-sha` **/
        model?: string;
        /** @defaultValue `false` **/
        debug?: boolean;
        /** @defaultValue `undefined` **/
        headers?: Record<string, string>;
        fetch?: FetchFn;
    });
    get accessToken(): string;
    set accessToken(value: string);
    /**
     * Sends a message to ChatGPT, waits for the response to resolve, and returns
     * the response.
     *
     * If you want your response to have historical context, you must provide a valid `parentMessageId`.
     *
     * If you want to receive a stream of partial responses, use `opts.onProgress`.
     *
     * Set `debug: true` in the constructor to log more info on the full prompt sent to the API.
     *
     * @param message - The prompt message to send
     * @param opts.conversationId - Optional ID of a conversation to continue (defaults to a random UUID)
     * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
     * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
     * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
     * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
     * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
     *
     * @returns The response from ChatGPT
     */
    sendMessage(text: string, opts?: SendMessageBrowserOptions): Promise<ChatMessage>;
}

export { ChatGPTAPI, ChatGPTAPIOptions, ChatGPTError, ChatGPTUnofficialProxyAPI, ChatMessage, ContentType, ConversationJSONBody, ConversationResponseEvent, CreateChatCompletionStreamResponse, CreateCompletionStreamResponseUsage, FetchFn, GetMessageByIdFunction, Message, MessageActionType, MessageContent, MessageMetadata, Prompt, PromptContent, Role, SendMessageBrowserOptions, SendMessageOptions, UpsertMessageFunction, openai };
|