@leikeduntech/leiai-js 2.2.0 → 2.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/index.js +0 -1
- package/package.json +2 -2
- package/build/index.js.map +0 -1
- package/readme.md +0 -564
package/build/index.js
CHANGED
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@leikeduntech/leiai-js",
|
|
3
|
-
"version": "2.
|
|
3
|
+
"version": "2.3.1",
|
|
4
4
|
"author": "liuhean",
|
|
5
5
|
"repository": {
|
|
6
6
|
"type": "git",
|
|
@@ -49,7 +49,7 @@
|
|
|
49
49
|
"p-timeout": "^6.1.1",
|
|
50
50
|
"quick-lru": "^6.1.1",
|
|
51
51
|
"read-pkg-up": "^9.1.0",
|
|
52
|
-
"@leikeduntech/spark-nodejs": "
|
|
52
|
+
"@leikeduntech/spark-nodejs": "0.3.0",
|
|
53
53
|
"uuid": "^9.0.0"
|
|
54
54
|
},
|
|
55
55
|
"devDependencies": {
|
package/build/index.js.map
DELETED
|
@@ -1 +0,0 @@
|
|
|
1
|
-
{"version":3,"sources":["../src/chatgpt-api.ts","../src/tokenizer.ts","../src/types.ts","../src/fetch.ts","../src/fetch-sse.ts","../src/stream-async-iterable.ts","../src/chatgpt-unofficial-proxy-api.ts","../src/utils.ts"],"sourcesContent":["import Keyv from 'keyv'\nimport pTimeout from 'p-timeout'\nimport QuickLRU from 'quick-lru'\nimport { v4 as uuidv4 } from 'uuid'\n\nimport * as tokenizer from './tokenizer'\nimport * as types from './types'\nimport { fetch as globalFetch } from './fetch'\nimport { fetchSSE } from './fetch-sse'\nimport {Spark} from \"@leikeduntech/spark-nodejs\";\n\nconst CHATGPT_MODEL = 'gpt-3.5-turbo'\n\nconst USER_LABEL_DEFAULT = 'User'\nconst ASSISTANT_LABEL_DEFAULT = 'ChatGPT'\n\nexport class ChatGPTAPI {\n protected _manufacturer: string\n protected _apiKey: string\n protected _apiBaseUrl: string\n protected _apiOrg?: string\n protected _debug: boolean\n\n protected _systemMessage: string\n protected _completionParams: Omit<\n types.openai.CreateChatCompletionRequest,\n 'messages' | 'n'\n >\n protected _maxModelTokens: number\n protected _maxResponseTokens: number\n protected _fetch: types.FetchFn\n\n protected _getMessageById: types.GetMessageByIdFunction\n protected _upsertMessage: types.UpsertMessageFunction\n\n protected _messageStore: Keyv<types.ChatMessage>\n\n /**\n * Creates a new client wrapper around OpenAI's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.\n *\n * @param apiKey - OpenAI API key (required).\n * @param apiOrg - Optional OpenAI API organization (optional).\n * @param apiBaseUrl - Optional override for the OpenAI API base URL.\n * @param debug - Optional enables logging debugging info to stdout.\n * @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). 
Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.\n * @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4096.\n * @param maxResponseTokens - Optional override for the minimum number of tokens allowed for the model's response. Defaults to 1000.\n * @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.\n * @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).\n * @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).\n * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.\n */\n constructor(opts: types.ChatGPTAPIOptions) {\n const {\n manufacturer = 'OpenAI',\n apiKey,\n apiOrg,\n apiBaseUrl = 'https://api.openai.com/v1',\n debug = false,\n messageStore,\n completionParams,\n systemMessage,\n maxModelTokens = 4000,\n maxResponseTokens = 2000,\n getMessageById,\n upsertMessage,\n fetch = globalFetch\n } = opts\n\n this._manufacturer = manufacturer\n this._apiKey = apiKey\n this._apiOrg = apiOrg\n this._apiBaseUrl = apiBaseUrl\n this._debug = !!debug\n this._fetch = fetch\n\n this._completionParams = {\n model: CHATGPT_MODEL,\n temperature: ['baidu','zhipu'].indexOf(manufacturer.toLowerCase()) > -1 ? 0.95 : (['xunfei'].indexOf(manufacturer.toLowerCase()) > -1 ? 0.5: 1),\n ...completionParams\n }\n\n this._systemMessage = systemMessage\n\n if (this._systemMessage === undefined) {\n const currentDate = new Date().toISOString().split('T')[0]\n this._systemMessage = `You are ChatGPT, a large language model trained by ${this._manufacturer}. 
Answer as concisely as possible.\\nKnowledge cutoff: 2021-09-01\\nCurrent date: ${currentDate}`\n }\n\n this._maxModelTokens = maxModelTokens\n this._maxResponseTokens = maxResponseTokens\n\n this._getMessageById = getMessageById ?? this._defaultGetMessageById\n this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage\n\n if (messageStore) {\n this._messageStore = messageStore\n } else {\n this._messageStore = new Keyv<types.ChatMessage, any>({\n store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })\n })\n }\n\n if (!this._apiKey) {\n throw new Error(`${this._manufacturer} missing required apiKey`)\n }\n\n if (!this._fetch) {\n throw new Error('Invalid environment; fetch is not defined')\n }\n\n if (typeof this._fetch !== 'function') {\n throw new Error('Invalid \"fetch\" is not a function')\n }\n }\n\n /**\n * Sends a message to the OpenAI chat completions endpoint, waits for the response\n * to resolve, and returns the response.\n *\n * If you want your response to have historical context, you must provide a valid `parentMessageId`.\n *\n * If you want to receive a stream of partial responses, use `opts.onProgress`.\n *\n * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. 
You can override the `systemMessage` in `opts` to customize the assistant's instructions.\n *\n * @param message - The prompt message to send\n * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)\n * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)\n * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)\n * @param opts.systemMessage - Optional override for the chat \"system message\" which acts as instructions to the model (defaults to the ChatGPT system message)\n * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)\n * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated\n * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)\n * @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.\n *\n * @returns The response from ChatGPT\n */\n async sendMessage(\n text: string,\n opts: types.SendMessageOptions = {}\n ): Promise<types.ChatMessage> {\n const {\n parentMessageId,\n messageId = uuidv4(),\n timeoutMs,\n onProgress,\n stream = onProgress ? 
true : false,\n completionParams,\n conversationId\n } = opts\n\n let { abortSignal } = opts\n\n let abortController: AbortController = null\n if (timeoutMs && !abortSignal) {\n abortController = new AbortController()\n abortSignal = abortController.signal\n }\n\n const message: types.ChatMessage = {\n role: 'user',\n id: messageId,\n conversationId,\n parentMessageId,\n text\n }\n\n const latestQuestion = message\n\n const { messages, aliyunMessage, maxTokens, numTokens, errorMessage } = await this._buildMessages(\n text,\n opts\n )\n\n if(this._debug) console.log(`typeof errorMessage ${typeof errorMessage},numTokens:${numTokens}, _maxModelTokens:${this._maxModelTokens},errorMessage: ${errorMessage}`)\n\n // 发起请求之前的校验错误拦截\n if (errorMessage !== '' && errorMessage !== null && errorMessage.length > 0) {\n return new Promise((resolve, reject) => {\n const errMsg = {\n manufacturer: this._manufacturer,\n statusCode: 400,\n message: errorMessage\n }\n return reject(errMsg)\n })\n }\n\n const result: types.ChatMessage = {\n role: 'assistant',\n id: uuidv4(),\n conversationId,\n parentMessageId: messageId,\n text: '',\n result:'',\n detail: {usage: null}\n }\n\n const responseP = new Promise<types.ChatMessage>(\n async (resolve, reject) => {\n let url = `${this._apiBaseUrl}/chat/completions`\n const headers = {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${this._apiKey}`\n }\n if (this._manufacturer.toLowerCase() === 'baidu') {\n url = this._apiBaseUrl\n delete headers.Authorization\n } else if (this._manufacturer.toLowerCase() === 'aliyun') {\n url = this._apiBaseUrl\n if (stream) headers['X-DashScope-SSE'] = 'enable'\n } else if (this._manufacturer.toLowerCase() === 'zhipu') {\n url = this._apiBaseUrl\n headers['Authorization'] = this._apiKey\n if (stream) headers['accept'] = 'text/event-stream'\n } else if (this._manufacturer.toLowerCase() === 'azure') {\n url = this._apiBaseUrl\n headers['api-key'] = this._apiKey\n delete headers.Authorization\n } 
else if (this._manufacturer.toLowerCase() === 'xunfei') {\n url = ''\n delete headers.Authorization\n }\n\n let body = {\n max_tokens: maxTokens,\n ...this._completionParams,\n ...completionParams,\n messages,\n stream\n }\n if (this._manufacturer.toLowerCase() === 'aliyun') {\n delete body.messages;\n body = Object.assign(body, {input: aliyunMessage})\n } else if (this._manufacturer.toLowerCase() === 'zhipu') {\n delete body.messages;\n body = Object.assign(body, {prompt: messages})\n }\n\n // Support multiple organizations\n // See https://platform.openai.com/docs/api-reference/authentication\n if (this._apiOrg && this._manufacturer.toLowerCase() === 'openai') {\n headers['OpenAI-Organization'] = this._apiOrg\n }\n\n if (this._debug) {\n console.log(`api url (${url}`)\n console.log(`api header (${JSON.stringify(headers)}`)\n console.log(`sendMessage (${numTokens} tokens) body: `, body)\n console.log(`sendMessage (${numTokens} tokens) message : `, aliyunMessage || messages )\n }\n\n if (this._manufacturer.toLowerCase() === 'xunfei') {\n const self = this;\n const keyList = this._apiKey.split('.'); // appId, apiKey, apiSecret\n const options = {\n secret: keyList[2], key: keyList[1], appid: keyList[0],temperature: body?.temperature, useHistory: !!parentMessageId,chatId: ''\n };\n if (message?.conversationId) options.chatId = message?.conversationId\n if (self._debug) console.log('spark options ', options)\n const sparkIns = new Spark(options)\n const url = sparkIns.chat({\n content: messages && JSON.stringify(messages),\n // onData 表示分段拿到返回结果\n onData({content, start, end, seq}){\n // content 表示分段的内容\n // start 表示是否是第一段\n // end 表示是否是最后一段\n // seq 表示序号\n if (self._debug) console.log('onData',content, start, end, seq);\n result.id = `xunfei-${Math.floor(Math.random() * 10000000000000)}`\n result.delta = content\n if (content) result.text += content\n result.role = 'assistant'\n\n onProgress?.(result)\n },\n onEnd({content, tokens, questionTokens}){\n // content 
表示完整的返回\n // token 表示返回回答的token数\n // questionTokens 表示发起对话的token数\n if (self._debug) console.log('onEnd',content, tokens, questionTokens);\n result.detail = {usage: {prompt_tokens: questionTokens, completion_tokens: tokens, total_tokens: questionTokens + tokens}}\n result.text = content.trim()\n return resolve(result)\n }\n });\n } else {\n if (stream) {\n fetchSSE(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: abortSignal,\n onMessage: (data: string) => {\n if (this._debug) {\n // console.log(typeof data, 'row data: ',data)\n }\n if (data === '[DONE]') {\n result.text = result.text.trim()\n return resolve(result)\n }\n\n try {\n const response: types.openai.CreateChatCompletionDeltaResponse =\n JSON.parse(data)\n\n if (this._manufacturer.toLowerCase() === 'baidu') {\n if (response?.is_end === true) {\n result.text += response.result.trim()\n result.detail = response\n return resolve(result)\n }\n } else if (this._manufacturer.toLowerCase() === 'azure') {\n if (response.choices[0]?.finish_reason === 'stop') {\n result.text = result.text.trim()\n return resolve(result)\n }\n } else if (this._manufacturer.toLowerCase() === 'aliyun') {\n if (response?.output?.finish_reason === 'stop') {\n result.text = response?.output.text.trim()\n return resolve(result)\n }\n } else if (this._manufacturer.toLowerCase() === 'zhipu') {\n if (response?.event === 'finish') {\n result.text += response?.data.trim()\n return resolve(result)\n }\n }\n\n if (this._manufacturer.toLowerCase() === 'aliyun') {\n if (response?.request_id) {\n result.id = response.request_id\n }\n } else {\n if (response?.id) {\n result.id = response.id\n }\n }\n\n if (response.choices?.length && ['openai','azure'].indexOf(this._manufacturer.toLowerCase()) > -1) {\n const delta = response.choices[0].delta\n result.delta = delta.content\n if (delta?.content) result.text += delta.content\n\n if (delta.role) {\n result.role = delta.role\n }\n\n result.detail = response\n 
onProgress?.(result)\n } else if (response?.result && this._manufacturer.toLowerCase() === 'baidu') {\n result.delta = response.result\n if (response?.result) result.text += response.result\n result.role = 'assistant'\n\n result.detail = response\n onProgress?.(result)\n } else if (response?.output && this._manufacturer.toLowerCase() === 'aliyun') {\n response.usage = Object.assign(response.usage, {prompt_tokens: response.usage?.input_tokens,completion_tokens: response.usage?.output_tokens, total_tokens: response.usage?.input_tokens + response.usage?.output_tokens})\n result.delta = ''\n if (response?.output?.text) result.text = response?.output?.text\n result.role = 'assistant'\n\n result.detail = response\n onProgress?.(result)\n } else if (response?.data && this._manufacturer.toLowerCase() === 'zhipu') {\n if (response.event === 'finish') {\n if (response?.meta?.usage) {\n response.usage = response?.meta?.usage\n } else {\n response.usage = {prompt_tokens: 1, completion_tokens: 1, total_tokens: 2}\n }\n }\n result.delta = response.data\n if (response?.data) result.text += response?.data\n result.role = 'assistant'\n\n result.detail = response\n onProgress?.(result)\n }\n } catch (err) {\n console.warn(`${this._manufacturer} stream SEE event unexpected error`, err)\n return reject(err)\n }\n }\n },\n this._fetch,\n this._manufacturer\n ).catch(reject)\n } else {\n try {\n const res = await this._fetch(url, {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: abortSignal\n })\n\n if (!res.ok) {\n const reason = await res.text()\n const msg = `${this._manufacturer} error ${\n res.status || res.statusText\n }: ${reason}`\n const error = new types.ChatGPTError(msg, { cause: res })\n error.statusCode = res.status\n error.statusText = res.statusText\n return reject(error)\n }\n\n const response: types.openai.CreateChatCompletionResponse =\n await res.json()\n if (this._debug) {\n console.log(response)\n }\n\n if (this._manufacturer.toLowerCase() === 
'aliyun') {\n if (response?.request_id) {\n result.id = response.request_id\n }\n } else {\n if (response?.id) {\n result.id = response.id\n }\n }\n\n if (response?.choices?.length && ['openai','azure'].indexOf(this._manufacturer.toLowerCase()) > -1) {\n const message = response.choices[0].message\n result.text = message.content\n if (message.role) {\n result.role = message.role\n }\n } else if(response?.result && this._manufacturer.toLowerCase() === 'baidu') {\n result.text = response.result\n result.role = 'assistant'\n } else if(response?.output?.text && this._manufacturer.toLowerCase() === 'aliyun') {\n result.text = response?.output?.text\n result.role = 'assistant'\n } else {\n const res = response as any\n return reject(\n new Error(\n `${this._manufacturer} error: ${\n res?.detail?.message || res?.detail?.error_msg || res?.detail || 'unknown'\n }`\n )\n )\n }\n\n result.detail = response\n\n return resolve(result)\n } catch (err) {\n return reject(err)\n }\n }\n }\n }\n ).then(async (message) => {\n if (message.detail && !message.detail.usage) {\n try {\n const promptTokens = numTokens\n let completionTokens = 0;\n if (['baidu','aliyun'].indexOf(this._manufacturer.toLowerCase()) > -1 ) {\n completionTokens = message.detail?.usage?.total_tokens;\n } else {\n completionTokens = await this._getTokenCount(message.text)\n }\n message.detail.usage = {\n prompt_tokens: promptTokens,\n completion_tokens: completionTokens,\n total_tokens: promptTokens + completionTokens,\n estimated: true\n }\n } catch (err) {\n // TODO: this should really never happen, but if it does,\n // we should handle notify the user gracefully\n }\n }\n\n return Promise.all([\n this._upsertMessage(latestQuestion),\n this._upsertMessage(message)\n ]).then(() => message)\n })\n\n if (timeoutMs) {\n if (abortController) {\n // This will be called when a timeout occurs in order for us to forcibly\n // ensure that the underlying HTTP request is aborted.\n ;(responseP as any).cancel = () => {\n 
abortController.abort()\n }\n }\n\n return pTimeout(responseP, {\n milliseconds: timeoutMs,\n message: `${this._manufacturer} timed out waiting for response`\n })\n } else {\n return responseP\n }\n }\n\n get manufacturer(): string {\n return this._manufacturer\n }\n\n set manufacturer(manufacturer: string) {\n this._manufacturer = manufacturer\n }\n\n get maxModelTokens(): number {\n return this._maxModelTokens\n }\n\n set maxModelTokens(maxModelTokens: number) {\n this._maxModelTokens = maxModelTokens\n }\n\n get maxResponseTokens(): number {\n return this._maxResponseTokens\n }\n\n set maxResponseTokens(maxResponseTokens: number) {\n this._maxResponseTokens = maxResponseTokens\n }\n\n get apiBaseUrl(): string {\n return this._apiBaseUrl\n }\n\n set apiBaseUrl(apiBaseUrl: string) {\n this._apiBaseUrl = apiBaseUrl\n }\n\n get apiKey(): string {\n return this._apiKey\n }\n\n set apiKey(apiKey: string) {\n this._apiKey = apiKey\n }\n\n get apiOrg(): string {\n return this._apiOrg\n }\n\n set apiOrg(apiOrg: string) {\n this._apiOrg = apiOrg\n }\n\n protected async _buildMessages(text: string, opts: types.SendMessageOptions) {\n const { systemMessage = this._systemMessage } = opts\n let { parentMessageId } = opts\n let errorMessage = ''\n\n const userLabel = USER_LABEL_DEFAULT\n const assistantLabel = ASSISTANT_LABEL_DEFAULT\n\n const maxNumTokens = this._maxModelTokens - this._maxResponseTokens\n let messages: types.openai.ChatCompletionRequestMessage[] = []\n\n if (systemMessage && ['openai','azure'].indexOf(this._manufacturer.toLowerCase()) > -1) {\n messages.push({\n role: 'system',\n content: systemMessage\n })\n }\n\n const systemMessageOffset = messages.length\n const userMessage:any = ['baidu','azure','zhipu', 'xunfei'].indexOf(this._manufacturer.toLowerCase()) > -1 ? [{role: 'user',content: text}] : [{role: 'user',content: text,name: opts.name}]\n let nextMessages = text\n ? 
messages.concat(userMessage)\n : messages\n let numTokens = 0\n\n do {\n const prompt = nextMessages\n .reduce((prompt, message) => {\n switch (message.role) {\n case 'system':\n return prompt.concat([`Instructions:\\n${message.content}`])\n case 'user':\n return prompt.concat([`${userLabel}:\\n${message.content}`])\n default:\n return prompt.concat([`${assistantLabel}:\\n${message.content}`])\n }\n }, [] as string[])\n .join('\\n\\n')\n\n const nextNumTokensEstimate = await this._getTokenCount(prompt)\n const isValidPrompt = nextNumTokensEstimate <= maxNumTokens\n\n if (prompt && !isValidPrompt && ['openai','azure'].indexOf(this._manufacturer.toLowerCase()) > -1) {\n break\n }\n\n messages = nextMessages\n numTokens = nextNumTokensEstimate\n\n if (!isValidPrompt && ['openai','azure'].indexOf(this._manufacturer.toLowerCase()) > -1) {\n break\n }\n\n if (!parentMessageId) {\n break\n }\n\n const parentMessage = await this._getMessageById(parentMessageId)\n if (!parentMessage) {\n break\n }\n\n const parentMessageRole = parentMessage.role || 'user'\n const parentMessageItem:any = ['baidu','azure', 'zhipu', 'xunfei'].indexOf(this._manufacturer.toLowerCase()) > -1 ? 
{\n role: parentMessageRole,\n content: parentMessage.text\n } : {\n role: parentMessageRole,\n content: parentMessage.text,\n name: parentMessage.name\n }\n\n nextMessages = nextMessages.slice(0, systemMessageOffset).concat([\n parentMessageItem,\n ...nextMessages.slice(systemMessageOffset)\n ])\n\n parentMessageId = parentMessage.parentMessageId\n } while (true)\n\n // Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens\n // for the response.\n let maxTokens = this._maxModelTokens // 只有openai的接口需要,其他随便设一个值\n if (['openai','azure'].indexOf(this._manufacturer.toLowerCase()) > -1) {\n maxTokens = Math.max(\n 1,\n Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)\n )\n } else if (numTokens > this._maxModelTokens) {\n maxTokens = this._maxModelTokens\n errorMessage = `${this._manufacturer}:当前提问上下文内容长度${numTokens}tokns超长,该模型最大提问长度为${this._maxModelTokens}tokens,请切换其他增强AI模型或减少字数或者关闭上下文历史提高单次提问长度!`\n }\n\n let aliyunMessage:any;\n if (this._manufacturer.toLowerCase() === 'aliyun') {\n aliyunMessage = {prompt: '', history: []}\n let onceMessage = {user: '', bot: ''}\n messages.forEach((item, index) => {\n if (index < messages.length -1 && index > 1) {\n if (!onceMessage.user || !onceMessage.bot) {\n if (item.role === 'user') onceMessage.user = item.content\n if (item.role === 'assistant') onceMessage.bot = item.content\n }\n if (onceMessage.user && onceMessage.bot) {\n aliyunMessage.history.push(onceMessage)\n onceMessage = {user: '', bot: ''}\n }\n } else {\n aliyunMessage.prompt = item.content\n }\n })\n }\n\n return { messages,aliyunMessage, maxTokens, numTokens, errorMessage }\n }\n\n protected async _getTokenCount(text: string) {\n // TODO: use a better fix in the tokenizer\n text = text.replace(/<\\|endoftext\\|>/g, '')\n\n return tokenizer.encode(text).length\n }\n\n protected async _defaultGetMessageById(\n id: string\n ): Promise<types.ChatMessage> {\n const res = await this._messageStore.get(id)\n return res\n }\n\n protected 
async _defaultUpsertMessage(\n message: types.ChatMessage\n ): Promise<void> {\n await this._messageStore.set(message.id, message)\n }\n}\n","import { getEncoding } from 'js-tiktoken'\n\n// TODO: make this configurable\nconst tokenizer = getEncoding('cl100k_base')\n\nexport function encode(input: string): Uint32Array {\n return new Uint32Array(tokenizer.encode(input))\n}\n","import Keyv from 'keyv'\n\nexport type Role = 'user' | 'assistant' | 'system'\n\nexport type FetchFn = typeof fetch\n\nexport type ChatGPTAPIOptions = {\n /** 模型厂商公司 **/\n manufacturer: string\n\n apiKey: string\n\n /** @defaultValue `'https://api.openai.com'` **/\n apiBaseUrl?: string\n\n apiOrg?: string\n\n /** @defaultValue `false` **/\n debug?: boolean\n\n completionParams?: Partial<\n Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>\n >\n\n systemMessage?: string\n\n /** @defaultValue `4096` **/\n maxModelTokens?: number\n\n /** @defaultValue `1000` **/\n maxResponseTokens?: number\n\n messageStore?: Keyv\n getMessageById?: GetMessageByIdFunction\n upsertMessage?: UpsertMessageFunction\n\n fetch?: FetchFn\n}\n\nexport type SendMessageOptions = {\n /** The name of a user in a multi-user chat. 
*/\n name?: string\n parentMessageId?: string\n conversationId?: string\n messageId?: string\n stream?: boolean\n systemMessage?: string\n timeoutMs?: number\n onProgress?: (partialResponse: ChatMessage) => void\n abortSignal?: AbortSignal\n completionParams?: Partial<\n Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>\n >\n}\n\nexport type MessageActionType = 'next' | 'variant'\n\nexport type SendMessageBrowserOptions = {\n conversationId?: string\n parentMessageId?: string\n messageId?: string\n action?: MessageActionType\n timeoutMs?: number\n onProgress?: (partialResponse: ChatMessage) => void\n abortSignal?: AbortSignal\n}\n\nexport interface ChatMessage {\n id?: string\n text?: string\n role?: Role\n name?: string\n delta?: string\n detail?:\n | openai.CreateChatCompletionResponse\n | CreateChatCompletionStreamResponse\n\n // relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI\n parentMessageId?: string\n\n // only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)\n conversationId?: string\n object?: string\n created?: number\n sentence_id?: number\n is_end?: boolean\n is_truncated?: boolean\n result?: string\n need_clear_history?: boolean\n usage?: object\n}\n\nexport class ChatGPTError extends Error {\n statusCode?: number\n statusText?: string\n isFinal?: boolean\n accountId?: string\n}\n\n/** Returns a chat message from a store by it's ID (or null if not found). */\nexport type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>\n\n/** Upserts a chat message to a store. 
*/\nexport type UpsertMessageFunction = (message: ChatMessage) => Promise<void>\n\nexport interface CreateChatCompletionStreamResponse\n extends openai.CreateChatCompletionDeltaResponse {\n usage: CreateCompletionStreamResponseUsage\n}\n\nexport interface CreateCompletionStreamResponseUsage\n extends openai.CreateCompletionResponseUsage {\n estimated: true\n}\n\n/**\n * https://chat.openapi.com/backend-api/conversation\n */\nexport type ConversationJSONBody = {\n /**\n * The action to take\n */\n action: string\n\n /**\n * The ID of the conversation\n */\n conversation_id?: string\n\n /**\n * Prompts to provide\n */\n messages: Prompt[]\n\n /**\n * The model to use\n */\n model: string\n\n /**\n * The parent message ID\n */\n parent_message_id: string\n}\n\nexport type Prompt = {\n /**\n * The content of the prompt\n */\n content: PromptContent\n\n /**\n * The ID of the prompt\n */\n id: string\n\n /**\n * The role played in the prompt\n */\n role: Role\n}\n\nexport type ContentType = 'text'\n\nexport type PromptContent = {\n /**\n * The content type of the prompt\n */\n content_type: ContentType\n\n /**\n * The parts to the prompt\n */\n parts: string[]\n}\n\nexport type ConversationResponseEvent = {\n message?: Message\n conversation_id?: string\n error?: string | null\n}\n\nexport type Message = {\n id: string\n content: MessageContent\n role: Role\n user: string | null\n create_time: string | null\n update_time: string | null\n end_turn: null\n weight: number\n recipient: string\n metadata: MessageMetadata\n}\n\nexport type MessageContent = {\n content_type: string\n parts: string[]\n}\n\nexport type MessageMetadata = any\n\nexport namespace openai {\n export interface CreateChatCompletionDeltaResponse {\n id: string\n object: 'chat.completion.chunk'\n created: number\n model: string\n choices: [\n {\n delta: {\n role: Role\n content?: string\n }\n index: number\n finish_reason: string | null\n }\n ]\n sentence_id?: number\n is_end?: boolean\n is_truncated?: 
boolean\n result?: string\n need_clear_history?: boolean\n usage?: any\n output?: any\n request_id?: string\n event?: string\n data?: string\n meta?: any\n type?: string\n }\n\n /**\n *\n * @export\n * @interface ChatCompletionRequestMessage\n */\n export interface ChatCompletionRequestMessage {\n /**\n * The role of the author of this message.\n * @type {string}\n * @memberof ChatCompletionRequestMessage\n */\n role?: ChatCompletionRequestMessageRoleEnum\n /**\n * The contents of the message\n * @type {string}\n * @memberof ChatCompletionRequestMessage\n */\n content?: string\n /**\n * The name of the user in a multi-user chat\n * @type {string}\n * @memberof ChatCompletionRequestMessage\n */\n name?: string\n\n /**\n * 阿里云的参数 input = {prompt: '哪个公园距离我更近', history: [\n *\n * {\n *\n * \"user\":\"今天天气好吗?\",\n *\n * \"bot\":\"今天天气不错,要出去玩玩嘛?\"\n *\n * },\n *\n * {\n *\n * \"user\":\"那你有什么地方推荐?\",\n *\n * \"bot\":\"我建议你去公园,春天来了,花朵开了,很美丽。\"\n *\n * }\n *\n * ]}\n */\n input?: object\n }\n export declare const ChatCompletionRequestMessageRoleEnum: {\n readonly System: 'system'\n readonly User: 'user'\n readonly Assistant: 'assistant'\n }\n export declare type ChatCompletionRequestMessageRoleEnum =\n (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]\n /**\n *\n * @export\n * @interface ChatCompletionResponseMessage\n */\n export interface ChatCompletionResponseMessage {\n /**\n * The role of the author of this message.\n * @type {string}\n * @memberof ChatCompletionResponseMessage\n */\n role: ChatCompletionResponseMessageRoleEnum\n /**\n * The contents of the message\n * @type {string}\n * @memberof ChatCompletionResponseMessage\n */\n content: string\n }\n export declare const ChatCompletionResponseMessageRoleEnum: {\n readonly System: 'system'\n readonly User: 'user'\n readonly Assistant: 'assistant'\n }\n export declare type ChatCompletionResponseMessageRoleEnum =\n (typeof ChatCompletionResponseMessageRoleEnum)[keyof 
typeof ChatCompletionResponseMessageRoleEnum]\n /**\n *\n * @export\n * @interface CreateChatCompletionRequest\n */\n export interface CreateChatCompletionRequest {\n /**\n * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.\n * @type {string}\n * @memberof CreateChatCompletionRequest\n */\n model: string\n /**\n * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).\n * @type {Array<ChatCompletionRequestMessage>}\n * @memberof CreateChatCompletionRequest\n */\n messages: Array<ChatCompletionRequestMessage>\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n temperature?: number | null\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n top_p?: number | null\n /**\n * How many chat completion choices to generate for each input message.\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n n?: number | null\n /**\n * If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.\n * @type {boolean}\n * @memberof CreateChatCompletionRequest\n */\n stream?: boolean | null\n /**\n *\n * @type {CreateChatCompletionRequestStop}\n * @memberof CreateChatCompletionRequest\n */\n stop?: CreateChatCompletionRequestStop\n /**\n * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n max_tokens?: number\n /**\n * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n presence_penalty?: number | null\n /**\n * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n frequency_penalty?: number | null\n /**\n * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n * @type {object}\n * @memberof CreateChatCompletionRequest\n */\n logit_bias?: object | null\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n * @type {string}\n * @memberof CreateChatCompletionRequest\n */\n user?: string\n }\n /**\n * @type CreateChatCompletionRequestStop\n * Up to 4 sequences where the API will stop generating further tokens.\n * @export\n */\n export declare type CreateChatCompletionRequestStop = Array<string> | string\n /**\n *\n * @export\n * @interface CreateChatCompletionResponse\n */\n export interface CreateChatCompletionResponse {\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponse\n */\n id?: string\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponse\n */\n object?: string\n /**\n *\n * @type {number}\n * @memberof CreateChatCompletionResponse\n */\n created?: number\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponse\n */\n model?: string\n /**\n *\n * @type {Array<CreateChatCompletionResponseChoicesInner>}\n * @memberof CreateChatCompletionResponse\n */\n choices?: Array<CreateChatCompletionResponseChoicesInner>\n /**\n *\n * @type {CreateCompletionResponseUsage}\n * @memberof CreateChatCompletionResponse\n */\n usage?: CreateCompletionResponseUsage\n result?: string\n is_truncated?: boolean\n need_clear_history?: boolean\n output?: any\n request_id?: string\n }\n /**\n *\n * @export\n * @interface CreateChatCompletionResponseChoicesInner\n */\n export interface CreateChatCompletionResponseChoicesInner {\n /**\n *\n * @type {number}\n * @memberof CreateChatCompletionResponseChoicesInner\n */\n index?: number\n /**\n *\n * @type 
{ChatCompletionResponseMessage}\n * @memberof CreateChatCompletionResponseChoicesInner\n */\n message?: ChatCompletionResponseMessage\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponseChoicesInner\n */\n finish_reason?: string\n }\n /**\n *\n * @export\n * @interface CreateCompletionResponseUsage\n */\n export interface CreateCompletionResponseUsage {\n /**\n *\n * @type {number}\n * @memberof CreateCompletionResponseUsage\n */\n prompt_tokens: number\n /**\n *\n * @type {number}\n * @memberof CreateCompletionResponseUsage\n */\n completion_tokens: number\n /**\n *\n * @type {number}\n * @memberof CreateCompletionResponseUsage\n */\n total_tokens: number\n }\n}\n","/// <reference lib=\"dom\" />\n\nconst fetch = globalThis.fetch\n\nexport { fetch }\n","import { createParser } from 'eventsource-parser'\n\nimport * as types from './types'\nimport { fetch as globalFetch } from './fetch'\nimport { streamAsyncIterable } from './stream-async-iterable'\n\nexport async function fetchSSE(\n url: string,\n options: Parameters<typeof fetch>[1] & {\n onMessage: (data: string) => void\n onError?: (error: any) => void\n },\n fetch: types.FetchFn = globalFetch,\n manufacturer: string = 'OpenAI'\n) {\n const { onMessage, onError, ...fetchOptions } = options\n const res = await fetch(url, fetchOptions)\n if (!res.ok) {\n let reason: string\n\n try {\n reason = await res.text()\n } catch (err) {\n reason = res.statusText\n }\n\n const msg = `${manufacturer} error ${res.status}: ${reason}`\n const error = new types.ChatGPTError(msg, { cause: res })\n error.statusCode = res.status\n error.statusText = res.statusText\n throw error\n }\n\n const parser = createParser((event) => {\n if (event.type === 'event') {\n if (manufacturer.toLowerCase() === 'zhipu') {\n onMessage(JSON.stringify(event))\n } else {\n onMessage(event.data)\n }\n }\n })\n\n // handle special response errors\n const feed = (chunk: string) => {\n let response = null\n\n try {\n response = 
JSON.parse(chunk)\n } catch {\n // ignore\n }\n\n if (response?.detail?.type === 'invalid_request_error') {\n const msg = `${manufacturer} error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`\n const error = new types.ChatGPTError(msg, { cause: response })\n error.statusCode = response.detail.code\n error.statusText = response.detail.message\n\n if (onError) {\n onError(error)\n } else {\n console.error(error)\n }\n\n // don't feed to the event parser\n return\n }\n\n parser.feed(chunk)\n }\n\n if (!res.body.getReader) {\n // Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to\n // web standards, so this is a workaround...\n const body: NodeJS.ReadableStream = res.body as any\n\n if (!body.on || !body.read) {\n throw new types.ChatGPTError('unsupported \"fetch\" implementation')\n }\n\n body.on('readable', () => {\n let chunk: string | Buffer\n while (null !== (chunk = body.read())) {\n feed(chunk.toString())\n }\n })\n } else {\n for await (const chunk of streamAsyncIterable(res.body)) {\n const str = new TextDecoder().decode(chunk)\n feed(str)\n }\n }\n}\n","export async function* streamAsyncIterable<T>(stream: ReadableStream<T>) {\n const reader = stream.getReader()\n try {\n while (true) {\n const { done, value } = await reader.read()\n if (done) {\n return\n }\n yield value\n }\n } finally {\n reader.releaseLock()\n }\n}\n","import pTimeout from 'p-timeout'\nimport { v4 as uuidv4 } from 'uuid'\n\nimport * as types from './types'\nimport { fetch as globalFetch } from './fetch'\nimport { fetchSSE } from './fetch-sse'\nimport { isValidUUIDv4 } from './utils'\n\nexport class ChatGPTUnofficialProxyAPI {\n protected _accessToken: string\n protected _apiReverseProxyUrl: string\n protected _debug: boolean\n protected _model: string\n protected _headers: Record<string, string>\n protected _fetch: types.FetchFn\n\n /**\n * @param fetch - Optional override for the `fetch` implementation to use. 
Defaults to the global `fetch` function.\n */\n constructor(opts: {\n accessToken: string\n\n /** @defaultValue `https://bypass.duti.tech/api/conversation` **/\n apiReverseProxyUrl?: string\n\n /** @defaultValue `text-davinci-002-render-sha` **/\n model?: string\n\n /** @defaultValue `false` **/\n debug?: boolean\n\n /** @defaultValue `undefined` **/\n headers?: Record<string, string>\n\n fetch?: types.FetchFn\n }) {\n const {\n accessToken,\n apiReverseProxyUrl = 'https://bypass.duti.tech/api/conversation',\n model = 'text-davinci-002-render-sha',\n debug = false,\n headers,\n fetch = globalFetch\n } = opts\n\n this._accessToken = accessToken\n this._apiReverseProxyUrl = apiReverseProxyUrl\n this._debug = !!debug\n this._model = model\n this._fetch = fetch\n this._headers = headers\n\n if (!this._accessToken) {\n throw new Error('ChatGPT invalid accessToken')\n }\n\n if (!this._fetch) {\n throw new Error('Invalid environment; fetch is not defined')\n }\n\n if (typeof this._fetch !== 'function') {\n throw new Error('Invalid \"fetch\" is not a function')\n }\n }\n\n get accessToken(): string {\n return this._accessToken\n }\n\n set accessToken(value: string) {\n this._accessToken = value\n }\n\n /**\n * Sends a message to ChatGPT, waits for the response to resolve, and returns\n * the response.\n *\n * If you want your response to have historical context, you must provide a valid `parentMessageId`.\n *\n * If you want to receive a stream of partial responses, use `opts.onProgress`.\n * If you want to receive the full response, including message and conversation IDs,\n * you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation`\n * helper.\n *\n * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI completions API. 
You can override the `promptPrefix` and `promptSuffix` in `opts` to customize the prompt.\n *\n * @param message - The prompt message to send\n * @param opts.conversationId - Optional ID of a conversation to continue (defaults to a random UUID)\n * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)\n * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)\n * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)\n * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated\n * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)\n *\n * @returns The response from ChatGPT\n */\n async sendMessage(\n text: string,\n opts: types.SendMessageBrowserOptions = {}\n ): Promise<types.ChatMessage> {\n if (!!opts.conversationId !== !!opts.parentMessageId) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: conversationId and parentMessageId must both be set or both be undefined'\n )\n }\n\n if (opts.conversationId && !isValidUUIDv4(opts.conversationId)) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: conversationId is not a valid v4 UUID'\n )\n }\n\n if (opts.parentMessageId && !isValidUUIDv4(opts.parentMessageId)) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: parentMessageId is not a valid v4 UUID'\n )\n }\n\n if (opts.messageId && !isValidUUIDv4(opts.messageId)) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: messageId is not a valid v4 UUID'\n )\n }\n\n const {\n conversationId,\n parentMessageId = uuidv4(),\n messageId = uuidv4(),\n action = 'next',\n timeoutMs,\n onProgress\n } = opts\n\n let { abortSignal } = opts\n\n let abortController: AbortController = null\n if (timeoutMs && !abortSignal) {\n abortController = new 
AbortController()\n abortSignal = abortController.signal\n }\n\n const body: types.ConversationJSONBody = {\n action,\n messages: [\n {\n id: messageId,\n role: 'user',\n content: {\n content_type: 'text',\n parts: [text]\n }\n }\n ],\n model: this._model,\n parent_message_id: parentMessageId\n }\n\n if (conversationId) {\n body.conversation_id = conversationId\n }\n\n const result: types.ChatMessage = {\n role: 'assistant',\n id: uuidv4(),\n parentMessageId: messageId,\n conversationId,\n text: ''\n }\n\n const responseP = new Promise<types.ChatMessage>((resolve, reject) => {\n const url = this._apiReverseProxyUrl\n const headers = {\n ...this._headers,\n Authorization: `Bearer ${this._accessToken}`,\n Accept: 'text/event-stream',\n 'Content-Type': 'application/json'\n }\n\n if (this._debug) {\n console.log('POST', url, { body, headers })\n }\n\n fetchSSE(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: abortSignal,\n onMessage: (data: string) => {\n if (data === '[DONE]') {\n return resolve(result)\n }\n\n try {\n const convoResponseEvent: types.ConversationResponseEvent =\n JSON.parse(data)\n if (convoResponseEvent.conversation_id) {\n result.conversationId = convoResponseEvent.conversation_id\n }\n\n if (convoResponseEvent.message?.id) {\n result.id = convoResponseEvent.message.id\n }\n\n const message = convoResponseEvent.message\n // console.log('event', JSON.stringify(convoResponseEvent, null, 2))\n\n if (message) {\n let text = message?.content?.parts?.[0]\n\n if (text) {\n result.text = text\n\n if (onProgress) {\n onProgress(result)\n }\n }\n }\n } catch (err) {\n if (this._debug) {\n console.warn('chatgpt unexpected JSON error', err)\n }\n // reject(err)\n }\n },\n onError: (err) => {\n reject(err)\n }\n },\n this._fetch\n ).catch((err) => {\n const errMessageL = err.toString().toLowerCase()\n\n if (\n result.text &&\n (errMessageL === 'error: typeerror: terminated' ||\n errMessageL === 'typeerror: terminated')\n ) {\n // 
OpenAI sometimes forcefully terminates the socket from their end before\n // the HTTP request has resolved cleanly. In my testing, these cases tend to\n // happen when OpenAI has already send the last `response`, so we can ignore\n // the `fetch` error in this case.\n return resolve(result)\n } else {\n return reject(err)\n }\n })\n })\n\n if (timeoutMs) {\n if (abortController) {\n // This will be called when a timeout occurs in order for us to forcibly\n // ensure that the underlying HTTP request is aborted.\n ;(responseP as any).cancel = () => {\n abortController.abort()\n }\n }\n\n return pTimeout(responseP, {\n milliseconds: timeoutMs,\n message: 'ChatGPT timed out waiting for response'\n })\n } else {\n return responseP\n }\n }\n}\n","const uuidv4Re =\n /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i\n\nexport function isValidUUIDv4(str: string): boolean {\n return str && uuidv4Re.test(str)\n}\n"],"mappings":";AAAA,OAAO,UAAU;AACjB,OAAO,cAAc;AACrB,OAAO,cAAc;AACrB,SAAS,MAAM,cAAc;;;ACH7B,SAAS,mBAAmB;AAG5B,IAAM,YAAY,YAAY,aAAa;AAEpC,SAAS,OAAO,OAA4B;AACjD,SAAO,IAAI,YAAY,UAAU,OAAO,KAAK,CAAC;AAChD;;;ACqFO,IAAM,eAAN,cAA2B,MAAM;AAKxC;AAyGO,IAAU;AAAA,CAAV,CAAUA,YAAV;AAAA,GAAU;;;ACxMjB,IAAM,QAAQ,WAAW;;;ACFzB,SAAS,oBAAoB;;;ACA7B,gBAAuB,oBAAuB,QAA2B;AACvE,QAAM,SAAS,OAAO,UAAU;AAChC,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,UAAI,MAAM;AACR;AAAA,MACF;AACA,YAAM;AAAA,IACR;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;;;ADPA,eAAsB,SACpB,KACA,SAIAC,SAAuB,OACvB,eAAuB,UACvB;AACA,QAAM,EAAE,WAAW,SAAS,GAAG,aAAa,IAAI;AAChD,QAAM,MAAM,MAAMA,OAAM,KAAK,YAAY;AACzC,MAAI,CAAC,IAAI,IAAI;AACX,QAAI;AAEJ,QAAI;AACF,eAAS,MAAM,IAAI,KAAK;AAAA,IAC1B,SAAS,KAAP;AACA,eAAS,IAAI;AAAA,IACf;AAEA,UAAM,MAAM,GAAG,sBAAsB,IAAI,WAAW;AACpD,UAAM,QAAQ,IAAU,aAAa,KAAK,EAAE,OAAO,IAAI,CAAC;AACxD,UAAM,aAAa,IAAI;AACvB,UAAM,aAAa,IAAI;AACvB,UAAM;AAAA,EACR;AAEA,QAAM,SAAS,aAAa,CAAC,UAAU;AACrC,QAAI,MAAM,SAAS,SAAS;AAC1B,UAAI,aAAa,YAAY,MAAM,SAAS;AAC1C,kBAAU,KAAK,UAAU,KAAK,CA
AC;AAAA,MACjC,OAAO;AACL,kBAAU,MAAM,IAAI;AAAA,MACtB;AAAA,IACF;AAAA,EACF,CAAC;AAGD,QAAM,OAAO,CAAC,UAAkB;AA5ClC;AA6CI,QAAI,WAAW;AAEf,QAAI;AACF,iBAAW,KAAK,MAAM,KAAK;AAAA,IAC7B,QAAE;AAAA,IAEF;AAEA,UAAI,0CAAU,WAAV,mBAAkB,UAAS,yBAAyB;AACtD,YAAM,MAAM,GAAG,sBAAsB,SAAS,OAAO,YAAY,SAAS,OAAO,SAAS,SAAS,OAAO;AAC1G,YAAM,QAAQ,IAAU,aAAa,KAAK,EAAE,OAAO,SAAS,CAAC;AAC7D,YAAM,aAAa,SAAS,OAAO;AACnC,YAAM,aAAa,SAAS,OAAO;AAEnC,UAAI,SAAS;AACX,gBAAQ,KAAK;AAAA,MACf,OAAO;AACL,gBAAQ,MAAM,KAAK;AAAA,MACrB;AAGA;AAAA,IACF;AAEA,WAAO,KAAK,KAAK;AAAA,EACnB;AAEA,MAAI,CAAC,IAAI,KAAK,WAAW;AAGvB,UAAM,OAA8B,IAAI;AAExC,QAAI,CAAC,KAAK,MAAM,CAAC,KAAK,MAAM;AAC1B,YAAM,IAAU,aAAa,oCAAoC;AAAA,IACnE;AAEA,SAAK,GAAG,YAAY,MAAM;AACxB,UAAI;AACJ,aAAO,UAAU,QAAQ,KAAK,KAAK,IAAI;AACrC,aAAK,MAAM,SAAS,CAAC;AAAA,MACvB;AAAA,IACF,CAAC;AAAA,EACH,OAAO;AACL,qBAAiB,SAAS,oBAAoB,IAAI,IAAI,GAAG;AACvD,YAAM,MAAM,IAAI,YAAY,EAAE,OAAO,KAAK;AAC1C,WAAK,GAAG;AAAA,IACV;AAAA,EACF;AACF;;;AJpFA,SAAQ,aAAY;AAEpB,IAAM,gBAAgB;AAEtB,IAAM,qBAAqB;AAC3B,IAAM,0BAA0B;AAEzB,IAAM,aAAN,MAAiB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoCtB,YAAY,MAA+B;AACzC,UAAM;AAAA,MACJ,eAAe;AAAA,MACf;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb,QAAQ;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,MACA,iBAAiB;AAAA,MACjB,oBAAoB;AAAA,MACpB;AAAA,MACA;AAAA,MACA,OAAAC,SAAQ;AAAA,IACV,IAAI;AAEJ,SAAK,gBAAgB;AACrB,SAAK,UAAU;AACf,SAAK,UAAU;AACf,SAAK,cAAc;AACnB,SAAK,SAAS,CAAC,CAAC;AAChB,SAAK,SAASA;AAEd,SAAK,oBAAoB;AAAA,MACvB,OAAO;AAAA,MACP,aAAc,CAAC,SAAQ,OAAO,EAAE,QAAQ,aAAa,YAAY,CAAC,IAAI,KAAK,OAAQ,CAAC,QAAQ,EAAE,QAAQ,aAAa,YAAY,CAAC,IAAI,KAAK,MAAK;AAAA,MAC9I,GAAG;AAAA,IACL;AAEA,SAAK,iBAAiB;AAEtB,QAAI,KAAK,mBAAmB,QAAW;AACrC,YAAM,eAAc,oBAAI,KAAK,GAAE,YAAY,EAAE,MAAM,GAAG,EAAE,CAAC;AACzD,WAAK,iBAAiB,sDAAsD,KAAK;AAAA;AAAA,gBAAgG;AAAA,IACnL;AAEA,SAAK,kBAAkB;AACvB,SAAK,qBAAqB;AAE1B,SAAK,kBAAkB,kBAAkB,KAAK;AAC9C,SAAK,iBAAiB,iBAAiB,KAAK;AAE5C,QAAI,cAAc;AAChB,WAAK,gBAAgB;AAAA,IACvB,OAAO;AACL,WAAK,gBAAgB,IAAI,KAA6B;AAAA,QACpD,OAAO,IAAI,SAAoC,EAAE,SAAS,IAAM,CAAC;AAAA,MACnE,CAAC;AAAA,IACH;
AAEA,QAAI,CAAC,KAAK,SAAS;AACjB,YAAM,IAAI,MAAM,GAAG,KAAK,uCAAuC;AAAA,IACjE;AAEA,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,MAAM,2CAA2C;AAAA,IAC7D;AAEA,QAAI,OAAO,KAAK,WAAW,YAAY;AACrC,YAAM,IAAI,MAAM,mCAAmC;AAAA,IACrD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBA,MAAM,YACJ,MACA,OAAiC,CAAC,GACN;AAC5B,UAAM;AAAA,MACJ;AAAA,MACA,YAAY,OAAO;AAAA,MACnB;AAAA,MACA;AAAA,MACA,SAAS,aAAa,OAAO;AAAA,MAC7B;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,QAAI,EAAE,YAAY,IAAI;AAEtB,QAAI,kBAAmC;AACvC,QAAI,aAAa,CAAC,aAAa;AAC7B,wBAAkB,IAAI,gBAAgB;AACtC,oBAAc,gBAAgB;AAAA,IAChC;AAEA,UAAM,UAA6B;AAAA,MACjC,MAAM;AAAA,MACN,IAAI;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,UAAM,iBAAiB;AAEvB,UAAM,EAAE,UAAU,eAAe,WAAW,WAAW,aAAa,IAAI,MAAM,KAAK;AAAA,MACjF;AAAA,MACA;AAAA,IACF;AAEA,QAAG,KAAK;AAAQ,cAAQ,IAAI,uBAAuB,OAAO,0BAA0B,8BAA8B,KAAK,iCAAiC,cAAc;AAGtK,QAAI,iBAAiB,MAAM,iBAAiB,QAAQ,aAAa,SAAS,GAAG;AAC3E,aAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,cAAM,SAAS;AAAA,UACb,cAAc,KAAK;AAAA,UACnB,YAAY;AAAA,UACZ,SAAS;AAAA,QACX;AACA,eAAO,OAAO,MAAM;AAAA,MACtB,CAAC;AAAA,IACH;AAEA,UAAM,SAA4B;AAAA,MAChC,MAAM;AAAA,MACN,IAAI,OAAO;AAAA,MACX;AAAA,MACA,iBAAiB;AAAA,MACjB,MAAM;AAAA,MACN,QAAO;AAAA,MACP,QAAQ,EAAC,OAAO,KAAI;AAAA,IACtB;AAEA,UAAM,YAAY,IAAI;AAAA,MACpB,OAAO,SAAS,WAAW;AAxMjC;AAyMQ,YAAI,MAAM,GAAG,KAAK;AAClB,cAAM,UAAU;AAAA,UACd,gBAAgB;AAAA,UAChB,eAAe,UAAU,KAAK;AAAA,QAChC;AACA,YAAI,KAAK,cAAc,YAAY,MAAM,SAAS;AAChD,gBAAM,KAAK;AACX,iBAAO,QAAQ;AAAA,QACjB,WAAW,KAAK,cAAc,YAAY,MAAM,UAAU;AACxD,gBAAM,KAAK;AACX,cAAI;AAAQ,oBAAQ,iBAAiB,IAAI;AAAA,QAC3C,WAAW,KAAK,cAAc,YAAY,MAAM,SAAS;AACvD,gBAAM,KAAK;AACX,kBAAQ,eAAe,IAAI,KAAK;AAChC,cAAI;AAAQ,oBAAQ,QAAQ,IAAI;AAAA,QAClC,WAAW,KAAK,cAAc,YAAY,MAAM,SAAS;AACvD,gBAAM,KAAK;AACX,kBAAQ,SAAS,IAAI,KAAK;AAC1B,iBAAO,QAAQ;AAAA,QACjB,WAAW,KAAK,cAAc,YAAY,MAAM,UAAU;AACxD,gBAAM;AACN,iBAAO,QAAQ;AAAA,QACjB;AAEA,YAAI,OAAO;AAAA,UACT,YAAY;AAAA,UACZ,GAAG,KAAK;AAAA,UACR,GAAG;AAAA,UACH;AAAA,UACA;AAAA,QACF;AACA,YAAI,KAAK,cAAc,YAAY,MAAM,UAAU;AACjD,iBAAO,KAAK;AACZ,iBA
AO,OAAO,OAAO,MAAM,EAAC,OAAO,cAAa,CAAC;AAAA,QACnD,WAAW,KAAK,cAAc,YAAY,MAAM,SAAS;AACvD,iBAAO,KAAK;AACZ,iBAAO,OAAO,OAAO,MAAM,EAAC,QAAQ,SAAQ,CAAC;AAAA,QAC/C;AAIA,YAAI,KAAK,WAAW,KAAK,cAAc,YAAY,MAAM,UAAU;AACjE,kBAAQ,qBAAqB,IAAI,KAAK;AAAA,QACxC;AAEA,YAAI,KAAK,QAAQ;AACf,kBAAQ,IAAI,YAAY,KAAK;AAC7B,kBAAQ,IAAI,eAAe,KAAK,UAAU,OAAO,GAAG;AACpD,kBAAQ,IAAI,gBAAgB,4BAA4B,IAAI;AAC5D,kBAAQ,IAAI,gBAAgB,gCAAgC,iBAAiB,QAAS;AAAA,QACxF;AAEA,YAAI,KAAK,cAAc,YAAY,MAAM,UAAU;AACjD,gBAAM,OAAO;AACb,gBAAM,UAAU,KAAK,QAAQ,MAAM,GAAG;AACtC,gBAAM,UAAU;AAAA,YACd,QAAQ,QAAQ,CAAC;AAAA,YAAG,KAAK,QAAQ,CAAC;AAAA,YAAG,OAAO,QAAQ,CAAC;AAAA,YAAE,aAAa,6BAAM;AAAA,YAAa,YAAY,CAAC,CAAC;AAAA,YAAgB,QAAQ;AAAA,UAC/H;AACA,cAAI,mCAAS;AAAgB,oBAAQ,SAAS,mCAAS;AACvD,cAAI,KAAK;AAAQ,oBAAQ,IAAI,kBAAkB,OAAO;AACtD,gBAAM,WAAW,IAAI,MAAM,OAAO;AAClC,gBAAMC,OAAM,SAAS,KAAK;AAAA,YACxB,SAAS,YAAY,KAAK,UAAU,QAAQ;AAAA;AAAA,YAE5C,OAAO,EAAC,SAAS,OAAO,KAAK,IAAG,GAAE;AAKhC,kBAAI,KAAK;AAAQ,wBAAQ,IAAI,UAAS,SAAS,OAAO,KAAK,GAAG;AAC9D,qBAAO,KAAK,UAAU,KAAK,MAAM,KAAK,OAAO,IAAI,IAAc;AAC/D,qBAAO,QAAQ;AACf,kBAAI;AAAS,uBAAO,QAAQ;AAC5B,qBAAO,OAAO;AAEd,uDAAa;AAAA,YACf;AAAA,YACA,MAAM,EAAC,SAAS,QAAQ,eAAc,GAAE;AAItC,kBAAI,KAAK;AAAQ,wBAAQ,IAAI,SAAQ,SAAS,QAAQ,cAAc;AACpE,qBAAO,SAAS,EAAC,OAAO,EAAC,eAAe,gBAAgB,mBAAmB,QAAQ,cAAc,iBAAiB,OAAM,EAAC;AACzH,qBAAO,OAAO,QAAQ,KAAK;AAC3B,qBAAO,QAAQ,MAAM;AAAA,YACvB;AAAA,UACF,CAAC;AAAA,QACH,OAAO;AACL,cAAI,QAAQ;AACV;AAAA,cACE;AAAA,cACA;AAAA,gBACE,QAAQ;AAAA,gBACR;AAAA,gBACA,MAAM,KAAK,UAAU,IAAI;AAAA,gBACzB,QAAQ;AAAA,gBACR,WAAW,CAAC,SAAiB;AAzS7C,sBAAAC,KAAAC,KAAAC,KAAAC,KAAAC,KAAA;AA0SkB,sBAAI,KAAK,QAAQ;AAAA,kBAEjB;AACA,sBAAI,SAAS,UAAU;AACrB,2BAAO,OAAO,OAAO,KAAK,KAAK;AAC/B,2BAAO,QAAQ,MAAM;AAAA,kBACvB;AAEA,sBAAI;AACF,0BAAM,WACJ,KAAK,MAAM,IAAI;AAEjB,wBAAI,KAAK,cAAc,YAAY,MAAM,SAAS;AAChD,2BAAI,qCAAU,YAAW,MAAM;AAC7B,+BAAO,QAAQ,SAAS,OAAO,KAAK;AACpC,+BAAO,SAAS;AAChB,+BAAO,QAAQ,MAAM;AAAA,sBACvB;AAAA,oBACF,WAAW,KAAK,cAAc,YAAY,MAAM,SAAS;AACvD,4BAAIJ,MAAA,SAAS,QAAQ,CAAC,MAAlB,gBAAAA,IAAqB,mBAAkB,QAAQ;AACjD,+BAAO,OAAO,OAAO,KAAK,KAAK;AAC/B,+BAAO,QA
AQ,MAAM;AAAA,sBACvB;AAAA,oBACF,WAAW,KAAK,cAAc,YAAY,MAAM,UAAU;AACxD,4BAAIC,MAAA,qCAAU,WAAV,gBAAAA,IAAkB,mBAAkB,QAAQ;AAC9C,+BAAO,OAAO,qCAAU,OAAO,KAAK;AACpC,+BAAO,QAAQ,MAAM;AAAA,sBACvB;AAAA,oBACF,WAAW,KAAK,cAAc,YAAY,MAAM,SAAS;AACvD,2BAAI,qCAAU,WAAU,UAAU;AAChC,+BAAO,QAAQ,qCAAU,KAAK;AAC9B,+BAAO,QAAQ,MAAM;AAAA,sBACvB;AAAA,oBACF;AAEA,wBAAI,KAAK,cAAc,YAAY,MAAM,UAAU;AACjD,0BAAI,qCAAU,YAAY;AACxB,+BAAO,KAAK,SAAS;AAAA,sBACvB;AAAA,oBACF,OAAO;AACL,0BAAI,qCAAU,IAAI;AAChB,+BAAO,KAAK,SAAS;AAAA,sBACvB;AAAA,oBACF;AAEA,0BAAIC,MAAA,SAAS,YAAT,gBAAAA,IAAkB,WAAU,CAAC,UAAS,OAAO,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,IAAI;AACjG,4BAAM,QAAQ,SAAS,QAAQ,CAAC,EAAE;AAClC,6BAAO,QAAQ,MAAM;AACrB,0BAAI,+BAAO;AAAS,+BAAO,QAAQ,MAAM;AAEzC,0BAAI,MAAM,MAAM;AACd,+BAAO,OAAO,MAAM;AAAA,sBACtB;AAEA,6BAAO,SAAS;AAChB,+DAAa;AAAA,oBACf,YAAW,qCAAU,WAAU,KAAK,cAAc,YAAY,MAAM,SAAS;AAC3E,6BAAO,QAAQ,SAAS;AACxB,0BAAI,qCAAU;AAAQ,+BAAO,QAAQ,SAAS;AAC9C,6BAAO,OAAO;AAEd,6BAAO,SAAS;AAChB,+DAAa;AAAA,oBACf,YAAW,qCAAU,WAAU,KAAK,cAAc,YAAY,MAAM,UAAU;AAC5E,+BAAS,QAAQ,OAAO,OAAO,SAAS,OAAO,EAAC,gBAAeC,MAAA,SAAS,UAAT,gBAAAA,IAAgB,cAAa,oBAAmBC,MAAA,SAAS,UAAT,gBAAAA,IAAgB,eAAe,gBAAc,cAAS,UAAT,mBAAgB,kBAAe,cAAS,UAAT,mBAAgB,eAAa,CAAC;AACzN,6BAAO,QAAQ;AACf,2BAAI,0CAAU,WAAV,mBAAkB;AAAM,+BAAO,QAAO,0CAAU,WAAV,mBAAkB;AAC5D,6BAAO,OAAO;AAEd,6BAAO,SAAS;AAChB,+DAAa;AAAA,oBACf,YAAW,qCAAU,SAAQ,KAAK,cAAc,YAAY,MAAM,SAAS;AACzE,0BAAI,SAAS,UAAU,UAAU;AAC/B,6BAAI,0CAAU,SAAV,mBAAgB,OAAO;AACzB,mCAAS,SAAQ,0CAAU,SAAV,mBAAgB;AAAA,wBACnC,OAAO;AACL,mCAAS,QAAQ,EAAC,eAAe,GAAG,mBAAmB,GAAG,cAAc,EAAC;AAAA,wBAC3E;AAAA,sBACF;AACA,6BAAO,QAAQ,SAAS;AACxB,0BAAI,qCAAU;AAAM,+BAAO,QAAQ,qCAAU;AAC7C,6BAAO,OAAO;AAEd,6BAAO,SAAS;AAChB,+DAAa;AAAA,oBACf;AAAA,kBACF,SAAS,KAAP;AACA,4BAAQ,KAAK,GAAG,KAAK,mDAAmD,GAAG;AAC3E,2BAAO,OAAO,GAAG;AAAA,kBACnB;AAAA,gBACF;AAAA,cACF;AAAA,cACA,KAAK;AAAA,cACL,KAAK;AAAA,YACP,EAAE,MAAM,MAAM;AAAA,UAChB,OAAO;AACL,gBAAI;AACF,oBAAM,MAAM,MAAM,KAAK,OAAO,KAAK;AAAA,gBACjC,QAAQ;AAAA,gBACR;AAAA,gBACA,MAAM,KAAK,UAAU,IAAI;AAAA,gBACzB,QAAQ;AAAA,cACV,CAAC;AAED,kBAAI,CAAC,IAAI,IA
AI;AACX,sBAAM,SAAS,MAAM,IAAI,KAAK;AAC9B,sBAAM,MAAM,GAAG,KAAK,uBAClB,IAAI,UAAU,IAAI,eACf;AACL,sBAAM,QAAQ,IAAU,aAAa,KAAK,EAAE,OAAO,IAAI,CAAC;AACxD,sBAAM,aAAa,IAAI;AACvB,sBAAM,aAAa,IAAI;AACvB,uBAAO,OAAO,KAAK;AAAA,cACrB;AAEA,oBAAM,WACJ,MAAM,IAAI,KAAK;AACjB,kBAAI,KAAK,QAAQ;AACf,wBAAQ,IAAI,QAAQ;AAAA,cACtB;AAEA,kBAAI,KAAK,cAAc,YAAY,MAAM,UAAU;AACjD,oBAAI,qCAAU,YAAY;AACxB,yBAAO,KAAK,SAAS;AAAA,gBACvB;AAAA,cACF,OAAO;AACL,oBAAI,qCAAU,IAAI;AAChB,yBAAO,KAAK,SAAS;AAAA,gBACvB;AAAA,cACF;AAEA,oBAAI,0CAAU,YAAV,mBAAmB,WAAU,CAAC,UAAS,OAAO,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,IAAI;AAClG,sBAAMC,WAAU,SAAS,QAAQ,CAAC,EAAE;AACpC,uBAAO,OAAOA,SAAQ;AACtB,oBAAIA,SAAQ,MAAM;AAChB,yBAAO,OAAOA,SAAQ;AAAA,gBACxB;AAAA,cACF,YAAU,qCAAU,WAAU,KAAK,cAAc,YAAY,MAAM,SAAS;AAC1E,uBAAO,OAAO,SAAS;AACvB,uBAAO,OAAO;AAAA,cAChB,aAAU,0CAAU,WAAV,mBAAkB,SAAQ,KAAK,cAAc,YAAY,MAAM,UAAU;AACjF,uBAAO,QAAO,0CAAU,WAAV,mBAAkB;AAChC,uBAAO,OAAO;AAAA,cAChB,OAAO;AACL,sBAAMC,OAAM;AACZ,uBAAO;AAAA,kBACL,IAAI;AAAA,oBACF,GAAG,KAAK,0BACN,KAAAA,QAAA,gBAAAA,KAAK,WAAL,mBAAa,cAAW,KAAAA,QAAA,gBAAAA,KAAK,WAAL,mBAAa,eAAaA,QAAA,gBAAAA,KAAK,WAAU;AAAA,kBAErE;AAAA,gBACF;AAAA,cACF;AAEA,qBAAO,SAAS;AAEhB,qBAAO,QAAQ,MAAM;AAAA,YACvB,SAAS,KAAP;AACA,qBAAO,OAAO,GAAG;AAAA,YACnB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF,EAAE,KAAK,OAAOD,aAAY;AA7c9B;AA8cM,UAAIA,SAAQ,UAAU,CAACA,SAAQ,OAAO,OAAO;AAC3C,YAAI;AACF,gBAAM,eAAe;AACrB,cAAI,mBAAmB;AACvB,cAAI,CAAC,SAAQ,QAAQ,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,IAAK;AACtE,gCAAmB,WAAAA,SAAQ,WAAR,mBAAgB,UAAhB,mBAAuB;AAAA,UAC5C,OAAO;AACL,+BAAmB,MAAM,KAAK,eAAeA,SAAQ,IAAI;AAAA,UAC3D;AACA,UAAAA,SAAQ,OAAO,QAAQ;AAAA,YACrB,eAAe;AAAA,YACf,mBAAmB;AAAA,YACnB,cAAc,eAAe;AAAA,YAC7B,WAAW;AAAA,UACb;AAAA,QACF,SAAS,KAAP;AAAA,QAGF;AAAA,MACF;AAEA,aAAO,QAAQ,IAAI;AAAA,QACjB,KAAK,eAAe,cAAc;AAAA,QAClC,KAAK,eAAeA,QAAO;AAAA,MAC7B,CAAC,EAAE,KAAK,MAAMA,QAAO;AAAA,IACvB,CAAC;AAED,QAAI,WAAW;AACb,UAAI,iBAAiB;AAGnB;AAAC,QAAC,UAAkB,SAAS,MAAM;AACjC,0BAAgB,MAAM;AAAA,QACxB;AAAA,MACF;AAEA,aAAO,SAAS,WAAW;AAAA,QACzB,cAAc;AAAA,QACd,SAAS,GAAG,KAAK;AAAA,MACnB,CAAC;AAAA,IACH,O
AAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAEA,IAAI,eAAuB;AACzB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,aAAa,cAAsB;AACrC,SAAK,gBAAgB;AAAA,EACvB;AAAA,EAEA,IAAI,iBAAyB;AAC3B,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,eAAe,gBAAwB;AACzC,SAAK,kBAAkB;AAAA,EACzB;AAAA,EAEA,IAAI,oBAA4B;AAC9B,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,kBAAkB,mBAA2B;AAC/C,SAAK,qBAAqB;AAAA,EAC5B;AAAA,EAEA,IAAI,aAAqB;AACvB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,WAAW,YAAoB;AACjC,SAAK,cAAc;AAAA,EACrB;AAAA,EAEA,IAAI,SAAiB;AACnB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,OAAO,QAAgB;AACzB,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,IAAI,SAAiB;AACnB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,OAAO,QAAgB;AACzB,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,MAAgB,eAAe,MAAc,MAAgC;AAC3E,UAAM,EAAE,gBAAgB,KAAK,eAAe,IAAI;AAChD,QAAI,EAAE,gBAAgB,IAAI;AAC1B,QAAI,eAAe;AAEnB,UAAM,YAAY;AAClB,UAAM,iBAAiB;AAEvB,UAAM,eAAe,KAAK,kBAAkB,KAAK;AACjD,QAAI,WAAwD,CAAC;AAE7D,QAAI,iBAAiB,CAAC,UAAS,OAAO,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,IAAI;AACtF,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,UAAM,sBAAsB,SAAS;AACrC,UAAM,cAAkB,CAAC,SAAQ,SAAQ,SAAS,QAAQ,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,KAAK,CAAC,EAAC,MAAM,QAAO,SAAS,KAAI,CAAC,IAAI,CAAC,EAAC,MAAM,QAAO,SAAS,MAAK,MAAM,KAAK,KAAI,CAAC;AAC3L,QAAI,eAAe,OACf,SAAS,OAAO,WAAW,IAC3B;AACJ,QAAI,YAAY;AAEhB,OAAG;AACD,YAAM,SAAS,aACZ,OAAO,CAACE,SAAQ,YAAY;AAC3B,gBAAQ,QAAQ,MAAM;AAAA,UACpB,KAAK;AACH,mBAAOA,QAAO,OAAO,CAAC;AAAA,EAAkB,QAAQ,SAAS,CAAC;AAAA,UAC5D,KAAK;AACH,mBAAOA,QAAO,OAAO,CAAC,GAAG;AAAA,EAAe,QAAQ,SAAS,CAAC;AAAA,UAC5D;AACE,mBAAOA,QAAO,OAAO,CAAC,GAAG;AAAA,EAAoB,QAAQ,SAAS,CAAC;AAAA,QACnE;AAAA,MACF,GAAG,CAAC,CAAa,EAChB,KAAK,MAAM;AAEd,YAAM,wBAAwB,MAAM,KAAK,eAAe,MAAM;AAC9D,YAAM,gBAAgB,yBAAyB;AAE/C,UAAI,UAAU,CAAC,iBAAiB,CAAC,UAAS,OAAO,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,IAAI;AACjG;AAAA,MACF;AAEA,iBAAW;AACX,kBAAY;AAEZ,UAAI,CAAC,iBAAiB,CAAC,UAAS,OAAO,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,IAAI;AACvF;AAAA,MACF;AAEA,UAAI,CAAC,iBAAiB;AACpB;AAAA,MACF;AAEA,YAAM,gBAAgB,MAAM,KAAK,gBAAgB,eAAe;AAChE,UAAI,CAAC,eAAe;AAClB;AAAA,MACF;AAEA,YAAM,oBAAoB,cAAc,QAAQ;AA
ChD,YAAM,oBAAwB,CAAC,SAAQ,SAAS,SAAS,QAAQ,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,KAAM;AAAA,QACjH,MAAM;AAAA,QACN,SAAS,cAAc;AAAA,MACzB,IAAI;AAAA,QACJ,MAAM;AAAA,QACN,SAAS,cAAc;AAAA,QACvB,MAAM,cAAc;AAAA,MACtB;AAEA,qBAAe,aAAa,MAAM,GAAG,mBAAmB,EAAE,OAAO;AAAA,QAC/D;AAAA,QACA,GAAG,aAAa,MAAM,mBAAmB;AAAA,MAC3C,CAAC;AAED,wBAAkB,cAAc;AAAA,IAClC,SAAS;AAIT,QAAI,YAAY,KAAK;AACrB,QAAI,CAAC,UAAS,OAAO,EAAE,QAAQ,KAAK,cAAc,YAAY,CAAC,IAAI,IAAI;AACrE,kBAAY,KAAK;AAAA,QACf;AAAA,QACA,KAAK,IAAI,KAAK,kBAAkB,WAAW,KAAK,kBAAkB;AAAA,MACpE;AAAA,IACF,WAAW,YAAY,KAAK,iBAAiB;AAC3C,kBAAY,KAAK;AACjB,qBAAe,GAAG,KAAK,wFAA4B,+FAA8B,KAAK;AAAA,IACxF;AAEA,QAAI;AACJ,QAAI,KAAK,cAAc,YAAY,MAAM,UAAU;AACjD,sBAAgB,EAAC,QAAQ,IAAI,SAAS,CAAC,EAAC;AACxC,UAAI,cAAc,EAAC,MAAM,IAAI,KAAK,GAAE;AACpC,eAAS,QAAQ,CAAC,MAAM,UAAU;AAChC,YAAI,QAAQ,SAAS,SAAQ,KAAK,QAAQ,GAAG;AAC3C,cAAI,CAAC,YAAY,QAAQ,CAAC,YAAY,KAAK;AACzC,gBAAI,KAAK,SAAS;AAAQ,0BAAY,OAAO,KAAK;AAClD,gBAAI,KAAK,SAAS;AAAa,0BAAY,MAAM,KAAK;AAAA,UACxD;AACA,cAAI,YAAY,QAAQ,YAAY,KAAK;AACvC,0BAAc,QAAQ,KAAK,WAAW;AACtC,0BAAc,EAAC,MAAM,IAAI,KAAK,GAAE;AAAA,UAClC;AAAA,QACF,OAAO;AACL,wBAAc,SAAS,KAAK;AAAA,QAC9B;AAAA,MACF,CAAC;AAAA,IACH;AAEA,WAAO,EAAE,UAAS,eAAe,WAAW,WAAW,aAAa;AAAA,EACtE;AAAA,EAEA,MAAgB,eAAe,MAAc;AAE3C,WAAO,KAAK,QAAQ,oBAAoB,EAAE;AAE1C,WAAiB,OAAO,IAAI,EAAE;AAAA,EAChC;AAAA,EAEA,MAAgB,uBACd,IAC4B;AAC5B,UAAM,MAAM,MAAM,KAAK,cAAc,IAAI,EAAE;AAC3C,WAAO;AAAA,EACT;AAAA,EAEA,MAAgB,sBACd,SACe;AACf,UAAM,KAAK,cAAc,IAAI,QAAQ,IAAI,OAAO;AAAA,EAClD;AACF;;;AMlrBA,OAAOC,eAAc;AACrB,SAAS,MAAMC,eAAc;;;ACD7B,IAAM,WACJ;AAEK,SAAS,cAAc,KAAsB;AAClD,SAAO,OAAO,SAAS,KAAK,GAAG;AACjC;;;ADGO,IAAM,4BAAN,MAAgC;AAAA;AAAA;AAAA;AAAA,EAWrC,YAAY,MAgBT;AACD,UAAM;AAAA,MACJ;AAAA,MACA,qBAAqB;AAAA,MACrB,QAAQ;AAAA,MACR,QAAQ;AAAA,MACR;AAAA,MACA,OAAAC,SAAQ;AAAA,IACV,IAAI;AAEJ,SAAK,eAAe;AACpB,SAAK,sBAAsB;AAC3B,SAAK,SAAS,CAAC,CAAC;AAChB,SAAK,SAAS;AACd,SAAK,SAASA;AACd,SAAK,WAAW;AAEhB,QAAI,CAAC,KAAK,cAAc;AACtB,YAAM,IAAI,MAAM,6BAA6B;AAAA,IAC/C;AAEA,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,MAAM,2CAA2C;AAAA,IAC7D;AAEA,QAAI,OAAO,KAAK,WAAW,Y
AAY;AACrC,YAAM,IAAI,MAAM,mCAAmC;AAAA,IACrD;AAAA,EACF;AAAA,EAEA,IAAI,cAAsB;AACxB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,YAAY,OAAe;AAC7B,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,MAAM,YACJ,MACA,OAAwC,CAAC,GACb;AAC5B,QAAI,CAAC,CAAC,KAAK,mBAAmB,CAAC,CAAC,KAAK,iBAAiB;AACpD,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI,KAAK,kBAAkB,CAAC,cAAc,KAAK,cAAc,GAAG;AAC9D,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI,KAAK,mBAAmB,CAAC,cAAc,KAAK,eAAe,GAAG;AAChE,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI,KAAK,aAAa,CAAC,cAAc,KAAK,SAAS,GAAG;AACpD,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,UAAM;AAAA,MACJ;AAAA,MACA,kBAAkBC,QAAO;AAAA,MACzB,YAAYA,QAAO;AAAA,MACnB,SAAS;AAAA,MACT;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,QAAI,EAAE,YAAY,IAAI;AAEtB,QAAI,kBAAmC;AACvC,QAAI,aAAa,CAAC,aAAa;AAC7B,wBAAkB,IAAI,gBAAgB;AACtC,oBAAc,gBAAgB;AAAA,IAChC;AAEA,UAAM,OAAmC;AAAA,MACvC;AAAA,MACA,UAAU;AAAA,QACR;AAAA,UACE,IAAI;AAAA,UACJ,MAAM;AAAA,UACN,SAAS;AAAA,YACP,cAAc;AAAA,YACd,OAAO,CAAC,IAAI;AAAA,UACd;AAAA,QACF;AAAA,MACF;AAAA,MACA,OAAO,KAAK;AAAA,MACZ,mBAAmB;AAAA,IACrB;AAEA,QAAI,gBAAgB;AAClB,WAAK,kBAAkB;AAAA,IACzB;AAEA,UAAM,SAA4B;AAAA,MAChC,MAAM;AAAA,MACN,IAAIA,QAAO;AAAA,MACX,iBAAiB;AAAA,MACjB;AAAA,MACA,MAAM;AAAA,IACR;AAEA,UAAM,YAAY,IAAI,QAA2B,CAAC,SAAS,WAAW;AACpE,YAAM,MAAM,KAAK;AACjB,YAAM,UAAU;AAAA,QACd,GAAG,KAAK;AAAA,QACR,eAAe,UAAU,KAAK;AAAA,QAC9B,QAAQ;AAAA,QACR,gBAAgB;AAAA,MAClB;AAEA,UAAI,KAAK,QAAQ;AACf,gBAAQ,IAAI,QAAQ,KAAK,EAAE,MAAM,QAAQ,CAAC;AAAA,MAC5C;AAEA;AAAA,QACE;AAAA,QACA;AAAA,UACE,QAAQ;AAAA,UACR;AAAA,UACA,MAAM,KAAK,UAAU,IAAI;AAAA,UACzB,QAAQ;AAAA,UACR,WAAW,CAAC,SAAiB;AA7LvC;AA8LY,gBAAI,SAAS,UAAU;AACrB,qBAAO,QAAQ,MAAM;AAAA,YACvB;AAEA,gBAAI;AACF,oBAAM,qBACJ,KAAK,MAAM,IAAI;AACjB,kBAAI,mBAAmB,iBAAiB;AACtC,uBAAO,iBAAiB,mBAAmB;AAAA,cAC7C;AAEA,mBAAI,wBAAmB,YAAnB,mBAA4B,IAAI;AAClC,uBAAO,KAAK,mBAAmB,QAAQ;AAAA,cACzC;AAEA,oBAAM,UAAU,mBAAmB;AAGnC,kBAAI,SAAS;AACX,oBAAIC,SAAO,8CAAS,YAAT,mBAAkB,UAAlB,mBAA0B;AAErC,oB
AAIA,OAAM;AACR,yBAAO,OAAOA;AAEd,sBAAI,YAAY;AACd,+BAAW,MAAM;AAAA,kBACnB;AAAA,gBACF;AAAA,cACF;AAAA,YACF,SAAS,KAAP;AACA,kBAAI,KAAK,QAAQ;AACf,wBAAQ,KAAK,iCAAiC,GAAG;AAAA,cACnD;AAAA,YAEF;AAAA,UACF;AAAA,UACA,SAAS,CAAC,QAAQ;AAChB,mBAAO,GAAG;AAAA,UACZ;AAAA,QACF;AAAA,QACA,KAAK;AAAA,MACP,EAAE,MAAM,CAAC,QAAQ;AACf,cAAM,cAAc,IAAI,SAAS,EAAE,YAAY;AAE/C,YACE,OAAO,SACN,gBAAgB,kCACf,gBAAgB,0BAClB;AAKA,iBAAO,QAAQ,MAAM;AAAA,QACvB,OAAO;AACL,iBAAO,OAAO,GAAG;AAAA,QACnB;AAAA,MACF,CAAC;AAAA,IACH,CAAC;AAED,QAAI,WAAW;AACb,UAAI,iBAAiB;AAGnB;AAAC,QAAC,UAAkB,SAAS,MAAM;AACjC,0BAAgB,MAAM;AAAA,QACxB;AAAA,MACF;AAEA,aAAOC,UAAS,WAAW;AAAA,QACzB,cAAc;AAAA,QACd,SAAS;AAAA,MACX,CAAC;AAAA,IACH,OAAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AACF;","names":["openai","fetch","fetch","url","_a","_b","_c","_d","_e","message","res","prompt","pTimeout","uuidv4","fetch","uuidv4","text","pTimeout"]}
|
package/readme.md
DELETED
|
@@ -1,564 +0,0 @@
|
|
|
1
|
-
# LEIAI API <!-- omit in toc -->
|
|
2
|
-
|
|
3
|
-
> Node.js client for the official [ChatGPT](https://openai.com/blog/chatgpt/) API.
|
|
4
|
-
|
|
5
|
-
[](https://www.npmjs.com/package/chatgpt) [](https://github.com/transitive-bullshit/chatgpt-api/actions/workflows/test.yml) [](https://github.com/transitive-bullshit/chatgpt-api/blob/main/license) [](https://prettier.io)
|
|
6
|
-
|
|
7
|
-
- [Intro](#intro)
|
|
8
|
-
- [Updates](#updates)
|
|
9
|
-
- [CLI](#cli)
|
|
10
|
-
- [Install](#install)
|
|
11
|
-
- [Usage](#usage)
|
|
12
|
-
- [Usage - ChatGPTAPI](#usage---chatgptapi)
|
|
13
|
-
- [Usage - ChatGPTUnofficialProxyAPI](#usage---chatgptunofficialproxyapi)
|
|
14
|
-
- [Reverse Proxy](#reverse-proxy)
|
|
15
|
-
- [Access Token](#access-token)
|
|
16
|
-
- [Docs](#docs)
|
|
17
|
-
- [Demos](#demos)
|
|
18
|
-
- [Projects](#projects)
|
|
19
|
-
- [Compatibility](#compatibility)
|
|
20
|
-
- [Credits](#credits)
|
|
21
|
-
- [License](#license)
|
|
22
|
-
|
|
23
|
-
## Intro
|
|
24
|
-
|
|
25
|
-
This package is a Node.js wrapper around [ChatGPT](https://openai.com/blog/chatgpt) by [OpenAI](https://openai.com). TS batteries included. ✨
|
|
26
|
-
|
|
27
|
-
<p align="center">
|
|
28
|
-
<img alt="Example usage" src="/media/demo.gif">
|
|
29
|
-
</p>
|
|
30
|
-
|
|
31
|
-
## Updates
|
|
32
|
-
|
|
33
|
-
<details open>
|
|
34
|
-
<summary><strong>April 10, 2023</strong></summary>
|
|
35
|
-
|
|
36
|
-
<br/>
|
|
37
|
-
|
|
38
|
-
This package now **fully supports GPT-4**! 🔥
|
|
39
|
-
|
|
40
|
-
We also just released a [TypeScript chatgpt-plugin package](https://github.com/transitive-bullshit/chatgpt-plugin-ts) which contains helpers and examples to make it as easy as possible to start building your own ChatGPT Plugins in JS/TS. Even if you don't have developer access to ChatGPT Plugins yet, you can still use the [chatgpt-plugin](https://github.com/transitive-bullshit/chatgpt-plugin-ts) repo to get a head start on building your own plugins locally.
|
|
41
|
-
|
|
42
|
-
If you have access to the `gpt-4` model, you can run the following to test out the CLI with GPT-4:
|
|
43
|
-
|
|
44
|
-
```bash
|
|
45
|
-
npx chatgpt@latest --model gpt-4 "Hello world"
|
|
46
|
-
```
|
|
47
|
-
|
|
48
|
-
<p align="center">
|
|
49
|
-
<img src="https://user-images.githubusercontent.com/552829/229368245-d22fbac7-4b56-4a5e-810b-5ac5793b6ac3.png" width="600px" alt="Using the chatgpt CLI with gpt-4">
|
|
50
|
-
</p>
|
|
51
|
-
|
|
52
|
-
We still support both the official ChatGPT API and the unofficial proxy API, but we now recommend using the official API since it's significantly more robust and supports **GPT-4**.
|
|
53
|
-
|
|
54
|
-
| Method | Free? | Robust? | Quality? |
|
|
55
|
-
| --------------------------- | ------ | ------- | ------------------------------- |
|
|
56
|
-
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models + GPT-4 |
|
|
57
|
-
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ❌ No️ | ✅ ChatGPT webapp |
|
|
58
|
-
|
|
59
|
-
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI. We will likely remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
|
60
|
-
|
|
61
|
-
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free)
|
|
62
|
-
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
|
63
|
-
|
|
64
|
-
</details>
|
|
65
|
-
|
|
66
|
-
<details>
|
|
67
|
-
<summary><strong>Previous Updates</strong></summary>
|
|
68
|
-
|
|
69
|
-
<br/>
|
|
70
|
-
|
|
71
|
-
<details>
|
|
72
|
-
<summary><strong>March 1, 2023</strong></summary>
|
|
73
|
-
|
|
74
|
-
<br/>
|
|
75
|
-
|
|
76
|
-
The [official OpenAI chat completions API](https://platform.openai.com/docs/guides/chat) has been released, and it is now the default for this package! 🔥
|
|
77
|
-
|
|
78
|
-
| Method | Free? | Robust? | Quality? |
|
|
79
|
-
| --------------------------- | ------ | -------- | ----------------------- |
|
|
80
|
-
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models |
|
|
81
|
-
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT |
|
|
82
|
-
|
|
83
|
-
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI. We may remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
|
84
|
-
|
|
85
|
-
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free)
|
|
86
|
-
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
|
87
|
-
|
|
88
|
-
</details>
|
|
89
|
-
|
|
90
|
-
<details>
|
|
91
|
-
<summary><strong>Feb 19, 2023</strong></summary>
|
|
92
|
-
|
|
93
|
-
<br/>
|
|
94
|
-
|
|
95
|
-
We now provide three ways of accessing the unofficial ChatGPT API, all of which have tradeoffs:
|
|
96
|
-
|
|
97
|
-
| Method | Free? | Robust? | Quality? |
|
|
98
|
-
| --------------------------- | ------ | -------- | ----------------- |
|
|
99
|
-
| `ChatGPTAPI` | ❌ No | ✅ Yes | ☑️ Mimics ChatGPT |
|
|
100
|
-
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT |
|
|
101
|
-
| `ChatGPTAPIBrowser` (v3) | ✅ Yes | ❌ No | ✅ Real ChatGPT |
|
|
102
|
-
|
|
103
|
-
**Note**: I recommend that you use either `ChatGPTAPI` or `ChatGPTUnofficialProxyAPI`.
|
|
104
|
-
|
|
105
|
-
1. `ChatGPTAPI` - (Used to use) `text-davinci-003` to mimic ChatGPT via the official OpenAI completions API (most robust approach, but it's not free and doesn't use a model fine-tuned for chat)
|
|
106
|
-
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
|
107
|
-
3. `ChatGPTAPIBrowser` - (_deprecated_; v3.5.1 of this package) Uses Puppeteer to access the official ChatGPT webapp (uses the real ChatGPT, but very flaky, heavyweight, and error prone)
|
|
108
|
-
|
|
109
|
-
</details>
|
|
110
|
-
|
|
111
|
-
<details>
|
|
112
|
-
<summary><strong>Feb 5, 2023</strong></summary>
|
|
113
|
-
|
|
114
|
-
<br/>
|
|
115
|
-
|
|
116
|
-
OpenAI has disabled the leaked chat model we were previously using, so we're now defaulting to `text-davinci-003`, which is not free.
|
|
117
|
-
|
|
118
|
-
We've found several other hidden, fine-tuned chat models, but OpenAI keeps disabling them, so we're searching for alternative workarounds.
|
|
119
|
-
|
|
120
|
-
</details>
|
|
121
|
-
|
|
122
|
-
<details>
|
|
123
|
-
<summary><strong>Feb 1, 2023</strong></summary>
|
|
124
|
-
|
|
125
|
-
<br/>
|
|
126
|
-
|
|
127
|
-
This package no longer requires any browser hacks – **it is now using the official OpenAI completions API** with a leaked model that ChatGPT uses under the hood. 🔥
|
|
128
|
-
|
|
129
|
-
```ts
|
|
130
|
-
import { ChatGPTAPI } from 'chatgpt'
|
|
131
|
-
|
|
132
|
-
const api = new ChatGPTAPI({
|
|
133
|
-
apiKey: process.env.OPENAI_API_KEY
|
|
134
|
-
})
|
|
135
|
-
|
|
136
|
-
const res = await api.sendMessage('Hello World!')
|
|
137
|
-
console.log(res.text)
|
|
138
|
-
```
|
|
139
|
-
|
|
140
|
-
Please upgrade to `chatgpt@latest` (at least [v4.0.0](https://github.com/transitive-bullshit/chatgpt-api/releases/tag/v4.0.0)). The updated version is **significantly more lightweight and robust** compared with previous versions. You also don't have to worry about IP issues or rate limiting.
|
|
141
|
-
|
|
142
|
-
Huge shoutout to [@waylaidwanderer](https://github.com/waylaidwanderer) for discovering the leaked chat model!
|
|
143
|
-
|
|
144
|
-
</details>
|
|
145
|
-
</details>
|
|
146
|
-
|
|
147
|
-
If you run into any issues, we do have a pretty active [ChatGPT Hackers Discord](https://www.chatgpthackers.dev/) with over 8k developers from the Node.js & Python communities.
|
|
148
|
-
|
|
149
|
-
Lastly, please consider starring this repo and <a href="https://twitter.com/transitive_bs">following me on twitter <img src="https://storage.googleapis.com/saasify-assets/twitter-logo.svg" alt="twitter" height="24px" align="center"></a> to help support the project.
|
|
150
|
-
|
|
151
|
-
Thanks && cheers,
|
|
152
|
-
[Travis](https://twitter.com/transitive_bs)
|
|
153
|
-
|
|
154
|
-
## CLI
|
|
155
|
-
|
|
156
|
-
To run the CLI, you'll need an [OpenAI API key](https://platform.openai.com/overview):
|
|
157
|
-
|
|
158
|
-
```bash
|
|
159
|
-
export OPENAI_API_KEY="sk-TODO"
|
|
160
|
-
npx chatgpt "your prompt here"
|
|
161
|
-
```
|
|
162
|
-
|
|
163
|
-
By default, the response is streamed to stdout, the results are stored in a local config file, and every invocation starts a new conversation. You can use `-c` to continue the previous conversation and `--no-stream` to disable streaming.
|
|
164
|
-
|
|
165
|
-
```
|
|
166
|
-
Usage:
|
|
167
|
-
$ chatgpt <prompt>
|
|
168
|
-
|
|
169
|
-
Commands:
|
|
170
|
-
<prompt> Ask ChatGPT a question
|
|
171
|
-
rm-cache Clears the local message cache
|
|
172
|
-
ls-cache Prints the local message cache path
|
|
173
|
-
|
|
174
|
-
For more info, run any command with the `--help` flag:
|
|
175
|
-
$ chatgpt --help
|
|
176
|
-
$ chatgpt rm-cache --help
|
|
177
|
-
$ chatgpt ls-cache --help
|
|
178
|
-
|
|
179
|
-
Options:
|
|
180
|
-
-c, --continue Continue last conversation (default: false)
|
|
181
|
-
-d, --debug Enables debug logging (default: false)
|
|
182
|
-
-s, --stream Streams the response (default: true)
|
|
183
|
-
-s, --store Enables the local message cache (default: true)
|
|
184
|
-
-t, --timeout Timeout in milliseconds
|
|
185
|
-
-k, --apiKey OpenAI API key
|
|
186
|
-
-o, --apiOrg OpenAI API organization
|
|
187
|
-
-n, --conversationName Unique name for the conversation
|
|
188
|
-
-h, --help Display this message
|
|
189
|
-
-v, --version Display version number
|
|
190
|
-
```
|
|
191
|
-
|
|
192
|
-
If you have access to the `gpt-4` model, you can run the following to test out the CLI with GPT-4:
|
|
193
|
-
|
|
194
|
-
<p align="center">
|
|
195
|
-
<img src="https://user-images.githubusercontent.com/552829/229368245-d22fbac7-4b56-4a5e-810b-5ac5793b6ac3.png" width="600px" alt="Using the chatgpt CLI with gpt-4">
|
|
196
|
-
</p>
|
|
197
|
-
|
|
198
|
-
## Install
|
|
199
|
-
|
|
200
|
-
```bash
|
|
201
|
-
npm install chatgpt
|
|
202
|
-
```
|
|
203
|
-
|
|
204
|
-
Make sure you're using `node >= 18` so `fetch` is available (or `node >= 14` if you install a [fetch polyfill](https://github.com/developit/unfetch#usage-as-a-polyfill)).
|
|
205
|
-
|
|
206
|
-
## Usage
|
|
207
|
-
|
|
208
|
-
To use this module from Node.js, you need to pick between two methods:
|
|
209
|
-
|
|
210
|
-
| Method | Free? | Robust? | Quality? |
|
|
211
|
-
| --------------------------- | ------ | ------- | ------------------------------- |
|
|
212
|
-
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models + GPT-4 |
|
|
213
|
-
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ❌ No️ | ✅ Real ChatGPT webapp |
|
|
214
|
-
|
|
215
|
-
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free). You can override the model, completion params, and system message to fully customize your assistant.
|
|
216
|
-
|
|
217
|
-
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
|
218
|
-
|
|
219
|
-
Both approaches have very similar APIs, so it should be simple to swap between them.
|
|
220
|
-
|
|
221
|
-
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI and it also supports `gpt-4`. We will likely remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
|
222
|
-
|
|
223
|
-
### Usage - ChatGPTAPI
|
|
224
|
-
|
|
225
|
-
Sign up for an [OpenAI API key](https://platform.openai.com/overview) and store it in your environment.
|
|
226
|
-
|
|
227
|
-
```ts
|
|
228
|
-
import { ChatGPTAPI } from 'chatgpt'
|
|
229
|
-
|
|
230
|
-
async function example() {
|
|
231
|
-
const api = new ChatGPTAPI({
|
|
232
|
-
apiKey: process.env.OPENAI_API_KEY
|
|
233
|
-
})
|
|
234
|
-
|
|
235
|
-
const res = await api.sendMessage('Hello World!')
|
|
236
|
-
console.log(res.text)
|
|
237
|
-
}
|
|
238
|
-
```
|
|
239
|
-
|
|
240
|
-
You can override the default `model` (`gpt-3.5-turbo`) and any [OpenAI chat completion params](https://platform.openai.com/docs/api-reference/chat/create) using `completionParams`:
|
|
241
|
-
|
|
242
|
-
```ts
|
|
243
|
-
const api = new ChatGPTAPI({
|
|
244
|
-
apiKey: process.env.OPENAI_API_KEY,
|
|
245
|
-
completionParams: {
|
|
246
|
-
model: 'gpt-4',
|
|
247
|
-
temperature: 0.5,
|
|
248
|
-
top_p: 0.8
|
|
249
|
-
}
|
|
250
|
-
})
|
|
251
|
-
```
|
|
252
|
-
|
|
253
|
-
If you want to track the conversation, you'll need to pass the `parentMessageId` like this:
|
|
254
|
-
|
|
255
|
-
```ts
|
|
256
|
-
const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
|
|
257
|
-
|
|
258
|
-
// send a message and wait for the response
|
|
259
|
-
let res = await api.sendMessage('What is OpenAI?')
|
|
260
|
-
console.log(res.text)
|
|
261
|
-
|
|
262
|
-
// send a follow-up
|
|
263
|
-
res = await api.sendMessage('Can you expand on that?', {
|
|
264
|
-
parentMessageId: res.id
|
|
265
|
-
})
|
|
266
|
-
console.log(res.text)
|
|
267
|
-
|
|
268
|
-
// send another follow-up
|
|
269
|
-
res = await api.sendMessage('What were we talking about?', {
|
|
270
|
-
parentMessageId: res.id
|
|
271
|
-
})
|
|
272
|
-
console.log(res.text)
|
|
273
|
-
```
|
|
274
|
-
|
|
275
|
-
You can add streaming via the `onProgress` handler:
|
|
276
|
-
|
|
277
|
-
```ts
|
|
278
|
-
const res = await api.sendMessage('Write a 500 word essay on frogs.', {
|
|
279
|
-
// print the partial response as the AI is "typing"
|
|
280
|
-
onProgress: (partialResponse) => console.log(partialResponse.text)
|
|
281
|
-
})
|
|
282
|
-
|
|
283
|
-
// print the full text at the end
|
|
284
|
-
console.log(res.text)
|
|
285
|
-
```
|
|
286
|
-
|
|
287
|
-
You can add a timeout using the `timeoutMs` option:
|
|
288
|
-
|
|
289
|
-
```ts
|
|
290
|
-
// timeout after 2 minutes (which will also abort the underlying HTTP request)
|
|
291
|
-
const response = await api.sendMessage(
|
|
292
|
-
'write me a really really long essay on frogs',
|
|
293
|
-
{
|
|
294
|
-
timeoutMs: 2 * 60 * 1000
|
|
295
|
-
}
|
|
296
|
-
)
|
|
297
|
-
```
|
|
298
|
-
|
|
299
|
-
If you want to see more info about what's actually being sent to [OpenAI's chat completions API](https://platform.openai.com/docs/api-reference/chat/create), set the `debug: true` option in the `ChatGPTAPI` constructor:
|
|
300
|
-
|
|
301
|
-
```ts
|
|
302
|
-
const api = new ChatGPTAPI({
|
|
303
|
-
apiKey: process.env.OPENAI_API_KEY,
|
|
304
|
-
debug: true
|
|
305
|
-
})
|
|
306
|
-
```
|
|
307
|
-
|
|
308
|
-
We default to a basic `systemMessage`. You can override this in either the `ChatGPTAPI` constructor or `sendMessage`:
|
|
309
|
-
|
|
310
|
-
```ts
|
|
311
|
-
const res = await api.sendMessage('what is the answer to the universe?', {
|
|
312
|
-
systemMessage: `You are ChatGPT, a large language model trained by OpenAI. You answer as concisely as possible for each responseIf you are generating a list, do not have too many items.
|
|
313
|
-
Current date: ${new Date().toISOString()}\n\n`
|
|
314
|
-
})
|
|
315
|
-
```
|
|
316
|
-
|
|
317
|
-
Note that we automatically handle appending the previous messages to the prompt and attempt to optimize for the available tokens (which defaults to `4096`).
|
|
318
|
-
|
|
319
|
-
<details>
|
|
320
|
-
<summary>Usage in CommonJS (Dynamic import)</summary>
|
|
321
|
-
|
|
322
|
-
```js
|
|
323
|
-
async function example() {
|
|
324
|
-
// To use ESM in CommonJS, you can use a dynamic import like this:
|
|
325
|
-
const { ChatGPTAPI } = await import('chatgpt')
|
|
326
|
-
// You can also try dynamic importing like this:
|
|
327
|
-
// const importDynamic = new Function('modulePath', 'return import(modulePath)')
|
|
328
|
-
// const { ChatGPTAPI } = await importDynamic('chatgpt')
|
|
329
|
-
|
|
330
|
-
const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
|
|
331
|
-
|
|
332
|
-
const res = await api.sendMessage('Hello World!')
|
|
333
|
-
console.log(res.text)
|
|
334
|
-
}
|
|
335
|
-
```
|
|
336
|
-
|
|
337
|
-
</details>
|
|
338
|
-
|
|
339
|
-
### Usage - ChatGPTUnofficialProxyAPI
|
|
340
|
-
|
|
341
|
-
The API for `ChatGPTUnofficialProxyAPI` is almost exactly the same. You just need to provide a ChatGPT `accessToken` instead of an OpenAI API key.
|
|
342
|
-
|
|
343
|
-
```ts
|
|
344
|
-
import { ChatGPTUnofficialProxyAPI } from 'chatgpt'
|
|
345
|
-
|
|
346
|
-
async function example() {
|
|
347
|
-
const api = new ChatGPTUnofficialProxyAPI({
|
|
348
|
-
accessToken: process.env.OPENAI_ACCESS_TOKEN
|
|
349
|
-
})
|
|
350
|
-
|
|
351
|
-
const res = await api.sendMessage('Hello World!')
|
|
352
|
-
console.log(res.text)
|
|
353
|
-
}
|
|
354
|
-
```
|
|
355
|
-
|
|
356
|
-
See [demos/demo-reverse-proxy](./demos/demo-reverse-proxy.ts) for a full example:
|
|
357
|
-
|
|
358
|
-
```bash
|
|
359
|
-
npx tsx demos/demo-reverse-proxy.ts
|
|
360
|
-
```
|
|
361
|
-
|
|
362
|
-
`ChatGPTUnofficialProxyAPI` messages also contain a `conversationid` in addition to `parentMessageId`, since the ChatGPT webapp can't reference messages across different accounts & conversations.
|
|
363
|
-
|
|
364
|
-
#### Reverse Proxy
|
|
365
|
-
|
|
366
|
-
You can override the reverse proxy by passing `apiReverseProxyUrl`:
|
|
367
|
-
|
|
368
|
-
```ts
|
|
369
|
-
const api = new ChatGPTUnofficialProxyAPI({
|
|
370
|
-
accessToken: process.env.OPENAI_ACCESS_TOKEN,
|
|
371
|
-
apiReverseProxyUrl: 'https://your-example-server.com/api/conversation'
|
|
372
|
-
})
|
|
373
|
-
```
|
|
374
|
-
|
|
375
|
-
Known reverse proxies run by community members include:
|
|
376
|
-
|
|
377
|
-
| Reverse Proxy URL | Author | Rate Limits | Last Checked |
|
|
378
|
-
| ------------------------------------------------- | -------------------------------------------- | ---------------------------- | ------------ |
|
|
379
|
-
| `https://ai.fakeopen.com/api/conversation` | [@pengzhile](https://github.com/pengzhile) | 5 req / 10 seconds by IP | 4/18/2023 |
|
|
380
|
-
| `https://api.pawan.krd/backend-api/conversation` | [@PawanOsman](https://github.com/PawanOsman) | 50 req / 15 seconds (~3 r/s) | 3/23/2023 |
|
|
381
|
-
|
|
382
|
-
Note: info on how the reverse proxies work is not being published at this time in order to prevent OpenAI from disabling access.
|
|
383
|
-
|
|
384
|
-
#### Access Token
|
|
385
|
-
|
|
386
|
-
To use `ChatGPTUnofficialProxyAPI`, you'll need an OpenAI access token from the ChatGPT webapp. To do this, you can use any of the following methods which take an `email` and `password` and return an access token:
|
|
387
|
-
|
|
388
|
-
- Node.js libs
|
|
389
|
-
- [ericlewis/openai-authenticator](https://github.com/ericlewis/openai-authenticator)
|
|
390
|
-
- [michael-dm/openai-token](https://github.com/michael-dm/openai-token)
|
|
391
|
-
- [allanoricil/chat-gpt-authenticator](https://github.com/AllanOricil/chat-gpt-authenticator)
|
|
392
|
-
- Python libs
|
|
393
|
-
- [acheong08/OpenAIAuth](https://github.com/acheong08/OpenAIAuth)
|
|
394
|
-
|
|
395
|
-
These libraries work with email + password accounts (e.g., they do not support accounts where you auth via Microsoft / Google).
|
|
396
|
-
|
|
397
|
-
Alternatively, you can manually get an `accessToken` by logging in to the ChatGPT webapp and then opening `https://chat.openai.com/api/auth/session`, which will return a JSON object containing your `accessToken` string.
|
|
398
|
-
|
|
399
|
-
Access tokens last for days.
|
|
400
|
-
|
|
401
|
-
**Note**: using a reverse proxy will expose your access token to a third-party. There shouldn't be any adverse effects possible from this, but please consider the risks before using this method.
|
|
402
|
-
|
|
403
|
-
## Docs
|
|
404
|
-
|
|
405
|
-
See the [auto-generated docs](./docs/classes/ChatGPTAPI.md) for more info on methods and parameters.
|
|
406
|
-
|
|
407
|
-
## Demos
|
|
408
|
-
|
|
409
|
-
Most of the demos use `ChatGPTAPI`. It should be pretty easy to convert them to use `ChatGPTUnofficialProxyAPI` if you'd rather use that approach. The only thing that needs to change is how you initialize the api with an `accessToken` instead of an `apiKey`.
|
|
410
|
-
|
|
411
|
-
To run the included demos:
|
|
412
|
-
|
|
413
|
-
1. clone repo
|
|
414
|
-
2. install node deps
|
|
415
|
-
3. set `OPENAI_API_KEY` in .env
|
|
416
|
-
|
|
417
|
-
A [basic demo](./demos/demo.ts) is included for testing purposes:
|
|
418
|
-
|
|
419
|
-
```bash
|
|
420
|
-
npx tsx demos/demo.ts
|
|
421
|
-
```
|
|
422
|
-
|
|
423
|
-
A [demo showing on progress handler](./demos/demo-on-progress.ts):
|
|
424
|
-
|
|
425
|
-
```bash
|
|
426
|
-
npx tsx demos/demo-on-progress.ts
|
|
427
|
-
```
|
|
428
|
-
|
|
429
|
-
The on progress demo uses the optional `onProgress` parameter to `sendMessage` to receive intermediary results as ChatGPT is "typing".
|
|
430
|
-
|
|
431
|
-
A [conversation demo](./demos/demo-conversation.ts):
|
|
432
|
-
|
|
433
|
-
```bash
|
|
434
|
-
npx tsx demos/demo-conversation.ts
|
|
435
|
-
```
|
|
436
|
-
|
|
437
|
-
A [persistence demo](./demos/demo-persistence.ts) shows how to store messages in Redis for persistence:
|
|
438
|
-
|
|
439
|
-
```bash
|
|
440
|
-
npx tsx demos/demo-persistence.ts
|
|
441
|
-
```
|
|
442
|
-
|
|
443
|
-
Any [keyv adaptor](https://github.com/jaredwray/keyv) is supported for persistence, and there are overrides if you'd like to use a different way of storing / retrieving messages.
|
|
444
|
-
|
|
445
|
-
Note that persisting message is required for remembering the context of previous conversations beyond the scope of the current Node.js process, since by default, we only store messages in memory. Here's an [external demo](https://github.com/transitive-bullshit/chatgpt-twitter-bot/blob/main/src/index.ts#L86-L95) of using a completely custom database solution to persist messages.
|
|
446
|
-
|
|
447
|
-
**Note**: Persistence is handled automatically when using `ChatGPTUnofficialProxyAPI` because it is connecting indirectly to ChatGPT.
|
|
448
|
-
|
|
449
|
-
## Projects
|
|
450
|
-
|
|
451
|
-
All of these awesome projects are built using the `chatgpt` package. 🤯
|
|
452
|
-
|
|
453
|
-
- [Twitter Bot](https://github.com/transitive-bullshit/chatgpt-twitter-bot) powered by ChatGPT ✨
|
|
454
|
-
- Mention [@ChatGPTBot](https://twitter.com/ChatGPTBot) on Twitter with your prompt to try it out
|
|
455
|
-
- [ChatGPT API Server](https://github.com/waylaidwanderer/node-chatgpt-api) - API server for this package with support for multiple OpenAI accounts, proxies, and load-balancing requests between accounts.
|
|
456
|
-
- [ChatGPT Prompts](https://github.com/pacholoamit/chatgpt-prompts) - A collection of 140+ of the best ChatGPT prompts from the community.
|
|
457
|
-
- [Lovelines.xyz](https://lovelines.xyz?ref=chatgpt-api)
|
|
458
|
-
- [Chrome Extension](https://github.com/gragland/chatgpt-everywhere) ([demo](https://twitter.com/gabe_ragland/status/1599466486422470656))
|
|
459
|
-
- [VSCode Extension #1](https://github.com/mpociot/chatgpt-vscode) ([demo](https://twitter.com/marcelpociot/status/1599180144551526400), [updated version](https://github.com/timkmecl/chatgpt-vscode), [marketplace](https://marketplace.visualstudio.com/items?itemName=timkmecl.chatgpt))
|
|
460
|
-
- [VSCode Extension #2](https://github.com/barnesoir/chatgpt-vscode-plugin) ([marketplace](https://marketplace.visualstudio.com/items?itemName=JayBarnes.chatgpt-vscode-plugin))
|
|
461
|
-
- [VSCode Extension #3](https://github.com/gencay/vscode-chatgpt) ([marketplace](https://marketplace.visualstudio.com/items?itemName=gencay.vscode-chatgpt))
|
|
462
|
-
- [VSCode Extension #4](https://github.com/dogukanakkaya/chatgpt-code-vscode-extension) ([marketplace](https://marketplace.visualstudio.com/items?itemName=dogukanakkaya.chatgpt-code))
|
|
463
|
-
- [Raycast Extension #1](https://github.com/abielzulio/chatgpt-raycast) ([demo](https://twitter.com/abielzulio/status/1600176002042191875))
|
|
464
|
-
- [Raycast Extension #2](https://github.com/domnantas/raycast-chatgpt)
|
|
465
|
-
- [Telegram Bot #1](https://github.com/realies/chatgpt-telegram-bot)
|
|
466
|
-
- [Telegram Bot #2](https://github.com/dawangraoming/chatgpt-telegram-bot)
|
|
467
|
-
- [Telegram Bot #3](https://github.com/RainEggplant/chatgpt-telegram-bot) (group privacy mode, ID-based auth)
|
|
468
|
-
- [Telegram Bot #4](https://github.com/ArdaGnsrn/chatgpt-telegram) (queue system, ID-based chat thread)
|
|
469
|
-
- [Telegram Bot #5](https://github.com/azoway/chatgpt-telegram-bot) (group privacy mode, ID-based chat thread)
|
|
470
|
-
- [Deno Telegram Bot](https://github.com/Ciyou/chatbot-telegram)
|
|
471
|
-
- [Go Telegram Bot](https://github.com/m1guelpf/chatgpt-telegram)
|
|
472
|
-
- [Telegram Bot for YouTube Summaries](https://github.com/codextde/youtube-summary)
|
|
473
|
-
- [GitHub ProBot](https://github.com/oceanlvr/ChatGPTBot)
|
|
474
|
-
- [Discord Bot #1](https://github.com/onury5506/Discord-ChatGPT-Bot)
|
|
475
|
-
- [Discord Bot #2](https://github.com/Nageld/ChatGPT-Bot)
|
|
476
|
-
- [Discord Bot #3](https://github.com/leinstay/gptbot)
|
|
477
|
-
- [Discord Bot #4 (selfbot)](https://github.com/0x7030676e31/cumsocket)
|
|
478
|
-
- [Discord Bot #5](https://github.com/itskdhere/ChatGPT-Discord-BOT)
|
|
479
|
-
- [Discord Bot #6 (Shakespeare bot)](https://gist.github.com/TheBrokenRail/4b37e7c44e8f721d8bd845050d034c16)
|
|
480
|
-
- [Discord Bot #7](https://github.com/Elitezen/discordjs-chatgpt)
|
|
481
|
-
- [Zoom Chat](https://github.com/shixin-guo/my-bot)
|
|
482
|
-
- [WeChat Bot #1](https://github.com/AutumnWhj/ChatGPT-wechat-bot)
|
|
483
|
-
- [WeChat Bot #2](https://github.com/fuergaosi233/wechat-chatgpt)
|
|
484
|
-
- [WeChat Bot #3](https://github.com/wangrongding/wechat-bot)
|
|
485
|
-
- [WeChat Bot #4](https://github.com/darknightlab/wechat-bot)
|
|
486
|
-
- [WeChat Bot #5](https://github.com/sunshanpeng/wechaty-chatgpt)
|
|
487
|
-
- [WeChat Bot #6](https://github.com/formulahendry/chatgpt-wechat-bot)
|
|
488
|
-
- [WeChat Bot #7](https://github.com/gfl94/Chatbot004)
|
|
489
|
-
- [QQ Bot (plugin for Yunzai-bot)](https://github.com/ikechan8370/chatgpt-plugin)
|
|
490
|
-
- [QQ Bot (plugin for KiviBot)](https://github.com/KiviBotLab/kivibot-plugin-chatgpt)
|
|
491
|
-
- [QQ Bot (oicq)](https://github.com/easydu2002/chat_gpt_oicq)
|
|
492
|
-
- [QQ Bot (oicq + RabbitMQ)](https://github.com/linsyking/ChatGPT-QQBot)
|
|
493
|
-
- [QQ Bot (go-cqhttp)](https://github.com/PairZhu/ChatGPT-QQRobot)
|
|
494
|
-
- [QQ Bot (plugin for Yunzai-Bot + Bull)](https://github.com/Micuks/chatGPT-yunzai) (Lightweight, Google Bard support 💪)
|
|
495
|
-
- [EXM smart contracts](https://github.com/decentldotland/molecule)
|
|
496
|
-
- [Flutter ChatGPT API](https://github.com/coskuncay/flutter_chatgpt_api)
|
|
497
|
-
- [Carik Bot](https://github.com/luridarmawan/Carik)
|
|
498
|
-
- [Github Action for reviewing PRs](https://github.com/kxxt/chatgpt-action/)
|
|
499
|
-
- [WhatsApp Bot #1](https://github.com/askrella/whatsapp-chatgpt) (DALL-E + Whisper support 💪)
|
|
500
|
-
- [WhatsApp Bot #2](https://github.com/amosayomide05/chatgpt-whatsapp-bot)
|
|
501
|
-
- [WhatsApp Bot #3](https://github.com/pascalroget/whatsgpt) (multi-user support)
|
|
502
|
-
- [WhatsApp Bot #4](https://github.com/noelzappy/chatgpt-whatsapp) (schedule periodic messages)
|
|
503
|
-
- [WhatsApp Bot #5](https://github.com/hujanais/bs-chat-gpt3-api) (RaspberryPi + ngrok + Twilio)
|
|
504
|
-
- [WhatsApp Bot #6](https://github.com/dannysantino/whatsgpt) (Session and chat history storage with MongoStore)
|
|
505
|
-
- [Matrix Bot](https://github.com/matrixgpt/matrix-chatgpt-bot)
|
|
506
|
-
- [Rental Cover Letter Generator](https://sharehouse.app/ai)
|
|
507
|
-
- [Assistant CLI](https://github.com/diciaup/assistant-cli)
|
|
508
|
-
- [Teams Bot](https://github.com/formulahendry/chatgpt-teams-bot)
|
|
509
|
-
- [Askai](https://github.com/yudax42/askai)
|
|
510
|
-
- [TalkGPT](https://github.com/ShadovvBeast/TalkGPT)
|
|
511
|
-
- [ChatGPT With Voice](https://github.com/thanhsonng/chatgpt-voice)
|
|
512
|
-
- [iOS Shortcut](https://github.com/leecobaby/shortcuts/blob/master/other/ChatGPT_EN.md)
|
|
513
|
-
- [Slack Bot #1](https://github.com/trietphm/chatgpt-slackbot/)
|
|
514
|
-
- [Slack Bot #2](https://github.com/lokwkin/chatgpt-slackbot-node/) (with queueing mechanism)
|
|
515
|
-
- [Slack Bot #3](https://github.com/NessunKim/slack-chatgpt/)
|
|
516
|
-
- [Slack Bot #4](https://github.com/MarkusGalant/chatgpt-slackbot-serverless/) (Serverless AWS Lambda)
|
|
517
|
-
- [Slack Bot #5](https://github.com/benjiJanssens/SlackGPT) (Hosted)
|
|
518
|
-
- [Add to Slack](https://slackgpt.benji.sh/slack/install)
|
|
519
|
-
- [Electron Bot](https://github.com/ShiranAbir/chaty)
|
|
520
|
-
- [Kodyfire CLI](https://github.com/nooqta/chatgpt-kodyfire)
|
|
521
|
-
- [Twitch Bot](https://github.com/BennyDeeDev/chatgpt-twitch-bot)
|
|
522
|
-
- [Continuous Conversation](https://github.com/DanielTerletzkiy/chat-gtp-assistant)
|
|
523
|
-
- [Figma plugin](https://github.com/frederickk/chatgpt-figma-plugin)
|
|
524
|
-
- [NestJS server](https://github.com/RusDyn/chatgpt_nestjs_server)
|
|
525
|
-
- [NestJS ChatGPT Starter Boilerplate](https://github.com/mitkodkn/nestjs-chatgpt-starter)
|
|
526
|
-
- [Wordsmith: Add-in for Microsoft Word](https://github.com/xtremehpx/Wordsmith)
|
|
527
|
-
- [QuizGPT: Create Kahoot quizzes with ChatGPT](https://github.com/Kladdy/quizgpt)
|
|
528
|
-
- [openai-chatgpt: Talk to ChatGPT from the terminal](https://github.com/gmpetrov/openai-chatgpt)
|
|
529
|
-
- [Clippy the Salesforce chatbot](https://github.com/sebas00/chatgptclippy) ClippyJS joke bot
|
|
530
|
-
- [ai-assistant](https://github.com/youking-lib/ai-assistant) Chat assistant
|
|
531
|
-
- [Feishu Bot](https://github.com/linjungz/feishu-chatgpt-bot)
|
|
532
|
-
- [DomainGPT: Discover available domain names](https://github.com/billylo1/DomainGPT)
|
|
533
|
-
- [AI Poem Generator](https://aipoemgenerator.com/)
|
|
534
|
-
- [Next.js ChatGPT With Firebase](https://github.com/youngle316/chatgpt)
|
|
535
|
-
- [ai-commit – GPT-3 Commit Message Generator](https://github.com/insulineru/ai-commit)
|
|
536
|
-
- [AItinerary – ChatGPT itinerary Generator](https://aitinerary.ai)
|
|
537
|
-
- [wechaty-chatgpt - A chatbot based on Wechaty & ChatGPT](https://github.com/zhengxs2018/wechaty-chatgpt)
|
|
538
|
-
- [Julius GPT](https://github.com/christophebe/julius-gpt) - Generate and publish your content from the CLI
|
|
539
|
-
- [OpenAI-API-Service](https://github.com/Jarvan-via/api-service) - Provides OpenAI related APIs for businesses
|
|
540
|
-
- [Discord Daily News Bot](https://github.com/ZirionNeft/chatgpt-discord-daily-news-bot) - Discord bot that generate funny daily news
|
|
541
|
-
- [ai-assistant](https://github.com/chenweiyi/ai-assistant) - Create a chat website similar to ChatGPT
|
|
542
|
-
|
|
543
|
-
If you create a cool integration, feel free to open a PR and add it to the list.
|
|
544
|
-
|
|
545
|
-
## Compatibility
|
|
546
|
-
|
|
547
|
-
- This package is ESM-only.
|
|
548
|
-
- This package supports `node >= 14`.
|
|
549
|
-
- This module assumes that `fetch` is installed.
|
|
550
|
-
- In `node >= 18`, it's installed by default.
|
|
551
|
-
- In `node < 18`, you need to install a polyfill like `unfetch/polyfill` ([guide](https://github.com/developit/unfetch#usage-as-a-polyfill)) or `isomorphic-fetch` ([guide](https://github.com/matthew-andrews/isomorphic-fetch#readme)).
|
|
552
|
-
- If you want to build a website using `chatgpt`, we recommend using it only from your backend API
|
|
553
|
-
|
|
554
|
-
## Credits
|
|
555
|
-
|
|
556
|
-
- Huge thanks to [@waylaidwanderer](https://github.com/waylaidwanderer), [@abacaj](https://github.com/abacaj), [@wong2](https://github.com/wong2), [@simon300000](https://github.com/simon300000), [@RomanHotsiy](https://github.com/RomanHotsiy), [@ElijahPepe](https://github.com/ElijahPepe), and all the other contributors 💪
|
|
557
|
-
- [OpenAI](https://openai.com) for creating [ChatGPT](https://openai.com/blog/chatgpt/) 🔥
|
|
558
|
-
- I run the [ChatGPT Hackers Discord](https://www.chatgpthackers.dev/) with over 8k developers – come join us!
|
|
559
|
-
|
|
560
|
-
## License
|
|
561
|
-
|
|
562
|
-
MIT © [Travis Fischer](https://transitivebullsh.it)
|
|
563
|
-
|
|
564
|
-
If you found this project interesting, please consider [sponsoring me](https://github.com/sponsors/transitive-bullshit) or <a href="https://twitter.com/transitive_bs">following me on twitter <img src="https://storage.googleapis.com/saasify-assets/twitter-logo.svg" alt="twitter" height="24px" align="center"></a>
|