wechaty-web-panel 1.6.78 → 1.6.79
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/npm.yml +1 -1
- package/dist/cjs/src/bot/chatgpt/index.d.ts +60 -0
- package/dist/cjs/src/bot/chatgpt/index.js +235 -0
- package/dist/cjs/src/bot/fastgpt/index.d.ts +10 -0
- package/dist/cjs/src/bot/fastgpt/index.js +72 -0
- package/dist/cjs/src/bot/qanything/index.d.ts +35 -0
- package/dist/cjs/src/bot/qanything/index.js +143 -0
- package/dist/cjs/src/botInstance/officialOpenAi.d.ts +2 -28
- package/dist/cjs/src/botInstance/officialOpenAi.js +12 -60
- package/dist/cjs/src/botInstance/sdk/chatGPT.d.ts +0 -12
- package/dist/cjs/src/botInstance/sdk/chatGPT.js +1 -154
- package/dist/cjs/src/botInstance/sdk/difyClient.js +1 -1
- package/dist/cjs/src/package-json.d.ts +5 -0
- package/dist/cjs/src/package-json.js +6 -1
- package/dist/esm/src/bot/chatgpt/index.d.ts +60 -0
- package/dist/esm/src/bot/chatgpt/index.js +231 -0
- package/dist/esm/src/bot/fastgpt/index.d.ts +10 -0
- package/dist/esm/src/bot/fastgpt/index.js +68 -0
- package/dist/esm/src/bot/qanything/index.d.ts +35 -0
- package/dist/esm/src/bot/qanything/index.js +136 -0
- package/dist/esm/src/botInstance/officialOpenAi.d.ts +2 -28
- package/dist/esm/src/botInstance/officialOpenAi.js +10 -58
- package/dist/esm/src/botInstance/sdk/chatGPT.d.ts +0 -12
- package/dist/esm/src/botInstance/sdk/chatGPT.js +1 -153
- package/dist/esm/src/botInstance/sdk/difyClient.js +1 -1
- package/dist/esm/src/package-json.d.ts +5 -0
- package/dist/esm/src/package-json.js +6 -1
- package/package.json +6 -1
package/dist/cjs/src/bot/chatgpt/index.d.ts

```diff
@@ -0,0 +1,60 @@
+export class ChatGPTAPI {
+    constructor({ apiKey, apiBaseUrl, completionParams, debug, systemMessage, maxModelTokens, maxResponseTokens, memoryConfig, streaming, redis }: {
+        apiKey: any;
+        apiBaseUrl?: string | undefined;
+        completionParams?: {} | undefined;
+        debug?: boolean | undefined;
+        systemMessage?: string | undefined;
+        maxModelTokens?: number | undefined;
+        maxResponseTokens?: number | undefined;
+        memoryConfig?: {
+            type: string;
+            redisConfig: null;
+            expireTime: null;
+        } | undefined;
+        streaming?: boolean | undefined;
+        redis: any;
+    });
+    apiKey: any;
+    apiBaseUrl: string;
+    completionParams: {};
+    debug: boolean;
+    systemMessage: string;
+    maxModelTokens: number;
+    maxResponseTokens: number;
+    memoryConfig: {
+        type: string;
+        redisConfig: null;
+        expireTime: null;
+    };
+    conversations: Map<any, any>;
+    streaming: boolean;
+    redisClient: any;
+    /**
+     * Create a new conversation chain or retrieve an existing one
+     * @param {string} chatId - Unique identifier for the conversation
+     * @param {string} systemMessage - Optional system message to override the default
+     * @param {boolean} streaming - Whether to enable streaming for this conversation
+     * @returns {Object} - Conversation object with chain and metadata
+     */
+    getOrCreateConversation(chatId: string, { systemMessage, streaming, timeoutMs }: string): Object;
+    /**
+     * Send a message to the model and get a response
+     * @param {string} message - The message to send
+     * @param {Object} options - Additional options
+     * @returns {Promise<Object>} - The response
+     */
+    sendMessage(message: string, options?: Object): Promise<Object>;
+    /**
+     * Clear a specific conversation or all conversations
+     * @param {string} chatId - Optional chat ID to clear a specific conversation
+     */
+    clearConversations(chatId?: string): Promise<void>;
+    /**
+     * Set expiration time for a conversation
+     * @param {string} chatId - The chat ID
+     * @param {number} seconds - Time in seconds until expiration
+     */
+    setExpiration(chatId: string, seconds: number): Promise<void>;
+}
+//# sourceMappingURL=index.d.ts.map
```
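The declaration above is the public surface of the new LangChain-backed client. A minimal construction sketch, assuming the package's ESM build is imported directly by path (the deep import path, model name, and key are illustrative, not taken from the diff):

```js
import { ChatGPTAPI } from 'wechaty-web-panel/dist/esm/src/bot/chatgpt/index.js'; // path assumed

const api = new ChatGPTAPI({
  apiKey: process.env.OPENAI_API_KEY,      // placeholder
  apiBaseUrl: 'https://api.openai.com/v1', // the constructor's default
  completionParams: { model: 'gpt-3.5-turbo', temperature: 0.7 }, // model is illustrative
  maxModelTokens: 4096,
  maxResponseTokens: 1024,
  memoryConfig: { type: 'memory', redisConfig: null, expireTime: null }, // in-memory history
  streaming: true,
  redis: null, // only consulted when memoryConfig.type === 'redis'
});
```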
package/dist/cjs/src/bot/chatgpt/index.js

```diff
@@ -0,0 +1,235 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ChatGPTAPI = void 0;
+const openai_1 = require("@langchain/openai");
+const chains_1 = require("langchain/chains");
+const memory_1 = require("langchain/memory");
+const prompts_1 = require("@langchain/core/prompts");
+const output_parsers_1 = require("@langchain/core/output_parsers");
+const redis_1 = require("@langchain/redis");
+const openai_2 = require("@langchain/openai");
+const uuid_1 = require("uuid");
+class ChatGPTAPI {
+    constructor({ apiKey, apiBaseUrl = "https://api.openai.com/v1", completionParams = {}, debug = false, systemMessage = "", maxModelTokens = 4096, maxResponseTokens = 1024, memoryConfig = {
+        type: "memory",
+        redisConfig: null,
+        expireTime: null, // Time in seconds, null means no expiration
+    }, streaming = true, // Enable streaming mode by default
+    redis }) {
+        this.apiKey = apiKey;
+        this.apiBaseUrl = apiBaseUrl;
+        this.completionParams = completionParams;
+        this.debug = debug;
+        this.systemMessage = systemMessage;
+        this.maxModelTokens = maxModelTokens;
+        this.maxResponseTokens = maxResponseTokens;
+        this.memoryConfig = memoryConfig;
+        this.conversations = new Map();
+        this.streaming = streaming;
+        // Initialize Redis if needed
+        if (memoryConfig.type === "redis" && memoryConfig.redisConfig) {
+            this.redisClient = redis;
+        }
+    }
+    /**
+     * Create a new conversation chain or retrieve an existing one
+     * @param {string} chatId - Unique identifier for the conversation
+     * @param {string} systemMessage - Optional system message to override the default
+     * @param {boolean} streaming - Whether to enable streaming for this conversation
+     * @returns {Object} - Conversation object with chain and metadata
+     */
+    async getOrCreateConversation(chatId, { systemMessage = null, streaming = null, timeoutMs = 60000 }) {
+        // If conversation exists and no specific system message is requested, return it
+        if (this.conversations.has(chatId) && !systemMessage) {
+            return this.conversations.get(chatId);
+        }
+        // Calculate context window size
+        const contextWindowSize = this.maxModelTokens - this.maxResponseTokens - 500; // Buffer for metadata
+        // Create the appropriate memory system
+        let memory;
+        if (this.memoryConfig.type === "redis") {
+            // Setup Redis vector store for memory
+            const vectorStore = new redis_1.RedisVectorStore(new openai_2.OpenAIEmbeddings({
+                openAIApiKey: this.apiKey,
+                configuration: {
+                    baseURL: this.apiBaseUrl
+                }
+            }), {
+                redisClient: this.redisClient,
+                indexName: `chat:${chatId}`,
+            });
+            memory = new memory_1.BufferMemory({
+                memoryKey: "chat_history",
+                chatHistory: vectorStore,
+                returnMessages: true,
+                outputKey: "output",
+                inputKey: "input",
+            });
+            // Set expiration if configured
+            if (this.memoryConfig.expireTime) {
+                const expireTimeSeconds = this.memoryConfig.expireTime;
+                this.redisClient.expire(`chat:${chatId}`, expireTimeSeconds);
+            }
+        }
+        else {
+            // Use in-memory storage
+            memory = new memory_1.BufferWindowMemory({
+                memoryKey: "chat_history",
+                // k: 10, // Keep last 10 interactions by default, adjust based on token size
+                returnMessages: true,
+                outputKey: "output",
+                inputKey: "input",
+            });
+        }
+        // Determine if streaming should be enabled
+        const useStreaming = streaming !== null ? streaming : this.streaming;
+        // Create chat model
+        const chatModel = new openai_1.ChatOpenAI({
+            openAIApiKey: this.apiKey,
+            modelName: this.completionParams.model,
+            temperature: this.completionParams.temperature || 0.7,
+            topP: this.completionParams.top_p || 1,
+            presencePenalty: this.completionParams.presence_penalty || 0,
+            frequencyPenalty: this.completionParams.frequency_penalty || 0,
+            maxTokens: this.maxResponseTokens,
+            timeout: timeoutMs || 60000,
+            streaming: useStreaming,
+            verbose: this.debug,
+            configuration: {
+                baseURL: this.apiBaseUrl
+            }
+        });
+        // Create prompt template with system message
+        const finalSystemMessage = systemMessage || this.systemMessage || "You are a helpful assistant.";
+        const prompt = prompts_1.ChatPromptTemplate.fromMessages([
+            prompts_1.SystemMessagePromptTemplate.fromTemplate(finalSystemMessage),
+            new prompts_1.MessagesPlaceholder("chat_history"),
+            prompts_1.HumanMessagePromptTemplate.fromTemplate("{input}"),
+        ]);
+        // Create conversation chain using the new Runnable API in v0.3
+        const chain = new chains_1.ConversationChain({
+            memory,
+            llm: chatModel,
+            prompt,
+            verbose: this.debug,
+            outputParser: new output_parsers_1.StringOutputParser(),
+            inputKey: "input",
+            outputKey: "output"
+        });
+        // Store the conversation
+        this.conversations.set(chatId, {
+            chain,
+            createdAt: Date.now(),
+            model: this.completionParams.model,
+            contextWindowSize,
+            streaming: useStreaming
+        });
+        return this.conversations.get(chatId);
+    }
+    /**
+     * Send a message to the model and get a response
+     * @param {string} message - The message to send
+     * @param {Object} options - Additional options
+     * @returns {Promise<Object>} - The response
+     */
+    async sendMessage(message, options = {}) {
+        const chatId = options.chatId || (0, uuid_1.v4)();
+        const systemMsg = options.systemMessage || this.systemMessage;
+        const timeoutMs = options.timeoutMs || 60000;
+        const streaming = options.streaming !== undefined ? options.streaming : this.streaming;
+        try {
+            if (this.debug) {
+                console.log(`Sending message to ${this.completionParams.model} with chatId: ${chatId}`);
+                console.log(`Message: ${message}`);
+                console.log(`Streaming: ${streaming}`);
+            }
+            // Get or create conversation
+            const conversation = await this.getOrCreateConversation(chatId, { systemMessage: systemMsg, streaming, timeoutMs: timeoutMs });
+            let responseText = "";
+            // Handle streaming or non-streaming response using v0.3 API
+            if (streaming) {
+                const responsePromise = new Promise(async (resolve, reject) => {
+                    try {
+                        responseText = "";
+                        // Using v0.3 streaming API
+                        const stream = await conversation.chain.stream({
+                            input: message
+                        });
+                        for await (const chunk of stream) {
+                            if (chunk.output) {
+                                responseText += chunk.output;
+                            }
+                        }
+                        resolve(responseText);
+                    }
+                    catch (error) {
+                        reject(error);
+                    }
+                });
+                // Race between response and timeout
+                await Promise.race([responsePromise]);
+            }
+            else {
+                // Non-streaming request using v0.3 API
+                const responsePromise = conversation.chain.invoke({
+                    input: message
+                });
+                // Race between response and timeout
+                const response = await Promise.race([responsePromise]);
+                responseText = response.output;
+            }
+            // Return formatted response (same format for both streaming and non-streaming)
+            return {
+                chatId: chatId,
+                text: responseText,
+                id: (0, uuid_1.v4)()
+            };
+        }
+        catch (error) {
+            if (this.debug) {
+                console.error("Error in sendMessage:", error);
+            }
+            throw error;
+        }
+    }
+    /**
+     * Clear a specific conversation or all conversations
+     * @param {string} chatId - Optional chat ID to clear a specific conversation
+     */
+    async clearConversations(chatId = null) {
+        if (chatId) {
+            // Clear a specific conversation
+            this.conversations.delete(chatId);
+            // Clear from Redis if using Redis
+            if (this.memoryConfig.type === "redis" && this.redisClient) {
+                await this.redisClient.del(`chat:${chatId}`);
+            }
+        }
+        else {
+            // Clear all conversations
+            this.conversations.clear();
+            // Would need to implement a more complex Redis cleanup if needed
+        }
+    }
+    /**
+     * Set expiration time for a conversation
+     * @param {string} chatId - The chat ID
+     * @param {number} seconds - Time in seconds until expiration
+     */
+    async setExpiration(chatId, seconds) {
+        if (!this.conversations.has(chatId)) {
+            throw new Error(`Conversation with ID ${chatId} not found`);
+        }
+        if (this.memoryConfig.type === "redis" && this.redisClient) {
+            await this.redisClient.expire(`chat:${chatId}`, seconds);
+        }
+        else {
+            // For in-memory, we can set a timer to delete the conversation
+            setTimeout(() => {
+                this.conversations.delete(chatId);
+            }, seconds * 1000);
+        }
+    }
+}
+exports.ChatGPTAPI = ChatGPTAPI;
+//# sourceMappingURL=index.js.map
```
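Conversation state is keyed by `chatId`: repeating the same id reuses the cached `ConversationChain` and its memory, so history carries across turns. Two caveats visible in the code: both `Promise.race([responsePromise])` calls race a single promise, so the only effective timeout is the `timeout` passed to `ChatOpenAI`; and because `sendMessage` forwards `this.systemMessage` whenever it is non-empty, in-memory history only survives across calls when the instance-level `systemMessage` is empty, as in the construction sketch above. A short usage sketch continuing from that example:

```js
// Same chatId -> same chain -> the BufferWindowMemory retains prior turns.
const first = await api.sendMessage('My name is Alice.', { chatId: 'room-42' });
const second = await api.sendMessage('What is my name?', { chatId: 'room-42' });
console.log(second.text); // answered from the buffered chat_history

// Per-call overrides; a fresh systemMessage rebuilds the chain for this chatId.
await api.sendMessage('Summarize our chat.', {
  chatId: 'room-42',
  systemMessage: 'Answer in one sentence.',
  streaming: false,
  timeoutMs: 30 * 1000,
});

// Drop the cached chain (and delete the Redis key when Redis memory is active).
await api.clearConversations('room-42');
```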
package/dist/cjs/src/bot/fastgpt/index.js

```diff
@@ -0,0 +1,72 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FastGPTAPI = void 0;
+const axios = require('axios');
+const axiosRetry = require('axios-retry').default;
+axiosRetry(axios, { retries: 3, retryDelay: () => 1000 });
+class FastGPTAPI {
+    constructor(opts) {
+        const { apiKey, apiBaseUrl = 'https://api.openai.com/v1', debug, } = opts;
+        this._apiKey = apiKey;
+        this._apiBaseUrl = apiBaseUrl;
+        this._debug = debug;
+        if (!this._apiKey) {
+            throw new Error('缺少 apiKey');
+        }
+    }
+    async sendMessage(text, opts = {}) {
+        const { timeoutMs, chatId, variables } = opts;
+        const body = {
+            chatId,
+            variables: {
+                ...variables,
+            },
+            messages: [{
+                    role: 'user',
+                    content: text,
+                }],
+            stream: false,
+            detail: false,
+        };
+        if (this._debug) {
+            console.log('request body', body);
+        }
+        const response = await axios({
+            method: 'POST',
+            url: `${this._apiBaseUrl}/chat/completions`,
+            data: body || null,
+            headers: {
+                'Content-Type': 'application/json',
+                Authorization: `Bearer ${this._apiKey}`,
+            },
+            timeout: timeoutMs,
+            responseType: 'json',
+        });
+        if (response.status === 200) {
+            console.log('response.data.choices', JSON.stringify(response.data));
+            if (response.data.code) {
+                console.log('私有知识库请求出错', JSON.stringify(response.data));
+                return {
+                    text: '',
+                };
+            }
+            const data = response.data.choices[0].message;
+            let finalText = data.content;
+            if (Array.isArray(finalText)) {
+                const finalContent = finalText.find(item => item.type === 'text');
+                if (finalContent) {
+                    finalText = finalContent.text.content || '';
+                }
+            }
+            return {
+                text: finalText,
+            };
+        }
+        console.log('接口请求报错', response.data);
+        return {
+            text: '',
+        };
+    }
+}
+exports.FastGPTAPI = FastGPTAPI;
+//# sourceMappingURL=index.js.map
```
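`FastGPTAPI` is a thin non-streaming wrapper around a FastGPT deployment's OpenAI-compatible `/chat/completions` route, with `chatId` and `variables` as FastGPT-specific body fields and three 1-second axios retries. Note the default `apiBaseUrl` is OpenAI's, so callers must point it at their FastGPT host. A hedged usage sketch (import path and endpoint are assumptions):

```js
import { FastGPTAPI } from 'wechaty-web-panel/dist/esm/src/bot/fastgpt/index.js'; // path assumed

const fastgpt = new FastGPTAPI({
  apiKey: process.env.FASTGPT_API_KEY,              // placeholder
  apiBaseUrl: 'https://fastgpt.example.com/api/v1', // hypothetical deployment
  debug: false,
});

// chatId keeps server-side context; variables feed the FastGPT workflow.
const { text } = await fastgpt.sendMessage('你好', {
  chatId: 'room-42',
  timeoutMs: 60 * 1000,
  variables: { user: 'Alice' },
});
console.log(text); // '' when the API responds with an error code
```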
package/dist/cjs/src/bot/qanything/index.d.ts

```diff
@@ -0,0 +1,35 @@
+export class QAnyApi extends QAnyClient {
+    sendMessage(query: any, { needHistory, user, timeoutMs, variables, }: {
+        needHistory: any;
+        user: any;
+        timeoutMs?: number | undefined;
+        variables: any;
+    }): Promise<any>;
+}
+declare class QAnyClient {
+    constructor({ apiKey, baseUrl, debug, stream, botId }: {
+        apiKey: any;
+        baseUrl?: string | undefined;
+        debug?: boolean | undefined;
+        stream?: boolean | undefined;
+        botId: any;
+    });
+    apiKey: any;
+    baseUrl: string;
+    debug: boolean;
+    stream: boolean;
+    botId: any;
+    history: any[];
+    updateApiKey(apiKey: any): void;
+    sendRequest({ method, endpoint, data, params, stream, headerParams, timeoutMs }: {
+        method: any;
+        endpoint: any;
+        data: any;
+        params: any;
+        stream?: boolean | undefined;
+        headerParams?: {} | undefined;
+        timeoutMs?: number | undefined;
+    }): Promise<import("axios").AxiosResponse<any, any>>;
+}
+export {};
+//# sourceMappingURL=index.d.ts.map
```
package/dist/cjs/src/bot/qanything/index.js

```diff
@@ -0,0 +1,143 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.QAnyApi = void 0;
+const axios_1 = __importDefault(require("axios"));
+const BASE_URL = 'https://openapi.youdao.com';
+const routes = {
+    // 发起对话
+    creatChat: {
+        method: 'POST', url: () => '/q_anything/api/bot/chat_stream',
+    }
+};
+class QAnyClient {
+    constructor({ apiKey, baseUrl = BASE_URL, debug = false, stream = true, botId }) {
+        this.apiKey = apiKey;
+        this.baseUrl = baseUrl || BASE_URL;
+        this.debug = debug;
+        this.stream = stream;
+        this.botId = botId;
+        this.history = [];
+    }
+    updateApiKey(apiKey) {
+        this.apiKey = apiKey;
+    }
+    async sendRequest({ method, endpoint, data, params, stream = false, headerParams = {}, timeoutMs = 100 * 1000 }) {
+        const headers = {
+            ...{
+                Authorization: `${this.apiKey}`,
+                'Content-Type': 'application/json',
+            },
+            ...headerParams,
+        };
+        const url = `${this.baseUrl}${endpoint}`;
+        let response;
+        if (this.debug) {
+            console.log('request', url, { data, headers, params });
+        }
+        if (!stream) {
+            response = await axios_1.default.request({
+                method,
+                url,
+                data: data || {},
+                params: params || {},
+                headers,
+                timeout: timeoutMs,
+            });
+        }
+        else {
+            response = await (0, axios_1.default)({
+                method,
+                url,
+                data,
+                params,
+                headers,
+                responseType: 'stream',
+            });
+        }
+        return response;
+    }
+}
+class QAnyApi extends QAnyClient {
+    async sendMessage(query, { needHistory, user, timeoutMs = 100 * 1000, variables, }) {
+        if (needHistory) {
+            this.history = this.history.slice(-2);
+        }
+        else {
+            this.history = [];
+        }
+        const data = {
+            uuid: this.botId,
+            question: query,
+            history: this.history
+        };
+        const res = await this.sendRequest({
+            method: routes.creatChat.method,
+            endpoint: routes.creatChat.url(),
+            data,
+            stream: this.stream,
+            timeoutMs,
+        });
+        const asyncSSE = stream => {
+            return new Promise((resolve, reject) => {
+                const answers = [];
+                let answer = '';
+                let id = '';
+                const chunks = [];
+                try {
+                    stream.on('data', data => {
+                        chunks.push(data);
+                    });
+                    stream.on('end', async () => {
+                        const result = Buffer.concat(chunks);
+                        console.log('思考完毕,准备回复');
+                        const streams = new TextDecoder('utf-8').decode(result, { stream: true }).split('\n');
+                        for (const stream of streams) {
+                            let res = {};
+                            try {
+                                res = JSON.parse(stream.substring(5)) || {};
+                            }
+                            catch (e) {
+                                // console.log('json 解析错误,不影响输出', stream)
+                                try {
+                                    res = JSON.parse(stream) || {};
+                                }
+                                catch (e) {
+                                    // console.log('json 解析错误,不影响输出', stream)
+                                }
+                            }
+                            if (res.errorCode && res.errorCode !== '0') {
+                                console.log('QAnything 请求报错', res.msg);
+                                answer = res.msg;
+                            }
+                            else if (res.errorCode) {
+                                if (res.result.response && !res.result.singleQAId) {
+                                    answers.push(res.result.response);
+                                }
+                                if (res.result.singleQAId) {
+                                    this.history = res.result.history;
+                                    id = res.result.singleQAId;
+                                    answer = res.result.response;
+                                }
+                            }
+                        }
+                        if (!answer) {
+                            answer = answers.join('');
+                        }
+                        resolve({ text: answer, conversationId: '', id: id });
+                    });
+                }
+                catch (e) {
+                    resolve({ text: `agent 出错,${e}`, conversationId: '', files: [] });
+                }
+            });
+        };
+        console.log('进入流式输出模式,请耐心等待模型的思考');
+        const result = await asyncSSE(res.data);
+        return result;
+    }
+}
+exports.QAnyApi = QAnyApi;
+//# sourceMappingURL=index.js.map
```
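`QAnyApi` posts to Youdao's `chat_stream` endpoint and buffers the whole SSE stream before parsing, so `sendMessage` resolves once with the full answer; the raw API key is sent in `Authorization` without a `Bearer` prefix, and `needHistory` resends only the last two stored history entries. A usage sketch (import path and ids are assumptions):

```js
import { QAnyApi } from 'wechaty-web-panel/dist/esm/src/bot/qanything/index.js'; // path assumed

const qany = new QAnyApi({
  apiKey: process.env.QANYTHING_API_KEY,          // placeholder; sent as-is in Authorization
  botId: '00000000-0000-0000-0000-000000000000',  // hypothetical bot uuid
});

// The stream is concatenated on 'end', then parsed line by line as SSE JSON.
const { text, id } = await qany.sendMessage('介绍一下这个知识库', {
  needHistory: true,  // keep the last two history entries across calls
  user: 'room-42',    // accepted but unused in the implementation above
  timeoutMs: 100 * 1000,
});
console.log(text, id);
```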
package/dist/cjs/src/botInstance/officialOpenAi.d.ts

```diff
@@ -14,34 +14,7 @@ declare class OfficialOpenAi {
         promotId: string;
         systemMessage: string;
     });
-    chatGPT: {
-        _apiKey: any;
-        _apiOrg: any;
-        _apiBaseUrl: any;
-        _debug: boolean;
-        _fetch: any;
-        _completionParams: any;
-        _systemMessage: any;
-        _maxModelTokens: any;
-        _maxResponseTokens: any;
-        _getMessageById: any;
-        _upsertMessage: any;
-        _messageStore: any;
-        sendMessage(text: any, opts?: {}): Promise<any>;
-        apiKey: any;
-        apiOrg: any;
-        _buildMessages(text: any, opts: any): Promise<{
-            messages: {
-                role: string;
-                content: any;
-            }[];
-            maxTokens: number;
-            numTokens: number;
-        }>;
-        _getTokenCount(text: any): Promise<number>;
-        _defaultGetMessageById(id: any): Promise<any>;
-        _defaultUpsertMessage(message: any): Promise<void>;
-    } | null;
+    chatGPT: ChatGPTAPI | null;
     config: {
         temperature: number;
         top_p: number;
@@ -68,5 +41,6 @@ declare class OfficialOpenAi {
     reset(): Promise<void>;
     getReply(content: any, uid: any, adminId: string | undefined, systemMessage: string | undefined, isFastGPT: any, variables: any): Promise<any>;
 }
+import { ChatGPTAPI } from "../bot/chatgpt/index.js";
 import { ContentCensor } from "../lib/contentCensor.js";
 //# sourceMappingURL=officialOpenAi.d.ts.map
```
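The declaration shrink is the visible edge of the refactor: `OfficialOpenAi` no longer exposes the removed hand-rolled SDK's private fields (the `dist/*/src/botInstance/sdk/chatGPT.js` deletions in the file list) and instead holds a `ChatGPTAPI | null`. The runtime wiring is not in this diff; a hedged sketch of what the swap presumably looks like inside `officialOpenAi.js`, with all config field names assumed:

```js
// Hedged reconstruction, not from the diff; field names are assumptions.
import { ChatGPTAPI } from '../bot/chatgpt/index.js';

class OfficialOpenAi {
  initChatGpt() {
    this.chatGPT = new ChatGPTAPI({
      apiKey: this.key,               // assumed field name
      apiBaseUrl: this.proxyPass,     // assumed field name
      completionParams: this.config,  // the declared config: { temperature, top_p, ... }
      systemMessage: this.systemMessage,
      debug: this.debug,
      redis: null,                    // assumed: default in-memory history
    });
  }
}
```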