doomiaichat 7.1.29 → 7.1.31
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.esm.js +16 -0
- package/dist/index.js +16 -28
- package/dist/{aimp.d.ts → types/aimp.d.ts} +1 -0
- package/dist/types/aimp.d.ts.map +1 -0
- package/dist/{azureai.d.ts → types/azureai.d.ts} +1 -0
- package/dist/types/azureai.d.ts.map +1 -0
- package/dist/{corzauthorization.d.ts → types/corzauthorization.d.ts} +1 -0
- package/dist/types/corzauthorization.d.ts.map +1 -0
- package/dist/{corzbot.d.ts → types/corzbot.d.ts} +1 -0
- package/dist/types/corzbot.d.ts.map +1 -0
- package/dist/{declare.d.ts → types/declare.d.ts} +2 -1
- package/dist/types/declare.d.ts.map +1 -0
- package/dist/{doubaoai.d.ts → types/doubaoai.d.ts} +1 -0
- package/dist/types/doubaoai.d.ts.map +1 -0
- package/dist/{gptbase.d.ts → types/gptbase.d.ts} +1 -1
- package/dist/types/gptbase.d.ts.map +1 -0
- package/dist/{gptprovider.d.ts → types/gptprovider.d.ts} +2 -7
- package/dist/types/gptprovider.d.ts.map +1 -0
- package/dist/{index.d.ts → types/index.d.ts} +1 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/{openai.d.ts → types/openai.d.ts} +1 -0
- package/dist/types/openai.d.ts.map +1 -0
- package/dist/{openaibase.d.ts → types/openaibase.d.ts} +1 -0
- package/dist/types/openaibase.d.ts.map +1 -0
- package/dist/{openaiproxy.d.ts → types/openaiproxy.d.ts} +1 -0
- package/dist/types/openaiproxy.d.ts.map +1 -0
- package/package.json +16 -4
- package/dist/aimp.js +0 -162
- package/dist/azureai.js +0 -220
- package/dist/baiduai.d.ts +0 -28
- package/dist/baiduai.js +0 -92
- package/dist/corzauthorization.js +0 -79
- package/dist/corzbot.js +0 -490
- package/dist/declare.js +0 -51
- package/dist/deepseek.d.ts +0 -5
- package/dist/deepseek.js +0 -16
- package/dist/doubaoai.js +0 -149
- package/dist/gptbase.js +0 -58
- package/dist/gptprovider.js +0 -80
- package/dist/openai.js +0 -184
- package/dist/openaibase.js +0 -20
- package/dist/openaiprovider.d.ts +0 -20
- package/dist/openaiprovider.js +0 -43
- package/dist/openaiproxy.js +0 -108
- package/dist/stabilityai.d.ts +0 -18
- package/dist/stabilityai.js +0 -75
- package/dist/stabilityplusai.d.ts +0 -11
- package/dist/stabilityplusai.js +0 -86
- package/src/aimp.ts +0 -125
- package/src/azureai.ts +0 -180
- package/src/baiduai.ts +0 -86
- package/src/corzauthorization.ts +0 -59
- package/src/corzbot.ts +0 -434
- package/src/declare.ts +0 -152
- package/src/deepseek.ts +0 -11
- package/src/doubaoai.ts +0 -129
- package/src/gptbase.ts +0 -52
- package/src/gptprovider.ts +0 -74
- package/src/index.ts +0 -2
- package/src/openai.ts +0 -136
- package/src/openaibase.ts +0 -30
- package/src/openaiproxy.ts +0 -97
- package/src/stabilityai.ts +0 -67
- package/src/stabilityplusai.ts +0 -77
- package/tsconfig.json +0 -31
package/src/declare.ts
DELETED
@@ -1,152 +0,0 @@
-
- import { EmbeddingItem } from '@azure/openai';
- import axios from 'axios';
-
- export interface ApiResult {
-     /**
-      * return the result of the api call
-      * @type {boolean}
-      */
-     'successed': boolean
-     /**
-      * The error info
-      * @type {any}
-      * @memberof ChatReponse
-      */
-     'error'?: any;
- }
- /**
-  * Wrapped result returned by the API
-  */
- export interface ChatReponse extends ApiResult {
-
-     /**
-      * The name of the user in a multi-user chat
-      * @type {Array<any>}
-      * @memberof ChatReponse
-      */
-     'message'?: Array<any>;
-     'usage'?: any;
- }
- /**
-  * Parameter contract for calling the OpenAI API
-  */
- export interface OpenAIApiParameters {
-     'embedding'?: string, /// model engine, Azure-compatible
-     'model'?: string, /// model name
-     'maxtoken'?: number; /// maximum number of tokens returned
-     'temperature'?: number;
-     'top_p'?: number;
-     'presence_penalty'?: number;
-     'frequency_penalty'?: number;
-     'replyCounts'?: number; /// how many answers to return
-     'tools'?: Array<any>,
-     'tool_choice'?: string,
-     'enableToolCall'?: number /// whether tool-function calling is allowed
- }
- /**
-  * Connection parameters for OpenAI on Azure
-  */
- export interface ProxyPatameters {
-     'serviceurl': string; /// endpoint
- }
-
- /**
-  * OpenAI proxy connection parameters
-  */
- export interface AzureOpenAIPatameters {
-     'endpoint': string; /// endpoint
-     'engine': string; /// name of the GPT deployment
-     'embedding'?: string; /// name of the embedding deployment
-     'version'?: string; /// API version
- }
-
- /**
-  * Embedding contract for calling the OpenAI API
-  */
- export interface EmbeddingResult extends ApiResult {
-     'embedding'?: EmbeddingItem[],
- }
- /**
-  * Result of a remote request
-  */
- export interface RpcResult extends ApiResult {
-     'data'?: any;
- }
-
- /**
-  * Axios remote-request wrapper
-  * @param opts
-  * @returns
-  */
- export async function request(opts: any = {}): Promise<RpcResult> {
-     if (!opts.data) opts.data = opts.body;
-     try {
-         let result = await axios(opts);
-         return { successed: true, data: result.data }
-     } catch (err) {
-         return { successed: false, error: err, data: err }
-     }
- }
- /**
-  *
-  * @param opts
-  * @returns
-  */
- export function requestStream(opts: any = {}, processChunkData: Function) {
-     if (!opts.data) opts.data = opts.body;
-     axios(opts)
-         .then((res: any) => {
-             res.data.on('data', (chunk: any) => {
-                 processChunkData(chunk);
-             });
-         })
- }
-
-
- /**
-  * Data cache provider interface
-  */
- export interface CacheProvider {
-     /**
-      * Cache a value
-      * @param key key name of the data
-      * @param value value of the data
-      */
-     set(key: string, value: string | object, exp?: number): void;
-     /**
-      * Read data from the cache
-      * @param key key name of the data
-      */
-     get(key: string): Promise<string | null>;
-
-
-     /**
-      * Delete a cache entry
-      * @param key key name of the data
-      */
-     delete(key: string): void;
-
- }
-
-
- export interface StabilityOption {
-     'cfg_scale'?: number,
-     'clip_guidance_preset'?: string,
-     'height'?: number,
-     'width'?: number,
-     'samples'?: number,
-     'seed'?: number,
-     'steps'?: number,
-     'sampler'?: string,
-     'negative'?: string,
-     'engine'?: string,
-     'endpoint'?: string
-     'denoising_strength'?: number,
-     'hr_scale'?: number
- }
-
- export interface StabilityResult extends ApiResult {
-     'data'?: any;
-     'type'?: string;
- }
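The removed declare.ts supplied the shared axios wrappers (request / requestStream) and the result and parameter interfaces every provider builds on. A minimal consumer sketch, assuming the pre-7.1.31 layout where declare.ts was still part of the source tree; the URL and key handling below are illustrative, not mandated by the package:

import { request, RpcResult } from './declare';

// Wrap an arbitrary HTTP call in the package's { successed, data | error } result shape.
async function listModels(apiKey: string): Promise<RpcResult> {
    return request({
        method: 'get',
        url: 'https://api.openai.com/v1/models', // illustrative endpoint
        headers: { authorization: `Bearer ${apiKey}` },
        timeout: 10000,
    });
}

listModels(process.env.OPENAI_API_KEY ?? '').then((res) => {
    if (res.successed) console.log('models:', res.data);
    else console.error('request failed:', res.error);
});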
package/src/deepseek.ts
DELETED
package/src/doubaoai.ts
DELETED
@@ -1,129 +0,0 @@
- /**
-  * Volcano Ark - Doubao large-model engine
-  */
- import { request, requestStream } from "./declare";
- import GptBase from "./gptbase"
- export default class DouBaoAI extends GptBase {
-     protected apiKey: string;
-     protected apiOption: any = {}
-     /**
-      * Constructor
-      */
-     constructor(apiKey: string, apiOption: any = {}) {
-         super();
-         this.apiKey = apiKey;
-         this.apiOption = apiOption;
-     }
-     /**
-      * Call the chat API
-      */
-     public async chatRequest(chatText: string | Array<any>, callChatOption: any, axiosOption: any = {}): Promise<any> {
-         if (!chatText) return { successed: false, error: { errcode: 2, errmsg: '缺失聊天的内容' } };
-         const callParams = this.assembleApiParams(chatText, false, callChatOption, axiosOption);
-         try {
-             const response = await request(callParams)
-             if (response.successed && !response.data.code) return { successed: true, message: response.data.choices, usage: response.data.usage }
-             return { successed: false, ...response.data };
-         } catch (error) {
-             console.log('result is error ', error)
-             return { successed: false, error };
-         }
-     }
-     /**
-      * Assemble the final call parameters
-      * @param callChatOption
-      * @returns
-      */
-     private assembleApiParams(chatText: string | Array<any>, streamCall: boolean = false, callChatOption: any, axiosOption: any = {}): any {
-         let messages: Array<any> = typeof (chatText) == 'string' ? [{ role: 'user', content: chatText }] : chatText;
-         let params: any = {};
-         if (callChatOption?.temperature || this.apiOption.temperature) params.temperature = Number(callChatOption?.temperature || this.apiOption.temperature);
-         params.max_tokens = Number(callChatOption?.maxtoken || this.apiOption.maxtoken);
-         if (callChatOption?.top_p || this.apiOption.top_p) params.top_p = Number(callChatOption?.top_p || this.apiOption.top_p);
-         if (callChatOption?.presence_penalty || this.apiOption.presence_penalty) params.presence_penalty = Number(callChatOption?.presence_penalty || this.apiOption.presence_penalty);
-         if (callChatOption?.frequency_penalty || this.apiOption.frequency_penalty) params.frequency_penalty = Number(callChatOption?.frequency_penalty || this.apiOption.frequency_penalty);
-         if (callChatOption?.top_logprobs || this.apiOption.top_logprobs) {
-             params.logprobs = true;
-             params.top_logprobs = Number(callChatOption?.top_logprobs || this.apiOption.top_logprobs);
-         }
-         params.tools = (callChatOption?.enableToolCall === 1 && callChatOption?.tools) ? callChatOption.tools : undefined;
-         params.tool_choice = callChatOption?.enableToolCall === 1 ? 'auto' : undefined;
-         const axiosParams = {
-             ...axiosOption,
-             method: "post",
-             headers: {
-                 'Content-Type': 'application/json',
-                 'authorization': `Bearer ${this.apiKey}`
-             },
-             data: {
-                 model: callChatOption?.model || this.apiOption.model,
-                 ...params,
-                 messages,
-                 stream: streamCall
-             },
-             url: 'https://ark.cn-beijing.volces.com/api/v3/chat/completions'
-         };
-         if (streamCall) axiosParams.responseType = 'stream';
-         return axiosParams;
-     }
-     /**
-      * Streaming chat mode
-      * @param chatText
-      * @param _paramOption
-      * @param axiosOption
-      */
-     override async chatRequestInStream(chatText: string | Array<any>, callChatOption: any, attach?: any, axiosOption?: any): Promise<any> {
-         if (!chatText) this.emit('chaterror', { successed: false, error: 'no text in chat' });
-         axiosOption = Object.assign({}, axiosOption || { timeout: 10000 })
-         const callParams = this.assembleApiParams(chatText, true, callChatOption, axiosOption);
-         let requestid = Math.ceil(Math.random() * (new Date().getTime() * Math.random()) / 1000), replytext: string[] = [];
-         try {
-             requestStream(callParams, (chunk: any) => {
-                 let streamText = chunk.toString().replace('[DONE]', '').replace(/[\r\n]+/gm, '')
-                 this.processChunkData(streamText.split(/data: /), requestid, replytext, attach)
-             })
-             return { successed: true, requestid }
-         } catch (error) {
-             this.emit('requesterror', { successed: false, requestid, error: 'call axios faied ' + error });
-             return { successed: false, requestid }
-         }
-     }
-     /**
-      * Process each streamed chat fragment
-      * @param chunks
-      * @param requestid
-      * @param replytext
-      * @param attach
-      */
-     processChunkData(chunks: string[], requestid: Number, replytext: string[], attach: any) {
-         let has_tool_calls = 0, currentIndex, previous_index = -1, tool_calls: any[] = []; // tool calls are accumulated in an array
-         for (const splitString of chunks) {
-             if (!splitString) continue;
-             const chunk = JSON.parse(splitString);
-             const [choice] = chunk.choices,
-                 { finish_reason: finishreason, index, usage } = choice,
-                 { content, tool_calls: toolCalls } = choice.delta;
-             if (toolCalls && toolCalls.length) {
-                 currentIndex = toolCalls[0].index;
-                 has_tool_calls = 1;
-                 if (currentIndex !== previous_index) {
-                     tool_calls.push({
-                         id: toolCalls[0].id,
-                         type: 'function',
-                         function: {
-                             name: toolCalls[0].function.name,
-                             arguments: toolCalls[0].function.arguments
-                         }
-                     });
-                     // update previous_index for the next comparison
-                     previous_index = currentIndex;
-                 } else {
-                     tool_calls[previous_index].function.arguments += toolCalls[0].function.arguments
-                 }
-             } else replytext.push(content);
-             let output = { successed: true, requestid, segment: content, text: replytext.join(''), finish_reason: finishreason, index, usage, has_tool_calls: has_tool_calls, tool_calls: tool_calls };
-             if (attach) output = Object.assign({}, output, attach);
-             this.emit(finishreason ? 'chatdone' : 'chattext', output)
-         }
-     }
- }
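DouBaoAI streams its replies through the EventEmitter contract inherited from GptBase, so consumers listen rather than await the full reply. A hedged sketch of how the removed class would have been driven; the key handling and model id are placeholders, while the 'chattext', 'chatdone' and 'requesterror' events come from the code above:

import DouBaoAI from './doubaoai';

// 'ep-placeholder' stands in for a real Volcano Ark endpoint/model id.
const doubao = new DouBaoAI(process.env.ARK_API_KEY ?? '', { model: 'ep-placeholder', maxtoken: 1024, temperature: 0.7 });

doubao.on('chattext', (msg: any) => process.stdout.write(msg.segment ?? '')); // incremental tokens
doubao.on('chatdone', (msg: any) => console.log('\nfinish_reason:', msg.finish_reason));
doubao.on('requesterror', (err: any) => console.error('stream failed:', err.error));

doubao.chatRequestInStream('Introduce Volcano Ark in one sentence.', {}, { sessionid: 'demo' });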
package/src/gptbase.ts
DELETED
@@ -1,52 +0,0 @@
- import { EventEmitter } from "events"
- import { ApiResult } from './declare'
-
- export default abstract class GptBase extends EventEmitter {
-     /**
-      * Constructor
-      */
-     constructor() {
-         super();
-     }
-     /**
-      * Get the embedding of a piece of text
-      * @param text
-      */
-     getTextEmbedding(_text: string, _axiosOption: any): any { return null; }
-     /**
-      * Free-form chat mode
-      * @param chatText
-      * @param _paramOption
-      * @param axiosOption
-      */
-     abstract chatRequest(chatText: string | Array<any>, _paramOption: any, axiosOption?: any): Promise<ApiResult>;
-
-     /**
-      * Create a conversation topic id
-      * @returns
-      */
-     async createCoversation(_client?: any): Promise<string | null> {
-         return null;
-     }
-
-     /**
-      * Set the agent's variables
-      * @returns
-      */
-     async setVariables(_params: any): Promise<ApiResult> {
-         return { successed: true };
-     }
-     /**
-      * Get the agent's variables
-      * @returns
-      */
-     async getVariables(_params: any): Promise<any> { return { successed: false } }
-     /**
-      * Streaming chat mode
-      * @param chatText
-      * @param _paramOption
-      * @param axiosOption
-      */
-     chatRequestInStream(_chatText: string | Array<any>, _paramOption: any, _attach?: any, _axiosOption?: any): any { return null; }
- }
-
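GptBase is the abstract EventEmitter base class every provider extends; only chatRequest is abstract, the remaining hooks have no-op defaults. A minimal conforming subclass, sketched for illustration only (EchoGpt is hypothetical and not part of the package):

import GptBase from './gptbase';
import { ChatReponse } from './declare';

class EchoGpt extends GptBase {
    // The single abstract member: echo the prompt back in the providers' usual result shape.
    async chatRequest(chatText: string | Array<any>, _paramOption: any): Promise<ChatReponse> {
        const content = typeof chatText === 'string' ? chatText : JSON.stringify(chatText);
        return { successed: true, message: [{ message: { role: 'assistant', content } }] };
    }
}

new EchoGpt().chatRequest('ping', {}).then((res) => console.log(res.message));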
package/src/gptprovider.ts
DELETED
@@ -1,74 +0,0 @@
- //ts check
- /**
-  * Speech-to-text service provider factory
-  */
- import OpenAIGpt from './openai';
- import DeepSeek from './deepseek';
- import CorzBot from './corzbot'
- import OpenAIProxt from './openaiproxy';
- import AzureAI from './azureai'
- import StabilityAI from './stabilityai'
- import StabilityPlusAI from './stabilityplusai'
- import BaiduWenXinAI, { ApiCredential } from './baiduai'
- import AIMiddlePlatform from './aimp';
- import DouBaoAI from './doubaoai'
- import GptBase from './gptbase';
- import { CorzAuthorization } from './corzauthorization'
- // Coze identity-authorization applications
- const corzAuth: Map<string, CorzAuthorization> = new Map();
- /**
-  * OpenAI/NLP service providers: OpenAI, Microsoft, Baidu Wenxin (pending), Google (pending)
-  */
- export const GptProviderEnum = {
-     OPENAI: 'openai',
-     OPENAIPROXY: 'openaiproxy',
-     MICROSOFT: 'microsoft',
-     AIMP: 'aimp', /// AI middle-platform service
-     COZE: 'coze',
-     BAIDU: 'baidu',
-     DOUBAO: 'doubao',
-     DEEPSEEK: 'deepseek',
-     GOOGLE: 'google',
-     STABILITY: 'stability',
-     STABILITY2: 'stability2',
- } as const;
- export type GptProviderEnum = typeof GptProviderEnum[keyof typeof GptProviderEnum];
- /**
-  * Create a different TTS engine object depending on the type
-  * @param {*} provider
-  * @param {*} apikey
-  * @param {*} setting
-  * @returns
-  */
- export function createGpt(provider: GptProviderEnum, apikey: string | ApiCredential, setting: any): GptBase | null {
-     let { model, agentid, maxtoken, temperature, serviceurl, endpoint, engine, version, embedding, top_p, presence_penalty, frequency_penalty } = setting || {};
-     switch (provider) {
-         case GptProviderEnum.OPENAI:
-             return new OpenAIGpt(apikey + '', { model, maxtoken, temperature, embedding, top_p, presence_penalty, frequency_penalty });
-         case GptProviderEnum.DEEPSEEK:
-             return new DeepSeek(apikey + '', { model, maxtoken, temperature, embedding, top_p, presence_penalty, frequency_penalty });
-         case GptProviderEnum.OPENAIPROXY:
-             return new OpenAIProxt(apikey + '', { serviceurl }, { model, maxtoken, temperature, embedding, top_p, presence_penalty, frequency_penalty });
-         case GptProviderEnum.MICROSOFT:
-             return new AzureAI(apikey + '', { endpoint, engine, version }, { model, maxtoken, temperature, embedding, top_p, presence_penalty, frequency_penalty });
-         case GptProviderEnum.BAIDU:
-             let cred: ApiCredential = typeof (apikey) === 'string' ? { apikey, securitykey: apikey } : apikey;
-             return new BaiduWenXinAI(cred);
-         case GptProviderEnum.AIMP:
-             return new AIMiddlePlatform(apikey + '', { endpoint, agentid });
-         case GptProviderEnum.COZE:
-             let authorizationProvider = corzAuth.get(apikey + '');
-             if (!authorizationProvider) {
-                 authorizationProvider = new CorzAuthorization(apikey + '', setting);
-                 corzAuth.set(apikey + '', authorizationProvider);
-             }
-             return new CorzBot(authorizationProvider!, setting);
-         case GptProviderEnum.DOUBAO:
-             return new DouBaoAI(apikey + '', { model, maxtoken, temperature, top_p, presence_penalty, frequency_penalty })
-         case GptProviderEnum.STABILITY:
-             return new StabilityAI(apikey + '', { endpoint, engine }, setting);
-         case GptProviderEnum.STABILITY2:
-             return new StabilityPlusAI(apikey + '', { endpoint, engine }, setting);
-         default: return null;
-     }
- };
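createGpt is the factory entry point that maps a GptProviderEnum value onto one of the concrete provider classes above. A hedged usage sketch; the environment variable and settings are placeholders, not values prescribed by the package:

import { createGpt, GptProviderEnum } from './gptprovider';

// Provider selection, credentials and model settings all flow through a single call.
const gpt = createGpt(GptProviderEnum.OPENAI, process.env.OPENAI_API_KEY ?? '', {
    model: 'gpt-3.5-turbo',
    maxtoken: 1024,
    temperature: 0.7,
});

gpt?.chatRequest('Hello!', {}).then((res) => {
    console.log(res.successed ? res : res.error);
});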
package/src/index.ts
DELETED
package/src/openai.ts
DELETED
@@ -1,136 +0,0 @@
- // import { Configuration, OpenAIApi, ChatCompletionRequestMessage } from "azure-openai"
- /**
-  * OpenAI
-  */
- import OpenAIBase from "./openaibase"
- import { OpenAIApiParameters, ChatReponse, EmbeddingResult } from './declare'
- import OpenAI from "openai";
- // import { ChatCompletionToolChoiceOption } from "openai/resources";
- export default class OpenAIGpt extends OpenAIBase<OpenAI> {
-     /**
-      * Initialize the OpenAI chat API object
-      */
-     createOpenAI(apiKey: string): OpenAI {
-         return new OpenAI({ apiKey })
-     }
-
-     /**
-      * Get the embedding of a piece of text
-      * @param text
-      */
-     override async getTextEmbedding(text: string | string[], axiosOption: any): Promise<EmbeddingResult> {
-         if (!text) return { successed: false, error: { errcode: 2, errmsg: 'content required' } };
-         if (!this.aiApi) {
-             this.aiApi = this.createOpenAI(this.apiKey);
-         }
-         try {
-             const response: any = await this.aiApi.embeddings.create({
-                 model: this.embeddingmodel,
-                 input: text,
-             }, axiosOption);
-             return { successed: true, embedding: response.data.data }; //[0].embedding };
-         } catch (error) {
-             return { successed: false, error };
-         }
-     }
-     /**
-      * Send a chat request to OpenAI
-      * @param {*} chatText
-      */
-     public async chatRequest(chatText: string | Array<any>, callChatOption: OpenAIApiParameters, axiosOption: any = {}): Promise<ChatReponse> {
-         if (!chatText) return { successed: false, error: { errcode: 2, errmsg: '缺失聊天的内容' } };
-         if (!this.aiApi) this.aiApi = this.createOpenAI(this.apiKey);
-
-         let message: Array<any> = typeof (chatText) == 'string' ?
-             [{ role: 'user', content: chatText }] : chatText;
-         try {
-             // const response: any = await this.aiApi.createChatCompletion({
-             const response: any = await this.aiApi.chat.completions.create(
-                 {
-                     model: callChatOption?.model || this.chatModel,
-                     messages: message,
-                     temperature: Number(callChatOption?.temperature || this.temperature),
-                     max_tokens: Number(callChatOption?.maxtoken || this.maxtoken),
-                     top_p: Number(callChatOption?.top_p || this.top_p),
-                     presence_penalty: Number(callChatOption?.presence_penalty || this.presence_penalty),
-                     frequency_penalty: Number(callChatOption?.frequency_penalty || this.frequency_penalty),
-                     n: Number(callChatOption?.replyCounts || 1) || 1,
-                     // tools: (callChatOption?.enableToolCall === 1 && callChatOption?.tools) ? callChatOption.tools : undefined,
-                     // tool_choice: callChatOption?.enableToolCall === 1 ? 'auto' : undefined,
-                 }, axiosOption);
-             // console.log('response.data', response)
-             return { successed: true, message: response.choices, usage: response.usage };
-         } catch (error) {
-             console.log('result is error ', error)
-             return { successed: false, error };
-         }
-
-     }
-     /**
-      * Streaming chat mode
-      * @param chatText
-      * @param _paramOption
-      * @param axiosOption
-      */
-     override async chatRequestInStream(chatText: string | Array<any>, callChatOption: OpenAIApiParameters, attach?: any, axiosOption?: any): Promise<any> {
-         if (!chatText) this.emit('chaterror', { successed: false, error: 'no text in chat' });
-         if (!this.aiApi) {
-             this.aiApi = this.createOpenAI(this.apiKey);
-         }
-         let message: Array<any> = typeof (chatText) == 'string' ? [{ role: 'user', content: chatText }] : chatText;
-         axiosOption = Object.assign({}, axiosOption || { timeout: 60000 })
-         let requestid = Math.ceil(Math.random() * (new Date().getTime() * Math.random()) / 1000);
-         try {
-             const response: any = await this.aiApi.chat.completions.create(
-                 {
-                     model: callChatOption?.model || this.chatModel,
-                     messages: message,
-                     temperature: Number(callChatOption?.temperature || this.temperature),
-                     max_tokens: Number(callChatOption?.maxtoken || this.maxtoken),
-                     top_p: Number(callChatOption?.top_p || this.top_p),
-                     presence_penalty: Number(callChatOption?.presence_penalty || this.presence_penalty),
-                     frequency_penalty: Number(callChatOption?.frequency_penalty || this.frequency_penalty),
-                     n: Number(callChatOption?.replyCounts || 1) || 1,
-                     tools: (callChatOption?.enableToolCall === 1 && callChatOption?.tools) ? callChatOption.tools : undefined,
-                     tool_choice: callChatOption?.enableToolCall === 1 ? 'auto' : undefined,
-                     stream: true
-                 }, axiosOption);
-             let replytext: string[] = [];
-             let has_tool_calls = 0, currentIndex, previous_index = -1, tool_calls: any[] = []; // tool calls are accumulated in an array
-             for await (const chunk of response) {
-                 const [choice] = chunk.choices,
-                     { finish_reason: finishreason, index, usage } = choice,
-                     { content, tool_calls: toolCalls } = choice.delta;
-                 if (toolCalls && toolCalls.length) {
-                     currentIndex = toolCalls[0].index;
-                     has_tool_calls = 1;
-                     // check whether the tool-call index has changed
-                     //console.log('currentIndex,previous_index', currentIndex, previous_index)
-                     if (currentIndex !== previous_index) {
-                         tool_calls.push({
-                             id: toolCalls[0].id,
-                             type: 'function',
-                             function: {
-                                 name: toolCalls[0].function.name,
-                                 arguments: toolCalls[0].function.arguments
-                             }
-                         });
-                         // update previous_index for the next comparison
-                         previous_index = currentIndex;
-                     } else {
-                         tool_calls[previous_index].function.arguments += toolCalls[0].function.arguments
-                     }
-                 } else {
-                     replytext.push(content);
-                 }
-                 let output = { successed: true, requestid, segment: content, text: replytext.join(''), finish_reason: finishreason, index, usage, has_tool_calls: has_tool_calls, tool_calls: tool_calls };
-                 if (attach) output = Object.assign({}, output, attach);
-                 this.emit(finishreason ? 'chatdone' : 'chattext', output)
-             }
-             return { successed: true, requestid }
-         } catch (error) {
-             this.emit('requesterror', { successed: false, requestid, error: 'call axios faied ' + error });
-             return { successed: false, requestid }
-         }
-     }
- }
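A hedged sketch of driving the removed OpenAIGpt wrapper directly, assuming a valid key in OPENAI_API_KEY; the model names shown are simply the defaults from openaibase.ts below:

import OpenAIGpt from './openai';

const gpt = new OpenAIGpt(process.env.OPENAI_API_KEY ?? '', { model: 'gpt-3.5-turbo', embedding: 'text-embedding-ada-002' });

async function demo() {
    // chatRequest returns response.choices in message, so the reply text sits at message[0].message.content.
    const chat = await gpt.chatRequest([{ role: 'user', content: 'Say hi' }], { temperature: 0.2 });
    if (chat.successed) console.log(chat.message?.[0]?.message?.content);

    // getTextEmbedding returns the raw item array from the embeddings endpoint.
    const emb = await gpt.getTextEmbedding('hello world', { timeout: 15000 });
    if (emb.successed) console.log('embedding items:', emb.embedding?.length);
}

demo().catch(console.error);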
package/src/openaibase.ts
DELETED
@@ -1,30 +0,0 @@
- // import { Configuration, OpenAIApi, ChatCompletionRequestMessage } from "azure-openai"
- import { OpenAIApiParameters } from "./declare";
- import GptBase from "./gptbase"
- export default abstract class OpenAIBase<T> extends GptBase {
-     protected readonly apiKey: string;
-     protected readonly chatModel: string;
-     protected readonly maxtoken: number;
-     protected readonly top_p: number;
-     protected readonly presence_penalty: number;
-     protected readonly frequency_penalty: number;
-     protected readonly temperature: number;
-     protected readonly embeddingmodel: string;
-     protected aiApi: T | undefined; //OpenAIApi | undefined;
-     constructor(apiKey: string, apiOption: OpenAIApiParameters = {}) {
-         super();
-         this.apiKey = apiKey;
-         this.chatModel = apiOption.model || 'gpt-3.5-turbo';
-         this.maxtoken = apiOption.maxtoken || 2048;
-         this.top_p = apiOption.top_p || 0.95;
-         this.temperature = apiOption.temperature || 0.9;
-         this.presence_penalty = apiOption.presence_penalty || 0;
-         this.frequency_penalty = apiOption.frequency_penalty || 0;
-         this.embeddingmodel = apiOption.embedding || 'text-embedding-ada-002';
-     }
-     /**
-      * Initialize the OpenAI chat API object
-      */
-     abstract createOpenAI(apiKey: string): T;
-
- }