ohlcv-ai 1.0.2 → 1.0.5
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
- package/dist/index.d.ts +615 -0
- package/dist/index.js +68 -40
- package/dist/index.mjs +2051 -0
- package/package.json +31 -18
- package/dist/aliyun/index.d.ts.map +0 -1
- package/dist/aliyun/model.d.ts.map +0 -1
- package/dist/deepseek/index.d.ts.map +0 -1
- package/dist/deepseek/model.d.ts.map +0 -1
- package/dist/index.d.ts.map +0 -1
- package/dist/openai/index.d.ts.map +0 -1
- package/dist/openai/model.d.ts.map +0 -1
- package/dist/types.d.ts.map +0 -1
- package/src/aliyun/index.ts +0 -446
- package/src/aliyun/model.ts +0 -475
- package/src/deepseek/index.ts +0 -479
- package/src/deepseek/model.ts +0 -455
- package/src/index.ts +0 -56
- package/src/openai/index.ts +0 -766
- package/src/openai/model.ts +0 -430
- package/src/types.ts +0 -12
- package/tsconfig.json +0 -28
package/dist/index.d.ts
ADDED
@@ -0,0 +1,615 @@
export declare const ALIYUN_MODELS: Map<AliYunModelType, Model>;

export declare class AliyunAI {
    private apiKey;
    private modelType;
    private timeout;
    /**
     * Constructor - Minimal configuration
     * @param config.apiKey - API key (required)
     * @param config.modelType - Model type, default qwen-turbo
     * @param config.timeout - Timeout, default 30 seconds
     */
    constructor(config: AliyunConfig);
    /**
     * Simplest method: single conversation
     * @param message - User message
     * @param options - Chat options
     * @returns AI response
     */
    chat(message: string, options?: AliYunChatOptions): Promise<string>;
    /**
     * Multi-turn conversation
     * @param messages - Message history
     * @param options - Chat options
     * @returns Complete API response
     */
    chatCompletion(messages: ChatMessage[], options?: {
        temperature?: number;
        maxTokens?: number;
        stream?: boolean;
        modelType?: AliYunModelType;
    }): Promise<any>;
    /**
     * Streaming conversation (only supports OpenAI format)
     * @param messages - Message history
     * @param callback - Streaming callback function
     * @param options - Chat options
     */
    chatStream(messages: ChatMessage[], callback: AliYunStreamCallback, options?: {
        temperature?: number;
        maxTokens?: number;
        modelType?: AliYunModelType;
    }): Promise<void>;
    /**
     * Switch model
     * @param modelType - New model type
     */
    setModel(modelType: AliYunModelType): void;
    /**
     * Get current model configuration
     */
    getCurrentModel(): {
        name: string;
        displayName: string;
        description?: string;
    };
    /**
     * Test connection
     * @returns Connection test result
     */
    testConnection(): Promise<{
        success: boolean;
        model: string;
        response?: string;
        error?: string;
    }>;
    private buildOpenAIRequest;
    private buildDashScopeRequest;
    private makeRequest;
    private makeStreamRequest;
    private extractContent;
    /**
     * Specialized method for processing OHLCV arrays
     * @param ohlcvArray - OHLCV data array
     * @param instructions - Processing instructions, supports Chinese and English (optional, default: "Based on these OHLCV data, predict the next period")
     * @param count - Number of OHLCV data items to return (optional, default: 1)
     * @param options - Chat options
     * @returns Predicted OHLCV array
     */
    predictingOHLCV(ohlcvArray: OHLCV[], instructions?: string, count?: number, options?: AliYunChatOptions): Promise<OHLCV[]>;
    /**
     * Parse AI returned OHLCV response
     * @private
     */
    private parseOHLCVResponse;
}
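
For context, a minimal usage sketch based on these declarations. Runtime behavior is inferred from the typings rather than verified, and the `ALIYUN_API_KEY` environment-variable name is an assumption; only the import specifier `ohlcv-ai` (the package name) is given.

// Hypothetical usage sketch derived from the typings above (ESM top-level await).
import { AliyunAI, AliYunModelType, OHLCV } from 'ohlcv-ai';

const ai = new AliyunAI({
    apiKey: process.env.ALIYUN_API_KEY ?? '', // assumed env var name
    modelType: AliYunModelType.QWEN_TURBO,    // the documented default
});

// Single-turn chat resolves directly to the assistant's text.
const reply = await ai.chat('Summarize the market in one sentence.');

// predictingOHLCV sends historical candles and asks for `count` predicted ones.
const history: OHLCV[] = [
    { open: 100, high: 105, low: 99, close: 104, volume: 12_000 },
    { open: 104, high: 108, low: 103, close: 107, volume: 15_000 },
];
const next = await ai.predictingOHLCV(history, 'Predict the next period', 1);
console.log(reply, next);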

export declare interface AliYunChatOptions {
    temperature?: number;
    maxTokens?: number;
    stream?: boolean;
    systemPrompt?: string;
    modelType?: AliYunModelType;
}

export declare interface AliyunConfig {
    apiKey: string;
    modelType?: AliYunModelType;
    timeout?: number;
}

export declare enum AliYunModelType {
    QWEN_TURBO = "qwen-turbo",
    QWEN_PLUS = "qwen-plus",
    QWEN_MAX = "qwen-max",
    QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext",
    QWEN_2_5B = "qwen2.5-0.5b",
    QWEN_2_5B_INSTRUCT = "qwen2.5-0.5b-instruct",
    QWEN_2_5B_7B = "qwen2.5-7b",
    QWEN_2_5B_7B_INSTRUCT = "qwen2.5-7b-instruct",
    QWEN_2_5B_14B = "qwen2.5-14b",
    QWEN_2_5B_14B_INSTRUCT = "qwen2.5-14b-instruct",
    QWEN_2_5B_32B = "qwen2.5-32b",
    QWEN_2_5B_32B_INSTRUCT = "qwen2.5-32b-instruct",
    QWEN_2_5B_72B = "qwen2.5-72b",
    QWEN_2_5B_72B_INSTRUCT = "qwen2.5-72b-instruct",
    QWEN_2_5B_CODER = "qwen2.5-coder",
    QWEN_2_5B_CODER_7B = "qwen2.5-coder-7b",
    QWEN_2_5B_CODER_14B = "qwen2.5-coder-14b",
    QWEN_2_5B_CODER_32B = "qwen2.5-coder-32b",
    QWEN_VL_LITE = "qwen-vl-lite",
    QWEN_VL_PLUS = "qwen-vl-plus",
    QWEN_VL_MAX = "qwen-vl-max",
    QWEN_AUDIO_TURBO = "qwen-audio-turbo",
    QWEN_AUDIO_CHAT = "qwen-audio-chat",
    QWEN_MATH_7B = "qwen-math-7b",
    LLAMA2_7B_CHAT_V2 = "llama2-7b-chat-v2",
    BAICHUAN2_7B_CHAT_V1 = "baichuan2-7b-chat-v1",
    QWEN_FINANCIAL = "qwen-financial",
    QWEN_FINANCIAL_14B = "qwen-financial-14b",
    QWEN_FINANCIAL_32B = "qwen-financial-32b",
    QWEN_MEDICAL = "qwen-medical",
    QWEN_MEDICAL_14B = "qwen-medical-14b",
    QWEN_MEDICAL_32B = "qwen-medical-32b",
    QWEN_OMNI = "qwen-omni",
    QWEN_OMNI_PRO = "qwen-omni-pro"
}

export declare type AliYunStreamCallback = (chunk: string, isEnd: boolean) => void;
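
The stream callback receives incremental text chunks plus an end flag. A hypothetical sketch, again assuming the typings reflect runtime behavior:

// Print streamed chunks as they arrive; `isEnd` signals the final call.
import { AliyunAI, ChatMessage } from 'ohlcv-ai';

const streamer = new AliyunAI({ apiKey: process.env.ALIYUN_API_KEY ?? '' });
const messages: ChatMessage[] = [
    { role: 'system', content: 'You are a terse trading assistant.' },
    { role: 'user', content: 'Explain OHLCV in one line.' },
];
await streamer.chatStream(messages, (chunk, isEnd) => {
    if (!isEnd) process.stdout.write(chunk); // partial token text
    else process.stdout.write('\n');         // stream finished
});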

export declare interface ChatMessage {
    role: 'system' | 'user' | 'assistant';
    content: string;
}

export declare interface CostEstimate {
    inputTokens: number;
    outputTokens: number;
    inputCost: number;
    outputCost: number;
    totalCost: number;
}

/**
 * Factory function for quick instance creation
 */
export declare function createAliyunAI(apiKey: string, modelType?: AliYunModelType): AliyunAI;

/**
 * Factory function for quick instance creation
 */
export declare function createDeepSeekAI(apiKey: string, modelType?: DeepSeekModelType): DeepSeekAI;

/**
 * Factory function for quick instance creation
 */
export declare function createOpenAI(apiKey: string, modelType?: OpenAIModelType): OpenAI;
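
The three factories look like one-line shorthands for the corresponding constructors; a sketch under that assumption (env var names are hypothetical):

// Hypothetical: factories presumably wrap `new AliyunAI(...)` etc.
import { createAliyunAI, createDeepSeekAI, createOpenAI, DeepSeekModelType } from 'ohlcv-ai';

const qwen = createAliyunAI(process.env.ALIYUN_API_KEY ?? '');
const deepseek = createDeepSeekAI(process.env.DEEPSEEK_API_KEY ?? '', DeepSeekModelType.DEEPSEEK_CHAT);
const openai = createOpenAI(process.env.OPENAI_API_KEY ?? '');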

export declare const DEEPSEEK_MODELS: Map<DeepSeekModelType, DeepSeekModel>;

export declare class DeepSeekAI {
    private apiKey;
    private modelType;
    private timeout;
    private baseURL;
    /**
     * Constructor - Minimal configuration
     * @param config.apiKey - API key (required)
     * @param config.modelType - Model type, default deepseek-chat
     * @param config.timeout - Timeout, default 30 seconds
     * @param config.baseURL - Base URL for API, default official endpoint
     */
    constructor(config: DeepSeekConfig);
    /**
     * Simplest method: single conversation
     * @param message - User message
     * @param options - Chat options
     * @returns AI response
     */
    chat(message: string, options?: DeepSeekChatOptions): Promise<string>;
    /**
     * Multi-turn conversation
     * @param messages - Message history
     * @param options - Chat options
     * @returns Complete API response
     */
    chatCompletion(messages: DeepSeekChatMessage[], options?: DeepSeekChatOptions): Promise<any>;
    /**
     * Streaming conversation
     * @param messages - Message history
     * @param callback - Streaming callback function
     * @param options - Chat options
     */
    chatStream(messages: DeepSeekChatMessage[], callback: DeepSeekStreamCallback, options?: DeepSeekChatOptions): Promise<void>;
    /**
     * Specialized method for processing OHLCV arrays
     * @param ohlcvArray - OHLCV data array
     * @param instructions - Processing instructions (optional)
     * @param count - Number of OHLCV data items to return (optional, default: 1)
     * @param options - Chat options
     * @returns Predicted OHLCV array
     */
    predictingOHLCV(ohlcvArray: OHLCV[], instructions?: string, count?: number, options?: DeepSeekChatOptions): Promise<OHLCV[]>;
    /**
     * Switch model
     * @param modelType - New model type
     */
    setModel(modelType: DeepSeekModelType): void;
    /**
     * Get current model configuration
     */
    getCurrentModel(): {
        name: string;
        displayName: string;
        description?: string;
    };
    /**
     * Test connection
     * @returns Connection test result
     */
    testConnection(): Promise<{
        success: boolean;
        model: string;
        response?: string;
        error?: string;
    }>;
    private buildOpenAIRequest;
    private makeRequest;
    private makeStreamRequest;
    private extractContent;
    private parseOHLCVResponse;
}
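
A hypothetical DeepSeek sketch showing the connection check before use; the response shape of chatCompletion is untyped (Promise<any>), so nothing beyond "whatever the upstream API returns" can be assumed:

// Hypothetical: verify credentials via testConnection() before chatting.
import { DeepSeekAI } from 'ohlcv-ai';

const ds = new DeepSeekAI({ apiKey: process.env.DEEPSEEK_API_KEY ?? '' });
const check = await ds.testConnection();
if (!check.success) throw new Error(`DeepSeek unreachable: ${check.error}`);
console.log(`Connected to ${check.model}`);

const res = await ds.chatCompletion(
    [{ role: 'user', content: 'One-line OHLCV definition, please.' }],
    { temperature: 0.2, maxTokens: 64 },
);
console.log(res);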

export declare interface DeepSeekChatMessage {
    role: 'system' | 'user' | 'assistant' | 'tool';
    content: string;
    name?: string;
    tool_call_id?: string;
}

export declare interface DeepSeekChatOptions {
    temperature?: number;
    maxTokens?: number;
    stream?: boolean;
    systemPrompt?: string;
    modelType?: DeepSeekModelType;
    topP?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
    stop?: string[];
    tools?: any[];
    toolChoice?: string | object;
}

export declare interface DeepSeekConfig {
    apiKey: string;
    modelType?: DeepSeekModelType;
    timeout?: number;
    baseURL?: string;
}

declare interface DeepSeekModel {
    name: string;
    displayName: string;
    endpoint: string;
    endpoints?: string[];
    format: 'openai' | 'deepseek';
    description?: string;
    maxTokens?: number;
    contextLength?: number;
    capabilities?: string[];
    version?: string;
}

export declare enum DeepSeekModelType {
    DEEPSEEK_CHAT = "deepseek-chat",
    DEEPSEEK_CHAT_LITE = "deepseek-chat-lite",
    DEEPSEEK_CHAT_PRO = "deepseek-chat-pro",
    DEEPSEEK_CHAT_MAX = "deepseek-chat-max",
    DEEPSEEK_CODER = "deepseek-coder",
    DEEPSEEK_CODER_LITE = "deepseek-coder-lite",
    DEEPSEEK_CODER_PRO = "deepseek-coder-pro",
    DEEPSEEK_MATH = "deepseek-math",
    DEEPSEEK_MATH_PRO = "deepseek-math-pro",
    DEEPSEEK_REASONER = "deepseek-reasoner",
    DEEPSEEK_REASONER_PRO = "deepseek-reasoner-pro",
    DEEPSEEK_VISION = "deepseek-vision",
    DEEPSEEK_VISION_PRO = "deepseek-vision-pro",
    DEEPSEEK_FINANCE = "deepseek-finance",
    DEEPSEEK_LAW = "deepseek-law",
    DEEPSEEK_MEDICAL = "deepseek-medical",
    DEEPSEEK_RESEARCH = "deepseek-research",
    DEEPSEEK_OMNI = "deepseek-omni",
    DEEPSEEK_OMNI_PRO = "deepseek-omni-pro",
    DEEPSEEK_LLM = "deepseek-llm",
    DEEPSEEK_LLM_67B = "deepseek-llm-67b",
    DEEPSEEK_LLM_131B = "deepseek-llm-131b"
}

export declare type DeepSeekStreamCallback = (chunk: string, isEnd: boolean) => void;

export declare function estimateCost(model: OpenAIModel, inputTokens: number, outputTokens?: number): CostEstimate;
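
Given the per-1K-token price fields on OpenAIModel, the natural reading is cost = tokens / 1000 × rate; a sketch under that assumption (the formula itself is not documented in these typings):

// Hypothetical: assumes estimateCost scales the per-1K rates by token count,
// e.g. at $0.00015/1K input, 2000 tokens -> 2000 / 1000 * 0.00015 = $0.0003.
import { estimateCost, getOpenAIModel, OpenAIModelType } from 'ohlcv-ai';

const model = getOpenAIModel(OpenAIModelType.GPT4_OMNI_MINI);
if (model) {
    const est = estimateCost(model, 2000, 500);
    console.log(est.inputCost, est.outputCost, est.totalCost);
}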

export declare function getAllOpenAIModels(): OpenAIModel[];

export declare function getAudioModelsOpenAI(): OpenAIModel[];

export declare function getAvailableOpenAIModelTypes(): OpenAIModelType[];

export declare function getChatModels(): OpenAIModel[];

export declare function getCompletionModels(): OpenAIModel[];

export declare function getCostEfficientModels(): OpenAIModel[];

export declare function getEmbeddingModels(): OpenAIModel[];

export declare function getHighContextModels(): OpenAIModel[];

export declare function getLatestModels(): OpenAIModel[];

export declare function getMultimodalModelsOpenAI(): OpenAIModel[];

export declare function getOpenAIModel(type: OpenAIModelType): OpenAIModel | undefined;

export declare function getOpenAIModelByName(name: string): OpenAIModel | undefined;

export declare function getVisionModelsOpenAI(): OpenAIModel[];
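
These catalog queries return OpenAIModel records, so they compose with ordinary array methods; a hypothetical sketch listing chat models by price:

// Hypothetical: sort chat-capable models by input price where a price is set.
import { getChatModels } from 'ohlcv-ai';

const cheapestFirst = getChatModels()
    .filter((m) => m.inputCostPer1KTokens !== undefined)
    .sort((a, b) => a.inputCostPer1KTokens! - b.inputCostPer1KTokens!);
for (const m of cheapestFirst) {
    console.log(`${m.displayName}: $${m.inputCostPer1KTokens}/1K input tokens`);
}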

declare interface Model {
    name: string;
    displayName: string;
    endpoint: string;
    endpoints?: string[];
    format: 'openai' | 'dashscope';
    description?: string;
    maxTokens?: number;
    contextLength?: number;
    capabilities?: string[];
}

export declare interface OHLCV {
    open: number;
    high: number;
    low: number;
    close: number;
    volume: number;
}

export declare class OpenAI {
    private apiKey;
    private modelType;
    private timeout;
    private organization?;
    private baseURL;
    /**
     * Constructor - Minimal configuration
     * @param config.apiKey - API key (required)
     * @param config.modelType - Model type, default gpt-3.5-turbo
     * @param config.timeout - Timeout, default 30 seconds
     * @param config.organization - Organization ID (optional)
     * @param config.baseURL - Custom base URL (optional)
     */
    constructor(config: OpenAIConfig);
    /**
     * Simplest method: single conversation
     * @param message - User message
     * @param options - Chat options
     * @returns AI response
     */
    chat(message: string, options?: OpenAIChatOptions): Promise<string>;
    /**
     * Multi-turn conversation
     * @param messages - Message history
     * @param options - Chat options
     * @returns Complete API response
     */
    chatCompletion(messages: ChatMessage[], options?: {
        temperature?: number;
        maxTokens?: number;
        stream?: boolean;
        modelType?: OpenAIModelType;
        topP?: number;
        frequencyPenalty?: number;
        presencePenalty?: number;
        stop?: string[];
    }): Promise<any>;
    /**
     * Streaming conversation
     * @param messages - Message history
     * @param callback - Streaming callback function
     * @param options - Chat options
     */
    chatStream(messages: ChatMessage[], callback: OpenAIStreamCallback, options?: {
        temperature?: number;
        maxTokens?: number;
        modelType?: OpenAIModelType;
        topP?: number;
        frequencyPenalty?: number;
        presencePenalty?: number;
        stop?: string[];
    }): Promise<void>;
    /**
     * Generate images using DALL-E
     * @param prompt - Image generation prompt
     * @param options - Image generation options
     * @returns Generated image URLs
     */
    generateImage(prompt: string, options?: {
        modelType?: OpenAIModelType;
        n?: number;
        size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792';
        quality?: 'standard' | 'hd';
        style?: 'vivid' | 'natural';
        responseFormat?: 'url' | 'b64_json';
    }): Promise<string[]>;
    /**
     * Create text embeddings
     * @param input - Text or array of texts to embed
     * @param options - Embedding options
     * @returns Embedding vectors
     */
    createEmbeddings(input: string | string[], options?: {
        modelType?: OpenAIModelType;
        dimensions?: number;
    }): Promise<number[][]>;
    /**
     * Transcribe audio using Whisper
     * @param audioData - Base64 encoded audio data or file path
     * @param options - Transcription options
     * @returns Transcribed text
     */
    transcribeAudio(audioData: string | File, options?: {
        modelType?: OpenAIModelType;
        language?: string;
        prompt?: string;
        responseFormat?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
        temperature?: number;
    }): Promise<string>;
    /**
     * Text-to-speech conversion
     * @param text - Text to convert to speech
     * @param options - TTS options
     * @returns Audio data (base64 or blob)
     */
    textToSpeech(text: string, options?: {
        modelType?: OpenAIModelType;
        voice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
        responseFormat?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';
        speed?: number;
    }): Promise<ArrayBuffer>;
    /**
     * Content moderation
     * @param input - Text to moderate
     * @param options - Moderation options
     * @returns Moderation results
     */
    moderateContent(input: string, options?: {
        modelType?: OpenAIModelType;
    }): Promise<any>;
    /**
     * Switch model
     * @param modelType - New model type
     */
    setModel(modelType: OpenAIModelType): void;
    /**
     * Get current model configuration
     */
    getCurrentModel(): {
        name: string;
        displayName: string;
        description?: string;
    };
    /**
     * Test connection
     * @returns Connection test result
     */
    testConnection(): Promise<{
        success: boolean;
        model: string;
        response?: string;
        error?: string;
    }>;
    /**
     * Estimate cost for a request
     * @param inputTokens - Number of input tokens
     * @param outputTokens - Number of output tokens
     * @param modelType - Model type (optional, uses current if not provided)
     * @returns Cost estimate
     */
    estimateCost(inputTokens: number, outputTokens?: number, modelType?: OpenAIModelType): {
        inputCost: number;
        outputCost: number;
        totalCost: number;
    };
    private buildOpenAIRequest;
    private makeRequest;
    private makeFormDataRequest;
    private makeStreamRequest;
    private extractContent;
    /**
     * Specialized method for processing OHLCV arrays
     * @param ohlcvArray - OHLCV data array
     * @param instructions - Processing instructions (optional, default: "Based on these OHLCV data, predict the next period")
     * @param count - Number of OHLCV data items to return (optional, default: 1)
     * @param options - Chat options
     * @returns Predicted OHLCV array
     */
    analyzeOHLCV(ohlcvArray: OHLCV[], instructions?: string, count?: number, options?: OpenAIChatOptions): Promise<OHLCV[]>;
    /**
     * Parse AI returned OHLCV response
     * @private
     */
    private parseOHLCVResponse;
}
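
A hypothetical multimodal sketch; the size and model literals come straight from the option types above, while runtime behavior and the env var name are assumptions. Note this class names its OHLCV method analyzeOHLCV, unlike the predictingOHLCV used by AliyunAI and DeepSeekAI.

// Hypothetical sketch of the OpenAI wrapper's image and embedding calls.
import { OpenAI, OpenAIModelType } from 'ohlcv-ai';

const oa = new OpenAI({ apiKey: process.env.OPENAI_API_KEY ?? '' });

// generateImage resolves to URLs (or base64 strings, per responseFormat).
const urls = await oa.generateImage('a candlestick chart at dawn', {
    modelType: OpenAIModelType.DALL_E_3,
    size: '1024x1024',
});

// createEmbeddings returns one numeric vector per input string.
const [vec] = await oa.createEmbeddings('OHLCV', {
    modelType: OpenAIModelType.TEXT_EMBEDDING_3_SMALL,
});
console.log(urls[0], vec.length);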

export declare const OPENAI_MODELS: Map<OpenAIModelType, OpenAIModel>;

export declare interface OpenAIChatOptions {
    temperature?: number;
    maxTokens?: number;
    stream?: boolean;
    systemPrompt?: string;
    modelType?: OpenAIModelType;
    topP?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
    stop?: string[];
}

export declare interface OpenAIConfig {
    apiKey: string;
    modelType?: OpenAIModelType;
    timeout?: number;
    organization?: string;
    baseURL?: string;
}

export declare interface OpenAIModel {
    name: string;
    displayName: string;
    endpoint: string;
    format: 'openai';
    description: string;
    maxTokens?: number;
    contextLength?: number;
    capabilities: string[];
    inputCostPer1KTokens?: number;
    outputCostPer1KTokens?: number;
    supportedFeatures?: string[];
}

export declare enum OpenAIModelType {
    GPT4 = "gpt-4",
    GPT4_0314 = "gpt-4-0314",
    GPT4_0613 = "gpt-4-0613",
    GPT4_32K = "gpt-4-32k",
    GPT4_32K_0314 = "gpt-4-32k-0314",
    GPT4_32K_0613 = "gpt-4-32k-0613",
    GPT4_TURBO = "gpt-4-turbo",
    GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview",
    GPT4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09",
    GPT4_OMNI = "gpt-4o",
    GPT4_OMNI_2024_05_13 = "gpt-4o-2024-05-13",
    GPT4_OMNI_MINI = "gpt-4o-mini",
    GPT4_OMNI_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18",
    GPT3_5_TURBO = "gpt-3.5-turbo",
    GPT3_5_TURBO_0125 = "gpt-3.5-turbo-0125",
    GPT3_5_TURBO_1106 = "gpt-3.5-turbo-1106",
    GPT3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct",
    GPT3_5_TURBO_16K = "gpt-3.5-turbo-16k",
    GPT3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613",
    DAVINCI_002 = "davinci-002",
    BABBAGE_002 = "babbage-002",
    TEXT_DAVINCI_003 = "text-davinci-003",
    TEXT_DAVINCI_002 = "text-davinci-002",
    TEXT_DAVINCI_001 = "text-davinci-001",
    TEXT_CURIE_001 = "text-curie-001",
    TEXT_BABBAGE_001 = "text-babbage-001",
    TEXT_ADA_001 = "text-ada-001",
    TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002",
    TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small",
    TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large",
    DALL_E_2 = "dall-e-2",
    DALL_E_3 = "dall-e-3",
    WHISPER_1 = "whisper-1",
    TTS_1 = "tts-1",
    TTS_1_HD = "tts-1-hd",
    MODERATION_LATEST = "text-moderation-latest",
    MODERATION_STABLE = "text-moderation-stable",
    GPT3_5_TURBO_FINETUNED = "ft:gpt-3.5-turbo-0125:personal:",
    GPT4_FINETUNED = "ft:gpt-4-0125-preview:personal:",
    GPT4_VISION_PREVIEW = "gpt-4-vision-preview"
}

export declare type OpenAIStreamCallback = (chunk: string, isEnd: boolean) => void;

export declare function suggestModel(requirements: {
    taskType: 'chat' | 'completion' | 'embedding' | 'image' | 'audio';
    budget?: number;
    contextLength?: number;
    features?: string[];
}): OpenAIModel[];
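
A hypothetical suggestModel call; the typings do not say what units `budget` uses or which `features` strings are recognized, so both values below are guesses for illustration only:

// Hypothetical: ask the catalog for chat models meeting rough requirements.
import { suggestModel } from 'ohlcv-ai';

const candidates = suggestModel({
    taskType: 'chat',
    budget: 0.01,            // assumed to be a per-1K-token ceiling; undocumented
    contextLength: 16000,
    features: ['streaming'], // feature names are not enumerated in the typings
});
console.log(candidates.map((m) => m.name));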

export { }