ohlcv-ai 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +202 -0
- package/README.md +3 -0
- package/dist/aliyun/index.d.ts.map +1 -0
- package/dist/aliyun/model.d.ts.map +1 -0
- package/dist/deepseek/index.d.ts.map +1 -0
- package/dist/deepseek/model.d.ts.map +1 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +40 -0
- package/dist/openai/index.d.ts.map +1 -0
- package/dist/openai/model.d.ts.map +1 -0
- package/dist/types.d.ts.map +1 -0
- package/package.json +42 -0
- package/src/aliyun/index.ts +446 -0
- package/src/aliyun/model.ts +475 -0
- package/src/deepseek/index.ts +479 -0
- package/src/deepseek/model.ts +455 -0
- package/src/index.ts +50 -0
- package/src/openai/index.ts +766 -0
- package/src/openai/model.ts +430 -0
- package/src/types.ts +12 -0
- package/tsconfig.json +26 -0
@@ -0,0 +1,766 @@
+import { ChatMessage, OHLCV } from '@/types';
+import { OpenAIModelType, OPENAI_MODELS } from './model';
+
+// Minimal configuration
+export interface OpenAIConfig {
+  apiKey: string;
+  modelType?: OpenAIModelType;
+  timeout?: number;
+  organization?: string;
+  baseURL?: string;
+}
+
+// Chat options
+export interface OpenAIChatOptions {
+  temperature?: number;
+  maxTokens?: number;
+  stream?: boolean;
+  systemPrompt?: string;
+  modelType?: OpenAIModelType;
+  topP?: number;
+  frequencyPenalty?: number;
+  presencePenalty?: number;
+  stop?: string[];
+}
+
+// Streaming response callback
+export type OpenAIStreamCallback = (chunk: string, isEnd: boolean) => void;
+
+export class OpenAI {
+  private apiKey: string;
+  private modelType: OpenAIModelType;
+  private timeout: number;
+  private organization?: string;
+  private baseURL: string;
+
+  /**
+   * Constructor - Minimal configuration
+   * @param config.apiKey - API key (required)
+   * @param config.modelType - Model type, default gpt-3.5-turbo
+   * @param config.timeout - Timeout, default 30 seconds
+   * @param config.organization - Organization ID (optional)
+   * @param config.baseURL - Custom base URL (optional)
+   */
+  constructor(config: OpenAIConfig) {
+    this.apiKey = config.apiKey;
+    this.modelType = config.modelType || OpenAIModelType.GPT3_5_TURBO;
+    this.timeout = config.timeout || 30000;
+    this.organization = config.organization;
+    this.baseURL = config.baseURL || 'https://api.openai.com/v1';
+
+    if (!this.apiKey) {
+      throw new Error('API Key cannot be empty');
+    }
+    const modelConfig = OPENAI_MODELS.get(this.modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${this.modelType}`);
+    }
+  }
+
+  /**
+   * Simplest method: single conversation
+   * @param message - User message
+   * @param options - Chat options
+   * @returns AI response
+   */
+  async chat(message: string, options?: OpenAIChatOptions): Promise<string> {
+    const messages: ChatMessage[] = [];
+    if (options?.systemPrompt) {
+      messages.push({ role: 'system', content: options.systemPrompt });
+    }
+    messages.push({ role: 'user', content: message });
+    const response = await this.chatCompletion(messages, {
+      temperature: options?.temperature,
+      maxTokens: options?.maxTokens,
+      stream: false,
+      modelType: options?.modelType,
+      topP: options?.topP,
+      frequencyPenalty: options?.frequencyPenalty,
+      presencePenalty: options?.presencePenalty,
+      stop: options?.stop
+    });
+    return this.extractContent(response);
+  }
+
+  /**
+   * Multi-turn conversation
+   * @param messages - Message history
+   * @param options - Chat options
+   * @returns Complete API response
+   */
+  async chatCompletion(
+    messages: ChatMessage[],
+    options?: {
+      temperature?: number;
+      maxTokens?: number;
+      stream?: boolean;
+      modelType?: OpenAIModelType;
+      topP?: number;
+      frequencyPenalty?: number;
+      presencePenalty?: number;
+      stop?: string[];
+    }
+  ): Promise<any> {
+    const modelType = options?.modelType || this.modelType;
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+    const temperature = options?.temperature ?? 0.7;
+    const maxTokens = options?.maxTokens ?? 1000;
+    const stream = options?.stream ?? false;
+    const endpoint = modelConfig.endpoint;
+
+    const requestData = this.buildOpenAIRequest(
+      modelConfig.name,
+      messages,
+      temperature,
+      maxTokens,
+      stream,
+      options
+    );
+
+    try {
+      const response = await this.makeRequest(endpoint, requestData, stream);
+      return response;
+    } catch (error: any) {
+      throw new Error(`OpenAI request failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Streaming conversation
+   * @param messages - Message history
+   * @param callback - Streaming callback function
+   * @param options - Chat options
+   */
+  async chatStream(
+    messages: ChatMessage[],
+    callback: OpenAIStreamCallback,
+    options?: {
+      temperature?: number;
+      maxTokens?: number;
+      modelType?: OpenAIModelType;
+      topP?: number;
+      frequencyPenalty?: number;
+      presencePenalty?: number;
+      stop?: string[];
+    }
+  ): Promise<void> {
+    const modelType = options?.modelType || this.modelType;
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+
+    const temperature = options?.temperature ?? 0.7;
+    const maxTokens = options?.maxTokens ?? 1000;
+    const requestData = this.buildOpenAIRequest(
+      modelConfig.name,
+      messages,
+      temperature,
+      maxTokens,
+      true,
+      options
+    );
+
+    try {
+      await this.makeStreamRequest(modelConfig.endpoint, requestData, callback);
+    } catch (error: any) {
+      throw new Error(`Streaming request failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Generate images using DALL-E
+   * @param prompt - Image generation prompt
+   * @param options - Image generation options
+   * @returns Generated image URLs
+   */
+  async generateImage(
+    prompt: string,
+    options?: {
+      modelType?: OpenAIModelType;
+      n?: number;
+      size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792';
+      quality?: 'standard' | 'hd';
+      style?: 'vivid' | 'natural';
+      responseFormat?: 'url' | 'b64_json';
+    }
+  ): Promise<string[]> {
+    const modelType = options?.modelType || OpenAIModelType.DALL_E_3;
+    if (modelType !== OpenAIModelType.DALL_E_2 && modelType !== OpenAIModelType.DALL_E_3) {
+      throw new Error('Image generation only supports DALL-E models');
+    }
+
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+
+    const requestData = {
+      model: modelConfig.name,
+      prompt: prompt,
+      n: options?.n || 1,
+      size: options?.size || '1024x1024',
+      quality: options?.quality || 'standard',
+      style: options?.style || 'vivid',
+      response_format: options?.responseFormat || 'url'
+    };
+
+    try {
+      const response = await this.makeRequest(modelConfig.endpoint, requestData, false);
+      if (response.data && Array.isArray(response.data)) {
+        return response.data.map((item: any) =>
+          options?.responseFormat === 'b64_json' ? item.b64_json : item.url
+        );
+      }
+      throw new Error('Invalid response format from image generation');
+    } catch (error: any) {
+      throw new Error(`Image generation failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Create text embeddings
+   * @param input - Text or array of texts to embed
+   * @param options - Embedding options
+   * @returns Embedding vectors
+   */
+  async createEmbeddings(
+    input: string | string[],
+    options?: {
+      modelType?: OpenAIModelType;
+      dimensions?: number;
+    }
+  ): Promise<number[][]> {
+    const modelType = options?.modelType || OpenAIModelType.TEXT_EMBEDDING_ADA_002;
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+
+    const requestData: any = {
+      model: modelConfig.name,
+      input: input
+    };
+
+    if (options?.dimensions && modelConfig.name === OpenAIModelType.TEXT_EMBEDDING_3_SMALL) {
+      requestData.dimensions = options.dimensions;
+    }
+
+    try {
+      const response = await this.makeRequest(modelConfig.endpoint, requestData, false);
+      if (response.data && Array.isArray(response.data)) {
+        return response.data.map((item: any) => item.embedding);
+      }
+      throw new Error('Invalid response format from embeddings');
+    } catch (error: any) {
+      throw new Error(`Embedding creation failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Transcribe audio using Whisper
+   * @param audioData - Base64 encoded audio data or file path
+   * @param options - Transcription options
+   * @returns Transcribed text
+   */
+  async transcribeAudio(
+    audioData: string | File,
+    options?: {
+      modelType?: OpenAIModelType;
+      language?: string;
+      prompt?: string;
+      responseFormat?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
+      temperature?: number;
+    }
+  ): Promise<string> {
+    const modelType = options?.modelType || OpenAIModelType.WHISPER_1;
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+
+    const formData = new FormData();
+
+    if (typeof audioData === 'string') {
+      // Assuming it's a file path or base64
+      // In a real implementation, you'd need to convert to File or Blob
+      throw new Error('File path/Base64 support requires additional implementation');
+    } else {
+      formData.append('file', audioData);
+    }
+
+    formData.append('model', modelConfig.name);
+    if (options?.language) formData.append('language', options.language);
+    if (options?.prompt) formData.append('prompt', options.prompt);
+    if (options?.responseFormat) formData.append('response_format', options.responseFormat);
+    if (options?.temperature !== undefined) formData.append('temperature', options.temperature.toString());
+
+    try {
+      const response = await this.makeFormDataRequest(modelConfig.endpoint, formData, false);
+      return response.text || response.transcription || '';
+    } catch (error: any) {
+      throw new Error(`Audio transcription failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Text-to-speech conversion
+   * @param text - Text to convert to speech
+   * @param options - TTS options
+   * @returns Audio data as an ArrayBuffer
+   */
+  async textToSpeech(
+    text: string,
+    options?: {
+      modelType?: OpenAIModelType;
+      voice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
+      responseFormat?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';
+      speed?: number;
+    }
+  ): Promise<ArrayBuffer> {
+    const modelType = options?.modelType || OpenAIModelType.TTS_1_HD;
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+
+    const requestData = {
+      model: modelConfig.name,
+      input: text,
+      voice: options?.voice || 'alloy',
+      response_format: options?.responseFormat || 'mp3',
+      speed: options?.speed || 1.0
+    };
+
+    try {
+      const response = await this.makeRequest(modelConfig.endpoint, requestData, false, true);
+      return response;
+    } catch (error: any) {
+      throw new Error(`Text-to-speech conversion failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Content moderation
+   * @param input - Text to moderate
+   * @param options - Moderation options
+   * @returns Moderation results
+   */
+  async moderateContent(
+    input: string,
+    options?: {
+      modelType?: OpenAIModelType;
+    }
+  ): Promise<any> {
+    const modelType = options?.modelType || OpenAIModelType.MODERATION_LATEST;
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+
+    const requestData = {
+      model: modelConfig.name,
+      input: input
+    };
+
+    try {
+      const response = await this.makeRequest(modelConfig.endpoint, requestData, false);
+      return response.results || [];
+    } catch (error: any) {
+      throw new Error(`Content moderation failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Switch model
+   * @param modelType - New model type
+   */
+  setModel(modelType: OpenAIModelType): void {
+    const modelConfig = OPENAI_MODELS.get(modelType);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${modelType}`);
+    }
+    this.modelType = modelType;
+  }
+
+  /**
+   * Get current model configuration
+   */
+  getCurrentModel(): { name: string; displayName: string; description?: string } {
+    const modelConfig = OPENAI_MODELS.get(this.modelType);
+    if (!modelConfig) {
+      throw new Error(`Model configuration does not exist: ${this.modelType}`);
+    }
+    return {
+      name: modelConfig.name,
+      displayName: modelConfig.displayName,
+      description: modelConfig.description
+    };
+  }
+
+  /**
+   * Test connection
+   * @returns Connection test result
+   */
+  async testConnection(): Promise<{
+    success: boolean;
+    model: string;
+    response?: string;
+    error?: string;
+  }> {
+    try {
+      const response = await this.chat('Hello, respond with "OK" if you can hear me.');
+      return {
+        success: true,
+        model: this.modelType,
+        response: response
+      };
+    } catch (error: any) {
+      return {
+        success: false,
+        model: this.modelType,
+        error: error.message
+      };
+    }
+  }
+
+  /**
+   * Estimate cost for a request
+   * @param inputTokens - Number of input tokens
+   * @param outputTokens - Number of output tokens
+   * @param modelType - Model type (optional, uses current if not provided)
+   * @returns Cost estimate
+   */
+  estimateCost(
+    inputTokens: number,
+    outputTokens: number = 0,
+    modelType?: OpenAIModelType
+  ): { inputCost: number; outputCost: number; totalCost: number } {
+    const model = modelType || this.modelType;
+    const modelConfig = OPENAI_MODELS.get(model);
+    if (!modelConfig) {
+      throw new Error(`Unsupported model type: ${model}`);
+    }
+    const inputCost = ((modelConfig.inputCostPer1KTokens || 0) / 1000) * inputTokens;
+    const outputCost = ((modelConfig.outputCostPer1KTokens || 0) / 1000) * outputTokens;
+    return {
+      inputCost,
+      outputCost,
+      totalCost: inputCost + outputCost
+    };
+  }
+  private buildOpenAIRequest(
+    model: string,
+    messages: ChatMessage[],
+    temperature: number,
+    maxTokens: number,
+    stream: boolean,
+    options?: {
+      topP?: number;
+      frequencyPenalty?: number;
+      presencePenalty?: number;
+      stop?: string[];
+    }
+  ): any {
+    const request: any = {
+      model,
+      messages,
+      temperature,
+      max_tokens: maxTokens,
+      stream
+    };
+    if (options?.topP !== undefined) request.top_p = options.topP;
+    if (options?.frequencyPenalty !== undefined) request.frequency_penalty = options.frequencyPenalty;
+    if (options?.presencePenalty !== undefined) request.presence_penalty = options.presencePenalty;
+    if (options?.stop !== undefined) request.stop = options.stop;
+    return request;
+  }
+
+  private async makeRequest(
+    endpoint: string,
+    data: any,
+    stream: boolean,
+    binaryResponse: boolean = false
+  ): Promise<any> {
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+    try {
+      const fullEndpoint = endpoint.startsWith('http') ? endpoint : `${this.baseURL}${endpoint}`;
+      const headers: Record<string, string> = {
+        'Authorization': `Bearer ${this.apiKey}`,
+        'Content-Type': 'application/json',
+      };
+      if (this.organization) {
+        headers['OpenAI-Organization'] = this.organization;
+      }
+      const response = await fetch(fullEndpoint, {
+        method: 'POST',
+        headers,
+        body: JSON.stringify(data),
+        signal: controller.signal
+      });
+      clearTimeout(timeoutId);
+      if (!response.ok) {
+        const errorText = await response.text();
+        let errorMessage = errorText;
+        try {
+          const errorJson = JSON.parse(errorText);
+          errorMessage = errorJson.error?.message || errorText;
+        } catch {
+          // Response body was not JSON; keep the raw text
+        }
+        throw new Error(`HTTP ${response.status}: ${errorMessage}`);
+      }
+      if (binaryResponse) {
+        return await response.arrayBuffer();
+      }
+      if (stream) {
+        return response.body;
+      }
+      return await response.json();
+    } catch (error: any) {
+      clearTimeout(timeoutId);
+      if (error.name === 'AbortError') {
+        throw new Error(`Request timeout (${this.timeout}ms)`);
+      }
+      throw error;
+    }
+  }
+
+  private async makeFormDataRequest(
+    endpoint: string,
+    formData: FormData,
+    stream: boolean
+  ): Promise<any> {
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+    try {
+      const fullEndpoint = endpoint.startsWith('http') ? endpoint : `${this.baseURL}${endpoint}`;
+      const headers: Record<string, string> = {
+        'Authorization': `Bearer ${this.apiKey}`,
+      };
+      if (this.organization) {
+        headers['OpenAI-Organization'] = this.organization;
+      }
+      const response = await fetch(fullEndpoint, {
+        method: 'POST',
+        headers,
+        body: formData,
+        signal: controller.signal
+      });
+      clearTimeout(timeoutId);
+      if (!response.ok) {
+        const errorText = await response.text();
+        let errorMessage = errorText;
+        try {
+          const errorJson = JSON.parse(errorText);
+          errorMessage = errorJson.error?.message || errorText;
+        } catch {
+          // Response body was not JSON; keep the raw text
+        }
+        throw new Error(`HTTP ${response.status}: ${errorMessage}`);
+      }
+      if (stream) {
+        return response.body;
+      }
+      return await response.json();
+    } catch (error: any) {
+      clearTimeout(timeoutId);
+      if (error.name === 'AbortError') {
+        throw new Error(`Request timeout (${this.timeout}ms)`);
+      }
+      throw error;
+    }
+  }
+
+  private async makeStreamRequest(
+    endpoint: string,
+    data: any,
+    callback: OpenAIStreamCallback
+  ): Promise<void> {
+    const response = await this.makeRequest(endpoint, data, true);
+    if (!response) {
+      throw new Error('Failed to get streaming response');
+    }
+    const reader = response.getReader();
+    const decoder = new TextDecoder('utf-8');
+    let buffer = '';
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) {
+          callback('', true);
+          break;
+        }
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split('\n');
+        buffer = lines.pop() || '';
+        for (const line of lines) {
+          if (line.startsWith('data: ')) {
+            const data = line.slice(6);
+            if (data === '[DONE]') {
+              callback('', true);
+              return;
+            }
+            try {
+              const parsed = JSON.parse(data);
+              if (parsed.choices?.[0]?.delta?.content) {
+                callback(parsed.choices[0].delta.content, false);
+              }
+            } catch (e) {
+              // Ignore parsing errors
+            }
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+
+  private extractContent(response: any): string {
+    if (response.choices?.[0]?.message?.content) {
+      return response.choices[0].message.content;
+    } else if (response.data?.[0]?.b64_json) {
+      return response.data[0].b64_json;
+    } else if (response.data?.[0]?.url) {
+      return response.data[0].url;
+    } else if (response.text) {
+      return response.text;
+    } else {
+      throw new Error('Unable to parse response content');
+    }
+  }
+
+  /**
+   * Specialized method for processing OHLCV arrays
+   * @param ohlcvArray - OHLCV data array
+   * @param instructions - Processing instructions (optional, default: "Based on these OHLCV data, predict the next period")
+   * @param count - Number of OHLCV data items to return (optional, default: 1)
+   * @param options - Chat options
+   * @returns Predicted OHLCV array
+   */
+  async analyzeOHLCV(
+    ohlcvArray: OHLCV[],
+    instructions?: string,
+    count?: number,
+    options?: OpenAIChatOptions
+  ): Promise<OHLCV[]> {
+    const processedInstructions = instructions ||
+      "Based on these OHLCV data, predict the next period";
+    const processedCount = count || 1;
+    if (!Number.isInteger(processedCount) || processedCount <= 0) {
+      throw new Error(`Invalid count parameter: ${processedCount}. Must be a positive integer.`);
+    }
+    const MAX_COUNT = 50;
+    if (processedCount > MAX_COUNT) {
+      throw new Error(`Count parameter too large: ${processedCount}. Maximum allowed is ${MAX_COUNT}. Please reduce the count or split your request.`);
+    }
+    const countMessage = processedCount === 1
+      ? "Return EXACTLY 1 OHLCV object for the next period."
+      : `Return EXACTLY ${processedCount} consecutive OHLCV objects for the next ${processedCount} periods.`;
+    const systemPrompt = `You are a professional financial data analysis AI. The user will give you an array of OHLCV (Open, High, Low, Close, Volume) data.
+Your task: ${processedInstructions}
+CRITICAL RULES:
+1. ${countMessage}
+2. Return ONLY a JSON array of OHLCV objects, NO explanations, comments, or other text
+3. The OHLCV array format must match: [{open, high, low, close, volume}, ...]
+4. All numbers must be valid numbers
+5. Ensure technical rationality (high >= low, high >= close >= low, volume >= 0)
+6. Maintain consistency with historical trends and patterns
+7. For technical analysis, provide reasonable values based on typical patterns
+8. Do not include markdown formatting, only pure JSON
+${processedCount === 1 ?
+`Example of valid response for 1 period:
+[{"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000}]` :
+`Example of valid response for ${processedCount} periods:
+[
+{"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000},
+{"open": 117.5, "high": 120.0, "low": 116.0, "close": 119.0, "volume": 1400000}
+${processedCount > 2 ? `,
+... ${processedCount - 2} more OHLCV objects following the same pattern` : ''}
+]`}`;
+    const dataString = JSON.stringify(ohlcvArray, null, 2);
+    const userMessage = `Here is the historical OHLCV data (${ohlcvArray.length} periods):
+${dataString}
+Please process this data according to the system instructions. Remember to return EXACTLY ${processedCount} OHLCV object(s) in a JSON array with no additional text.`;
+    const messages: ChatMessage[] = [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: userMessage }
+    ];
+    try {
+      const estimatedTokens = processedCount * 50 + 100;
+      const maxTokens = Math.max(options?.maxTokens || 1000, estimatedTokens);
+      const response = await this.chatCompletion(messages, {
+        temperature: options?.temperature ?? 0.3,
+        maxTokens,
+        stream: false,
+        modelType: options?.modelType,
+        topP: options?.topP,
+        frequencyPenalty: options?.frequencyPenalty,
+        presencePenalty: options?.presencePenalty,
+        stop: options?.stop
+      });
+      const content = this.extractContent(response);
+      const result = this.parseOHLCVResponse(content);
+      if (result.length !== processedCount) {
+        throw new Error(`AI returned ${result.length} OHLCV objects, but expected ${processedCount}.`);
+      }
+      return result;
+    } catch (error: any) {
+      throw new Error(`OHLCV analysis failed: ${error.message}`);
+    }
+  }
+
+  /**
+   * Parse the OHLCV response returned by the AI
+   * @private
+   */
+  private parseOHLCVResponse(content: string): OHLCV[] {
+    try {
+      const parsed = JSON.parse(content);
+      if (!Array.isArray(parsed)) {
+        throw new Error('Response is not in array format');
+      }
+      // Validate each element is a valid OHLCV object
+      const result = parsed.map((item, index) => {
+        if (typeof item !== 'object' || item === null) {
+          throw new Error(`Element ${index} is not a valid object`);
+        }
+        const { open, high, low, close, volume } = item;
+        const requiredFields = ['open', 'high', 'low', 'close', 'volume'];
+        for (const field of requiredFields) {
+          if (typeof item[field] !== 'number' || isNaN(item[field])) {
+            throw new Error(`Element ${index} field ${field} is not a valid number`);
+          }
+        }
+        // Validate data rationality
+        if (high < low) {
+          throw new Error(`Element ${index}: high cannot be lower than low`);
+        }
+        if (close < low || close > high) {
+          throw new Error(`Element ${index}: close must be between low and high`);
+        }
+        return {
+          open: Number(open),
+          high: Number(high),
+          low: Number(low),
+          close: Number(close),
+          volume: Number(volume)
+        };
+      });
+      return result;
+    } catch (error) {
+      // Retry once on the bracketed portion only (e.g. when the model wrapped the JSON in prose),
+      // but avoid recursing on an identical string, which would never terminate.
+      const jsonMatch = content.match(/\[[\s\S]*\]/);
+      if (jsonMatch && jsonMatch[0] !== content) {
+        return this.parseOHLCVResponse(jsonMatch[0]);
+      }
+      throw new Error(`Unable to parse AI returned OHLCV data: ${error}\nOriginal content: ${content.substring(0, 200)}...`);
+    }
+  }
+}
+
+/**
+ * Factory function for quick instance creation
+ */
+export function createOpenAI(apiKey: string, modelType?: OpenAIModelType): OpenAI {
+  return new OpenAI({ apiKey, modelType });
+}
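
For orientation, a minimal usage sketch of the API added in this file follows. It is illustrative only: the import path assumes the package entry point (package/src/index.ts, not shown in this excerpt) re-exports createOpenAI and the OHLCV type, 'YOUR_API_KEY' is a placeholder, and the OHLCV literals use only the five fields that parseOHLCVResponse validates (the actual interface in src/types.ts may declare more, such as a timestamp).

import { createOpenAI, OHLCV } from 'ohlcv-ai'; // assumed re-exports from src/index.ts

async function demo(): Promise<void> {
  // Default model is gpt-3.5-turbo, per the constructor above.
  const ai = createOpenAI('YOUR_API_KEY');

  // Single-turn chat
  const reply = await ai.chat('Summarize the market outlook in one sentence.');
  console.log(reply);

  // Ask for the next 2 periods based on historical candles
  const history: OHLCV[] = [
    { open: 100, high: 105, low: 99, close: 104, volume: 1200000 },
    { open: 104, high: 108, low: 103, close: 107, volume: 1350000 }
  ];
  const forecast = await ai.analyzeOHLCV(history, undefined, 2);
  console.log(forecast); // expected: an array of exactly 2 OHLCV objects
}

demo().catch(console.error);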