ohlcv-ai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,447 @@
+ import { ChatMessage, OHLCV } from '@/types';
+ import { AliYunModelType, ALIYUN_MODELS } from './model';
+
+ // Minimal configuration
+ export interface AliyunConfig {
+   apiKey: string;
+   modelType?: AliYunModelType;
+   timeout?: number;
+ }
+
+ // Chat options
+ export interface AliYunChatOptions {
+   temperature?: number;
+   maxTokens?: number;
+   stream?: boolean;
+   systemPrompt?: string;
+   modelType?: AliYunModelType; // Optional per-call model override
+ }
+
+ // Streaming response callback
+ export type AliYunStreamCallback = (chunk: string, isEnd: boolean) => void;
+
+ export class AliyunAI {
+   private apiKey: string;
+   private modelType: AliYunModelType;
+   private timeout: number;
+
+   /**
+    * Constructor - minimal configuration
+    * @param config.apiKey - API key (required)
+    * @param config.modelType - Model type, defaults to qwen-turbo
+    * @param config.timeout - Timeout in milliseconds, defaults to 30 seconds
+    */
+   constructor(config: AliyunConfig) {
+     this.apiKey = config.apiKey;
+     this.modelType = config.modelType || AliYunModelType.QWEN_TURBO;
+     this.timeout = config.timeout || 30000;
+     if (!this.apiKey) {
+       throw new Error('API Key cannot be empty');
+     }
+     const modelConfig = ALIYUN_MODELS.get(this.modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${this.modelType}`);
+     }
+   }
+
+   /**
+    * Simplest method: single-turn conversation
+    * @param message - User message
+    * @param options - Chat options
+    * @returns AI response
+    */
+   async chat(message: string, options?: AliYunChatOptions): Promise<string> {
+     const messages: ChatMessage[] = [];
+     if (options?.systemPrompt) {
+       messages.push({ role: 'system', content: options.systemPrompt });
+     }
+     messages.push({ role: 'user', content: message });
+     const response = await this.chatCompletion(messages, {
+       temperature: options?.temperature,
+       maxTokens: options?.maxTokens,
+       stream: false,
+       modelType: options?.modelType
+     });
+     return this.extractContent(response);
+   }
+
+   /**
+    * Multi-turn conversation
+    * @param messages - Message history
+    * @param options - Chat options
+    * @returns Complete API response
+    */
+   async chatCompletion(
+     messages: ChatMessage[],
+     options?: {
+       temperature?: number;
+       maxTokens?: number;
+       stream?: boolean;
+       modelType?: AliYunModelType;
+     }
+   ): Promise<any> {
+     const modelType = options?.modelType || this.modelType;
+     const modelConfig = ALIYUN_MODELS.get(modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${modelType}`);
+     }
+     const temperature = options?.temperature ?? 0.7;
+     const maxTokens = options?.maxTokens ?? 1000;
+     const stream = options?.stream ?? false;
+     const endpoint = modelConfig.endpoint;
+     const format = modelConfig.format;
+     const requestData = format === 'openai'
+       ? this.buildOpenAIRequest(modelConfig.name, messages, temperature, maxTokens, stream)
+       : this.buildDashScopeRequest(modelConfig.name, messages, temperature, maxTokens);
+     try {
+       const response = await this.makeRequest(endpoint, requestData, stream);
+       return response;
+     } catch (error: any) {
+       throw new Error(`Aliyun AI request failed: ${error.message}`);
+     }
+   }
+
+   /**
+    * Streaming conversation (only supports OpenAI format)
+    * @param messages - Message history
+    * @param callback - Streaming callback function
+    * @param options - Chat options
+    */
+   async chatStream(
+     messages: ChatMessage[],
+     callback: AliYunStreamCallback,
+     options?: {
+       temperature?: number;
+       maxTokens?: number;
+       modelType?: AliYunModelType;
+     }
+   ): Promise<void> {
+     const modelType = options?.modelType || this.modelType;
+     const modelConfig = ALIYUN_MODELS.get(modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${modelType}`);
+     }
+     if (modelConfig.format !== 'openai') {
+       throw new Error('Streaming conversation only supports OpenAI format models');
+     }
+     const temperature = options?.temperature ?? 0.7;
+     const maxTokens = options?.maxTokens ?? 1000;
+     const requestData = this.buildOpenAIRequest(
+       modelConfig.name,
+       messages,
+       temperature,
+       maxTokens,
+       true
+     );
+     try {
+       await this.makeStreamRequest(modelConfig.endpoint, requestData, callback);
+     } catch (error: any) {
+       throw new Error(`Streaming request failed: ${error.message}`);
+     }
+   }
+
+   /**
+    * Switch model
+    * @param modelType - New model type
+    */
+   setModel(modelType: AliYunModelType): void {
+     const modelConfig = ALIYUN_MODELS.get(modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${modelType}`);
+     }
+     this.modelType = modelType;
+   }
+
+   /**
+    * Get current model configuration
+    */
+   getCurrentModel(): { name: string; displayName: string; description?: string } {
+     const modelConfig = ALIYUN_MODELS.get(this.modelType);
+     if (!modelConfig) {
+       throw new Error(`Model configuration does not exist: ${this.modelType}`);
+     }
+     return {
+       name: modelConfig.name,
+       displayName: modelConfig.displayName,
+       description: modelConfig.description
+     };
+   }
+
+   /**
+    * Test connection
+    * @returns Connection test result
+    */
+   async testConnection(): Promise<{
+     success: boolean;
+     model: string;
+     response?: string;
+     error?: string;
+   }> {
+     try {
+       const response = await this.chat('Hello, respond with "OK" if you can hear me.');
+       return {
+         success: true,
+         model: this.modelType,
+         response
+       };
+     } catch (error: any) {
+       return {
+         success: false,
+         model: this.modelType,
+         error: error.message
+       };
+     }
+   }
+
+   private buildOpenAIRequest(
+     model: string,
+     messages: ChatMessage[],
+     temperature: number,
+     maxTokens: number,
+     stream: boolean
+   ): any {
+     return {
+       model,
+       messages,
+       temperature,
+       max_tokens: maxTokens,
+       stream
+     };
+   }
+
+   private buildDashScopeRequest(
+     model: string,
+     messages: ChatMessage[],
+     temperature: number,
+     maxTokens: number
+   ): any {
+     return {
+       model,
+       input: { messages },
+       parameters: {
+         temperature,
+         max_tokens: maxTokens,
+         result_format: 'message'
+       }
+     };
+   }
+
+   private async makeRequest(endpoint: string, data: any, stream: boolean): Promise<any> {
+     const controller = new AbortController();
+     const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+     try {
+       const response = await fetch(endpoint, {
+         method: 'POST',
+         headers: {
+           'Authorization': `Bearer ${this.apiKey}`,
+           'Content-Type': 'application/json; charset=utf-8',
+           'Accept': 'application/json'
+         },
+         body: JSON.stringify(data),
+         signal: controller.signal
+       });
+       clearTimeout(timeoutId);
+       if (!response.ok) {
+         const errorText = await response.text();
+         throw new Error(`HTTP ${response.status}: ${errorText}`);
+       }
+       if (stream) {
+         return response.body;
+       }
+       return await response.json();
+     } catch (error: any) {
+       clearTimeout(timeoutId);
+       if (error.name === 'AbortError') {
+         throw new Error(`Request timeout (${this.timeout}ms)`);
+       }
+       throw error;
+     }
+   }
+
+   private async makeStreamRequest(
+     endpoint: string,
+     data: any,
+     callback: AliYunStreamCallback
+   ): Promise<void> {
+     const response = await this.makeRequest(endpoint, data, true);
+     if (!response) {
+       throw new Error('Failed to get streaming response');
+     }
+     const reader = response.getReader();
+     const decoder = new TextDecoder('utf-8');
+     let buffer = '';
+     try {
+       while (true) {
+         const { done, value } = await reader.read();
+         if (done) {
+           callback('', true);
+           break;
+         }
+         buffer += decoder.decode(value, { stream: true });
+         const lines = buffer.split('\n');
+         buffer = lines.pop() || '';
+         for (const line of lines) {
+           if (line.startsWith('data: ')) {
+             const payload = line.slice(6);
+             if (payload === '[DONE]') {
+               callback('', true);
+               return;
+             }
+             try {
+               const parsed = JSON.parse(payload);
+               if (parsed.choices?.[0]?.delta?.content) {
+                 callback(parsed.choices[0].delta.content, false);
+               }
+             } catch (e) {
+               // Ignore parsing errors
+             }
+           }
+         }
+       }
+     } finally {
+       reader.releaseLock();
+     }
+   }
+
+   private extractContent(response: any): string {
+     if (response.choices?.[0]?.message?.content) {
+       return response.choices[0].message.content;
+     } else if (response.output?.choices?.[0]?.message?.content) {
+       return response.output.choices[0].message.content;
+     } else if (response.output?.text) {
+       return response.output.text;
+     } else {
+       throw new Error('Unable to parse response content');
+     }
+   }
+
+   /**
+    * Specialized method for predicting OHLCV data from a historical array
+    * @param ohlcvArray - OHLCV data array
+    * @param instructions - Processing instructions, in Chinese or English (optional, default: "Based on these OHLCV data, predict the next period")
+    * @param count - Number of OHLCV data items to return (optional, default: 1)
+    * @param options - Chat options
+    * @returns Predicted OHLCV array
+    */
+   async predictingOHLCV(
+     ohlcvArray: OHLCV[],
+     instructions?: string,
+     count?: number,
+     options?: AliYunChatOptions
+   ): Promise<OHLCV[]> {
+     const processedInstructions = instructions ||
+       "Based on these OHLCV data, predict the next period";
+     const processedCount = count || 1;
+     if (!Number.isInteger(processedCount) || processedCount <= 0) {
+       throw new Error(`Invalid count parameter: ${processedCount}. Must be a positive integer.`);
+     }
+     const MAX_COUNT = 50;
+     if (processedCount > MAX_COUNT) {
+       throw new Error(`Count parameter too large: ${processedCount}. Maximum allowed is ${MAX_COUNT}. Please reduce the count or split your request.`);
+     }
+     const countMessage = processedCount === 1
+       ? "Return EXACTLY 1 OHLCV object for the next period."
+       : `Return EXACTLY ${processedCount} consecutive OHLCV objects for the next ${processedCount} periods.`;
+     const systemPrompt = `You are a professional financial data analysis AI. The user will give you an array of OHLCV (Open, High, Low, Close, Volume) data.
+ Your task: ${processedInstructions}
+ CRITICAL RULES:
+ 1. ${countMessage}
+ 2. Return ONLY a JSON array of OHLCV objects, NO explanations, comments, or other text
+ 3. The OHLCV array format must match: [{open, high, low, close, volume}, ...]
+ 4. All numbers must be valid numbers
+ 5. Ensure technical rationality (high >= low, high >= close >= low, volume >= 0)
+ 6. Maintain consistency with historical trends and patterns
+ 7. For technical analysis, provide reasonable values based on typical patterns
+ 8. Do not include markdown formatting, only pure JSON
+ ${processedCount === 1 ?
+ `Example of valid response for 1 period:
+ [{"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000}]` :
+ `Example of valid response for ${processedCount} periods:
+ [
+ {"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000},
+ {"open": 117.5, "high": 120.0, "low": 116.0, "close": 119.0, "volume": 1400000}
+ ${processedCount > 2 ? `,
+ ... ${processedCount - 2} more OHLCV objects following the same pattern` : ''}
+ ]`}`;
+     const dataString = JSON.stringify(ohlcvArray, null, 2);
+     const userMessage = `Here is the historical OHLCV data (${ohlcvArray.length} periods):
+ ${dataString}
+ Please process this data according to the system instructions. Remember to return EXACTLY ${processedCount} OHLCV object(s) in a JSON array with no additional text.`;
+     const messages: ChatMessage[] = [
+       { role: 'system', content: systemPrompt },
+       { role: 'user', content: userMessage }
+     ];
+     try {
+       const estimatedTokens = processedCount * 50 + 100;
+       const maxTokens = Math.max(options?.maxTokens || 1000, estimatedTokens);
+       const response = await this.chatCompletion(messages, {
+         temperature: options?.temperature ?? 0.3,
+         maxTokens,
+         stream: false,
+         modelType: options?.modelType
+       });
+       const content = this.extractContent(response);
+       const result = this.parseOHLCVResponse(content);
+       if (result.length !== processedCount) {
+         throw new Error(`AI returned ${result.length} OHLCV objects, but expected ${processedCount}.`);
+       }
+       return result;
+     } catch (error: any) {
+       throw new Error(`OHLCV analysis failed: ${error.message}`);
+     }
+   }
+
+   /**
+    * Parse the AI-returned OHLCV response
+    * @private
+    */
+   private parseOHLCVResponse(content: string): OHLCV[] {
+     try {
+       const parsed = JSON.parse(content);
+       if (!Array.isArray(parsed)) {
+         throw new Error('Response is not in array format');
+       }
+       // Validate that each element is a valid OHLCV object
+       const result = parsed.map((item, index) => {
+         if (typeof item !== 'object' || item === null) {
+           throw new Error(`Element ${index} is not a valid object`);
+         }
+         const { open, high, low, close, volume } = item;
+         const requiredFields = ['open', 'high', 'low', 'close', 'volume'];
+         for (const field of requiredFields) {
+           if (typeof item[field] !== 'number' || isNaN(item[field])) {
+             throw new Error(`Element ${index} field ${field} is not a valid number`);
+           }
+         }
+         // Validate price consistency
+         if (high < low) {
+           throw new Error(`Element ${index}: high cannot be lower than low`);
+         }
+         if (close < low || close > high) {
+           throw new Error(`Element ${index}: close must be between low and high`);
+         }
+         return {
+           open: Number(open),
+           high: Number(high),
+           low: Number(low),
+           close: Number(close),
+           volume: Number(volume)
+         };
+       });
+       return result;
+     } catch (error) {
+       const jsonMatch = content.match(/\[[\s\S]*\]/);
+       if (jsonMatch && jsonMatch[0] !== content) {
+         return this.parseOHLCVResponse(jsonMatch[0]);
+       }
+       throw new Error(`Unable to parse AI returned OHLCV data: ${error}\nOriginal content: ${content.substring(0, 200)}...`);
+     }
+   }
+
+ }
+
+ /**
+  * Factory function for quick instance creation
+  */
+ export function createAliyunAI(apiKey: string, modelType?: AliYunModelType): AliyunAI {
+   return new AliyunAI({ apiKey, modelType });
+ }
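
For orientation, below is a minimal usage sketch based only on the API surface visible in this file. It is not part of the published package content. The import path assumes the package root re-exports this module, the OHLCV literals assume the type from '@/types' carries only the five fields validated in parseOHLCVResponse (it may include more, e.g. a timestamp), and the API key is a placeholder.

// Usage sketch (assumed entry point; adjust the import path to match the package exports).
import { createAliyunAI, AliYunModelType } from 'ohlcv-ai';

async function demo(): Promise<void> {
  // Placeholder key; supply a real DashScope API key.
  const ai = createAliyunAI('YOUR_API_KEY', AliYunModelType.QWEN_TURBO);

  // Single-turn chat.
  const reply = await ai.chat('Say "OK" if you can read this.');
  console.log(reply);

  // Predict the next 2 OHLCV periods from a short history
  // (field shape assumed from the validation in parseOHLCVResponse).
  const history = [
    { open: 100, high: 105, low: 98, close: 104, volume: 1200000 },
    { open: 104, high: 108, low: 103, close: 107, volume: 1350000 }
  ];
  const forecast = await ai.predictingOHLCV(history, undefined, 2);
  console.log(forecast);
}

demo().catch(console.error);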