ohlcv-ai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,479 @@
+ import { OHLCV } from "@/types";
+ import { DeepSeekModelType, DEEPSEEK_MODELS } from "./model";
+
+ // Chat message interface
+ export interface DeepSeekChatMessage {
+   role: 'system' | 'user' | 'assistant' | 'tool';
+   content: string;
+   name?: string;
+   tool_call_id?: string;
+ }
+
+ // Minimal configuration
+ export interface DeepSeekConfig {
+   apiKey: string;
+   modelType?: DeepSeekModelType;
+   timeout?: number;
+   baseURL?: string;
+ }
+
+ // Chat options
+ export interface DeepSeekChatOptions {
+   temperature?: number;
+   maxTokens?: number;
+   stream?: boolean;
+   systemPrompt?: string;
+   modelType?: DeepSeekModelType;
+   topP?: number;
+   frequencyPenalty?: number;
+   presencePenalty?: number;
+   stop?: string[];
+   tools?: any[];
+   toolChoice?: string | object;
+ }
+
+ // Streaming response callback
+ export type DeepSeekStreamCallback = (chunk: string, isEnd: boolean) => void;
+
+ export class DeepSeekAI {
+   private apiKey: string;
+   private modelType: DeepSeekModelType;
+   private timeout: number;
+   private baseURL: string;
+
+   /**
+    * Constructor - Minimal configuration
+    * @param config.apiKey - API key (required)
+    * @param config.modelType - Model type, default deepseek-chat
+    * @param config.timeout - Timeout, default 30 seconds
+    * @param config.baseURL - Base URL for API, default official endpoint
+    */
+   constructor(config: DeepSeekConfig) {
+     this.apiKey = config.apiKey;
+     this.modelType = config.modelType || DeepSeekModelType.DEEPSEEK_CHAT;
+     this.timeout = config.timeout || 30000;
+     this.baseURL = config.baseURL || 'https://api.deepseek.com';
+     if (!this.apiKey) {
+       throw new Error('API Key cannot be empty');
+     }
+     const modelConfig = DEEPSEEK_MODELS.get(this.modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${this.modelType}`);
+     }
+   }
+
+   /**
+    * Simplest method: single conversation
+    * @param message - User message
+    * @param options - Chat options
+    * @returns AI response
+    */
+   async chat(message: string, options?: DeepSeekChatOptions): Promise<string> {
+     const messages: DeepSeekChatMessage[] = [];
+     if (options?.systemPrompt) {
+       messages.push({ role: 'system', content: options.systemPrompt });
+     }
+     messages.push({ role: 'user', content: message });
+     const response = await this.chatCompletion(messages, {
+       temperature: options?.temperature,
+       maxTokens: options?.maxTokens,
+       stream: false,
+       modelType: options?.modelType,
+       topP: options?.topP,
+       frequencyPenalty: options?.frequencyPenalty,
+       presencePenalty: options?.presencePenalty,
+       stop: options?.stop,
+       tools: options?.tools,
+       toolChoice: options?.toolChoice
+     });
+     return this.extractContent(response);
+   }
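For orientation, a minimal sketch of how chat() might be called from application code (illustrative only, not part of the file above; it assumes a valid API key and runs inside an async function):

    const ai = new DeepSeekAI({ apiKey: process.env.DEEPSEEK_API_KEY ?? '' });
    // An optional systemPrompt is prepended as a system message before the user turn.
    const reply = await ai.chat('Summarize the last trading session in one sentence.', {
      systemPrompt: 'You are a concise market commentator.'
    });
    console.log(reply);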
+
+   /**
+    * Multi-turn conversation
+    * @param messages - Message history
+    * @param options - Chat options
+    * @returns Complete API response
+    */
+   async chatCompletion(
+     messages: DeepSeekChatMessage[],
+     options?: DeepSeekChatOptions
+   ): Promise<any> {
+     const modelType = options?.modelType || this.modelType;
+     const modelConfig = DEEPSEEK_MODELS.get(modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${modelType}`);
+     }
+     const temperature = options?.temperature ?? 0.7;
+     const maxTokens = options?.maxTokens ?? 2000;
+     const stream = options?.stream ?? false;
+     const topP = options?.topP ?? 1.0;
+     const frequencyPenalty = options?.frequencyPenalty ?? 0.0;
+     const presencePenalty = options?.presencePenalty ?? 0.0;
+     const stop = options?.stop;
+     const tools = options?.tools;
+     const toolChoice = options?.toolChoice;
+     const endpoint = modelConfig.endpoint;
+     const requestData = this.buildOpenAIRequest(
+       modelConfig.name,
+       messages,
+       temperature,
+       maxTokens,
+       stream,
+       topP,
+       frequencyPenalty,
+       presencePenalty,
+       stop,
+       tools,
+       toolChoice
+     );
+     try {
+       const response = await this.makeRequest(endpoint, requestData, stream);
+       return response;
+     } catch (error: any) {
+       throw new Error(`DeepSeek AI request failed: ${error.message}`);
+     }
+   }
+
+   /**
+    * Streaming conversation
+    * @param messages - Message history
+    * @param callback - Streaming callback function
+    * @param options - Chat options
+    */
+   async chatStream(
+     messages: DeepSeekChatMessage[],
+     callback: DeepSeekStreamCallback,
+     options?: DeepSeekChatOptions
+   ): Promise<void> {
+     const modelType = options?.modelType || this.modelType;
+     const modelConfig = DEEPSEEK_MODELS.get(modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${modelType}`);
+     }
+     const temperature = options?.temperature ?? 0.7;
+     const maxTokens = options?.maxTokens ?? 2000;
+     const topP = options?.topP ?? 1.0;
+     const frequencyPenalty = options?.frequencyPenalty ?? 0.0;
+     const presencePenalty = options?.presencePenalty ?? 0.0;
+     const requestData = this.buildOpenAIRequest(
+       modelConfig.name,
+       messages,
+       temperature,
+       maxTokens,
+       true,
+       topP,
+       frequencyPenalty,
+       presencePenalty,
+       options?.stop,
+       options?.tools,
+       options?.toolChoice
+     );
+     try {
+       await this.makeStreamRequest(modelConfig.endpoint, requestData, callback);
+     } catch (error: any) {
+       throw new Error(`Streaming request failed: ${error.message}`);
+     }
+   }
+
+   /**
+    * Specialized method for processing OHLCV arrays
+    * @param ohlcvArray - OHLCV data array
+    * @param instructions - Processing instructions (optional)
+    * @param count - Number of OHLCV data items to return (optional, default: 1)
+    * @param options - Chat options
+    * @returns Predicted OHLCV array
+    */
+   async predictingOHLCV(
+     ohlcvArray: OHLCV[],
+     instructions?: string,
+     count?: number,
+     options?: DeepSeekChatOptions
+   ): Promise<OHLCV[]> {
+     const processedInstructions = instructions ||
+       "Based on these OHLCV data, predict the next period";
+     const processedCount = count ?? 1;
+     if (!Number.isInteger(processedCount) || processedCount <= 0) {
+       throw new Error(`Invalid count parameter: ${processedCount}. Must be a positive integer.`);
+     }
+     const MAX_COUNT = 50;
+     if (processedCount > MAX_COUNT) {
+       throw new Error(`Count parameter too large: ${processedCount}. Maximum allowed is ${MAX_COUNT}.`);
+     }
+     const countMessage = processedCount === 1
+       ? "Return EXACTLY 1 OHLCV object for the next period."
+       : `Return EXACTLY ${processedCount} consecutive OHLCV objects for the next ${processedCount} periods.`;
+     const systemPrompt = `You are a professional financial data analysis AI. The user will give you an array of OHLCV (Open, High, Low, Close, Volume) data.
+ Your task: ${processedInstructions}
+ CRITICAL RULES:
+ 1. ${countMessage}
+ 2. Return ONLY a JSON array of OHLCV objects, NO explanations, comments, or other text
+ 3. The OHLCV array format must match: [{open, high, low, close, volume}, ...]
+ 4. All numbers must be valid numbers
+ 5. Ensure technical rationality (high >= low, high >= close >= low, volume >= 0)
+ 6. Maintain consistency with historical trends and patterns
+ 7. For technical analysis, provide reasonable values based on typical patterns
+ 8. Do not include markdown formatting, only pure JSON
+
+ ${processedCount === 1 ?
+ `Example of valid response for 1 period:
+ [{"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000}]` :
+ `Example of valid response for ${processedCount} periods:
+ [
+ {"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000},
+ {"open": 117.5, "high": 120.0, "low": 116.0, "close": 119.0, "volume": 1400000}
+ ${processedCount > 2 ? `,
+ ... ${processedCount - 2} more OHLCV objects following the same pattern` : ''}
+ ]`}`;
+     const dataString = JSON.stringify(ohlcvArray, null, 2);
+     const userMessage = `Here is the historical OHLCV data (${ohlcvArray.length} periods):
+ ${dataString}
+ Please process this data according to the system instructions. Remember to return EXACTLY ${processedCount} OHLCV object(s) in a JSON array with no additional text.`;
+     const messages: DeepSeekChatMessage[] = [
+       { role: 'system', content: systemPrompt },
+       { role: 'user', content: userMessage }
+     ];
+     try {
+       const estimatedTokens = processedCount * 50 + 100;
+       const maxTokens = Math.max(options?.maxTokens ?? 2000, estimatedTokens);
+       const response = await this.chatCompletion(messages, {
+         temperature: options?.temperature ?? 0.3,
+         maxTokens,
+         stream: false,
+         modelType: options?.modelType || DeepSeekModelType.DEEPSEEK_FINANCE,
+         topP: options?.topP,
+         frequencyPenalty: options?.frequencyPenalty,
+         presencePenalty: options?.presencePenalty
+       });
+       const content = this.extractContent(response);
+       const result = this.parseOHLCVResponse(content);
+       if (result.length !== processedCount) {
+         throw new Error(`AI returned ${result.length} OHLCV objects, but expected ${processedCount}.`);
+       }
+       return result;
+     } catch (error: any) {
+       throw new Error(`OHLCV analysis failed: ${error.message}`);
+     }
+   }
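A similar sketch for predictingOHLCV() (illustrative only; it assumes the OHLCV type from "@/types" has the same { open, high, low, close, volume } shape that parseOHLCVResponse() returns, and reuses the ai client from the earlier sketch):

    const history: OHLCV[] = [
      { open: 100, high: 105, low: 98, close: 104, volume: 1200000 },
      { open: 104, high: 110, low: 103, close: 109, volume: 1350000 }
    ];
    // Ask for the next 3 candles; a low temperature keeps the output more stable.
    const forecast = await ai.predictingOHLCV(history, undefined, 3, { temperature: 0.2 });
    console.log(forecast.length); // 3, or the call throws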
+
+   /**
+    * Switch model
+    * @param modelType - New model type
+    */
+   setModel(modelType: DeepSeekModelType): void {
+     const modelConfig = DEEPSEEK_MODELS.get(modelType);
+     if (!modelConfig) {
+       throw new Error(`Unsupported model type: ${modelType}`);
+     }
+     this.modelType = modelType;
+   }
+
+   /**
+    * Get current model configuration
+    */
+   getCurrentModel(): { name: string; displayName: string; description?: string } {
+     const modelConfig = DEEPSEEK_MODELS.get(this.modelType);
+     if (!modelConfig) {
+       throw new Error(`Model configuration does not exist: ${this.modelType}`);
+     }
+     return {
+       name: modelConfig.name,
+       displayName: modelConfig.displayName,
+       description: modelConfig.description
+     };
+   }
+
+   /**
+    * Test connection
+    * @returns Connection test result
+    */
+   async testConnection(): Promise<{
+     success: boolean;
+     model: string;
+     response?: string;
+     error?: string;
+   }> {
+     try {
+       const response = await this.chat('Hello, respond with "OK" if you can hear me.');
+       return {
+         success: true,
+         model: this.modelType,
+         response: response
+       };
+     } catch (error: any) {
+       return {
+         success: false,
+         model: this.modelType,
+         error: error.message
+       };
+     }
+   }
+
+   private buildOpenAIRequest(
+     model: string,
+     messages: DeepSeekChatMessage[],
+     temperature: number,
+     maxTokens: number,
+     stream: boolean,
+     topP?: number,
+     frequencyPenalty?: number,
+     presencePenalty?: number,
+     stop?: string[],
+     tools?: any[],
+     toolChoice?: string | object
+   ): any {
+     const request: any = {
+       model,
+       messages,
+       temperature,
+       max_tokens: maxTokens,
+       stream
+     };
+     if (topP !== undefined) request.top_p = topP;
+     if (frequencyPenalty !== undefined) request.frequency_penalty = frequencyPenalty;
+     if (presencePenalty !== undefined) request.presence_penalty = presencePenalty;
+     if (stop) request.stop = stop;
+     if (tools) request.tools = tools;
+     if (toolChoice) request.tool_choice = toolChoice;
+     return request;
+   }
+
+   private async makeRequest(endpoint: string, data: any, stream: boolean): Promise<any> {
+     const controller = new AbortController();
+     const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+     try {
+       const response = await fetch(endpoint, {
+         method: 'POST',
+         headers: {
+           'Authorization': `Bearer ${this.apiKey}`,
+           'Content-Type': 'application/json; charset=utf-8',
+           'Accept': 'application/json'
+         },
+         body: JSON.stringify(data),
+         signal: controller.signal
+       });
+       clearTimeout(timeoutId);
+       if (!response.ok) {
+         const errorText = await response.text();
+         throw new Error(`HTTP ${response.status}: ${errorText}`);
+       }
+       if (stream) {
+         return response.body;
+       }
+       return await response.json();
+     } catch (error: any) {
+       clearTimeout(timeoutId);
+       if (error.name === 'AbortError') {
+         throw new Error(`Request timeout (${this.timeout}ms)`);
+       }
+       throw error;
+     }
+   }
+
+   private async makeStreamRequest(
+     endpoint: string,
+     data: any,
+     callback: DeepSeekStreamCallback
+   ): Promise<void> {
+     const response = await this.makeRequest(endpoint, data, true);
+     if (!response) {
+       throw new Error('Failed to get streaming response');
+     }
+     const reader = response.getReader();
+     const decoder = new TextDecoder('utf-8');
+     let buffer = '';
+     try {
+       while (true) {
+         const { done, value } = await reader.read();
+         if (done) {
+           callback('', true);
+           break;
+         }
+         buffer += decoder.decode(value, { stream: true });
+         const lines = buffer.split('\n');
+         buffer = lines.pop() || '';
+         for (const line of lines) {
+           if (line.startsWith('data: ')) {
+             const data = line.slice(6);
+             if (data === '[DONE]') {
+               callback('', true);
+               return;
+             }
+             try {
+               const parsed = JSON.parse(data);
+               if (parsed.choices?.[0]?.delta?.content) {
+                 callback(parsed.choices[0].delta.content, false);
+               }
+             } catch (e) {
+               // Ignore parsing errors
+             }
+           }
+         }
+       }
+     } finally {
+       reader.releaseLock();
+     }
+   }
+
+   private extractContent(response: any): string {
+     if (response.choices?.[0]?.message?.content) {
+       return response.choices[0].message.content;
+     } else if (response.output?.choices?.[0]?.message?.content) {
+       return response.output.choices[0].message.content;
+     } else if (response.output?.text) {
+       return response.output.text;
+     } else if (response.choices?.[0]?.delta?.content) {
+       return response.choices[0].delta.content;
+     } else {
+       throw new Error('Unable to parse response content');
+     }
+   }
+
+   private parseOHLCVResponse(content: string): OHLCV[] {
+     try {
+       const parsed = JSON.parse(content);
+       if (!Array.isArray(parsed)) {
+         throw new Error('Response is not in array format');
+       }
+       const result = parsed.map((item, index) => {
+         if (typeof item !== 'object' || item === null) {
+           throw new Error(`Element ${index} is not a valid object`);
+         }
+         const { open, high, low, close, volume } = item;
+         const requiredFields = ['open', 'high', 'low', 'close', 'volume'];
+         for (const field of requiredFields) {
+           if (typeof item[field] !== 'number' || isNaN(item[field])) {
+             throw new Error(`Element ${index} field ${field} is not a valid number`);
+           }
+         }
+         if (high < low) {
+           throw new Error(`Element ${index}: high cannot be lower than low`);
+         }
+         if (close < low || close > high) {
+           throw new Error(`Element ${index}: close must be between low and high`);
+         }
+         return {
+           open: Number(open),
+           high: Number(high),
+           low: Number(low),
+           close: Number(close),
+           volume: Number(volume)
+         };
+       });
+       return result;
+     } catch (error) {
+       const jsonMatch = content.match(/\[[\s\S]*\]/);
+       if (jsonMatch && jsonMatch[0] !== content) { // retry only on new input, avoiding infinite recursion
+         return this.parseOHLCVResponse(jsonMatch[0]);
+       }
+       throw new Error(`Unable to parse AI returned OHLCV data: ${error}\nOriginal content: ${content.substring(0, 200)}...`);
+     }
+   }
+ }
+
+ /**
+  * Factory function for quick instance creation
+  */
+ export function createDeepSeekAI(apiKey: string, modelType?: DeepSeekModelType): DeepSeekAI {
+   return new DeepSeekAI({ apiKey, modelType });
+ }
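To close the loop, a short end-to-end sketch combining the factory function with the streaming API (illustrative only; the import path assumes these symbols are re-exported from the package entry point, and the available model types ultimately come from DEEPSEEK_MODELS in ./model, which is outside this diff):

    import { createDeepSeekAI, DeepSeekChatMessage } from 'ohlcv-ai';

    async function main() {
      const ai = createDeepSeekAI(process.env.DEEPSEEK_API_KEY ?? '');
      const messages: DeepSeekChatMessage[] = [
        { role: 'user', content: 'Explain what OHLCV candles represent.' }
      ];
      let answer = '';
      // The callback receives incremental text chunks; isEnd marks the final call.
      await ai.chatStream(messages, (chunk, isEnd) => {
        answer += chunk;
        if (isEnd) console.log(answer);
      });
    }

    main().catch(console.error);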