ohlcv-ai 1.0.2 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,2051 @@
+ var v = /* @__PURE__ */ ((o) => (o.QWEN_TURBO = "qwen-turbo", o.QWEN_PLUS = "qwen-plus", o.QWEN_MAX = "qwen-max", o.QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext", o.QWEN_2_5B = "qwen2.5-0.5b", o.QWEN_2_5B_INSTRUCT = "qwen2.5-0.5b-instruct", o.QWEN_2_5B_7B = "qwen2.5-7b", o.QWEN_2_5B_7B_INSTRUCT = "qwen2.5-7b-instruct", o.QWEN_2_5B_14B = "qwen2.5-14b", o.QWEN_2_5B_14B_INSTRUCT = "qwen2.5-14b-instruct", o.QWEN_2_5B_32B = "qwen2.5-32b", o.QWEN_2_5B_32B_INSTRUCT = "qwen2.5-32b-instruct", o.QWEN_2_5B_72B = "qwen2.5-72b", o.QWEN_2_5B_72B_INSTRUCT = "qwen2.5-72b-instruct", o.QWEN_2_5B_CODER = "qwen2.5-coder", o.QWEN_2_5B_CODER_7B = "qwen2.5-coder-7b", o.QWEN_2_5B_CODER_14B = "qwen2.5-coder-14b", o.QWEN_2_5B_CODER_32B = "qwen2.5-coder-32b", o.QWEN_VL_LITE = "qwen-vl-lite", o.QWEN_VL_PLUS = "qwen-vl-plus", o.QWEN_VL_MAX = "qwen-vl-max", o.QWEN_AUDIO_TURBO = "qwen-audio-turbo", o.QWEN_AUDIO_CHAT = "qwen-audio-chat", o.QWEN_MATH_7B = "qwen-math-7b", o.LLAMA2_7B_CHAT_V2 = "llama2-7b-chat-v2", o.BAICHUAN2_7B_CHAT_V1 = "baichuan2-7b-chat-v1", o.QWEN_FINANCIAL = "qwen-financial", o.QWEN_FINANCIAL_14B = "qwen-financial-14b", o.QWEN_FINANCIAL_32B = "qwen-financial-32b", o.QWEN_MEDICAL = "qwen-medical", o.QWEN_MEDICAL_14B = "qwen-medical-14b", o.QWEN_MEDICAL_32B = "qwen-medical-32b", o.QWEN_OMNI = "qwen-omni", o.QWEN_OMNI_PRO = "qwen-omni-pro", o))(v || {});
+ const x = /* @__PURE__ */ new Map([
+ [
+ "qwen-turbo",
+ {
+ name: "qwen-turbo",
+ displayName: "Qwen-Turbo",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ endpoints: [
+ "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
+ ],
+ format: "openai",
+ description: "Lightweight version, fast response speed, suitable for general conversation scenarios",
+ maxTokens: 2e3,
+ contextLength: 8e3,
+ capabilities: ["text-generation", "chat"]
+ }
+ ],
+ [
+ "qwen-plus",
+ {
+ name: "qwen-plus",
+ displayName: "Qwen-Plus",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ endpoints: [
+ "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
+ ],
+ format: "openai",
+ description: "Enhanced version, suitable for complex tasks and long text processing",
+ maxTokens: 6e3,
+ contextLength: 32e3,
+ capabilities: ["text-generation", "chat", "reasoning"]
+ }
+ ],
+ [
+ "qwen-max",
+ {
+ name: "qwen-max",
+ displayName: "Qwen-Max",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ endpoints: [
+ "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
+ ],
+ format: "openai",
+ description: "Maximum version, strongest capabilities, suitable for high-demand professional tasks",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["text-generation", "chat", "reasoning", "coding", "analysis"]
+ }
+ ],
+ [
+ "qwen-max-longcontext",
+ {
+ name: "qwen-max-longcontext",
+ displayName: "Qwen-Max-LongContext",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ endpoints: [
+ "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
+ ],
+ format: "openai",
+ description: "Supports 128K long context, suitable for long document processing",
+ maxTokens: 8e3,
+ contextLength: 128e3,
+ capabilities: ["text-generation", "chat", "document-analysis"]
+ }
+ ],
+ // Qwen2.5 series models
+ [
+ "qwen2.5-0.5b",
+ {
+ name: "qwen2.5-0.5b",
+ displayName: "Qwen2.5-0.5B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Ultra-lightweight 0.5B parameter model for edge devices",
+ maxTokens: 4e3,
+ contextLength: 32e3,
+ capabilities: ["text-generation", "chat"]
+ }
+ ],
+ [
+ "qwen2.5-0.5b-instruct",
+ {
+ name: "qwen2.5-0.5b-instruct",
+ displayName: "Qwen2.5-0.5B-Instruct",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Instruction-tuned 0.5B model for specific tasks",
+ maxTokens: 4e3,
+ contextLength: 32e3,
+ capabilities: ["instruction-following", "chat"]
+ }
+ ],
+ [
+ "qwen2.5-7b",
+ {
+ name: "qwen2.5-7b",
+ displayName: "Qwen2.5-7B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "7B parameter base model, balanced performance and efficiency",
+ maxTokens: 6e3,
+ contextLength: 32e3,
+ capabilities: ["text-generation", "reasoning"]
+ }
+ ],
+ [
+ "qwen2.5-7b-instruct",
+ {
+ name: "qwen2.5-7b-instruct",
+ displayName: "Qwen2.5-7B-Instruct",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Instruction-tuned 7B model for chat and tasks",
+ maxTokens: 6e3,
+ contextLength: 32e3,
+ capabilities: ["chat", "instruction-following", "coding"]
+ }
+ ],
+ [
+ "qwen2.5-14b",
+ {
+ name: "qwen2.5-14b",
+ displayName: "Qwen2.5-14B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "14B parameter model with enhanced capabilities",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["text-generation", "analysis", "reasoning"]
+ }
+ ],
+ [
+ "qwen2.5-32b",
+ {
+ name: "qwen2.5-32b",
+ displayName: "Qwen2.5-32B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "32B parameter high-performance model",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["text-generation", "complex-reasoning", "analysis"]
+ }
+ ],
+ [
+ "qwen2.5-72b",
+ {
+ name: "qwen2.5-72b",
+ displayName: "Qwen2.5-72B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "72B parameter state-of-the-art model",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["text-generation", "expert-analysis", "research"]
+ }
+ ],
+ // Qwen2.5 Coder series
+ [
+ "qwen2.5-coder",
+ {
+ name: "qwen2.5-coder",
+ displayName: "Qwen2.5-Coder",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Specialized code generation model",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["code-generation", "code-explanation", "debugging"]
+ }
+ ],
+ [
+ "qwen2.5-coder-7b",
+ {
+ name: "qwen2.5-coder-7b",
+ displayName: "Qwen2.5-Coder-7B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "7B parameter code generation model",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["code-generation", "programming"]
+ }
+ ],
+ [
+ "qwen2.5-coder-14b",
+ {
+ name: "qwen2.5-coder-14b",
+ displayName: "Qwen2.5-Coder-14B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "14B parameter advanced code generation model",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["code-generation", "code-review", "optimization"]
+ }
+ ],
+ // Vision-Language models
+ [
+ "qwen-vl-lite",
+ {
+ name: "qwen-vl-lite",
+ displayName: "Qwen-VL-Lite",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Lightweight vision-language model for basic image understanding",
+ maxTokens: 2e3,
+ contextLength: 8e3,
+ capabilities: ["image-understanding", "visual-qa"]
+ }
+ ],
+ [
+ "qwen-vl-plus",
+ {
+ name: "qwen-vl-plus",
+ displayName: "Qwen-VL-Plus",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Vision-language model supporting image understanding",
+ maxTokens: 4e3,
+ contextLength: 32e3,
+ capabilities: ["image-understanding", "document-analysis", "visual-reasoning"]
+ }
+ ],
+ [
+ "qwen-vl-max",
+ {
+ name: "qwen-vl-max",
+ displayName: "Qwen-VL-Max",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Most powerful vision-language model",
+ maxTokens: 8e3,
+ contextLength: 32e3,
+ capabilities: ["image-understanding", "video-analysis", "multimodal-reasoning"]
+ }
+ ],
+ // Audio models
+ [
+ "qwen-audio-turbo",
+ {
+ name: "qwen-audio-turbo",
+ displayName: "Qwen-Audio-Turbo",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Fast audio processing and speech-to-text model",
+ maxTokens: 2e3,
+ contextLength: 8e3,
+ capabilities: ["speech-recognition", "audio-analysis"]
+ }
+ ],
+ [
+ "qwen-audio-chat",
+ {
+ name: "qwen-audio-chat",
+ displayName: "Qwen-Audio-Chat",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Audio conversation and processing model",
+ maxTokens: 4e3,
+ contextLength: 32e3,
+ capabilities: ["audio-chat", "voice-assistant", "speech-synthesis"]
+ }
+ ],
+ // Specialized models
+ [
+ "qwen-math-7b",
+ {
+ name: "qwen-math-7b",
+ displayName: "Qwen-Math-7B",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Specialized for mathematical reasoning and problem solving",
+ maxTokens: 4e3,
+ contextLength: 32e3,
+ capabilities: ["mathematical-reasoning", "problem-solving"]
+ }
+ ],
+ [
+ "llama2-7b-chat-v2",
+ {
+ name: "llama2-7b-chat-v2",
+ displayName: "LLaMA2-7B-Chat",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Meta's LLaMA2-7B model",
+ maxTokens: 2e3,
+ contextLength: 8e3,
+ capabilities: ["chat", "text-generation"]
+ }
+ ],
+ [
+ "baichuan2-7b-chat-v1",
+ {
+ name: "baichuan2-7b-chat-v1",
+ displayName: "Baichuan2-7B-Chat",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Baichuan AI's Baichuan2-7B model",
+ maxTokens: 2e3,
+ contextLength: 8e3,
+ capabilities: ["chat", "chinese-nlp"]
+ }
+ ],
+ [
+ "qwen-financial",
+ {
+ name: "qwen-financial",
+ displayName: "Qwen-Financial",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Specialized for financial analysis and market insights",
+ maxTokens: 6e3,
+ contextLength: 32e3,
+ capabilities: ["financial-analysis", "market-prediction", "risk-assessment"]
+ }
+ ],
+ [
+ "qwen-medical",
+ {
+ name: "qwen-medical",
+ displayName: "Qwen-Medical",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Specialized for medical consultation and health analysis",
+ maxTokens: 6e3,
+ contextLength: 32e3,
+ capabilities: ["medical-consultation", "health-analysis", "diagnostic-support"]
+ }
+ ],
+ // Omni models (multimodal)
+ [
+ "qwen-omni",
+ {
+ name: "qwen-omni",
+ displayName: "Qwen-Omni",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Omnidirectional multimodal model supporting text, image, audio",
+ maxTokens: 8e3,
+ contextLength: 64e3,
+ capabilities: ["text-generation", "image-understanding", "audio-processing", "multimodal"]
+ }
+ ],
+ [
+ "qwen-omni-pro",
+ {
+ name: "qwen-omni-pro",
+ displayName: "Qwen-Omni-Pro",
+ endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
+ format: "openai",
+ description: "Professional omnidirectional multimodal model with advanced capabilities",
+ maxTokens: 16e3,
+ contextLength: 128e3,
+ capabilities: ["text-generation", "multimodal", "complex-reasoning", "expert-analysis"]
+ }
+ ]
+ ]);
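
For orientation: `v` is a minified enum-like object of Qwen/DashScope model identifiers, and `x` maps each identifier to its config; literals such as `2e3` and `128e3` are exponent notation for 2,000 and 128,000. A de-minified sketch of one entry, with illustrative names that are not the package's actual exports:

  // Equivalent unminified shape of the "qwen-turbo" entry above (assumed names).
  const QwenModel = { QWEN_TURBO: "qwen-turbo" /* ... */ };
  const QWEN_CONFIGS = new Map([
    ["qwen-turbo", {
      name: "qwen-turbo",
      endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
      format: "openai",
      maxTokens: 2000,     // 2e3 in the minified source
      contextLength: 8000, // 8e3
      capabilities: ["text-generation", "chat"]
    }]
  ]);
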
+ class _ {
+ /**
+ * Constructor - Minimal configuration
+ * @param config.apiKey - API key (required)
+ * @param config.modelType - Model type, default qwen-turbo
+ * @param config.timeout - Timeout, default 30 seconds
+ */
+ constructor(e) {
+ if (this.apiKey = e.apiKey, this.modelType = e.modelType || v.QWEN_TURBO, this.timeout = e.timeout || 3e4, !this.apiKey)
+ throw new Error("API Key cannot be empty");
+ if (!x.get(this.modelType))
+ throw new Error(`Unsupported model type: ${this.modelType}`);
+ }
+ /**
+ * Simplest method: single conversation
+ * @param message - User message
+ * @param options - Chat options
+ * @returns AI response
+ */
+ async chat(e, t) {
+ const s = [];
+ t?.systemPrompt && s.push({ role: "system", content: t.systemPrompt }), s.push({ role: "user", content: e });
+ const a = await this.chatCompletion(s, {
+ temperature: t?.temperature,
+ maxTokens: t?.maxTokens,
+ stream: !1
+ });
+ return this.extractContent(a);
+ }
+ /**
+ * Multi-turn conversation
+ * @param messages - Message history
+ * @param options - Chat options
+ * @returns Complete API response
+ */
+ async chatCompletion(e, t) {
+ const s = t?.modelType || this.modelType, a = x.get(s);
+ if (!a)
+ throw new Error(`Unsupported model type: ${s}`);
+ const i = t?.temperature ?? 0.7, n = t?.maxTokens ?? 1e3, r = t?.stream ?? !1, m = a.endpoint, p = a.format === "openai" ? this.buildOpenAIRequest(a.name, e, i, n, r) : this.buildDashScopeRequest(a.name, e, i, n);
+ try {
+ return await this.makeRequest(m, p, r);
+ } catch (d) {
+ throw new Error(`Aliyun AI request failed: ${d.message}`);
+ }
+ }
+ /**
+ * Streaming conversation (only supports OpenAI format)
+ * @param messages - Message history
+ * @param callback - Streaming callback function
+ * @param options - Chat options
+ */
+ async chatStream(e, t, s) {
+ const a = s?.modelType || this.modelType, i = x.get(a);
+ if (!i)
+ throw new Error(`Unsupported model type: ${a}`);
+ if (i.format !== "openai")
+ throw new Error("Streaming conversation only supports OpenAI format models");
+ const n = s?.temperature ?? 0.7, r = s?.maxTokens ?? 1e3, m = this.buildOpenAIRequest(
+ i.name,
+ e,
+ n,
+ r,
+ !0
+ );
+ try {
+ await this.makeStreamRequest(i.endpoint, m, t);
+ } catch (c) {
+ throw new Error(`Streaming request failed: ${c.message}`);
+ }
+ }
+ /**
+ * Switch model
+ * @param modelType - New model type
+ */
+ setModel(e) {
+ if (!x.get(e))
+ throw new Error(`Unsupported model type: ${e}`);
+ this.modelType = e;
+ }
+ /**
+ * Get current model configuration
+ */
+ getCurrentModel() {
+ const e = x.get(this.modelType);
+ if (!e)
+ throw new Error(`Model configuration does not exist: ${this.modelType}`);
+ return {
+ name: e.name,
+ displayName: e.displayName,
+ description: e.description
+ };
+ }
+ /**
+ * Test connection
+ * @returns Connection test result
+ */
+ async testConnection() {
+ try {
+ const e = await this.chat('Hello, respond with "OK" if you can hear me.');
+ return {
+ success: !0,
+ model: this.modelType,
+ response: e
+ };
+ } catch (e) {
+ return {
+ success: !1,
+ model: this.modelType,
+ error: e.message
+ };
+ }
+ }
+ buildOpenAIRequest(e, t, s, a, i) {
+ return {
+ model: e,
+ messages: t,
+ temperature: s,
+ max_tokens: a,
+ stream: i
+ };
+ }
+ buildDashScopeRequest(e, t, s, a) {
+ return {
+ model: e,
+ input: { messages: t },
+ parameters: {
+ temperature: s,
+ max_tokens: a,
+ result_format: "message"
+ }
+ };
+ }
+ async makeRequest(e, t, s) {
+ const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
+ try {
+ const n = await fetch(e, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${this.apiKey}`,
+ "Content-Type": "application/json; charset=utf-8",
+ Accept: "application/json"
+ },
+ body: JSON.stringify(t),
+ signal: a.signal
+ });
+ if (clearTimeout(i), !n.ok) {
+ const r = await n.text();
+ throw new Error(`HTTP ${n.status}: ${r}`);
+ }
+ return s ? n.body : await n.json();
+ } catch (n) {
+ throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
+ }
+ }
+ async makeStreamRequest(e, t, s) {
+ const a = await this.makeRequest(e, t, !0);
+ if (!a)
+ throw new Error("Failed to get streaming response");
+ const i = a.getReader(), n = new TextDecoder("utf-8");
+ let r = "";
+ try {
+ for (; ; ) {
+ const { done: m, value: c } = await i.read();
+ if (m) {
+ s("", !0);
+ break;
+ }
+ r += n.decode(c, { stream: !0 });
+ const p = r.split(`
+ `);
+ r = p.pop() || "";
+ for (const d of p)
+ if (d.startsWith("data: ")) {
+ const l = d.slice(6);
+ if (l === "[DONE]") {
+ s("", !0);
+ return;
+ }
+ try {
+ const h = JSON.parse(l);
+ h.choices?.[0]?.delta?.content && s(h.choices[0].delta.content, !1);
+ } catch {
+ }
+ }
+ }
+ } finally {
+ i.releaseLock();
+ }
+ }
+ extractContent(e) {
+ if (e.choices?.[0]?.message?.content)
+ return e.choices[0].message.content;
+ if (e.output?.choices?.[0]?.message?.content)
+ return e.output.choices[0].message.content;
+ if (e.output?.text)
+ return e.output.text;
+ throw new Error("Unable to parse response content");
+ }
+ /**
+ * Specialized method for processing OHLCV arrays
+ * @param ohlcvArray - OHLCV data array
+ * @param instructions - Processing instructions, supports Chinese and English (optional, default: "Based on these OHLCV data, predict the next period")
+ * @param count - Number of OHLCV data items to return (optional, default: 1)
+ * @param options - Chat options
+ * @returns Predicted OHLCV array
+ */
+ async predictingOHLCV(e, t, s, a) {
+ const i = t || "Based on these OHLCV data, predict the next period", n = s || 1;
+ if (!Number.isInteger(n) || n <= 0)
+ throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
+ const r = 50;
+ if (n > r)
+ throw new Error(`Count parameter too large: ${n}. Maximum allowed is ${r}. Please reduce the count or split your request.`);
+ const m = n === 1 ? "Return EXACTLY 1 OHLCV object for the next period." : `Return EXACTLY ${n} consecutive OHLCV objects for the next ${n} periods.`, c = `You are a professional financial data analysis AI. The user will give you an array of OHLCV (Open, High, Low, Close, Volume) data.
+ Your task: ${i}
+ CRITICAL RULES:
+ 1. ${m}
+ 2. Return ONLY a JSON array of OHLCV objects, NO explanations, comments, or other text
+ 3. The OHLCV array format must match: [{open, high, low, close, volume}, ...]
+ 4. All numbers must be valid numbers
+ 5. Ensure technical rationality (high >= low, high >= close >= low, volume >= 0)
+ 6. Maintain consistency with historical trends and patterns
+ 7. For technical analysis, provide reasonable values based on typical patterns
+ 8. Do not include markdown formatting, only pure JSON
+ ${n === 1 ? `Example of valid response for 1 period:
+ [{"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000}]` : `Example of valid response for ${n} periods:
+ [
+ {"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000},
+ {"open": 117.5, "high": 120.0, "low": 116.0, "close": 119.0, "volume": 1400000}
+ ${n > 2 ? `,
+ ... ${n - 2} more OHLCV objects following the same pattern` : ""}
+ ]`}`, p = JSON.stringify(e, null, 2), d = `Here is the historical OHLCV data (${e.length} periods):
+ ${p}
+ Please process this data according to the system instructions. Remember to return EXACTLY ${n} OHLCV object(s) in a JSON array with no additional text.`, l = [
+ { role: "system", content: c },
+ { role: "user", content: d }
+ ];
+ try {
+ const h = n * 50 + 100, y = Math.max(a?.maxTokens || 1e3, h), E = await this.chatCompletion(l, {
+ temperature: a?.temperature || 0.3,
+ maxTokens: y,
+ stream: !1,
+ modelType: a?.modelType
+ }), b = this.extractContent(E), g = this.parseOHLCVResponse(b);
+ if (g.length !== n)
+ throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
+ return g;
+ } catch (h) {
+ throw new Error(`OHLCV analysis failed: ${h.message}`);
+ }
+ }
+ /**
+ * Parse AI returned OHLCV response
+ * @private
+ */
+ parseOHLCVResponse(e) {
+ try {
+ const t = JSON.parse(e);
+ if (!Array.isArray(t))
+ throw new Error("Response is not in array format");
+ return t.map((a, i) => {
+ if (typeof a != "object" || a === null)
+ throw new Error(`Element ${i} is not a valid object`);
+ const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
+ for (const l of d)
+ if (typeof a[l] != "number" || isNaN(a[l]))
+ throw new Error(`Element ${i} field ${l} is not a valid number`);
+ if (r < m)
+ throw new Error(`Element ${i}: high cannot be lower than low`);
+ if (c < m || c > r)
+ throw new Error(`Element ${i}: close must be between low and high`);
+ return {
+ open: Number(n),
+ high: Number(r),
+ low: Number(m),
+ close: Number(c),
+ volume: Number(p)
+ };
+ });
+ } catch (t) {
+ const s = e.match(/\[[\s\S]*\]/);
+ if (s)
+ return this.parseOHLCVResponse(s[0]);
+ throw new Error(`Unable to parse AI returned OHLCV data: ${t}
+ Original content: ${e.substring(0, 200)}...`);
+ }
+ }
+ }
+ function N(o, e) {
+ return new _({ apiKey: o, modelType: e });
+ }
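
`_` is the Aliyun/DashScope client class and `N` its factory; the bundle's public export names are not visible in this truncated diff, so the import below is an assumption. A minimal usage sketch:

  // Hypothetical usage; `createAliyunAI` is an assumed export name for `N`.
  import { createAliyunAI } from "ohlcv-ai";

  const client = createAliyunAI(process.env.DASHSCOPE_API_KEY, "qwen-turbo");
  const reply = await client.chat("Hello", { temperature: 0.5 });

  // predictingOHLCV(data, instructions?, count?, options?) asks the model for
  // exactly `count` {open, high, low, close, volume} objects (count must be a
  // positive integer <= 50) and validates high >= low and low <= close <= high
  // before returning the parsed array.
  const next = await client.predictingOHLCV(
    [{ open: 115.5, high: 118, low: 114, close: 117, volume: 1350000 }],
    undefined, // defaults to "Based on these OHLCV data, predict the next period"
    2
  );
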
+ var T = /* @__PURE__ */ ((o) => (o.DEEPSEEK_CHAT = "deepseek-chat", o.DEEPSEEK_CHAT_LITE = "deepseek-chat-lite", o.DEEPSEEK_CHAT_PRO = "deepseek-chat-pro", o.DEEPSEEK_CHAT_MAX = "deepseek-chat-max", o.DEEPSEEK_CODER = "deepseek-coder", o.DEEPSEEK_CODER_LITE = "deepseek-coder-lite", o.DEEPSEEK_CODER_PRO = "deepseek-coder-pro", o.DEEPSEEK_MATH = "deepseek-math", o.DEEPSEEK_MATH_PRO = "deepseek-math-pro", o.DEEPSEEK_REASONER = "deepseek-reasoner", o.DEEPSEEK_REASONER_PRO = "deepseek-reasoner-pro", o.DEEPSEEK_VISION = "deepseek-vision", o.DEEPSEEK_VISION_PRO = "deepseek-vision-pro", o.DEEPSEEK_FINANCE = "deepseek-finance", o.DEEPSEEK_LAW = "deepseek-law", o.DEEPSEEK_MEDICAL = "deepseek-medical", o.DEEPSEEK_RESEARCH = "deepseek-research", o.DEEPSEEK_OMNI = "deepseek-omni", o.DEEPSEEK_OMNI_PRO = "deepseek-omni-pro", o.DEEPSEEK_LLM = "deepseek-llm", o.DEEPSEEK_LLM_67B = "deepseek-llm-67b", o.DEEPSEEK_LLM_131B = "deepseek-llm-131b", o))(T || {});
+ const k = /* @__PURE__ */ new Map([
+ // Chat models
+ [
+ "deepseek-chat",
+ {
+ name: "deepseek-chat",
+ displayName: "DeepSeek Chat",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ endpoints: [
+ "https://api.deepseek.com/v1/chat/completions"
+ ],
+ format: "openai",
+ description: "General purpose chat model for everyday conversations and tasks",
+ maxTokens: 4096,
+ contextLength: 16e3,
+ capabilities: ["chat", "text-generation", "reasoning"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-chat-lite",
+ {
+ name: "deepseek-chat-lite",
+ displayName: "DeepSeek Chat Lite",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Lightweight chat model optimized for speed and efficiency",
+ maxTokens: 2048,
+ contextLength: 8e3,
+ capabilities: ["chat", "text-generation"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-chat-pro",
+ {
+ name: "deepseek-chat-pro",
+ displayName: "DeepSeek Chat Pro",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Professional chat model with enhanced reasoning capabilities",
+ maxTokens: 8192,
+ contextLength: 32e3,
+ capabilities: ["chat", "text-generation", "complex-reasoning", "analysis"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-chat-max",
+ {
+ name: "deepseek-chat-max",
+ displayName: "DeepSeek Chat Max",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Maximum capability chat model for most demanding tasks",
+ maxTokens: 16384,
+ contextLength: 64e3,
+ capabilities: ["chat", "text-generation", "expert-analysis", "research"],
+ version: "2025-01"
+ }
+ ],
+ // Coder models
+ [
+ "deepseek-coder",
+ {
+ name: "deepseek-coder",
+ displayName: "DeepSeek Coder",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Specialized model for code generation and programming tasks",
+ maxTokens: 16384,
+ contextLength: 64e3,
+ capabilities: ["code-generation", "programming", "debugging", "code-review"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-coder-lite",
+ {
+ name: "deepseek-coder-lite",
+ displayName: "DeepSeek Coder Lite",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Lightweight code generation model",
+ maxTokens: 4096,
+ contextLength: 16e3,
+ capabilities: ["code-generation", "programming"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-coder-pro",
+ {
+ name: "deepseek-coder-pro",
+ displayName: "DeepSeek Coder Pro",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Professional code generation model with advanced features",
+ maxTokens: 32768,
+ contextLength: 128e3,
+ capabilities: ["code-generation", "programming", "system-design", "architecture"],
+ version: "2025-01"
+ }
+ ],
+ // Math models
+ [
+ "deepseek-math",
+ {
+ name: "deepseek-math",
+ displayName: "DeepSeek Math",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Specialized model for mathematical reasoning and problem solving",
+ maxTokens: 8192,
+ contextLength: 32e3,
+ capabilities: ["mathematical-reasoning", "problem-solving", "calculations"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-math-pro",
+ {
+ name: "deepseek-math-pro",
+ displayName: "DeepSeek Math Pro",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Advanced mathematical reasoning model for complex problems",
+ maxTokens: 16384,
+ contextLength: 64e3,
+ capabilities: ["mathematical-reasoning", "advanced-calculus", "statistics"],
+ version: "2025-01"
+ }
+ ],
+ // Reasoning models
+ [
+ "deepseek-reasoner",
+ {
+ name: "deepseek-reasoner",
+ displayName: "DeepSeek Reasoner",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Dedicated reasoning model for logical analysis",
+ maxTokens: 8192,
+ contextLength: 32e3,
+ capabilities: ["logical-reasoning", "analysis", "decision-making"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-reasoner-pro",
+ {
+ name: "deepseek-reasoner-pro",
+ displayName: "DeepSeek Reasoner Pro",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Advanced reasoning model for complex logical problems",
+ maxTokens: 16384,
+ contextLength: 64e3,
+ capabilities: ["complex-reasoning", "scientific-analysis", "research"],
+ version: "2025-01"
+ }
+ ],
+ // Vision models
+ [
+ "deepseek-vision",
+ {
+ name: "deepseek-vision",
+ displayName: "DeepSeek Vision",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Vision model for image understanding and analysis",
+ maxTokens: 4096,
+ contextLength: 16e3,
+ capabilities: ["image-understanding", "visual-qa", "document-analysis"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-vision-pro",
+ {
+ name: "deepseek-vision-pro",
+ displayName: "DeepSeek Vision Pro",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Advanced vision model for complex visual tasks",
+ maxTokens: 8192,
+ contextLength: 32e3,
+ capabilities: ["image-understanding", "video-analysis", "visual-reasoning"],
+ version: "2025-01"
+ }
+ ],
+ // Specialized models
+ [
+ "deepseek-finance",
+ {
+ name: "deepseek-finance",
+ displayName: "DeepSeek Finance",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Specialized for financial analysis, market prediction, and investment insights",
+ maxTokens: 8192,
+ contextLength: 32e3,
+ capabilities: ["financial-analysis", "market-prediction", "risk-assessment", "investment-advice"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-law",
+ {
+ name: "deepseek-law",
+ displayName: "DeepSeek Law",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Specialized for legal analysis, contract review, and legal research",
+ maxTokens: 16384,
+ contextLength: 64e3,
+ capabilities: ["legal-analysis", "contract-review", "legal-research"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-medical",
+ {
+ name: "deepseek-medical",
+ displayName: "DeepSeek Medical",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Specialized for medical consultation, diagnosis support, and health analysis",
+ maxTokens: 8192,
+ contextLength: 32e3,
+ capabilities: ["medical-consultation", "diagnostic-support", "health-analysis"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-research",
+ {
+ name: "deepseek-research",
+ displayName: "DeepSeek Research",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Specialized for academic research and scientific analysis",
+ maxTokens: 32768,
+ contextLength: 128e3,
+ capabilities: ["academic-research", "scientific-analysis", "paper-writing"],
+ version: "2025-01"
+ }
+ ],
+ // Multimodal models
+ [
+ "deepseek-omni",
+ {
+ name: "deepseek-omni",
+ displayName: "DeepSeek Omni",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Multimodal model supporting text, image, and audio",
+ maxTokens: 16384,
+ contextLength: 64e3,
+ capabilities: ["text-generation", "image-understanding", "audio-processing", "multimodal"],
+ version: "2025-01"
+ }
+ ],
+ [
+ "deepseek-omni-pro",
+ {
+ name: "deepseek-omni-pro",
+ displayName: "DeepSeek Omni Pro",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Professional multimodal model with advanced capabilities",
+ maxTokens: 32768,
+ contextLength: 128e3,
+ capabilities: ["text-generation", "multimodal", "complex-reasoning", "expert-analysis"],
+ version: "2025-01"
+ }
+ ],
+ // Legacy models
+ [
+ "deepseek-llm",
+ {
+ name: "deepseek-llm",
+ displayName: "DeepSeek LLM",
+ endpoint: "https://api.deepseek.com/v1/chat/completions",
+ format: "openai",
+ description: "Base large language model",
+ maxTokens: 4096,
+ contextLength: 16e3,
+ capabilities: ["text-generation"],
+ version: "2024-12"
+ }
+ ]
+ ]);
+ class C {
+ /**
+ * Constructor - Minimal configuration
+ * @param config.apiKey - API key (required)
+ * @param config.modelType - Model type, default deepseek-chat
+ * @param config.timeout - Timeout, default 30 seconds
+ * @param config.baseURL - Base URL for API, default official endpoint
+ */
+ constructor(e) {
+ if (this.apiKey = e.apiKey, this.modelType = e.modelType || T.DEEPSEEK_CHAT, this.timeout = e.timeout || 3e4, this.baseURL = e.baseURL || "https://api.deepseek.com", !this.apiKey)
+ throw new Error("API Key cannot be empty");
+ if (!k.get(this.modelType))
+ throw new Error(`Unsupported model type: ${this.modelType}`);
+ }
+ /**
+ * Simplest method: single conversation
+ * @param message - User message
+ * @param options - Chat options
+ * @returns AI response
+ */
+ async chat(e, t) {
+ const s = [];
+ t?.systemPrompt && s.push({ role: "system", content: t.systemPrompt }), s.push({ role: "user", content: e });
+ const a = await this.chatCompletion(s, {
+ temperature: t?.temperature,
+ maxTokens: t?.maxTokens,
+ stream: !1,
+ modelType: t?.modelType,
+ topP: t?.topP,
+ frequencyPenalty: t?.frequencyPenalty,
+ presencePenalty: t?.presencePenalty,
+ stop: t?.stop,
+ tools: t?.tools,
+ toolChoice: t?.toolChoice
+ });
+ return this.extractContent(a);
+ }
+ /**
+ * Multi-turn conversation
+ * @param messages - Message history
+ * @param options - Chat options
+ * @returns Complete API response
+ */
+ async chatCompletion(e, t) {
+ const s = t?.modelType || this.modelType, a = k.get(s);
+ if (!a)
+ throw new Error(`Unsupported model type: ${s}`);
+ const i = t?.temperature ?? 0.7, n = t?.maxTokens ?? 2e3, r = t?.stream ?? !1, m = t?.topP ?? 1, c = t?.frequencyPenalty ?? 0, p = t?.presencePenalty ?? 0, d = t?.stop, l = t?.tools, h = t?.toolChoice, y = a.endpoint, E = this.buildOpenAIRequest(
+ a.name,
+ e,
+ i,
+ n,
+ r,
+ m,
+ c,
+ p,
+ d,
+ l,
+ h
+ );
+ try {
+ return await this.makeRequest(y, E, r);
+ } catch (b) {
+ throw new Error(`DeepSeek AI request failed: ${b.message}`);
+ }
+ }
+ /**
+ * Streaming conversation
+ * @param messages - Message history
+ * @param callback - Streaming callback function
+ * @param options - Chat options
+ */
+ async chatStream(e, t, s) {
+ const a = s?.modelType || this.modelType, i = k.get(a);
+ if (!i)
+ throw new Error(`Unsupported model type: ${a}`);
+ const n = s?.temperature ?? 0.7, r = s?.maxTokens ?? 2e3, m = s?.topP ?? 1, c = s?.frequencyPenalty ?? 0, p = s?.presencePenalty ?? 0, d = this.buildOpenAIRequest(
+ i.name,
+ e,
+ n,
+ r,
+ !0,
+ m,
+ c,
+ p,
+ s?.stop,
+ s?.tools,
+ s?.toolChoice
+ );
+ try {
+ await this.makeStreamRequest(i.endpoint, d, t);
+ } catch (l) {
+ throw new Error(`Streaming request failed: ${l.message}`);
+ }
+ }
+ /**
+ * Specialized method for processing OHLCV arrays
+ * @param ohlcvArray - OHLCV data array
+ * @param instructions - Processing instructions (optional)
+ * @param count - Number of OHLCV data items to return (optional, default: 1)
+ * @param options - Chat options
+ * @returns Predicted OHLCV array
+ */
+ async predictingOHLCV(e, t, s, a) {
+ const i = t || "Based on these OHLCV data, predict the next period", n = s || 1;
+ if (!Number.isInteger(n) || n <= 0)
+ throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
+ const r = 50;
+ if (n > r)
+ throw new Error(`Count parameter too large: ${n}. Maximum allowed is ${r}.`);
+ const m = n === 1 ? "Return EXACTLY 1 OHLCV object for the next period." : `Return EXACTLY ${n} consecutive OHLCV objects for the next ${n} periods.`, c = `You are a professional financial data analysis AI. The user will give you an array of OHLCV (Open, High, Low, Close, Volume) data.
+ Your task: ${i}
+ CRITICAL RULES:
+ 1. ${m}
+ 2. Return ONLY a JSON array of OHLCV objects, NO explanations, comments, or other text
+ 3. The OHLCV array format must match: [{open, high, low, close, volume}, ...]
+ 4. All numbers must be valid numbers
+ 5. Ensure technical rationality (high >= low, high >= close >= low, volume >= 0)
+ 6. Maintain consistency with historical trends and patterns
+ 7. For technical analysis, provide reasonable values based on typical patterns
+ 8. Do not include markdown formatting, only pure JSON
+
+ ${n === 1 ? `Example of valid response for 1 period:
+ [{"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000}]` : `Example of valid response for ${n} periods:
+ [
+ {"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000},
+ {"open": 117.5, "high": 120.0, "low": 116.0, "close": 119.0, "volume": 1400000}
+ ${n > 2 ? `,
+ ... ${n - 2} more OHLCV objects following the same pattern` : ""}
+ ]`}`, p = JSON.stringify(e, null, 2), d = `Here is the historical OHLCV data (${e.length} periods):
+ ${p}
+ Please process this data according to the system instructions. Remember to return EXACTLY ${n} OHLCV object(s) in a JSON array with no additional text.`, l = [
+ { role: "system", content: c },
+ { role: "user", content: d }
+ ];
+ try {
+ const h = n * 50 + 100, y = Math.max(a?.maxTokens || 2e3, h), E = await this.chatCompletion(l, {
+ temperature: a?.temperature || 0.3,
+ maxTokens: y,
+ stream: !1,
+ modelType: a?.modelType || T.DEEPSEEK_FINANCE,
+ topP: a?.topP,
+ frequencyPenalty: a?.frequencyPenalty,
+ presencePenalty: a?.presencePenalty
+ }), b = this.extractContent(E), g = this.parseOHLCVResponse(b);
+ if (g.length !== n)
+ throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
+ return g;
+ } catch (h) {
+ throw new Error(`OHLCV analysis failed: ${h.message}`);
+ }
+ }
+ /**
+ * Switch model
+ * @param modelType - New model type
+ */
+ setModel(e) {
+ if (!k.get(e))
+ throw new Error(`Unsupported model type: ${e}`);
+ this.modelType = e;
+ }
+ /**
+ * Get current model configuration
+ */
+ getCurrentModel() {
+ const e = k.get(this.modelType);
+ if (!e)
+ throw new Error(`Model configuration does not exist: ${this.modelType}`);
+ return {
+ name: e.name,
+ displayName: e.displayName,
+ description: e.description
+ };
+ }
+ /**
+ * Test connection
+ * @returns Connection test result
+ */
+ async testConnection() {
+ try {
+ const e = await this.chat('Hello, respond with "OK" if you can hear me.');
+ return {
+ success: !0,
+ model: this.modelType,
+ response: e
+ };
+ } catch (e) {
+ return {
+ success: !1,
+ model: this.modelType,
+ error: e.message
+ };
+ }
+ }
+ buildOpenAIRequest(e, t, s, a, i, n, r, m, c, p, d) {
+ const l = {
+ model: e,
+ messages: t,
+ temperature: s,
+ max_tokens: a,
+ stream: i
+ };
+ return n !== void 0 && (l.top_p = n), r !== void 0 && (l.frequency_penalty = r), m !== void 0 && (l.presence_penalty = m), c && (l.stop = c), p && (l.tools = p), d && (l.tool_choice = d), l;
+ }
+ async makeRequest(e, t, s) {
+ const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
+ try {
+ const n = await fetch(e, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${this.apiKey}`,
+ "Content-Type": "application/json; charset=utf-8",
+ Accept: "application/json"
+ },
+ body: JSON.stringify(t),
+ signal: a.signal
+ });
+ if (clearTimeout(i), !n.ok) {
+ const r = await n.text();
+ throw new Error(`HTTP ${n.status}: ${r}`);
+ }
+ return s ? n.body : await n.json();
+ } catch (n) {
+ throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
+ }
+ }
+ async makeStreamRequest(e, t, s) {
+ const a = await this.makeRequest(e, t, !0);
+ if (!a)
+ throw new Error("Failed to get streaming response");
+ const i = a.getReader(), n = new TextDecoder("utf-8");
+ let r = "";
+ try {
+ for (; ; ) {
+ const { done: m, value: c } = await i.read();
+ if (m) {
+ s("", !0);
+ break;
+ }
+ r += n.decode(c, { stream: !0 });
+ const p = r.split(`
+ `);
+ r = p.pop() || "";
+ for (const d of p)
+ if (d.startsWith("data: ")) {
+ const l = d.slice(6);
+ if (l === "[DONE]") {
+ s("", !0);
+ return;
+ }
+ try {
+ const h = JSON.parse(l);
+ h.choices?.[0]?.delta?.content && s(h.choices[0].delta.content, !1);
+ } catch {
+ }
+ }
+ }
+ } finally {
+ i.releaseLock();
+ }
+ }
+ extractContent(e) {
+ if (e.choices?.[0]?.message?.content)
+ return e.choices[0].message.content;
+ if (e.output?.choices?.[0]?.message?.content)
+ return e.output.choices[0].message.content;
+ if (e.output?.text)
+ return e.output.text;
+ if (e.choices?.[0]?.delta?.content)
+ return e.choices[0].delta.content;
+ throw new Error("Unable to parse response content");
+ }
+ parseOHLCVResponse(e) {
+ try {
+ const t = JSON.parse(e);
+ if (!Array.isArray(t))
+ throw new Error("Response is not in array format");
+ return t.map((a, i) => {
+ if (typeof a != "object" || a === null)
+ throw new Error(`Element ${i} is not a valid object`);
+ const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
+ for (const l of d)
+ if (typeof a[l] != "number" || isNaN(a[l]))
+ throw new Error(`Element ${i} field ${l} is not a valid number`);
+ if (r < m)
+ throw new Error(`Element ${i}: high cannot be lower than low`);
+ if (c < m || c > r)
+ throw new Error(`Element ${i}: close must be between low and high`);
+ return {
+ open: Number(n),
+ high: Number(r),
+ low: Number(m),
+ close: Number(c),
+ volume: Number(p)
+ };
+ });
+ } catch (t) {
+ const s = e.match(/\[[\s\S]*\]/);
+ if (s)
+ return this.parseOHLCVResponse(s[0]);
+ throw new Error(`Unable to parse AI returned OHLCV data: ${t}
+ Original content: ${e.substring(0, 200)}...`);
+ }
+ }
+ }
+ function P(o, e) {
+ return new C({ apiKey: o, modelType: e });
+ }
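
`C` mirrors the Aliyun client for DeepSeek, adding OpenAI-style sampling controls (top_p, frequency/presence penalties, stop, tools, tool_choice); unlike the Aliyun version, its `predictingOHLCV` falls back to the `deepseek-finance` model when no `modelType` option is given. A streaming sketch, with an assumed export name for the factory `P`:

  // Hypothetical usage; `createDeepSeekAI` is an assumed export name for `P`.
  import { createDeepSeekAI } from "ohlcv-ai";

  const ds = createDeepSeekAI(process.env.DEEPSEEK_API_KEY, "deepseek-chat");
  // The callback receives (chunk, done): text deltas parsed from SSE "data:"
  // lines, then ("", true) on "[DONE]" or when the stream ends.
  await ds.chatStream(
    [{ role: "user", content: "Summarize what OHLCV bars represent." }],
    (chunk, done) => { if (!done) process.stdout.write(chunk); }
  );
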
+ var f = /* @__PURE__ */ ((o) => (o.GPT4 = "gpt-4", o.GPT4_0314 = "gpt-4-0314", o.GPT4_0613 = "gpt-4-0613", o.GPT4_32K = "gpt-4-32k", o.GPT4_32K_0314 = "gpt-4-32k-0314", o.GPT4_32K_0613 = "gpt-4-32k-0613", o.GPT4_TURBO = "gpt-4-turbo", o.GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview", o.GPT4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09", o.GPT4_OMNI = "gpt-4o", o.GPT4_OMNI_2024_05_13 = "gpt-4o-2024-05-13", o.GPT4_OMNI_MINI = "gpt-4o-mini", o.GPT4_OMNI_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18", o.GPT3_5_TURBO = "gpt-3.5-turbo", o.GPT3_5_TURBO_0125 = "gpt-3.5-turbo-0125", o.GPT3_5_TURBO_1106 = "gpt-3.5-turbo-1106", o.GPT3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct", o.GPT3_5_TURBO_16K = "gpt-3.5-turbo-16k", o.GPT3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613", o.DAVINCI_002 = "davinci-002", o.BABBAGE_002 = "babbage-002", o.TEXT_DAVINCI_003 = "text-davinci-003", o.TEXT_DAVINCI_002 = "text-davinci-002", o.TEXT_DAVINCI_001 = "text-davinci-001", o.TEXT_CURIE_001 = "text-curie-001", o.TEXT_BABBAGE_001 = "text-babbage-001", o.TEXT_ADA_001 = "text-ada-001", o.TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002", o.TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small", o.TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large", o.DALL_E_2 = "dall-e-2", o.DALL_E_3 = "dall-e-3", o.WHISPER_1 = "whisper-1", o.TTS_1 = "tts-1", o.TTS_1_HD = "tts-1-hd", o.MODERATION_LATEST = "text-moderation-latest", o.MODERATION_STABLE = "text-moderation-stable", o.GPT3_5_TURBO_FINETUNED = "ft:gpt-3.5-turbo-0125:personal:", o.GPT4_FINETUNED = "ft:gpt-4-0125-preview:personal:", o.GPT4_VISION_PREVIEW = "gpt-4-vision-preview", o))(f || {});
+ const u = /* @__PURE__ */ new Map([
+ // GPT-4 Series
+ [
+ "gpt-4",
+ {
+ name: "gpt-4",
+ displayName: "GPT-4",
+ endpoint: "https://api.openai.com/v1/chat/completions",
+ format: "openai",
+ description: "Powerful multi-purpose model for complex tasks",
+ maxTokens: 8192,
+ contextLength: 8192,
+ capabilities: ["chat", "text-generation", "reasoning", "analysis"],
+ inputCostPer1KTokens: 0.03,
+ outputCostPer1KTokens: 0.06,
+ supportedFeatures: ["chat", "function-calling"]
+ }
+ ],
+ [
+ "gpt-4-turbo",
+ {
+ name: "gpt-4-turbo",
+ displayName: "GPT-4 Turbo",
+ endpoint: "https://api.openai.com/v1/chat/completions",
+ format: "openai",
+ description: "Enhanced GPT-4 with 128K context, knowledge cutoff April 2023",
+ maxTokens: 4096,
+ contextLength: 128e3,
+ capabilities: ["chat", "text-generation", "reasoning", "analysis", "vision"],
+ inputCostPer1KTokens: 0.01,
+ outputCostPer1KTokens: 0.03,
+ supportedFeatures: ["chat", "function-calling", "vision", "json-mode"]
+ }
+ ],
+ [
+ "gpt-4o",
+ {
+ name: "gpt-4o",
+ displayName: "GPT-4o",
+ endpoint: "https://api.openai.com/v1/chat/completions",
+ format: "openai",
+ description: "Versatile model supporting text, images, audio with fast response",
+ maxTokens: 4096,
+ contextLength: 128e3,
+ capabilities: ["chat", "text-generation", "vision", "audio-processing", "multimodal"],
+ inputCostPer1KTokens: 5e-3,
+ outputCostPer1KTokens: 0.015,
+ supportedFeatures: ["chat", "function-calling", "vision", "audio", "json-mode"]
+ }
+ ],
+ [
+ "gpt-4o-mini",
+ {
+ name: "gpt-4o-mini",
+ displayName: "GPT-4o Mini",
+ endpoint: "https://api.openai.com/v1/chat/completions",
+ format: "openai",
+ description: "Compact and efficient version of GPT-4o with lower cost",
+ maxTokens: 16384,
+ contextLength: 128e3,
+ capabilities: ["chat", "text-generation", "vision"],
+ inputCostPer1KTokens: 15e-5,
+ outputCostPer1KTokens: 6e-4,
+ supportedFeatures: ["chat", "function-calling", "vision", "json-mode"]
+ }
+ ],
+ // GPT-3.5 Series
+ [
+ "gpt-3.5-turbo",
+ {
+ name: "gpt-3.5-turbo",
+ displayName: "GPT-3.5 Turbo",
+ endpoint: "https://api.openai.com/v1/chat/completions",
+ format: "openai",
+ description: "Fast and cost-effective, suitable for most conversational tasks",
+ maxTokens: 4096,
+ contextLength: 16385,
+ capabilities: ["chat", "text-generation", "code-generation"],
+ inputCostPer1KTokens: 5e-4,
+ outputCostPer1KTokens: 15e-4,
+ supportedFeatures: ["chat", "function-calling"]
+ }
+ ],
+ [
+ "gpt-3.5-turbo-instruct",
+ {
+ name: "gpt-3.5-turbo-instruct",
+ displayName: "GPT-3.5 Turbo Instruct",
+ endpoint: "https://api.openai.com/v1/completions",
+ format: "openai",
+ description: "Instruction-tuned version for text completion tasks",
+ maxTokens: 4096,
+ contextLength: 4097,
+ capabilities: ["text-completion", "instruction-following"],
+ inputCostPer1KTokens: 15e-4,
+ outputCostPer1KTokens: 2e-3,
+ supportedFeatures: ["completions"]
+ }
+ ],
+ // Embedding Models
+ [
+ "text-embedding-ada-002",
+ {
+ name: "text-embedding-ada-002",
+ displayName: "Text Embedding Ada 002",
+ endpoint: "https://api.openai.com/v1/embeddings",
+ format: "openai",
+ description: "Text embedding model, 1536 dimensions, suitable for retrieval and similarity",
+ contextLength: 8191,
+ capabilities: ["embeddings", "semantic-search"],
+ inputCostPer1KTokens: 1e-4,
+ supportedFeatures: ["embeddings"]
+ }
+ ],
+ [
+ "text-embedding-3-small",
+ {
+ name: "text-embedding-3-small",
+ displayName: "Text Embedding 3 Small",
+ endpoint: "https://api.openai.com/v1/embeddings",
+ format: "openai",
+ description: "Small text embedding model, 1536 dimensions, balance of performance and cost",
+ contextLength: 8191,
+ capabilities: ["embeddings", "semantic-search"],
+ inputCostPer1KTokens: 2e-5,
+ supportedFeatures: ["embeddings"]
+ }
+ ],
+ // DALL-E Image Generation
+ [
+ "dall-e-3",
+ {
+ name: "dall-e-3",
+ displayName: "DALL-E 3",
+ endpoint: "https://api.openai.com/v1/images/generations",
+ format: "openai",
+ description: "Advanced image generation model producing high-quality, high-resolution images",
+ capabilities: ["image-generation", "creative-design"],
+ inputCostPer1KTokens: 0.04,
+ // Cost per image
+ supportedFeatures: ["image-generation", "variations", "edits"]
+ }
+ ],
+ // Whisper Speech Recognition
+ [
+ "whisper-1",
+ {
+ name: "whisper-1",
+ displayName: "Whisper",
+ endpoint: "https://api.openai.com/v1/audio/transcriptions",
+ format: "openai",
+ description: "Speech recognition model supporting multilingual transcription and translation",
+ capabilities: ["speech-recognition", "audio-transcription", "translation"],
+ inputCostPer1KTokens: 6e-3,
+ // Cost per minute of audio
+ supportedFeatures: ["transcriptions", "translations"]
+ }
+ ],
+ // TTS Text-to-Speech
+ [
+ "tts-1-hd",
+ {
+ name: "tts-1-hd",
+ displayName: "TTS-1 HD",
+ endpoint: "https://api.openai.com/v1/audio/speech",
+ format: "openai",
+ description: "High-quality text-to-speech with multiple voice options",
+ capabilities: ["speech-synthesis", "text-to-speech"],
+ inputCostPer1KTokens: 0.015,
+ // Cost per thousand characters
+ supportedFeatures: ["speech", "voice-selection"]
+ }
+ ],
+ // Moderation Models
+ [
+ "text-moderation-latest",
+ {
+ name: "text-moderation-latest",
+ displayName: "Moderation Latest",
+ endpoint: "https://api.openai.com/v1/moderations",
+ format: "openai",
+ description: "Content moderation model for detecting harmful content",
+ capabilities: ["content-moderation", "safety"],
+ inputCostPer1KTokens: 1e-4,
+ supportedFeatures: ["moderation"]
+ }
+ ]
+ ]);
+ function q(o) {
+ return u.get(o);
+ }
+ function w() {
+ return Array.from(u.values());
+ }
+ function O(o) {
+ for (const e of u.values())
+ if (e.name === o)
+ return e;
+ }
+ function $() {
+ return Array.from(u.keys());
+ }
+ function R() {
+ return w().filter(
+ (o) => o.capabilities.includes("chat")
+ );
+ }
+ function S() {
+ return w().filter(
+ (o) => o.capabilities.includes("text-completion")
+ );
+ }
+ function D() {
+ return w().filter(
+ (o) => o.capabilities.includes("embeddings")
+ );
+ }
+ function I() {
+ return w().filter(
+ (o) => o.capabilities.includes("vision") || o.capabilities.includes("image-generation")
+ );
+ }
+ function A() {
+ return w().filter(
+ (o) => o.capabilities.includes("audio-processing") || o.capabilities.includes("speech-recognition") || o.capabilities.includes("speech-synthesis")
+ );
+ }
+ function B() {
+ return w().filter(
+ (o) => o.capabilities.includes("multimodal")
+ );
+ }
+ function H() {
+ const o = [
+ "gpt-4o",
+ "gpt-4o-mini",
+ "gpt-4-turbo",
+ "gpt-3.5-turbo",
+ "text-embedding-3-small",
+ "dall-e-3"
+ /* DALL_E_3 */
+ ];
+ return w().filter(
+ (e) => o.includes(e.name)
+ );
+ }
+ function K() {
+ return w().filter((o) => o.inputCostPer1KTokens && o.inputCostPer1KTokens < 1e-3).sort((o, e) => (o.inputCostPer1KTokens || 0) - (e.inputCostPer1KTokens || 0));
+ }
+ function V() {
+ return w().filter((o) => o.contextLength && o.contextLength >= 128e3).sort((o, e) => (e.contextLength || 0) - (o.contextLength || 0));
+ }
+ function U(o, e, t = 0) {
+ const s = (o.inputCostPer1KTokens || 0) / 1e3 * e, a = (o.outputCostPer1KTokens || 0) / 1e3 * t;
+ return {
+ inputTokens: e,
+ outputTokens: t,
+ inputCost: s,
+ outputCost: a,
+ totalCost: s + a
+ };
+ }
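
`U` estimates request cost from the per-1K-token prices in a model config, hence the division by `1e3`. Worked example against the `gpt-4o` entry above (0.005 input / 0.015 output per 1K tokens), using the internal minified names purely for illustration:

  // 2,000 input tokens and 500 output tokens on gpt-4o:
  //   inputCost  = 0.005 / 1000 * 2000 = 0.01
  //   outputCost = 0.015 / 1000 * 500  = 0.0075
  //   totalCost  = 0.0175
  const est = U(u.get("gpt-4o"), 2000, 500); // { totalCost: 0.0175, ... }
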
+ function Q(o) {
+ let e = w();
+ switch (o.taskType) {
+ case "chat":
+ e = e.filter((t) => t.capabilities.includes("chat"));
+ break;
+ case "completion":
+ e = e.filter((t) => t.capabilities.includes("text-completion"));
+ break;
+ case "embedding":
+ e = e.filter((t) => t.capabilities.includes("embeddings"));
+ break;
+ case "image":
+ e = e.filter(
+ (t) => t.capabilities.includes("image-generation") || t.capabilities.includes("vision")
+ );
+ break;
+ case "audio":
+ e = e.filter(
+ (t) => t.capabilities.includes("speech-recognition") || t.capabilities.includes("speech-synthesis")
+ );
+ break;
+ }
+ return o.contextLength && (e = e.filter(
+ (t) => t.contextLength && t.contextLength >= o.contextLength
+ )), o.features && o.features.length > 0 && (e = e.filter(
+ (t) => o.features.every(
+ (s) => t.supportedFeatures?.includes(s) || t.capabilities.includes(s)
+ )
+ )), o.budget && e.sort(
+ (t, s) => (t.inputCostPer1KTokens || 0) - (s.inputCostPer1KTokens || 0)
+ ), e.slice(0, 5);
+ }
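
`Q` narrows the OpenAI model list by task type, minimum context length, and required features, sorts by input cost when `budget` is set, and returns at most five configs. Example against the entries above (internal minified name, for illustration):

  // Chat-capable models with >= 128K context, cheapest first:
  const picks = Q({ taskType: "chat", contextLength: 128e3, budget: true });
  // -> gpt-4o-mini, gpt-4o, gpt-4-turbo, ordered by inputCostPer1KTokens
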
1551
+ class L {
1552
+ /**
1553
+ * Constructor - Minimal configuration
1554
+ * @param config.apiKey - API key (required)
1555
+ * @param config.modelType - Model type, default gpt-3.5-turbo
1556
+ * @param config.timeout - Timeout, default 30 seconds
1557
+ * @param config.organization - Organization ID (optional)
1558
+ * @param config.baseURL - Custom base URL (optional)
1559
+ */
1560
+ constructor(e) {
1561
+ if (this.apiKey = e.apiKey, this.modelType = e.modelType || f.GPT3_5_TURBO, this.timeout = e.timeout || 3e4, this.organization = e.organization, this.baseURL = e.baseURL || "https://api.openai.com/v1", !this.apiKey)
1562
+ throw new Error("API Key cannot be empty");
1563
+ if (!u.get(this.modelType))
1564
+ throw new Error(`Unsupported model type: ${this.modelType}`);
1565
+ }
1566
+ /**
1567
+ * Simplest method: single-turn conversation
1568
+ * @param message - User message
1569
+ * @param options - Chat options
1570
+ * @returns AI response
1571
+ */
1572
+ async chat(e, t) {
1573
+ const s = [];
1574
+ t?.systemPrompt && s.push({ role: "system", content: t.systemPrompt }), s.push({ role: "user", content: e });
1575
+ const a = await this.chatCompletion(s, {
1576
+ temperature: t?.temperature,
1577
+ maxTokens: t?.maxTokens,
1578
+ stream: !1,
1579
+ topP: t?.topP,
1580
+ frequencyPenalty: t?.frequencyPenalty,
1581
+ presencePenalty: t?.presencePenalty,
1582
+ stop: t?.stop
1583
+ });
1584
+ return this.extractContent(a);
1585
+ }
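+ /*
+  * Usage sketch (illustrative; the key is a placeholder). L is exported as
+  * OpenAI; modelType defaults to gpt-3.5-turbo:
+  *
+  *   const ai = new L({ apiKey: process.env.OPENAI_API_KEY });
+  *   const reply = await ai.chat("Hello!", { systemPrompt: "Be concise.", temperature: 0.5 });
+  */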
1586
+ /**
1587
+ * Multi-turn conversation
1588
+ * @param messages - Message history
1589
+ * @param options - Chat options
1590
+ * @returns Complete API response
1591
+ */
1592
+ async chatCompletion(e, t) {
1593
+ const s = t?.modelType || this.modelType, a = u.get(s);
1594
+ if (!a)
1595
+ throw new Error(`Unsupported model type: ${s}`);
1596
+ const i = t?.temperature ?? 0.7, n = t?.maxTokens ?? 1e3, r = t?.stream ?? !1, m = a.endpoint, c = this.buildOpenAIRequest(
1597
+ a.name,
1598
+ e,
1599
+ i,
1600
+ n,
1601
+ r,
1602
+ t
1603
+ );
1604
+ try {
1605
+ return await this.makeRequest(m, c, r);
1606
+ } catch (p) {
1607
+ throw new Error(`OpenAI request failed: ${p.message}`);
1608
+ }
1609
+ }
1610
+ /**
1611
+ * Streaming conversation
1612
+ * @param messages - Message history
1613
+ * @param callback - Streaming callback function
1614
+ * @param options - Chat options
1615
+ */
1616
+ async chatStream(e, t, s) {
1617
+ const a = s?.modelType || this.modelType, i = u.get(a);
1618
+ if (!i)
1619
+ throw new Error(`Unsupported model type: ${a}`);
1620
+ const n = s?.temperature ?? 0.7, r = s?.maxTokens ?? 1e3, m = this.buildOpenAIRequest(
1621
+ i.name,
1622
+ e,
1623
+ n,
1624
+ r,
1625
+ !0,
1626
+ s
1627
+ );
1628
+ try {
1629
+ await this.makeStreamRequest(i.endpoint, m, t);
1630
+ } catch (c) {
1631
+ throw new Error(`Streaming request failed: ${c.message}`);
1632
+ }
1633
+ }
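+ /*
+  * Usage sketch (illustrative, reusing the `ai` client from the chat sketch
+  * above): the callback receives each content delta, then an empty chunk with
+  * done = true when the stream ends:
+  *
+  *   await ai.chatStream(
+  *     [{ role: "user", content: "Tell me a story." }],
+  *     (chunk, done) => { if (!done) process.stdout.write(chunk); }
+  *   );
+  */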
1634
+ /**
1635
+ * Generate images using DALL-E
1636
+ * @param prompt - Image generation prompt
1637
+ * @param options - Image generation options
1638
+ * @returns Generated image URLs, or base64-encoded strings when responseFormat is "b64_json"
1639
+ */
1640
+ async generateImage(e, t) {
1641
+ const s = t?.modelType || f.DALL_E_3;
1642
+ if (s !== f.DALL_E_2 && s !== f.DALL_E_3)
1643
+ throw new Error("Image generation only supports DALL-E models");
1644
+ const a = u.get(s);
1645
+ if (!a)
1646
+ throw new Error(`Unsupported model type: ${s}`);
1647
+ const i = {
1648
+ model: a.name,
1649
+ prompt: e,
1650
+ n: t?.n || 1,
1651
+ size: t?.size || "1024x1024",
1652
+ quality: t?.quality || "standard",
1653
+ style: t?.style || "vivid",
1654
+ response_format: t?.responseFormat || "url"
1655
+ };
1656
+ try {
1657
+ const n = await this.makeRequest(a.endpoint, i, !1);
1658
+ if (n.data && Array.isArray(n.data))
1659
+ return n.data.map(
1660
+ (r) => t?.responseFormat === "b64_json" ? r.b64_json : r.url
1661
+ );
1662
+ throw new Error("Invalid response format from image generation");
1663
+ } catch (n) {
1664
+ throw new Error(`Image generation failed: ${n.message}`);
1665
+ }
1666
+ }
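+ /*
+  * Usage sketch (illustrative): returns image URLs by default, or base64
+  * strings when responseFormat is "b64_json":
+  *
+  *   const [url] = await ai.generateImage("a candlestick chart at sunset", { size: "1024x1024" });
+  */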
1667
+ /**
1668
+ * Create text embeddings
1669
+ * @param input - Text or array of texts to embed
1670
+ * @param options - Embedding options
1671
+ * @returns Embedding vectors
1672
+ */
1673
+ async createEmbeddings(e, t) {
1674
+ const s = t?.modelType || f.TEXT_EMBEDDING_ADA_002, a = u.get(s);
1675
+ if (!a)
1676
+ throw new Error(`Unsupported model type: ${s}`);
1677
+ const i = {
1678
+ model: a.name,
1679
+ input: e
1680
+ };
1681
+ t?.dimensions && a.name === f.TEXT_EMBEDDING_3_SMALL && (i.dimensions = t.dimensions);
1682
+ try {
1683
+ const n = await this.makeRequest(a.endpoint, i, !1);
1684
+ if (n.data && Array.isArray(n.data))
1685
+ return n.data.map((r) => r.embedding);
1686
+ throw new Error("Invalid response format from embeddings");
1687
+ } catch (n) {
1688
+ throw new Error(`Embedding creation failed: ${n.message}`);
1689
+ }
1690
+ }
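+ /*
+  * Usage sketch (illustrative): accepts a string or an array of strings and
+  * returns one embedding vector per input:
+  *
+  *   const [vec] = await ai.createEmbeddings("BTC closed above resistance");
+  *   console.log(vec.length); // embedding dimension
+  */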
1691
+ /**
1692
+ * Transcribe audio using Whisper
1693
+ * @param audioData - Audio data as a Blob/File object (string file paths and base64 input are not yet supported and will throw)
1694
+ * @param options - Transcription options
1695
+ * @returns Transcribed text
1696
+ */
1697
+ async transcribeAudio(e, t) {
1698
+ const s = t?.modelType || f.WHISPER_1, a = u.get(s);
1699
+ if (!a)
1700
+ throw new Error(`Unsupported model type: ${s}`);
1701
+ const i = new FormData();
1702
+ if (typeof e == "string")
1703
+ throw new Error("File path/Base64 support requires additional implementation");
1704
+ i.append("file", e), i.append("model", a.name), t?.language && i.append("language", t.language), t?.prompt && i.append("prompt", t.prompt), t?.responseFormat && i.append("response_format", t.responseFormat), t?.temperature !== void 0 && i.append("temperature", t.temperature.toString());
1705
+ try {
1706
+ const n = await this.makeFormDataRequest(a.endpoint, i, !1);
1707
+ return n.text || n.transcription || "";
1708
+ } catch (n) {
1709
+ throw new Error(`Audio transcription failed: ${n.message}`);
1710
+ }
1711
+ }
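+ /*
+  * Usage sketch (illustrative): only Blob/File input is accepted here; string
+  * paths or base64 input throw. Read the audio into a Blob first (the fs
+  * import is assumed):
+  *
+  *   const audio = new Blob([await fs.promises.readFile("clip.mp3")]);
+  *   const text = await ai.transcribeAudio(audio, { language: "en" });
+  */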
1712
+ /**
1713
+ * Text-to-speech conversion
1714
+ * @param text - Text to convert to speech
1715
+ * @param options - TTS options
1716
+ * @returns Audio data as an ArrayBuffer
1717
+ */
1718
+ async textToSpeech(e, t) {
1719
+ const s = t?.modelType || f.TTS_1_HD, a = u.get(s);
1720
+ if (!a)
1721
+ throw new Error(`Unsupported model type: ${s}`);
1722
+ const i = {
1723
+ model: a.name,
1724
+ input: e,
1725
+ voice: t?.voice || "alloy",
1726
+ response_format: t?.responseFormat || "mp3",
1727
+ speed: t?.speed || 1
1728
+ };
1729
+ try {
1730
+ return await this.makeRequest(a.endpoint, i, !1, !0);
1731
+ } catch (n) {
1732
+ throw new Error(`Text-to-speech conversion failed: ${n.message}`);
1733
+ }
1734
+ }
1735
+ /**
1736
+ * Content moderation
1737
+ * @param input - Text to moderate
1738
+ * @param options - Moderation options
1739
+ * @returns Moderation results
1740
+ */
1741
+ async moderateContent(e, t) {
1742
+ const s = t?.modelType || f.MODERATION_LATEST, a = u.get(s);
1743
+ if (!a)
1744
+ throw new Error(`Unsupported model type: ${s}`);
1745
+ const i = {
1746
+ model: a.name,
1747
+ input: e
1748
+ };
1749
+ try {
1750
+ return (await this.makeRequest(a.endpoint, i, !1)).results || [];
1751
+ } catch (n) {
1752
+ throw new Error(`Content moderation failed: ${n.message}`);
1753
+ }
1754
+ }
1755
+ /**
1756
+ * Switch model
1757
+ * @param modelType - New model type
1758
+ */
1759
+ setModel(e) {
1760
+ if (!u.get(e))
1761
+ throw new Error(`Unsupported model type: ${e}`);
1762
+ this.modelType = e;
1763
+ }
1764
+ /**
1765
+ * Get current model configuration
1766
+ */
1767
+ getCurrentModel() {
1768
+ const e = u.get(this.modelType);
1769
+ if (!e)
1770
+ throw new Error(`Model configuration does not exist: ${this.modelType}`);
1771
+ return {
1772
+ name: e.name,
1773
+ displayName: e.displayName,
1774
+ description: e.description
1775
+ };
1776
+ }
1777
+ /**
1778
+ * Test connection
1779
+ * @returns Connection test result
1780
+ */
1781
+ async testConnection() {
1782
+ try {
1783
+ const e = await this.chat('Hello, respond with "OK" if you can hear me.');
1784
+ return {
1785
+ success: !0,
1786
+ model: this.modelType,
1787
+ response: e
1788
+ };
1789
+ } catch (e) {
1790
+ return {
1791
+ success: !1,
1792
+ model: this.modelType,
1793
+ error: e.message
1794
+ };
1795
+ }
1796
+ }
1797
+ /**
1798
+ * Estimate cost for a request
1799
+ * @param inputTokens - Number of input tokens
1800
+ * @param outputTokens - Number of output tokens
1801
+ * @param modelType - Model type (optional, uses current if not provided)
1802
+ * @returns Cost estimate
1803
+ */
1804
+ estimateCost(e, t = 0, s) {
1805
+ const a = s || this.modelType, i = u.get(a);
1806
+ if (!i)
1807
+ throw new Error(`Unsupported model type: ${a}`);
1808
+ const n = (i.inputCostPer1KTokens || 0) / 1e3 * e, r = (i.outputCostPer1KTokens || 0) / 1e3 * t;
1809
+ return {
1810
+ inputCost: n,
1811
+ outputCost: r,
1812
+ totalCost: n + r
1813
+ };
1814
+ }
1815
+ buildOpenAIRequest(e, t, s, a, i, n) {
1816
+ const r = {
1817
+ model: e,
1818
+ messages: t,
1819
+ temperature: s,
1820
+ max_tokens: a,
1821
+ stream: i
1822
+ };
1823
+ return n?.topP !== void 0 && (r.top_p = n.topP), n?.frequencyPenalty !== void 0 && (r.frequency_penalty = n.frequencyPenalty), n?.presencePenalty !== void 0 && (r.presence_penalty = n.presencePenalty), n?.stop !== void 0 && (r.stop = n.stop), r;
1824
+ }
1825
+ async makeRequest(e, t, s, a = !1) {
1826
+ const i = new AbortController(), n = setTimeout(() => i.abort(), this.timeout);
1827
+ try {
1828
+ const r = e.startsWith("http") ? e : `${this.baseURL}${e}`, m = {
1829
+ Authorization: `Bearer ${this.apiKey}`,
1830
+ "Content-Type": "application/json"
1831
+ };
1832
+ this.organization && (m["OpenAI-Organization"] = this.organization);
1833
+ const c = await fetch(r, {
1834
+ method: "POST",
1835
+ headers: m,
1836
+ body: JSON.stringify(t),
1837
+ signal: i.signal
1838
+ });
1839
+ if (clearTimeout(n), !c.ok) {
1840
+ const p = await c.text();
1841
+ let d = p; // best-available error detail; previously the parsed message was swallowed by the catch below
1842
+ try {
1843
+ d = JSON.parse(p).error?.message || p;
1844
+ } catch {
1845
+ }
1846
+ throw new Error(`HTTP ${c.status}: ${d}`);
1847
+ }
1848
+ return a ? await c.arrayBuffer() : s ? c.body : await c.json();
1849
+ } catch (r) {
1850
+ throw clearTimeout(n), r.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : r;
1851
+ }
1852
+ }
1853
+ async makeFormDataRequest(e, t, s) {
1854
+ const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
1855
+ try {
1856
+ const n = e.startsWith("http") ? e : `${this.baseURL}${e}`, r = {
1857
+ Authorization: `Bearer ${this.apiKey}`
1858
+ };
1859
+ this.organization && (r["OpenAI-Organization"] = this.organization);
1860
+ const m = await fetch(n, {
1861
+ method: "POST",
1862
+ headers: r,
1863
+ body: t,
1864
+ signal: a.signal
1865
+ });
1866
+ if (clearTimeout(i), !m.ok) {
1867
+ const c = await m.text();
1868
+ let p = c; // best-available error detail; previously the parsed message was swallowed by the catch below
1869
+ try {
1870
+ p = JSON.parse(c).error?.message || c;
1871
+ } catch {
1872
+ }
1873
+ throw new Error(`HTTP ${m.status}: ${p}`);
1874
+ }
1875
+ return s ? m.body : await m.json();
1876
+ } catch (n) {
1877
+ throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
1878
+ }
1879
+ }
1880
+ async makeStreamRequest(e, t, s) {
1881
+ const a = await this.makeRequest(e, t, !0);
1882
+ if (!a)
1883
+ throw new Error("Failed to get streaming response");
1884
+ const i = a.getReader(), n = new TextDecoder("utf-8");
1885
+ let r = "";
1886
+ try {
1887
+ for (; ; ) {
1888
+ const { done: m, value: c } = await i.read();
1889
+ if (m) {
1890
+ s("", !0);
1891
+ break;
1892
+ }
1893
+ r += n.decode(c, { stream: !0 });
1894
+ const p = r.split(`
1895
+ `);
1896
+ r = p.pop() || "";
1897
+ for (const d of p)
1898
+ if (d.startsWith("data: ")) {
1899
+ const l = d.slice(6);
1900
+ if (l === "[DONE]") {
1901
+ s("", !0);
1902
+ return;
1903
+ }
1904
+ try {
1905
+ const h = JSON.parse(l);
1906
+ h.choices?.[0]?.delta?.content && s(h.choices[0].delta.content, !1);
1907
+ } catch {
1908
+ }
1909
+ }
1910
+ }
1911
+ } finally {
1912
+ i.releaseLock();
1913
+ }
1914
+ }
1915
+ extractContent(e) {
1916
+ if (e.choices?.[0]?.message?.content)
1917
+ return e.choices[0].message.content;
1918
+ if (e.data?.[0]?.b64_json)
1919
+ return e.data[0].b64_json;
1920
+ if (e.data?.[0]?.url)
1921
+ return e.data[0].url;
1922
+ if (e.text)
1923
+ return e.text;
1924
+ throw new Error("Unable to parse response content");
1925
+ }
1926
+ /**
1927
+ * Specialized method for processing OHLCV arrays
1928
+ * @param ohlcvArray - OHLCV data array
1929
+ * @param instructions - Processing instructions (optional, default: "Based on these OHLCV data, predict the next period")
1930
+ * @param count - Number of OHLCV data items to return (optional, default: 1)
1931
+ * @param options - Chat options
1932
+ * @returns Predicted OHLCV array
1933
+ */
1934
+ async analyzeOHLCV(e, t, s, a) {
1935
+ const i = t || "Based on these OHLCV data, predict the next period", n = s || 1;
1936
+ if (!Number.isInteger(n) || n <= 0)
1937
+ throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
1938
+ const r = 50;
1939
+ if (n > r)
1940
+ throw new Error(`Count parameter too large: ${n}. Maximum allowed is ${r}. Please reduce the count or split your request.`);
1941
+ const m = n === 1 ? "Return EXACTLY 1 OHLCV object for the next period." : `Return EXACTLY ${n} consecutive OHLCV objects for the next ${n} periods.`, c = `You are a professional financial data analysis AI. The user will give you an array of OHLCV (Open, High, Low, Close, Volume) data.
1942
+ Your task: ${i}
1943
+ CRITICAL RULES:
1944
+ 1. ${m}
1945
+ 2. Return ONLY a JSON array of OHLCV objects, NO explanations, comments, or other text
1946
+ 3. The OHLCV array format must match: [{open, high, low, close, volume}, ...]
1947
+ 4. All numbers must be valid numbers
1948
+ 5. Ensure technical rationality (high >= low, high >= close >= low, volume >= 0)
1949
+ 6. Maintain consistency with historical trends and patterns
1950
+ 7. For technical analysis, provide reasonable values based on typical patterns
1951
+ 8. Do not include markdown formatting, only pure JSON
1952
+ ${n === 1 ? `Example of valid response for 1 period:
1953
+ [{"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000}]` : `Example of valid response for ${n} periods:
1954
+ [
1955
+ {"open": 115.5, "high": 118.0, "low": 114.0, "close": 117.0, "volume": 1350000},
1956
+ {"open": 117.5, "high": 120.0, "low": 116.0, "close": 119.0, "volume": 1400000}
1957
+ ${n > 2 ? `,
1958
+ ... ${n - 2} more OHLCV objects following the same pattern` : ""}
1959
+ ]`}`, p = JSON.stringify(e, null, 2), d = `Here is the historical OHLCV data (${e.length} periods):
1960
+ ${p}
1961
+ Please process this data according to the system instructions. Remember to return EXACTLY ${n} OHLCV object(s) in a JSON array with no additional text.`, l = [
1962
+ { role: "system", content: c },
1963
+ { role: "user", content: d }
1964
+ ];
1965
+ try {
1966
+ const h = n * 50 + 100, y = Math.max(a?.maxTokens || 1e3, h), E = await this.chatCompletion(l, {
1967
+ temperature: a?.temperature || 0.3,
1968
+ maxTokens: y,
1969
+ stream: !1,
1970
+ modelType: a?.modelType,
1971
+ topP: a?.topP,
1972
+ frequencyPenalty: a?.frequencyPenalty,
1973
+ presencePenalty: a?.presencePenalty,
1974
+ stop: a?.stop
1975
+ }), b = this.extractContent(E), g = this.parseOHLCVResponse(b);
1976
+ if (g.length !== n)
1977
+ throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
1978
+ return g;
1979
+ } catch (h) {
1980
+ throw new Error(`OHLCV analysis failed: ${h.message}`);
1981
+ }
1982
+ }
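+ /*
+  * Usage sketch (illustrative data): analyzeOHLCV prompts the model for the
+  * next `count` candles and validates the returned JSON (high >= low, close
+  * within the high/low range, numeric fields):
+  *
+  *   const history = [
+  *     { open: 100, high: 105, low: 99, close: 104, volume: 1200000 },
+  *     { open: 104, high: 108, low: 103, close: 107, volume: 1350000 }
+  *   ];
+  *   const [next] = await ai.analyzeOHLCV(history, undefined, 1, { temperature: 0.2 });
+  */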
1983
+ /**
1984
+ * Parse AI returned OHLCV response
1985
+ * @private
1986
+ */
1987
+ parseOHLCVResponse(e) {
1988
+ try {
1989
+ const t = JSON.parse(e);
1990
+ if (!Array.isArray(t))
1991
+ throw new Error("Response is not in array format");
1992
+ return t.map((a, i) => {
1993
+ if (typeof a != "object" || a === null)
1994
+ throw new Error(`Element ${i} is not a valid object`);
1995
+ const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
1996
+ for (const l of d)
1997
+ if (typeof a[l] != "number" || isNaN(a[l]))
1998
+ throw new Error(`Element ${i} field ${l} is not a valid number`);
1999
+ if (r < m)
2000
+ throw new Error(`Element ${i}: high cannot be lower than low`);
2001
+ if (c < m || c > r)
2002
+ throw new Error(`Element ${i}: close must be between low and high`);
2003
+ return {
2004
+ open: Number(n),
2005
+ high: Number(r),
2006
+ low: Number(m),
2007
+ close: Number(c),
2008
+ volume: Number(p)
2009
+ };
2010
+ });
2011
+ } catch (t) {
2012
+ const s = e.match(/\[[\s\S]*\]/);
2013
+ if (s && s[0] !== e) // guard against infinite self-recursion when the match equals the input
2014
+ return this.parseOHLCVResponse(s[0]);
2015
+ throw new Error(`Unable to parse AI returned OHLCV data: ${t}
2016
+ Original content: ${e.substring(0, 200)}...`);
2017
+ }
2018
+ }
2019
+ }
2020
+ function W(o, e) {
2021
+ return new L({ apiKey: o, modelType: e });
2022
+ }
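+ /*
+  * Usage sketch (illustrative; the key is a placeholder): W is exported as
+  * createOpenAI and simply wraps the constructor:
+  *
+  *   const client = W(process.env.OPENAI_API_KEY, f.GPT3_5_TURBO);
+  */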
2023
+ export {
2024
+ x as ALIYUN_MODELS,
2025
+ v as AliYunModelType,
2026
+ _ as AliyunAI,
2027
+ k as DEEPSEEK_MODELS,
2028
+ C as DeepSeekAI,
2029
+ T as DeepSeekModelType,
2030
+ u as OPENAI_MODELS,
2031
+ L as OpenAI,
2032
+ f as OpenAIModelType,
2033
+ N as createAliyunAI,
2034
+ P as createDeepSeekAI,
2035
+ W as createOpenAI,
2036
+ U as estimateCost,
2037
+ w as getAllOpenAIModels,
2038
+ A as getAudioModelsOpenAI,
2039
+ $ as getAvailableOpenAIModelTypes,
2040
+ R as getChatModels,
2041
+ S as getCompletionModels,
2042
+ K as getCostEfficientModels,
2043
+ D as getEmbeddingModels,
2044
+ V as getHighContextModels,
2045
+ H as getLatestModels,
2046
+ B as getMultimodalModelsOpenAI,
2047
+ q as getOpenAIModel,
2048
+ O as getOpenAIModelByName,
2049
+ I as getVisionModelsOpenAI,
2050
+ Q as suggestModel
2051
+ };
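A minimal end-to-end sketch of this module's public surface, assuming dist/index.mjs is the package entry point and using the exported names above (the API key is a placeholder):

    import { createOpenAI } from "ohlcv-ai";

    const client = createOpenAI(process.env.OPENAI_API_KEY);
    const candles = [{ open: 100, high: 105, low: 99, close: 104, volume: 1200000 }];
    const [next] = await client.analyzeOHLCV(candles); // predicts the next period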