ohlcv-ai 1.0.7 → 1.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,5 +1,5 @@
- var v = /* @__PURE__ */ ((o) => (o.QWEN_TURBO = "qwen-turbo", o.QWEN_PLUS = "qwen-plus", o.QWEN_MAX = "qwen-max", o.QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext", o.QWEN_2_5B = "qwen2.5-0.5b", o.QWEN_2_5B_INSTRUCT = "qwen2.5-0.5b-instruct", o.QWEN_2_5B_7B = "qwen2.5-7b", o.QWEN_2_5B_7B_INSTRUCT = "qwen2.5-7b-instruct", o.QWEN_2_5B_14B = "qwen2.5-14b", o.QWEN_2_5B_14B_INSTRUCT = "qwen2.5-14b-instruct", o.QWEN_2_5B_32B = "qwen2.5-32b", o.QWEN_2_5B_32B_INSTRUCT = "qwen2.5-32b-instruct", o.QWEN_2_5B_72B = "qwen2.5-72b", o.QWEN_2_5B_72B_INSTRUCT = "qwen2.5-72b-instruct", o.QWEN_2_5B_CODER = "qwen2.5-coder", o.QWEN_2_5B_CODER_7B = "qwen2.5-coder-7b", o.QWEN_2_5B_CODER_14B = "qwen2.5-coder-14b", o.QWEN_2_5B_CODER_32B = "qwen2.5-coder-32b", o.QWEN_VL_LITE = "qwen-vl-lite", o.QWEN_VL_PLUS = "qwen-vl-plus", o.QWEN_VL_MAX = "qwen-vl-max", o.QWEN_AUDIO_TURBO = "qwen-audio-turbo", o.QWEN_AUDIO_CHAT = "qwen-audio-chat", o.QWEN_MATH_7B = "qwen-math-7b", o.LLAMA2_7B_CHAT_V2 = "llama2-7b-chat-v2", o.BAICHUAN2_7B_CHAT_V1 = "baichuan2-7b-chat-v1", o.QWEN_FINANCIAL = "qwen-financial", o.QWEN_FINANCIAL_14B = "qwen-financial-14b", o.QWEN_FINANCIAL_32B = "qwen-financial-32b", o.QWEN_MEDICAL = "qwen-medical", o.QWEN_MEDICAL_14B = "qwen-medical-14b", o.QWEN_MEDICAL_32B = "qwen-medical-32b", o.QWEN_OMNI = "qwen-omni", o.QWEN_OMNI_PRO = "qwen-omni-pro", o))(v || {});
- const x = /* @__PURE__ */ new Map([
+ var T = /* @__PURE__ */ ((t) => (t.QWEN_TURBO = "qwen-turbo", t.QWEN_PLUS = "qwen-plus", t.QWEN_MAX = "qwen-max", t.QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext", t.QWEN_2_5B = "qwen2.5-0.5b", t.QWEN_2_5B_INSTRUCT = "qwen2.5-0.5b-instruct", t.QWEN_2_5B_7B = "qwen2.5-7b", t.QWEN_2_5B_7B_INSTRUCT = "qwen2.5-7b-instruct", t.QWEN_2_5B_14B = "qwen2.5-14b", t.QWEN_2_5B_14B_INSTRUCT = "qwen2.5-14b-instruct", t.QWEN_2_5B_32B = "qwen2.5-32b", t.QWEN_2_5B_32B_INSTRUCT = "qwen2.5-32b-instruct", t.QWEN_2_5B_72B = "qwen2.5-72b", t.QWEN_2_5B_72B_INSTRUCT = "qwen2.5-72b-instruct", t.QWEN_2_5B_CODER = "qwen2.5-coder", t.QWEN_2_5B_CODER_7B = "qwen2.5-coder-7b", t.QWEN_2_5B_CODER_14B = "qwen2.5-coder-14b", t.QWEN_2_5B_CODER_32B = "qwen2.5-coder-32b", t.QWEN_VL_LITE = "qwen-vl-lite", t.QWEN_VL_PLUS = "qwen-vl-plus", t.QWEN_VL_MAX = "qwen-vl-max", t.QWEN_AUDIO_TURBO = "qwen-audio-turbo", t.QWEN_AUDIO_CHAT = "qwen-audio-chat", t.QWEN_MATH_7B = "qwen-math-7b", t.LLAMA2_7B_CHAT_V2 = "llama2-7b-chat-v2", t.BAICHUAN2_7B_CHAT_V1 = "baichuan2-7b-chat-v1", t.QWEN_FINANCIAL = "qwen-financial", t.QWEN_FINANCIAL_14B = "qwen-financial-14b", t.QWEN_FINANCIAL_32B = "qwen-financial-32b", t.QWEN_MEDICAL = "qwen-medical", t.QWEN_MEDICAL_14B = "qwen-medical-14b", t.QWEN_MEDICAL_32B = "qwen-medical-32b", t.QWEN_OMNI = "qwen-omni", t.QWEN_OMNI_PRO = "qwen-omni-pro", t))(T || {});
+ const b = /* @__PURE__ */ new Map([
  [
  "qwen-turbo",
  {
@@ -358,6 +358,20 @@ const x = /* @__PURE__ */ new Map([
  }
  ]
  ]);
+ function N(t) {
+ return b.get(t);
+ }
+ function P() {
+ return Array.from(b.values());
+ }
+ function q(t) {
+ for (const e of b.values())
+ if (e.name === t)
+ return e;
+ }
+ function O() {
+ return Array.from(b.keys());
+ }
  class _ {
  /**
  * Constructor - Minimal configuration
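Note: the block of four functions added above is the substantive AliYun-side change in 1.0.8; per the export block at the end of this diff they ship as getModel, getAllModels, getModelByName, and getAvailableAliYunModelTypes. Despite the generic names of the first three, all four query the AliYun model map specifically. A minimal usage sketch (the model key is just an example):

import { getModel, getAllModels, getModelByName, getAvailableAliYunModelTypes, AliYunModelType } from "ohlcv-ai";

const cfg = getModel(AliYunModelType.QWEN_TURBO); // model config, or undefined if unregistered
const all = getAllModels(); // every registered AliYun model config
const byName = getModelByName("qwen-turbo"); // linear search on each config's name field
const keys = getAvailableAliYunModelTypes(); // the registered model-type keys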
@@ -366,9 +380,9 @@ class _ {
  * @param config.timeout - Timeout, default 30 seconds
  */
  constructor(e) {
- if (this.apiKey = e.apiKey, this.modelType = e.modelType || v.QWEN_TURBO, this.timeout = e.timeout || 3e4, !this.apiKey)
+ if (this.apiKey = e.apiKey, this.modelType = e.modelType || T.QWEN_TURBO, this.timeout = e.timeout || 3e4, !this.apiKey)
  throw new Error("API Key cannot be empty");
- if (!x.get(this.modelType))
+ if (!b.get(this.modelType))
  throw new Error(`Unsupported model type: ${this.modelType}`);
  }
  /**
@@ -377,12 +391,12 @@
  * @param options - Chat options
  * @returns AI response
  */
- async chat(e, t) {
+ async chat(e, o) {
  const s = [];
- t?.systemPrompt && s.push({ role: "system", content: t.systemPrompt }), s.push({ role: "user", content: e });
+ o?.systemPrompt && s.push({ role: "system", content: o.systemPrompt }), s.push({ role: "user", content: e });
  const a = await this.chatCompletion(s, {
- temperature: t?.temperature,
- maxTokens: t?.maxTokens,
+ temperature: o?.temperature,
+ maxTokens: o?.maxTokens,
  stream: !1
  });
  return this.extractContent(a);
@@ -393,11 +407,11 @@
  * @param options - Chat options
  * @returns Complete API response
  */
- async chatCompletion(e, t) {
- const s = t?.modelType || this.modelType, a = x.get(s);
+ async chatCompletion(e, o) {
+ const s = o?.modelType || this.modelType, a = b.get(s);
  if (!a)
  throw new Error(`Unsupported model type: ${s}`);
- const i = t?.temperature ?? 0.7, n = t?.maxTokens ?? 1e3, r = t?.stream ?? !1, m = a.endpoint, p = a.format === "openai" ? this.buildOpenAIRequest(a.name, e, i, n, r) : this.buildDashScopeRequest(a.name, e, i, n);
+ const i = o?.temperature ?? 0.7, n = o?.maxTokens ?? 1e3, r = o?.stream ?? !1, m = a.endpoint, p = a.format === "openai" ? this.buildOpenAIRequest(a.name, e, i, n, r) : this.buildDashScopeRequest(a.name, e, i, n);
  try {
  return await this.makeRequest(m, p, r);
  } catch (d) {
@@ -410,8 +424,8 @@
  * @param callback - Streaming callback function
  * @param options - Chat options
  */
- async chatStream(e, t, s) {
- const a = s?.modelType || this.modelType, i = x.get(a);
+ async chatStream(e, o, s) {
+ const a = s?.modelType || this.modelType, i = b.get(a);
  if (!i)
  throw new Error(`Unsupported model type: ${a}`);
  if (i.format !== "openai")
@@ -424,7 +438,7 @@
  !0
  );
  try {
- await this.makeStreamRequest(i.endpoint, m, t);
+ await this.makeStreamRequest(i.endpoint, m, o);
  } catch (c) {
  throw new Error(`Streaming request failed: ${c.message}`);
  }
@@ -434,7 +448,7 @@
  * @param modelType - New model type
  */
  setModel(e) {
- if (!x.get(e))
+ if (!b.get(e))
  throw new Error(`Unsupported model type: ${e}`);
  this.modelType = e;
  }
@@ -442,7 +456,7 @@
  * Get current model configuration
  */
  getCurrentModel() {
- const e = x.get(this.modelType);
+ const e = b.get(this.modelType);
  if (!e)
  throw new Error(`Model configuration does not exist: ${this.modelType}`);
  return {
@@ -471,19 +485,19 @@
  };
  }
  }
- buildOpenAIRequest(e, t, s, a, i) {
+ buildOpenAIRequest(e, o, s, a, i) {
  return {
  model: e,
- messages: t,
+ messages: o,
  temperature: s,
  max_tokens: a,
  stream: i
  };
  }
- buildDashScopeRequest(e, t, s, a) {
+ buildDashScopeRequest(e, o, s, a) {
  return {
  model: e,
- input: { messages: t },
+ input: { messages: o },
  parameters: {
  temperature: s,
  max_tokens: a,
@@ -491,7 +505,7 @@ class _ {
  }
  };
  }
- async makeRequest(e, t, s) {
+ async makeRequest(e, o, s) {
  const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
  try {
  const n = await fetch(e, {
@@ -501,7 +515,7 @@
  "Content-Type": "application/json; charset=utf-8",
  Accept: "application/json"
  },
- body: JSON.stringify(t),
+ body: JSON.stringify(o),
  signal: a.signal
  });
  if (clearTimeout(i), !n.ok) {
@@ -513,8 +527,8 @@
  throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
  }
  }
- async makeStreamRequest(e, t, s) {
- const a = await this.makeRequest(e, t, !0);
+ async makeStreamRequest(e, o, s) {
+ const a = await this.makeRequest(e, o, !0);
  if (!a)
  throw new Error("Failed to get streaming response");
  const i = a.getReader(), n = new TextDecoder("utf-8");
@@ -565,8 +579,8 @@
  * @param options - Chat options
  * @returns Predicted OHLCV array
  */
- async predictingOHLCV(e, t, s, a) {
- const i = t || "Based on these OHLCV data, predict the next period", n = s || 1;
+ async predictingOHLCV(e, o, s, a) {
+ const i = o || "Based on these OHLCV data, predict the next period", n = s || 1;
  if (!Number.isInteger(n) || n <= 0)
  throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
  const r = 50;
@@ -597,12 +611,12 @@ Please process this data according to the system instructions. Remember to retur
  { role: "user", content: d }
  ];
  try {
- const h = n * 50 + 100, y = Math.max(a?.maxTokens || 1e3, h), E = await this.chatCompletion(l, {
+ const h = n * 50 + 100, x = Math.max(a?.maxTokens || 1e3, h), k = await this.chatCompletion(l, {
  temperature: a?.temperature || 0.3,
- maxTokens: y,
+ maxTokens: x,
  stream: !1,
  modelType: a?.modelType
- }), b = this.extractContent(E), g = this.parseOHLCVResponse(b);
+ }), E = this.extractContent(k), g = this.parseOHLCVResponse(E);
  if (g.length !== n)
  throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
  return g;
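Note on the hunk above: only local bindings are renamed (y → x, E → k, b → E); the token budgeting itself is unchanged. The effective completion limit is Math.max(options.maxTokens || 1000, count * 50 + 100), i.e. roughly 50 tokens reserved per requested OHLCV object plus 100 of headroom, so for example count = 30 raises the floor to 30 * 50 + 100 = 1600 tokens even if the caller left maxTokens at the 1000 default.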
@@ -616,10 +630,10 @@ Please process this data according to the system instructions. Remember to retur
  */
  parseOHLCVResponse(e) {
  try {
- const t = JSON.parse(e);
- if (!Array.isArray(t))
+ const o = JSON.parse(e);
+ if (!Array.isArray(o))
  throw new Error("Response is not in array format");
- return t.map((a, i) => {
+ return o.map((a, i) => {
  if (typeof a != "object" || a === null)
  throw new Error(`Element ${i} is not a valid object`);
  const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
@@ -638,20 +652,20 @@ Please process this data according to the system instructions. Remember to retur
  volume: Number(p)
  };
  });
- } catch (t) {
+ } catch (o) {
  const s = e.match(/\[[\s\S]*\]/);
  if (s)
  return this.parseOHLCVResponse(s[0]);
- throw new Error(`Unable to parse AI returned OHLCV data: ${t}
+ throw new Error(`Unable to parse AI returned OHLCV data: ${o}
  Original content: ${e.substring(0, 200)}...`);
  }
  }
  }
- function N(o, e) {
- return new _({ apiKey: o, modelType: e });
+ function $(t, e) {
+ return new _({ apiKey: t, modelType: e });
  }
- var T = /* @__PURE__ */ ((o) => (o.DEEPSEEK_CHAT = "deepseek-chat", o.DEEPSEEK_CHAT_LITE = "deepseek-chat-lite", o.DEEPSEEK_CHAT_PRO = "deepseek-chat-pro", o.DEEPSEEK_CHAT_MAX = "deepseek-chat-max", o.DEEPSEEK_CODER = "deepseek-coder", o.DEEPSEEK_CODER_LITE = "deepseek-coder-lite", o.DEEPSEEK_CODER_PRO = "deepseek-coder-pro", o.DEEPSEEK_MATH = "deepseek-math", o.DEEPSEEK_MATH_PRO = "deepseek-math-pro", o.DEEPSEEK_REASONER = "deepseek-reasoner", o.DEEPSEEK_REASONER_PRO = "deepseek-reasoner-pro", o.DEEPSEEK_VISION = "deepseek-vision", o.DEEPSEEK_VISION_PRO = "deepseek-vision-pro", o.DEEPSEEK_FINANCE = "deepseek-finance", o.DEEPSEEK_LAW = "deepseek-law", o.DEEPSEEK_MEDICAL = "deepseek-medical", o.DEEPSEEK_RESEARCH = "deepseek-research", o.DEEPSEEK_OMNI = "deepseek-omni", o.DEEPSEEK_OMNI_PRO = "deepseek-omni-pro", o.DEEPSEEK_LLM = "deepseek-llm", o.DEEPSEEK_LLM_67B = "deepseek-llm-67b", o.DEEPSEEK_LLM_131B = "deepseek-llm-131b", o))(T || {});
- const k = /* @__PURE__ */ new Map([
+ var v = /* @__PURE__ */ ((t) => (t.DEEPSEEK_CHAT = "deepseek-chat", t.DEEPSEEK_CHAT_LITE = "deepseek-chat-lite", t.DEEPSEEK_CHAT_PRO = "deepseek-chat-pro", t.DEEPSEEK_CHAT_MAX = "deepseek-chat-max", t.DEEPSEEK_CODER = "deepseek-coder", t.DEEPSEEK_CODER_LITE = "deepseek-coder-lite", t.DEEPSEEK_CODER_PRO = "deepseek-coder-pro", t.DEEPSEEK_MATH = "deepseek-math", t.DEEPSEEK_MATH_PRO = "deepseek-math-pro", t.DEEPSEEK_REASONER = "deepseek-reasoner", t.DEEPSEEK_REASONER_PRO = "deepseek-reasoner-pro", t.DEEPSEEK_VISION = "deepseek-vision", t.DEEPSEEK_VISION_PRO = "deepseek-vision-pro", t.DEEPSEEK_FINANCE = "deepseek-finance", t.DEEPSEEK_LAW = "deepseek-law", t.DEEPSEEK_MEDICAL = "deepseek-medical", t.DEEPSEEK_RESEARCH = "deepseek-research", t.DEEPSEEK_OMNI = "deepseek-omni", t.DEEPSEEK_OMNI_PRO = "deepseek-omni-pro", t.DEEPSEEK_LLM = "deepseek-llm", t.DEEPSEEK_LLM_67B = "deepseek-llm-67b", t.DEEPSEEK_LLM_131B = "deepseek-llm-131b", t))(v || {});
+ const y = /* @__PURE__ */ new Map([
  // Chat models
  [
  "deepseek-chat",
@@ -944,6 +958,20 @@ const k = /* @__PURE__ */ new Map([
  }
  ]
  ]);
+ function R(t) {
+ return y.get(t);
+ }
+ function S() {
+ return Array.from(y.values());
+ }
+ function A(t) {
+ for (const e of y.values())
+ if (e.name === t)
+ return e;
+ }
+ function D() {
+ return Array.from(y.keys());
+ }
  class C {
  /**
  * Constructor - Minimal configuration
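Note: these four additions mirror the AliYun helpers for the DeepSeek map; the export block at the end of the diff names them getDeepSeekModel, getAllDeepSeekModels, getDeepSeekModelByName, and getAvailableDeepSeekModelTypes. A sketch (the model key is just an example):

import { getDeepSeekModel, getAvailableDeepSeekModelTypes, DeepSeekModelType } from "ohlcv-ai";

const cfg = getDeepSeekModel(DeepSeekModelType.DEEPSEEK_CHAT); // config or undefined
const keys = getAvailableDeepSeekModelTypes(); // registered DeepSeek model-type keys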
@@ -953,9 +981,9 @@ class C {
  * @param config.baseURL - Base URL for API, default official endpoint
  */
  constructor(e) {
- if (this.apiKey = e.apiKey, this.modelType = e.modelType || T.DEEPSEEK_CHAT, this.timeout = e.timeout || 3e4, this.baseURL = e.baseURL || "https://api.deepseek.com", !this.apiKey)
+ if (this.apiKey = e.apiKey, this.modelType = e.modelType || v.DEEPSEEK_CHAT, this.timeout = e.timeout || 3e4, this.baseURL = e.baseURL || "https://api.deepseek.com", !this.apiKey)
  throw new Error("API Key cannot be empty");
- if (!k.get(this.modelType))
+ if (!y.get(this.modelType))
  throw new Error(`Unsupported model type: ${this.modelType}`);
  }
  /**
@@ -964,20 +992,20 @@ class C {
  * @param options - Chat options
  * @returns AI response
  */
- async chat(e, t) {
+ async chat(e, o) {
  const s = [];
- t?.systemPrompt && s.push({ role: "system", content: t.systemPrompt }), s.push({ role: "user", content: e });
+ o?.systemPrompt && s.push({ role: "system", content: o.systemPrompt }), s.push({ role: "user", content: e });
  const a = await this.chatCompletion(s, {
- temperature: t?.temperature,
- maxTokens: t?.maxTokens,
+ temperature: o?.temperature,
+ maxTokens: o?.maxTokens,
  stream: !1,
- modelType: t?.modelType,
- topP: t?.topP,
- frequencyPenalty: t?.frequencyPenalty,
- presencePenalty: t?.presencePenalty,
- stop: t?.stop,
- tools: t?.tools,
- toolChoice: t?.toolChoice
+ modelType: o?.modelType,
+ topP: o?.topP,
+ frequencyPenalty: o?.frequencyPenalty,
+ presencePenalty: o?.presencePenalty,
+ stop: o?.stop,
+ tools: o?.tools,
+ toolChoice: o?.toolChoice
  });
  return this.extractContent(a);
  }
@@ -987,11 +1015,11 @@ class C {
  * @param options - Chat options
  * @returns Complete API response
  */
- async chatCompletion(e, t) {
- const s = t?.modelType || this.modelType, a = k.get(s);
+ async chatCompletion(e, o) {
+ const s = o?.modelType || this.modelType, a = y.get(s);
  if (!a)
  throw new Error(`Unsupported model type: ${s}`);
- const i = t?.temperature ?? 0.7, n = t?.maxTokens ?? 2e3, r = t?.stream ?? !1, m = t?.topP ?? 1, c = t?.frequencyPenalty ?? 0, p = t?.presencePenalty ?? 0, d = t?.stop, l = t?.tools, h = t?.toolChoice, y = a.endpoint, E = this.buildOpenAIRequest(
+ const i = o?.temperature ?? 0.7, n = o?.maxTokens ?? 2e3, r = o?.stream ?? !1, m = o?.topP ?? 1, c = o?.frequencyPenalty ?? 0, p = o?.presencePenalty ?? 0, d = o?.stop, l = o?.tools, h = o?.toolChoice, x = a.endpoint, k = this.buildOpenAIRequest(
  a.name,
  e,
  i,
@@ -1005,9 +1033,9 @@ class C {
  h
  );
  try {
- return await this.makeRequest(y, E, r);
- } catch (b) {
- throw new Error(`DeepSeek AI request failed: ${b.message}`);
+ return await this.makeRequest(x, k, r);
+ } catch (E) {
+ throw new Error(`DeepSeek AI request failed: ${E.message}`);
  }
  }
  /**
@@ -1016,8 +1044,8 @@ class C {
  * @param callback - Streaming callback function
  * @param options - Chat options
  */
- async chatStream(e, t, s) {
- const a = s?.modelType || this.modelType, i = k.get(a);
+ async chatStream(e, o, s) {
+ const a = s?.modelType || this.modelType, i = y.get(a);
  if (!i)
  throw new Error(`Unsupported model type: ${a}`);
  const n = s?.temperature ?? 0.7, r = s?.maxTokens ?? 2e3, m = s?.topP ?? 1, c = s?.frequencyPenalty ?? 0, p = s?.presencePenalty ?? 0, d = this.buildOpenAIRequest(
@@ -1034,7 +1062,7 @@ class C {
  s?.toolChoice
  );
  try {
- await this.makeStreamRequest(i.endpoint, d, t);
+ await this.makeStreamRequest(i.endpoint, d, o);
  } catch (l) {
  throw new Error(`Streaming request failed: ${l.message}`);
  }
@@ -1047,8 +1075,8 @@ class C {
  * @param options - Chat options
  * @returns Predicted OHLCV array
  */
- async predictingOHLCV(e, t, s, a) {
- const i = t || "Based on these OHLCV data, predict the next period", n = s || 1;
+ async predictingOHLCV(e, o, s, a) {
+ const i = o || "Based on these OHLCV data, predict the next period", n = s || 1;
  if (!Number.isInteger(n) || n <= 0)
  throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
  const r = 50;
@@ -1080,15 +1108,15 @@ Please process this data according to the system instructions. Remember to retur
  { role: "user", content: d }
  ];
  try {
- const h = n * 50 + 100, y = Math.max(a?.maxTokens || 2e3, h), E = await this.chatCompletion(l, {
+ const h = n * 50 + 100, x = Math.max(a?.maxTokens || 2e3, h), k = await this.chatCompletion(l, {
  temperature: a?.temperature || 0.3,
- maxTokens: y,
+ maxTokens: x,
  stream: !1,
- modelType: a?.modelType || T.DEEPSEEK_FINANCE,
+ modelType: a?.modelType || v.DEEPSEEK_FINANCE,
  topP: a?.topP,
  frequencyPenalty: a?.frequencyPenalty,
  presencePenalty: a?.presencePenalty
- }), b = this.extractContent(E), g = this.parseOHLCVResponse(b);
+ }), E = this.extractContent(k), g = this.parseOHLCVResponse(E);
  if (g.length !== n)
  throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
  return g;
@@ -1101,7 +1129,7 @@ Please process this data according to the system instructions. Remember to retur
  * @param modelType - New model type
  */
  setModel(e) {
- if (!k.get(e))
+ if (!y.get(e))
  throw new Error(`Unsupported model type: ${e}`);
  this.modelType = e;
  }
@@ -1109,7 +1137,7 @@ Please process this data according to the system instructions. Remember to retur
  * Get current model configuration
  */
  getCurrentModel() {
- const e = k.get(this.modelType);
+ const e = y.get(this.modelType);
  if (!e)
  throw new Error(`Model configuration does not exist: ${this.modelType}`);
  return {
@@ -1138,17 +1166,17 @@ Please process this data according to the system instructions. Remember to retur
  };
  }
  }
- buildOpenAIRequest(e, t, s, a, i, n, r, m, c, p, d) {
+ buildOpenAIRequest(e, o, s, a, i, n, r, m, c, p, d) {
  const l = {
  model: e,
- messages: t,
+ messages: o,
  temperature: s,
  max_tokens: a,
  stream: i
  };
  return n !== void 0 && (l.top_p = n), r !== void 0 && (l.frequency_penalty = r), m !== void 0 && (l.presence_penalty = m), c && (l.stop = c), p && (l.tools = p), d && (l.tool_choice = d), l;
  }
- async makeRequest(e, t, s) {
+ async makeRequest(e, o, s) {
  const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
  try {
  const n = await fetch(e, {
@@ -1158,7 +1186,7 @@ Please process this data according to the system instructions. Remember to retur
  "Content-Type": "application/json; charset=utf-8",
  Accept: "application/json"
  },
- body: JSON.stringify(t),
+ body: JSON.stringify(o),
  signal: a.signal
  });
  if (clearTimeout(i), !n.ok) {
@@ -1170,8 +1198,8 @@ Please process this data according to the system instructions. Remember to retur
  throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
  }
  }
- async makeStreamRequest(e, t, s) {
- const a = await this.makeRequest(e, t, !0);
+ async makeStreamRequest(e, o, s) {
+ const a = await this.makeRequest(e, o, !0);
  if (!a)
  throw new Error("Failed to get streaming response");
  const i = a.getReader(), n = new TextDecoder("utf-8");
@@ -1218,10 +1246,10 @@ Please process this data according to the system instructions. Remember to retur
  }
  parseOHLCVResponse(e) {
  try {
- const t = JSON.parse(e);
- if (!Array.isArray(t))
+ const o = JSON.parse(e);
+ if (!Array.isArray(o))
  throw new Error("Response is not in array format");
- return t.map((a, i) => {
+ return o.map((a, i) => {
  if (typeof a != "object" || a === null)
  throw new Error(`Element ${i} is not a valid object`);
  const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
@@ -1240,19 +1268,19 @@ Please process this data according to the system instructions. Remember to retur
  volume: Number(p)
  };
  });
- } catch (t) {
+ } catch (o) {
  const s = e.match(/\[[\s\S]*\]/);
  if (s)
  return this.parseOHLCVResponse(s[0]);
- throw new Error(`Unable to parse AI returned OHLCV data: ${t}
+ throw new Error(`Unable to parse AI returned OHLCV data: ${o}
  Original content: ${e.substring(0, 200)}...`);
  }
  }
  }
- function P(o, e) {
- return new C({ apiKey: o, modelType: e });
+ function B(t, e) {
+ return new C({ apiKey: t, modelType: e });
  }
- var f = /* @__PURE__ */ ((o) => (o.GPT4 = "gpt-4", o.GPT4_0314 = "gpt-4-0314", o.GPT4_0613 = "gpt-4-0613", o.GPT4_32K = "gpt-4-32k", o.GPT4_32K_0314 = "gpt-4-32k-0314", o.GPT4_32K_0613 = "gpt-4-32k-0613", o.GPT4_TURBO = "gpt-4-turbo", o.GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview", o.GPT4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09", o.GPT4_OMNI = "gpt-4o", o.GPT4_OMNI_2024_05_13 = "gpt-4o-2024-05-13", o.GPT4_OMNI_MINI = "gpt-4o-mini", o.GPT4_OMNI_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18", o.GPT3_5_TURBO = "gpt-3.5-turbo", o.GPT3_5_TURBO_0125 = "gpt-3.5-turbo-0125", o.GPT3_5_TURBO_1106 = "gpt-3.5-turbo-1106", o.GPT3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct", o.GPT3_5_TURBO_16K = "gpt-3.5-turbo-16k", o.GPT3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613", o.DAVINCI_002 = "davinci-002", o.BABBAGE_002 = "babbage-002", o.TEXT_DAVINCI_003 = "text-davinci-003", o.TEXT_DAVINCI_002 = "text-davinci-002", o.TEXT_DAVINCI_001 = "text-davinci-001", o.TEXT_CURIE_001 = "text-curie-001", o.TEXT_BABBAGE_001 = "text-babbage-001", o.TEXT_ADA_001 = "text-ada-001", o.TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002", o.TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small", o.TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large", o.DALL_E_2 = "dall-e-2", o.DALL_E_3 = "dall-e-3", o.WHISPER_1 = "whisper-1", o.TTS_1 = "tts-1", o.TTS_1_HD = "tts-1-hd", o.MODERATION_LATEST = "text-moderation-latest", o.MODERATION_STABLE = "text-moderation-stable", o.GPT3_5_TURBO_FINETUNED = "ft:gpt-3.5-turbo-0125:personal:", o.GPT4_FINETUNED = "ft:gpt-4-0125-preview:personal:", o.GPT4_VISION_PREVIEW = "gpt-4-vision-preview", o))(f || {});
+ var f = /* @__PURE__ */ ((t) => (t.GPT4 = "gpt-4", t.GPT4_0314 = "gpt-4-0314", t.GPT4_0613 = "gpt-4-0613", t.GPT4_32K = "gpt-4-32k", t.GPT4_32K_0314 = "gpt-4-32k-0314", t.GPT4_32K_0613 = "gpt-4-32k-0613", t.GPT4_TURBO = "gpt-4-turbo", t.GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview", t.GPT4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09", t.GPT4_OMNI = "gpt-4o", t.GPT4_OMNI_2024_05_13 = "gpt-4o-2024-05-13", t.GPT4_OMNI_MINI = "gpt-4o-mini", t.GPT4_OMNI_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18", t.GPT3_5_TURBO = "gpt-3.5-turbo", t.GPT3_5_TURBO_0125 = "gpt-3.5-turbo-0125", t.GPT3_5_TURBO_1106 = "gpt-3.5-turbo-1106", t.GPT3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct", t.GPT3_5_TURBO_16K = "gpt-3.5-turbo-16k", t.GPT3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613", t.DAVINCI_002 = "davinci-002", t.BABBAGE_002 = "babbage-002", t.TEXT_DAVINCI_003 = "text-davinci-003", t.TEXT_DAVINCI_002 = "text-davinci-002", t.TEXT_DAVINCI_001 = "text-davinci-001", t.TEXT_CURIE_001 = "text-curie-001", t.TEXT_BABBAGE_001 = "text-babbage-001", t.TEXT_ADA_001 = "text-ada-001", t.TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002", t.TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small", t.TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large", t.DALL_E_2 = "dall-e-2", t.DALL_E_3 = "dall-e-3", t.WHISPER_1 = "whisper-1", t.TTS_1 = "tts-1", t.TTS_1_HD = "tts-1-hd", t.MODERATION_LATEST = "text-moderation-latest", t.MODERATION_STABLE = "text-moderation-stable", t.GPT3_5_TURBO_FINETUNED = "ft:gpt-3.5-turbo-0125:personal:", t.GPT4_FINETUNED = "ft:gpt-4-0125-preview:personal:", t.GPT4_VISION_PREVIEW = "gpt-4-vision-preview", t))(f || {});
  const u = /* @__PURE__ */ new Map([
  // GPT-4 Series
  [
@@ -1441,52 +1469,52 @@ const u = /* @__PURE__ */ new Map([
  }
  ]
  ]);
- function q(o) {
- return u.get(o);
+ function I(t) {
+ return u.get(t);
  }
  function w() {
  return Array.from(u.values());
  }
- function O(o) {
+ function H(t) {
  for (const e of u.values())
- if (e.name === o)
+ if (e.name === t)
  return e;
  }
- function $() {
+ function K() {
  return Array.from(u.keys());
  }
- function R() {
+ function V() {
  return w().filter(
- (o) => o.capabilities.includes("chat")
+ (t) => t.capabilities.includes("chat")
  );
  }
- function S() {
+ function U() {
  return w().filter(
- (o) => o.capabilities.includes("text-completion")
+ (t) => t.capabilities.includes("text-completion")
  );
  }
- function D() {
+ function Q() {
  return w().filter(
- (o) => o.capabilities.includes("embeddings")
+ (t) => t.capabilities.includes("embeddings")
  );
  }
- function I() {
+ function W() {
  return w().filter(
- (o) => o.capabilities.includes("vision") || o.capabilities.includes("image-generation")
+ (t) => t.capabilities.includes("vision") || t.capabilities.includes("image-generation")
  );
  }
- function A() {
+ function j() {
  return w().filter(
- (o) => o.capabilities.includes("audio-processing") || o.capabilities.includes("speech-recognition") || o.capabilities.includes("speech-synthesis")
+ (t) => t.capabilities.includes("audio-processing") || t.capabilities.includes("speech-recognition") || t.capabilities.includes("speech-synthesis")
  );
  }
- function B() {
+ function F() {
  return w().filter(
- (o) => o.capabilities.includes("multimodal")
+ (t) => t.capabilities.includes("multimodal")
  );
  }
- function H() {
- const o = [
+ function G() {
+ const t = [
  "gpt-4o",
  "gpt-4o-mini",
  "gpt-4-turbo",
@@ -1496,56 +1524,56 @@ function H() {
  /* DALL_E_3 */
  ];
  return w().filter(
- (e) => o.includes(e.name)
+ (e) => t.includes(e.name)
  );
  }
- function K() {
- return w().filter((o) => o.inputCostPer1KTokens && o.inputCostPer1KTokens < 1e-3).sort((o, e) => (o.inputCostPer1KTokens || 0) - (e.inputCostPer1KTokens || 0));
+ function X() {
+ return w().filter((t) => t.inputCostPer1KTokens && t.inputCostPer1KTokens < 1e-3).sort((t, e) => (t.inputCostPer1KTokens || 0) - (e.inputCostPer1KTokens || 0));
  }
- function V() {
- return w().filter((o) => o.contextLength && o.contextLength >= 128e3).sort((o, e) => (e.contextLength || 0) - (o.contextLength || 0));
+ function z() {
+ return w().filter((t) => t.contextLength && t.contextLength >= 128e3).sort((t, e) => (e.contextLength || 0) - (t.contextLength || 0));
  }
- function U(o, e, t = 0) {
- const s = (o.inputCostPer1KTokens || 0) / 1e3 * e, a = (o.outputCostPer1KTokens || 0) / 1e3 * t;
+ function J(t, e, o = 0) {
+ const s = (t.inputCostPer1KTokens || 0) / 1e3 * e, a = (t.outputCostPer1KTokens || 0) / 1e3 * o;
  return {
  inputTokens: e,
- outputTokens: t,
+ outputTokens: o,
  inputCost: s,
  outputCost: a,
  totalCost: s + a
  };
  }
- function Q(o) {
+ function M(t) {
  let e = w();
- switch (o.taskType) {
+ switch (t.taskType) {
  case "chat":
- e = e.filter((t) => t.capabilities.includes("chat"));
+ e = e.filter((o) => o.capabilities.includes("chat"));
  break;
  case "completion":
- e = e.filter((t) => t.capabilities.includes("text-completion"));
+ e = e.filter((o) => o.capabilities.includes("text-completion"));
  break;
  case "embedding":
- e = e.filter((t) => t.capabilities.includes("embeddings"));
+ e = e.filter((o) => o.capabilities.includes("embeddings"));
  break;
  case "image":
  e = e.filter(
- (t) => t.capabilities.includes("image-generation") || t.capabilities.includes("vision")
+ (o) => o.capabilities.includes("image-generation") || o.capabilities.includes("vision")
  );
  break;
  case "audio":
  e = e.filter(
- (t) => t.capabilities.includes("speech-recognition") || t.capabilities.includes("speech-synthesis")
+ (o) => o.capabilities.includes("speech-recognition") || o.capabilities.includes("speech-synthesis")
  );
  break;
  }
- return o.contextLength && (e = e.filter(
- (t) => t.contextLength && t.contextLength >= o.contextLength
- )), o.features && o.features.length > 0 && (e = e.filter(
- (t) => o.features.every(
- (s) => t.supportedFeatures?.includes(s) || t.capabilities.includes(s)
+ return t.contextLength && (e = e.filter(
+ (o) => o.contextLength && o.contextLength >= t.contextLength
+ )), t.features && t.features.length > 0 && (e = e.filter(
+ (o) => t.features.every(
+ (s) => o.supportedFeatures?.includes(s) || o.capabilities.includes(s)
  )
- )), o.budget && e.sort(
- (t, s) => (t.inputCostPer1KTokens || 0) - (s.inputCostPer1KTokens || 0)
+ )), t.budget && e.sort(
+ (o, s) => (o.inputCostPer1KTokens || 0) - (s.inputCostPer1KTokens || 0)
  ), e.slice(0, 5);
  }
  class L {
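Note: the OpenAI-side hunks above are rename-only (o ↔ t, plus new minified binding letters); behavior is unchanged. In particular the standalone estimateCost (J) still prices a call as inputCostPer1KTokens / 1000 * inputTokens + outputCostPer1KTokens / 1000 * outputTokens, and suggestModel (M) still filters by taskType, contextLength, and features, sorts by input cost when budget is set, and returns at most five candidates. A worked sketch of the cost formula, with the per-1K rates purely illustrative:

import { estimateCost, getOpenAIModel, OpenAIModelType } from "ohlcv-ai";

const model = getOpenAIModel(OpenAIModelType.GPT4_OMNI); // looked-up config; rates below are hypothetical
// at, say, $0.005/1K input and $0.015/1K output:
// 2000/1000 * 0.005 + 500/1000 * 0.015 = 0.010 + 0.0075 = 0.0175
const { totalCost } = estimateCost(model, 2000, 500);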
@@ -1569,17 +1597,17 @@ class L {
  * @param options - Chat options
  * @returns AI response
  */
- async chat(e, t) {
+ async chat(e, o) {
  const s = [];
- t?.systemPrompt && s.push({ role: "system", content: t.systemPrompt }), s.push({ role: "user", content: e });
+ o?.systemPrompt && s.push({ role: "system", content: o.systemPrompt }), s.push({ role: "user", content: e });
  const a = await this.chatCompletion(s, {
- temperature: t?.temperature,
- maxTokens: t?.maxTokens,
+ temperature: o?.temperature,
+ maxTokens: o?.maxTokens,
  stream: !1,
- topP: t?.topP,
- frequencyPenalty: t?.frequencyPenalty,
- presencePenalty: t?.presencePenalty,
- stop: t?.stop
+ topP: o?.topP,
+ frequencyPenalty: o?.frequencyPenalty,
+ presencePenalty: o?.presencePenalty,
+ stop: o?.stop
  });
  return this.extractContent(a);
  }
@@ -1589,17 +1617,17 @@ class L {
  * @param options - Chat options
  * @returns Complete API response
  */
- async chatCompletion(e, t) {
- const s = t?.modelType || this.modelType, a = u.get(s);
+ async chatCompletion(e, o) {
+ const s = o?.modelType || this.modelType, a = u.get(s);
  if (!a)
  throw new Error(`Unsupported model type: ${s}`);
- const i = t?.temperature ?? 0.7, n = t?.maxTokens ?? 1e3, r = t?.stream ?? !1, m = a.endpoint, c = this.buildOpenAIRequest(
+ const i = o?.temperature ?? 0.7, n = o?.maxTokens ?? 1e3, r = o?.stream ?? !1, m = a.endpoint, c = this.buildOpenAIRequest(
  a.name,
  e,
  i,
  n,
  r,
- t
+ o
  );
  try {
  return await this.makeRequest(m, c, r);
@@ -1613,7 +1641,7 @@ class L {
  * @param callback - Streaming callback function
  * @param options - Chat options
  */
- async chatStream(e, t, s) {
+ async chatStream(e, o, s) {
  const a = s?.modelType || this.modelType, i = u.get(a);
  if (!i)
  throw new Error(`Unsupported model type: ${a}`);
@@ -1626,7 +1654,7 @@ class L {
  s
  );
  try {
- await this.makeStreamRequest(i.endpoint, m, t);
+ await this.makeStreamRequest(i.endpoint, m, o);
  } catch (c) {
  throw new Error(`Streaming request failed: ${c.message}`);
  }
@@ -1637,8 +1665,8 @@ class L {
  * @param options - Image generation options
  * @returns Generated image URLs
  */
- async generateImage(e, t) {
- const s = t?.modelType || f.DALL_E_3;
+ async generateImage(e, o) {
+ const s = o?.modelType || f.DALL_E_3;
  if (s !== f.DALL_E_2 && s !== f.DALL_E_3)
  throw new Error("Image generation only supports DALL-E models");
  const a = u.get(s);
@@ -1647,17 +1675,17 @@ class L {
  const i = {
  model: a.name,
  prompt: e,
- n: t?.n || 1,
- size: t?.size || "1024x1024",
- quality: t?.quality || "standard",
- style: t?.style || "vivid",
- response_format: t?.responseFormat || "url"
+ n: o?.n || 1,
+ size: o?.size || "1024x1024",
+ quality: o?.quality || "standard",
+ style: o?.style || "vivid",
+ response_format: o?.responseFormat || "url"
  };
  try {
  const n = await this.makeRequest(a.endpoint, i, !1);
  if (n.data && Array.isArray(n.data))
  return n.data.map(
- (r) => t?.responseFormat === "b64_json" ? r.b64_json : r.url
+ (r) => o?.responseFormat === "b64_json" ? r.b64_json : r.url
  );
  throw new Error("Invalid response format from image generation");
  } catch (n) {
@@ -1670,15 +1698,15 @@ class L {
  * @param options - Embedding options
  * @returns Embedding vectors
  */
- async createEmbeddings(e, t) {
- const s = t?.modelType || f.TEXT_EMBEDDING_ADA_002, a = u.get(s);
+ async createEmbeddings(e, o) {
+ const s = o?.modelType || f.TEXT_EMBEDDING_ADA_002, a = u.get(s);
  if (!a)
  throw new Error(`Unsupported model type: ${s}`);
  const i = {
  model: a.name,
  input: e
  };
- t?.dimensions && a.name === f.TEXT_EMBEDDING_3_SMALL && (i.dimensions = t.dimensions);
+ o?.dimensions && a.name === f.TEXT_EMBEDDING_3_SMALL && (i.dimensions = o.dimensions);
  try {
  const n = await this.makeRequest(a.endpoint, i, !1);
  if (n.data && Array.isArray(n.data))
@@ -1694,14 +1722,14 @@ class L {
  * @param options - Transcription options
  * @returns Transcribed text
  */
- async transcribeAudio(e, t) {
- const s = t?.modelType || f.WHISPER_1, a = u.get(s);
+ async transcribeAudio(e, o) {
+ const s = o?.modelType || f.WHISPER_1, a = u.get(s);
  if (!a)
  throw new Error(`Unsupported model type: ${s}`);
  const i = new FormData();
  if (typeof e == "string")
  throw new Error("File path/Base64 support requires additional implementation");
- i.append("file", e), i.append("model", a.name), t?.language && i.append("language", t.language), t?.prompt && i.append("prompt", t.prompt), t?.responseFormat && i.append("response_format", t.responseFormat), t?.temperature !== void 0 && i.append("temperature", t.temperature.toString());
+ i.append("file", e), i.append("model", a.name), o?.language && i.append("language", o.language), o?.prompt && i.append("prompt", o.prompt), o?.responseFormat && i.append("response_format", o.responseFormat), o?.temperature !== void 0 && i.append("temperature", o.temperature.toString());
  try {
  const n = await this.makeFormDataRequest(a.endpoint, i, !1);
  return n.text || n.transcription || "";
@@ -1715,16 +1743,16 @@ class L {
  * @param options - TTS options
  * @returns Audio data (base64 or blob)
  */
- async textToSpeech(e, t) {
- const s = t?.modelType || f.TTS_1_HD, a = u.get(s);
+ async textToSpeech(e, o) {
+ const s = o?.modelType || f.TTS_1_HD, a = u.get(s);
  if (!a)
  throw new Error(`Unsupported model type: ${s}`);
  const i = {
  model: a.name,
  input: e,
- voice: t?.voice || "alloy",
- response_format: t?.responseFormat || "mp3",
- speed: t?.speed || 1
+ voice: o?.voice || "alloy",
+ response_format: o?.responseFormat || "mp3",
+ speed: o?.speed || 1
  };
  try {
  return await this.makeRequest(a.endpoint, i, !1, !0);
@@ -1738,8 +1766,8 @@ class L {
  * @param options - Moderation options
  * @returns Moderation results
  */
- async moderateContent(e, t) {
- const s = t?.modelType || f.MODERATION_LATEST, a = u.get(s);
+ async moderateContent(e, o) {
+ const s = o?.modelType || f.MODERATION_LATEST, a = u.get(s);
  if (!a)
  throw new Error(`Unsupported model type: ${s}`);
  const i = {
@@ -1801,28 +1829,28 @@ class L {
  * @param modelType - Model type (optional, uses current if not provided)
  * @returns Cost estimate
  */
- estimateCost(e, t = 0, s) {
+ estimateCost(e, o = 0, s) {
  const a = s || this.modelType, i = u.get(a);
  if (!i)
  throw new Error(`Unsupported model type: ${a}`);
- const n = (i.inputCostPer1KTokens || 0) / 1e3 * e, r = (i.outputCostPer1KTokens || 0) / 1e3 * t;
+ const n = (i.inputCostPer1KTokens || 0) / 1e3 * e, r = (i.outputCostPer1KTokens || 0) / 1e3 * o;
  return {
  inputCost: n,
  outputCost: r,
  totalCost: n + r
  };
  }
- buildOpenAIRequest(e, t, s, a, i, n) {
+ buildOpenAIRequest(e, o, s, a, i, n) {
  const r = {
  model: e,
- messages: t,
+ messages: o,
  temperature: s,
  max_tokens: a,
  stream: i
  };
  return n?.topP !== void 0 && (r.top_p = n.topP), n?.frequencyPenalty !== void 0 && (r.frequency_penalty = n.frequencyPenalty), n?.presencePenalty !== void 0 && (r.presence_penalty = n.presencePenalty), n?.stop !== void 0 && (r.stop = n.stop), r;
  }
- async makeRequest(e, t, s, a = !1) {
+ async makeRequest(e, o, s, a = !1) {
  const i = new AbortController(), n = setTimeout(() => i.abort(), this.timeout);
  try {
  const r = e.startsWith("http") ? e : `${this.baseURL}${e}`, m = {
@@ -1833,7 +1861,7 @@ class L {
  const c = await fetch(r, {
  method: "POST",
  headers: m,
- body: JSON.stringify(t),
+ body: JSON.stringify(o),
  signal: i.signal
  });
  if (clearTimeout(n), !c.ok) {
@@ -1850,7 +1878,7 @@ class L {
  throw clearTimeout(n), r.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : r;
  }
  }
- async makeFormDataRequest(e, t, s) {
+ async makeFormDataRequest(e, o, s) {
  const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
  try {
  const n = e.startsWith("http") ? e : `${this.baseURL}${e}`, r = {
@@ -1860,7 +1888,7 @@ class L {
  const m = await fetch(n, {
  method: "POST",
  headers: r,
- body: t,
+ body: o,
  signal: a.signal
  });
  if (clearTimeout(i), !m.ok) {
@@ -1877,8 +1905,8 @@ class L {
  throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
  }
  }
- async makeStreamRequest(e, t, s) {
- const a = await this.makeRequest(e, t, !0);
+ async makeStreamRequest(e, o, s) {
+ const a = await this.makeRequest(e, o, !0);
  if (!a)
  throw new Error("Failed to get streaming response");
  const i = a.getReader(), n = new TextDecoder("utf-8");
@@ -1931,8 +1959,8 @@ class L {
  * @param options - Chat options
  * @returns Predicted OHLCV array
  */
- async analyzeOHLCV(e, t, s, a) {
- const i = t || "Based on these OHLCV data, predict the next period", n = s || 1;
+ async analyzeOHLCV(e, o, s, a) {
+ const i = o || "Based on these OHLCV data, predict the next period", n = s || 1;
  if (!Number.isInteger(n) || n <= 0)
  throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
  const r = 50;
@@ -1963,16 +1991,16 @@ Please process this data according to the system instructions. Remember to retur
  { role: "user", content: d }
  ];
  try {
- const h = n * 50 + 100, y = Math.max(a?.maxTokens || 1e3, h), E = await this.chatCompletion(l, {
+ const h = n * 50 + 100, x = Math.max(a?.maxTokens || 1e3, h), k = await this.chatCompletion(l, {
  temperature: a?.temperature || 0.3,
- maxTokens: y,
+ maxTokens: x,
  stream: !1,
  modelType: a?.modelType,
  topP: a?.topP,
  frequencyPenalty: a?.frequencyPenalty,
  presencePenalty: a?.presencePenalty,
  stop: a?.stop
- }), b = this.extractContent(E), g = this.parseOHLCVResponse(b);
+ }), E = this.extractContent(k), g = this.parseOHLCVResponse(E);
  if (g.length !== n)
  throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
  return g;
@@ -1986,10 +2014,10 @@ Please process this data according to the system instructions. Remember to retur
  */
  parseOHLCVResponse(e) {
  try {
- const t = JSON.parse(e);
- if (!Array.isArray(t))
+ const o = JSON.parse(e);
+ if (!Array.isArray(o))
  throw new Error("Response is not in array format");
- return t.map((a, i) => {
+ return o.map((a, i) => {
  if (typeof a != "object" || a === null)
  throw new Error(`Element ${i} is not a valid object`);
  const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
@@ -2008,44 +2036,52 @@ Please process this data according to the system instructions. Remember to retur
  volume: Number(p)
  };
  });
- } catch (t) {
+ } catch (o) {
  const s = e.match(/\[[\s\S]*\]/);
  if (s)
  return this.parseOHLCVResponse(s[0]);
- throw new Error(`Unable to parse AI returned OHLCV data: ${t}
+ throw new Error(`Unable to parse AI returned OHLCV data: ${o}
  Original content: ${e.substring(0, 200)}...`);
  }
  }
  }
- function W(o, e) {
- return new L({ apiKey: o, modelType: e });
+ function Z(t, e) {
+ return new L({ apiKey: t, modelType: e });
  }
  export {
- x as ALIYUN_MODELS,
- v as AliYunModelType,
+ b as ALIYUN_MODELS,
+ T as AliYunModelType,
  _ as AliyunAI,
- k as DEEPSEEK_MODELS,
+ y as DEEPSEEK_MODELS,
  C as DeepSeekAI,
- T as DeepSeekModelType,
+ v as DeepSeekModelType,
  u as OPENAI_MODELS,
  L as OpenAI,
  f as OpenAIModelType,
- N as createAliyunAI,
- P as createDeepSeekAI,
- W as createOpenAI,
- U as estimateCost,
+ $ as createAliyunAI,
+ B as createDeepSeekAI,
+ Z as createOpenAI,
+ J as estimateCost,
+ S as getAllDeepSeekModels,
+ P as getAllModels,
  w as getAllOpenAIModels,
- A as getAudioModelsOpenAI,
- $ as getAvailableOpenAIModelTypes,
- R as getChatModels,
- S as getCompletionModels,
- K as getCostEfficientModels,
- D as getEmbeddingModels,
- V as getHighContextModels,
- H as getLatestModels,
- B as getMultimodalModelsOpenAI,
- q as getOpenAIModel,
- O as getOpenAIModelByName,
- I as getVisionModelsOpenAI,
- Q as suggestModel
+ j as getAudioModelsOpenAI,
+ O as getAvailableAliYunModelTypes,
+ D as getAvailableDeepSeekModelTypes,
+ K as getAvailableOpenAIModelTypes,
+ V as getChatModels,
+ U as getCompletionModels,
+ X as getCostEfficientModels,
+ R as getDeepSeekModel,
+ A as getDeepSeekModelByName,
+ Q as getEmbeddingModels,
+ z as getHighContextModels,
+ G as getLatestModels,
+ N as getModel,
+ q as getModelByName,
+ F as getMultimodalModelsOpenAI,
+ I as getOpenAIModel,
+ H as getOpenAIModelByName,
+ W as getVisionModelsOpenAI,
+ M as suggestModel
  };
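Net effect on the public surface, reading the export block above: every 1.0.7 export name is preserved, and 1.0.8 adds eight lookups — getModel, getAllModels, getModelByName, and getAvailableAliYunModelTypes for the AliYun map, plus getDeepSeekModel, getAllDeepSeekModels, getDeepSeekModelByName, and getAvailableDeepSeekModelTypes for the DeepSeek map. The minified binding renames (x → b, v → T, k → y, and so on) are internal only. A closing sketch of the unchanged factory API, with the API key and candle data as placeholders:

import { createAliyunAI, AliYunModelType } from "ohlcv-ai";

const ai = createAliyunAI("YOUR_API_KEY", AliYunModelType.QWEN_TURBO);
const reply = await ai.chat("ping", { systemPrompt: "Answer briefly." });
// predictingOHLCV(data, prompt?, count?, options?) resolves to `count` parsed OHLCV objects
const next = await ai.predictingOHLCV(candles, undefined, 3, { temperature: 0.3 });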