ohlcv-ai 1.0.7 → 1.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +22 -0
- package/dist/index.js +19 -19
- package/dist/index.mjs +285 -225
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -1,5 +1,5 @@
-var
-const
+var T = /* @__PURE__ */ ((t) => (t.QWEN_TURBO = "qwen-turbo", t.QWEN_PLUS = "qwen-plus", t.QWEN_MAX = "qwen-max", t.QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext", t.QWEN_2_5B = "qwen2.5-0.5b", t.QWEN_2_5B_INSTRUCT = "qwen2.5-0.5b-instruct", t.QWEN_2_5B_7B = "qwen2.5-7b", t.QWEN_2_5B_7B_INSTRUCT = "qwen2.5-7b-instruct", t.QWEN_2_5B_14B = "qwen2.5-14b", t.QWEN_2_5B_14B_INSTRUCT = "qwen2.5-14b-instruct", t.QWEN_2_5B_32B = "qwen2.5-32b", t.QWEN_2_5B_32B_INSTRUCT = "qwen2.5-32b-instruct", t.QWEN_2_5B_72B = "qwen2.5-72b", t.QWEN_2_5B_72B_INSTRUCT = "qwen2.5-72b-instruct", t.QWEN_2_5B_CODER = "qwen2.5-coder", t.QWEN_2_5B_CODER_7B = "qwen2.5-coder-7b", t.QWEN_2_5B_CODER_14B = "qwen2.5-coder-14b", t.QWEN_2_5B_CODER_32B = "qwen2.5-coder-32b", t.QWEN_VL_LITE = "qwen-vl-lite", t.QWEN_VL_PLUS = "qwen-vl-plus", t.QWEN_VL_MAX = "qwen-vl-max", t.QWEN_AUDIO_TURBO = "qwen-audio-turbo", t.QWEN_AUDIO_CHAT = "qwen-audio-chat", t.QWEN_MATH_7B = "qwen-math-7b", t.LLAMA2_7B_CHAT_V2 = "llama2-7b-chat-v2", t.BAICHUAN2_7B_CHAT_V1 = "baichuan2-7b-chat-v1", t.QWEN_FINANCIAL = "qwen-financial", t.QWEN_FINANCIAL_14B = "qwen-financial-14b", t.QWEN_FINANCIAL_32B = "qwen-financial-32b", t.QWEN_MEDICAL = "qwen-medical", t.QWEN_MEDICAL_14B = "qwen-medical-14b", t.QWEN_MEDICAL_32B = "qwen-medical-32b", t.QWEN_OMNI = "qwen-omni", t.QWEN_OMNI_PRO = "qwen-omni-pro", t))(T || {});
+const b = /* @__PURE__ */ new Map([
   [
     "qwen-turbo",
     {
@@ -358,6 +358,27 @@ const x = /* @__PURE__ */ new Map([
     }
   ]
 ]);
+function N(t) {
+  return b.get(t);
+}
+function P() {
+  return Array.from(b.values());
+}
+function q(t) {
+  for (const e of b.values())
+    if (e.name === t)
+      return e;
+}
+function O() {
+  return Array.from(b.keys());
+}
+function $(t) {
+  const e = Object.values(T);
+  for (const o of e)
+    if (o === t)
+      return o;
+  return null;
+}
 class _ {
   /**
    * Constructor - Minimal configuration
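The block above is the first substantive change in 1.0.9: the Aliyun model enum (`T`) and model map (`b`) gain five lookup helpers. Matching the minified names against the export block at the end of this diff, `N`, `P`, `q`, `O`, and `$` surface as `getModel`, `getAllModels`, `getModelByName`, `getAvailableAliYunModelTypes`, and `stringToAliYunModelType`. A minimal usage sketch — the import names come from the export block; everything else is illustrative:

```js
import {
  getModel,                     // N(t): b.get(t) - config lookup by enum value
  getAllModels,                 // P(): every Aliyun model config
  getModelByName,               // q(t): first config whose .name matches
  getAvailableAliYunModelTypes, // O(): all map keys
  stringToAliYunModelType,      // $(t): returns the enum value, or null if unknown
  AliYunModelType,
} from "ohlcv-ai";

const cfg = getModel(AliYunModelType.QWEN_TURBO);
const maybe = stringToAliYunModelType("qwen-plus"); // null when the string is not a known type
```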
@@ -366,9 +387,9 @@ class _ {
    * @param config.timeout - Timeout, default 30 seconds
    */
   constructor(e) {
-    if (this.apiKey = e.apiKey, this.modelType = e.modelType ||
+    if (this.apiKey = e.apiKey, this.modelType = e.modelType || T.QWEN_TURBO, this.timeout = e.timeout || 3e4, !this.apiKey)
       throw new Error("API Key cannot be empty");
-    if (!
+    if (!b.get(this.modelType))
       throw new Error(`Unsupported model type: ${this.modelType}`);
   }
   /**
@@ -377,12 +398,12 @@ class _ {
    * @param options - Chat options
    * @returns AI response
    */
-  async chat(e,
+  async chat(e, o) {
     const s = [];
-
+    o?.systemPrompt && s.push({ role: "system", content: o.systemPrompt }), s.push({ role: "user", content: e });
     const a = await this.chatCompletion(s, {
-      temperature:
-      maxTokens:
+      temperature: o?.temperature,
+      maxTokens: o?.maxTokens,
       stream: !1
     });
     return this.extractContent(a);
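The removed lines in this hunk are truncated by the diff viewer; the added lines show the full 1.0.9 form, where `chat` threads a single options object (`o`) — `systemPrompt`, `temperature`, `maxTokens` — through to `chatCompletion`. A hypothetical call via the public wrapper (`createAliyunAI` per the export block; the key and prompts are placeholders):

```js
import { createAliyunAI, AliYunModelType } from "ohlcv-ai";

// Assumes a DashScope-compatible API key in the environment.
const ai = createAliyunAI(process.env.DASHSCOPE_API_KEY, AliYunModelType.QWEN_TURBO);

const reply = await ai.chat("Summarize this candle series in one line.", {
  systemPrompt: "You are a terse market analyst.",
  temperature: 0.4,
  maxTokens: 256,
});
```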
@@ -393,11 +414,11 @@ class _ {
    * @param options - Chat options
    * @returns Complete API response
    */
-  async chatCompletion(e,
-    const s =
+  async chatCompletion(e, o) {
+    const s = o?.modelType || this.modelType, a = b.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
-    const i =
+    const i = o?.temperature ?? 0.7, n = o?.maxTokens ?? 1e3, r = o?.stream ?? !1, m = a.endpoint, p = a.format === "openai" ? this.buildOpenAIRequest(a.name, e, i, n, r) : this.buildDashScopeRequest(a.name, e, i, n);
     try {
       return await this.makeRequest(m, p, r);
     } catch (d) {
@@ -410,8 +431,8 @@ class _ {
    * @param callback - Streaming callback function
    * @param options - Chat options
    */
-  async chatStream(e,
-    const a = s?.modelType || this.modelType, i =
+  async chatStream(e, o, s) {
+    const a = s?.modelType || this.modelType, i = b.get(a);
     if (!i)
       throw new Error(`Unsupported model type: ${a}`);
     if (i.format !== "openai")
@@ -424,7 +445,7 @@ class _ {
       !0
     );
     try {
-      await this.makeStreamRequest(i.endpoint, m,
+      await this.makeStreamRequest(i.endpoint, m, o);
     } catch (c) {
       throw new Error(`Streaming request failed: ${c.message}`);
     }
@@ -434,7 +455,7 @@ class _ {
    * @param modelType - New model type
    */
   setModel(e) {
-    if (!
+    if (!b.get(e))
       throw new Error(`Unsupported model type: ${e}`);
     this.modelType = e;
   }
@@ -442,7 +463,7 @@ class _ {
    * Get current model configuration
    */
   getCurrentModel() {
-    const e =
+    const e = b.get(this.modelType);
     if (!e)
       throw new Error(`Model configuration does not exist: ${this.modelType}`);
     return {
@@ -471,19 +492,19 @@ class _ {
       };
     }
   }
-  buildOpenAIRequest(e,
+  buildOpenAIRequest(e, o, s, a, i) {
     return {
       model: e,
-      messages:
+      messages: o,
       temperature: s,
       max_tokens: a,
       stream: i
     };
   }
-  buildDashScopeRequest(e,
+  buildDashScopeRequest(e, o, s, a) {
     return {
       model: e,
-      input: { messages:
+      input: { messages: o },
       parameters: {
         temperature: s,
         max_tokens: a,
@@ -491,7 +512,7 @@ class _ {
       }
     };
   }
-  async makeRequest(e,
+  async makeRequest(e, o, s) {
     const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
     try {
       const n = await fetch(e, {
@@ -501,7 +522,7 @@ class _ {
           "Content-Type": "application/json; charset=utf-8",
           Accept: "application/json"
         },
-        body: JSON.stringify(
+        body: JSON.stringify(o),
         signal: a.signal
       });
       if (clearTimeout(i), !n.ok) {
@@ -513,8 +534,8 @@ class _ {
       throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
     }
   }
-  async makeStreamRequest(e,
-    const a = await this.makeRequest(e,
+  async makeStreamRequest(e, o, s) {
+    const a = await this.makeRequest(e, o, !0);
     if (!a)
       throw new Error("Failed to get streaming response");
     const i = a.getReader(), n = new TextDecoder("utf-8");
@@ -538,8 +559,8 @@ class _ {
           return;
         }
         try {
-          const
-
+          const u = JSON.parse(l);
+          u.choices?.[0]?.delta?.content && s(u.choices[0].delta.content, !1);
         } catch {
         }
       }
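The stream reader above decodes SSE chunks and forwards `choices[0].delta.content` to the callback with a `false` done-flag. A sketch of `chatStream` from the caller's side, reusing the `ai` client from the earlier sketch — the argument order (message, callback, options) is read off the minified signature `chatStream(e, o, s)`:

```js
await ai.chatStream(
  "Stream a one-line BTC summary.",
  (chunk, done) => {
    if (!done) process.stdout.write(chunk); // partial delta content as it arrives
  },
  { temperature: 0.7 }
);
```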
@@ -565,8 +586,8 @@ class _ {
    * @param options - Chat options
    * @returns Predicted OHLCV array
    */
-  async predictingOHLCV(e,
-    const i =
+  async predictingOHLCV(e, o, s, a) {
+    const i = o || "Based on these OHLCV data, predict the next period", n = s || 1;
     if (!Number.isInteger(n) || n <= 0)
       throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
     const r = 50;
@@ -597,17 +618,17 @@ Please process this data according to the system instructions. Remember to retur
       { role: "user", content: d }
     ];
     try {
-      const
+      const u = n * 50 + 100, x = Math.max(a?.maxTokens || 1e3, u), v = await this.chatCompletion(l, {
         temperature: a?.temperature || 0.3,
-        maxTokens:
+        maxTokens: x,
         stream: !1,
         modelType: a?.modelType
-      }),
+      }), E = this.extractContent(v), g = this.parseOHLCVResponse(E);
       if (g.length !== n)
         throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
       return g;
-    } catch (
-      throw new Error(`OHLCV analysis failed: ${
+    } catch (u) {
+      throw new Error(`OHLCV analysis failed: ${u.message}`);
     }
   }
   /**
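New in this hunk is a token-budget floor for predictions: `u = n * 50 + 100` reserves roughly 50 tokens per requested OHLCV object plus 100 of headroom, and `Math.max` keeps the larger of that floor and the caller's `maxTokens`. Worked through as plain arithmetic:

```js
// The 1.0.9 heuristic restated: never let maxTokens starve the response.
const count = 20;                        // OHLCV objects requested
const floor = count * 50 + 100;          // 1100
const maxTokens = Math.max(1000, floor); // 1100 - the floor beats the 1e3 default
```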
@@ -616,10 +637,10 @@ Please process this data according to the system instructions. Remember to retur
    */
   parseOHLCVResponse(e) {
     try {
-      const
-      if (!Array.isArray(
+      const o = JSON.parse(e);
+      if (!Array.isArray(o))
         throw new Error("Response is not in array format");
-      return
+      return o.map((a, i) => {
         if (typeof a != "object" || a === null)
           throw new Error(`Element ${i} is not a valid object`);
         const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
@@ -638,20 +659,20 @@ Please process this data according to the system instructions. Remember to retur
           volume: Number(p)
         };
       });
-    } catch (
+    } catch (o) {
       const s = e.match(/\[[\s\S]*\]/);
       if (s)
         return this.parseOHLCVResponse(s[0]);
-      throw new Error(`Unable to parse AI returned OHLCV data: ${
+      throw new Error(`Unable to parse AI returned OHLCV data: ${o}
 Original content: ${e.substring(0, 200)}...`);
     }
   }
 }
-function
-  return new _({ apiKey:
+function R(t, e) {
+  return new _({ apiKey: t, modelType: e });
 }
-var
-const
+var k = /* @__PURE__ */ ((t) => (t.DEEPSEEK_CHAT = "deepseek-chat", t.DEEPSEEK_CHAT_LITE = "deepseek-chat-lite", t.DEEPSEEK_CHAT_PRO = "deepseek-chat-pro", t.DEEPSEEK_CHAT_MAX = "deepseek-chat-max", t.DEEPSEEK_CODER = "deepseek-coder", t.DEEPSEEK_CODER_LITE = "deepseek-coder-lite", t.DEEPSEEK_CODER_PRO = "deepseek-coder-pro", t.DEEPSEEK_MATH = "deepseek-math", t.DEEPSEEK_MATH_PRO = "deepseek-math-pro", t.DEEPSEEK_REASONER = "deepseek-reasoner", t.DEEPSEEK_REASONER_PRO = "deepseek-reasoner-pro", t.DEEPSEEK_VISION = "deepseek-vision", t.DEEPSEEK_VISION_PRO = "deepseek-vision-pro", t.DEEPSEEK_FINANCE = "deepseek-finance", t.DEEPSEEK_LAW = "deepseek-law", t.DEEPSEEK_MEDICAL = "deepseek-medical", t.DEEPSEEK_RESEARCH = "deepseek-research", t.DEEPSEEK_OMNI = "deepseek-omni", t.DEEPSEEK_OMNI_PRO = "deepseek-omni-pro", t.DEEPSEEK_LLM = "deepseek-llm", t.DEEPSEEK_LLM_67B = "deepseek-llm-67b", t.DEEPSEEK_LLM_131B = "deepseek-llm-131b", t))(k || {});
+const y = /* @__PURE__ */ new Map([
   // Chat models
   [
     "deepseek-chat",
@@ -944,6 +965,27 @@ const k = /* @__PURE__ */ new Map([
     }
   ]
 ]);
+function S(t) {
+  return y.get(t);
+}
+function A() {
+  return Array.from(y.values());
+}
+function D(t) {
+  for (const e of y.values())
+    if (e.name === t)
+      return e;
+}
+function B() {
+  return Array.from(y.keys());
+}
+function I(t) {
+  const e = Object.values(k);
+  for (const o of e)
+    if (o === t)
+      return o;
+  return null;
+}
 class C {
   /**
    * Constructor - Minimal configuration
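The DeepSeek map receives the same five helpers as the Aliyun one. Per the export block, `S`, `A`, `D`, `B`, and `I` become `getDeepSeekModel`, `getAllDeepSeekModels`, `getDeepSeekModelByName`, `getAvailableDeepSeekModelTypes`, and `stringToDeepSeekModelType`. Sketch, using only names visible in the diff:

```js
import {
  getDeepSeekModel,               // S
  getAllDeepSeekModels,           // A
  getDeepSeekModelByName,         // D
  getAvailableDeepSeekModelTypes, // B
  stringToDeepSeekModelType,      // I
  DeepSeekModelType,
} from "ohlcv-ai";

const cfg = getDeepSeekModel(DeepSeekModelType.DEEPSEEK_CHAT);
```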
@@ -953,9 +995,9 @@ class C {
    * @param config.baseURL - Base URL for API, default official endpoint
    */
   constructor(e) {
-    if (this.apiKey = e.apiKey, this.modelType = e.modelType ||
+    if (this.apiKey = e.apiKey, this.modelType = e.modelType || k.DEEPSEEK_CHAT, this.timeout = e.timeout || 3e4, this.baseURL = e.baseURL || "https://api.deepseek.com", !this.apiKey)
       throw new Error("API Key cannot be empty");
-    if (!
+    if (!y.get(this.modelType))
       throw new Error(`Unsupported model type: ${this.modelType}`);
   }
   /**
@@ -964,20 +1006,20 @@ class C {
    * @param options - Chat options
    * @returns AI response
    */
-  async chat(e,
+  async chat(e, o) {
     const s = [];
-
+    o?.systemPrompt && s.push({ role: "system", content: o.systemPrompt }), s.push({ role: "user", content: e });
     const a = await this.chatCompletion(s, {
-      temperature:
-      maxTokens:
+      temperature: o?.temperature,
+      maxTokens: o?.maxTokens,
       stream: !1,
-      modelType:
-      topP:
-      frequencyPenalty:
-      presencePenalty:
-      stop:
-      tools:
-      toolChoice:
+      modelType: o?.modelType,
+      topP: o?.topP,
+      frequencyPenalty: o?.frequencyPenalty,
+      presencePenalty: o?.presencePenalty,
+      stop: o?.stop,
+      tools: o?.tools,
+      toolChoice: o?.toolChoice
     });
     return this.extractContent(a);
   }
@@ -987,11 +1029,11 @@ class C {
    * @param options - Chat options
    * @returns Complete API response
    */
-  async chatCompletion(e,
-    const s =
+  async chatCompletion(e, o) {
+    const s = o?.modelType || this.modelType, a = y.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
-    const i =
+    const i = o?.temperature ?? 0.7, n = o?.maxTokens ?? 2e3, r = o?.stream ?? !1, m = o?.topP ?? 1, c = o?.frequencyPenalty ?? 0, p = o?.presencePenalty ?? 0, d = o?.stop, l = o?.tools, u = o?.toolChoice, x = a.endpoint, v = this.buildOpenAIRequest(
       a.name,
       e,
       i,
@@ -1002,12 +1044,12 @@ class C {
       p,
       d,
       l,
-
+      u
     );
     try {
-      return await this.makeRequest(
-    } catch (
-      throw new Error(`DeepSeek AI request failed: ${
+      return await this.makeRequest(x, v, r);
+    } catch (E) {
+      throw new Error(`DeepSeek AI request failed: ${E.message}`);
     }
   }
   /**
@@ -1016,8 +1058,8 @@ class C {
    * @param callback - Streaming callback function
    * @param options - Chat options
    */
-  async chatStream(e,
-    const a = s?.modelType || this.modelType, i =
+  async chatStream(e, o, s) {
+    const a = s?.modelType || this.modelType, i = y.get(a);
     if (!i)
       throw new Error(`Unsupported model type: ${a}`);
     const n = s?.temperature ?? 0.7, r = s?.maxTokens ?? 2e3, m = s?.topP ?? 1, c = s?.frequencyPenalty ?? 0, p = s?.presencePenalty ?? 0, d = this.buildOpenAIRequest(
@@ -1034,7 +1076,7 @@ class C {
       s?.toolChoice
     );
     try {
-      await this.makeStreamRequest(i.endpoint, d,
+      await this.makeStreamRequest(i.endpoint, d, o);
     } catch (l) {
       throw new Error(`Streaming request failed: ${l.message}`);
     }
@@ -1047,8 +1089,8 @@ class C {
    * @param options - Chat options
    * @returns Predicted OHLCV array
    */
-  async predictingOHLCV(e,
-    const i =
+  async predictingOHLCV(e, o, s, a) {
+    const i = o || "Based on these OHLCV data, predict the next period", n = s || 1;
     if (!Number.isInteger(n) || n <= 0)
       throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
     const r = 50;
@@ -1080,20 +1122,20 @@ Please process this data according to the system instructions. Remember to retur
       { role: "user", content: d }
     ];
     try {
-      const
+      const u = n * 50 + 100, x = Math.max(a?.maxTokens || 2e3, u), v = await this.chatCompletion(l, {
         temperature: a?.temperature || 0.3,
-        maxTokens:
+        maxTokens: x,
         stream: !1,
-        modelType: a?.modelType ||
+        modelType: a?.modelType || k.DEEPSEEK_FINANCE,
         topP: a?.topP,
         frequencyPenalty: a?.frequencyPenalty,
         presencePenalty: a?.presencePenalty
-      }),
+      }), E = this.extractContent(v), g = this.parseOHLCVResponse(E);
       if (g.length !== n)
         throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
       return g;
-    } catch (
-      throw new Error(`OHLCV analysis failed: ${
+    } catch (u) {
+      throw new Error(`OHLCV analysis failed: ${u.message}`);
     }
   }
   /**
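Note the behavioral change buried in the hunk above: when no `modelType` is supplied, DeepSeek's `predictingOHLCV` now falls back to `k.DEEPSEEK_FINANCE` rather than the instance default. A hypothetical call, assuming a `deepseek` client from `createDeepSeekAI` — the positional order (data, prompt, count, options) is inferred from the minified `predictingOHLCV(e, o, s, a)`:

```js
const candles = [
  { open: 100, high: 105, low: 99, close: 104, volume: 1200 },
  { open: 104, high: 108, low: 103, close: 107, volume: 1500 },
];
// An undefined prompt falls back to "Based on these OHLCV data, predict the next period".
const next = await deepseek.predictingOHLCV(candles, undefined, 3, { temperature: 0.3 });
// Resolves to exactly 3 { open, high, low, close, volume } objects, or throws.
```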
@@ -1101,7 +1143,7 @@ Please process this data according to the system instructions. Remember to retur
    * @param modelType - New model type
    */
   setModel(e) {
-    if (!
+    if (!y.get(e))
       throw new Error(`Unsupported model type: ${e}`);
     this.modelType = e;
   }
@@ -1109,7 +1151,7 @@ Please process this data according to the system instructions. Remember to retur
    * Get current model configuration
    */
   getCurrentModel() {
-    const e =
+    const e = y.get(this.modelType);
     if (!e)
       throw new Error(`Model configuration does not exist: ${this.modelType}`);
     return {
@@ -1138,17 +1180,17 @@ Please process this data according to the system instructions. Remember to retur
       };
     }
   }
-  buildOpenAIRequest(e,
+  buildOpenAIRequest(e, o, s, a, i, n, r, m, c, p, d) {
     const l = {
       model: e,
-      messages:
+      messages: o,
       temperature: s,
       max_tokens: a,
       stream: i
     };
     return n !== void 0 && (l.top_p = n), r !== void 0 && (l.frequency_penalty = r), m !== void 0 && (l.presence_penalty = m), c && (l.stop = c), p && (l.tools = p), d && (l.tool_choice = d), l;
   }
-  async makeRequest(e,
+  async makeRequest(e, o, s) {
     const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
     try {
       const n = await fetch(e, {
@@ -1158,7 +1200,7 @@ Please process this data according to the system instructions. Remember to retur
           "Content-Type": "application/json; charset=utf-8",
           Accept: "application/json"
         },
-        body: JSON.stringify(
+        body: JSON.stringify(o),
         signal: a.signal
       });
       if (clearTimeout(i), !n.ok) {
@@ -1170,8 +1212,8 @@ Please process this data according to the system instructions. Remember to retur
       throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
     }
   }
-  async makeStreamRequest(e,
-    const a = await this.makeRequest(e,
+  async makeStreamRequest(e, o, s) {
+    const a = await this.makeRequest(e, o, !0);
     if (!a)
       throw new Error("Failed to get streaming response");
     const i = a.getReader(), n = new TextDecoder("utf-8");
@@ -1195,8 +1237,8 @@ Please process this data according to the system instructions. Remember to retur
           return;
         }
         try {
-          const
-
+          const u = JSON.parse(l);
+          u.choices?.[0]?.delta?.content && s(u.choices[0].delta.content, !1);
         } catch {
         }
       }
@@ -1218,10 +1260,10 @@ Please process this data according to the system instructions. Remember to retur
   }
   parseOHLCVResponse(e) {
     try {
-      const
-      if (!Array.isArray(
+      const o = JSON.parse(e);
+      if (!Array.isArray(o))
         throw new Error("Response is not in array format");
-      return
+      return o.map((a, i) => {
         if (typeof a != "object" || a === null)
           throw new Error(`Element ${i} is not a valid object`);
         const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
@@ -1240,20 +1282,20 @@ Please process this data according to the system instructions. Remember to retur
           volume: Number(p)
         };
       });
-    } catch (
+    } catch (o) {
       const s = e.match(/\[[\s\S]*\]/);
       if (s)
         return this.parseOHLCVResponse(s[0]);
-      throw new Error(`Unable to parse AI returned OHLCV data: ${
+      throw new Error(`Unable to parse AI returned OHLCV data: ${o}
 Original content: ${e.substring(0, 200)}...`);
     }
   }
 }
-function
-  return new C({ apiKey:
+function H(t, e) {
+  return new C({ apiKey: t, modelType: e });
 }
-var f = /* @__PURE__ */ ((
-const
+var f = /* @__PURE__ */ ((t) => (t.GPT4 = "gpt-4", t.GPT4_0314 = "gpt-4-0314", t.GPT4_0613 = "gpt-4-0613", t.GPT4_32K = "gpt-4-32k", t.GPT4_32K_0314 = "gpt-4-32k-0314", t.GPT4_32K_0613 = "gpt-4-32k-0613", t.GPT4_TURBO = "gpt-4-turbo", t.GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview", t.GPT4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09", t.GPT4_OMNI = "gpt-4o", t.GPT4_OMNI_2024_05_13 = "gpt-4o-2024-05-13", t.GPT4_OMNI_MINI = "gpt-4o-mini", t.GPT4_OMNI_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18", t.GPT3_5_TURBO = "gpt-3.5-turbo", t.GPT3_5_TURBO_0125 = "gpt-3.5-turbo-0125", t.GPT3_5_TURBO_1106 = "gpt-3.5-turbo-1106", t.GPT3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct", t.GPT3_5_TURBO_16K = "gpt-3.5-turbo-16k", t.GPT3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613", t.DAVINCI_002 = "davinci-002", t.BABBAGE_002 = "babbage-002", t.TEXT_DAVINCI_003 = "text-davinci-003", t.TEXT_DAVINCI_002 = "text-davinci-002", t.TEXT_DAVINCI_001 = "text-davinci-001", t.TEXT_CURIE_001 = "text-curie-001", t.TEXT_BABBAGE_001 = "text-babbage-001", t.TEXT_ADA_001 = "text-ada-001", t.TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002", t.TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small", t.TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large", t.DALL_E_2 = "dall-e-2", t.DALL_E_3 = "dall-e-3", t.WHISPER_1 = "whisper-1", t.TTS_1 = "tts-1", t.TTS_1_HD = "tts-1-hd", t.MODERATION_LATEST = "text-moderation-latest", t.MODERATION_STABLE = "text-moderation-stable", t.GPT3_5_TURBO_FINETUNED = "ft:gpt-3.5-turbo-0125:personal:", t.GPT4_FINETUNED = "ft:gpt-4-0125-preview:personal:", t.GPT4_VISION_PREVIEW = "gpt-4-vision-preview", t))(f || {});
+const h = /* @__PURE__ */ new Map([
   // GPT-4 Series
   [
     "gpt-4",
@@ -1441,52 +1483,52 @@ const u = /* @__PURE__ */ new Map([
     }
   ]
 ]);
-function
-  return
+function K(t) {
+  return h.get(t);
 }
 function w() {
-  return Array.from(
+  return Array.from(h.values());
 }
-function
-  for (const e of
-    if (e.name ===
+function V(t) {
+  for (const e of h.values())
+    if (e.name === t)
       return e;
 }
-function
-  return Array.from(
+function U() {
+  return Array.from(h.keys());
 }
-function
+function Q() {
   return w().filter(
-    (
+    (t) => t.capabilities.includes("chat")
   );
 }
-function
+function j() {
   return w().filter(
-    (
+    (t) => t.capabilities.includes("text-completion")
   );
 }
-function
+function W() {
   return w().filter(
-    (
+    (t) => t.capabilities.includes("embeddings")
   );
 }
-function
+function F() {
   return w().filter(
-    (
+    (t) => t.capabilities.includes("vision") || t.capabilities.includes("image-generation")
   );
 }
-function
+function G() {
   return w().filter(
-    (
+    (t) => t.capabilities.includes("audio-processing") || t.capabilities.includes("speech-recognition") || t.capabilities.includes("speech-synthesis")
   );
 }
-function
+function X() {
   return w().filter(
-    (
+    (t) => t.capabilities.includes("multimodal")
   );
 }
-function
-  const
+function z() {
+  const t = [
     "gpt-4o",
     "gpt-4o-mini",
     "gpt-4-turbo",
@@ -1496,58 +1538,65 @@ function H() {
     /* DALL_E_3 */
   ];
   return w().filter(
-    (e) =>
+    (e) => t.includes(e.name)
   );
 }
-function
-  return w().filter((
+function M() {
+  return w().filter((t) => t.inputCostPer1KTokens && t.inputCostPer1KTokens < 1e-3).sort((t, e) => (t.inputCostPer1KTokens || 0) - (e.inputCostPer1KTokens || 0));
 }
-function
-  return w().filter((
+function J() {
+  return w().filter((t) => t.contextLength && t.contextLength >= 128e3).sort((t, e) => (e.contextLength || 0) - (t.contextLength || 0));
 }
-function
-  const s = (
+function Z(t, e, o = 0) {
+  const s = (t.inputCostPer1KTokens || 0) / 1e3 * e, a = (t.outputCostPer1KTokens || 0) / 1e3 * o;
   return {
     inputTokens: e,
-    outputTokens:
+    outputTokens: o,
    inputCost: s,
    outputCost: a,
    totalCost: s + a
  };
 }
-function
+function Y(t) {
   let e = w();
-  switch (
+  switch (t.taskType) {
     case "chat":
-      e = e.filter((
+      e = e.filter((o) => o.capabilities.includes("chat"));
       break;
     case "completion":
-      e = e.filter((
+      e = e.filter((o) => o.capabilities.includes("text-completion"));
       break;
     case "embedding":
-      e = e.filter((
+      e = e.filter((o) => o.capabilities.includes("embeddings"));
       break;
     case "image":
       e = e.filter(
-        (
+        (o) => o.capabilities.includes("image-generation") || o.capabilities.includes("vision")
       );
       break;
     case "audio":
       e = e.filter(
-        (
+        (o) => o.capabilities.includes("speech-recognition") || o.capabilities.includes("speech-synthesis")
       );
       break;
   }
-  return
-    (
-  )),
-    (
-      (s) =>
+  return t.contextLength && (e = e.filter(
+    (o) => o.contextLength && o.contextLength >= t.contextLength
+  )), t.features && t.features.length > 0 && (e = e.filter(
+    (o) => t.features.every(
+      (s) => o.supportedFeatures?.includes(s) || o.capabilities.includes(s)
     )
-  )),
-    (
+  )), t.budget && e.sort(
+    (o, s) => (o.inputCostPer1KTokens || 0) - (s.inputCostPer1KTokens || 0)
   ), e.slice(0, 5);
 }
+function ee(t) {
+  const e = Object.values(f);
+  for (const o of e)
+    if (o === t)
+      return o;
+  return null;
+}
 class L {
   /**
    * Constructor - Minimal configuration
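The OpenAI module also gains standalone helpers here; per the export block, `Z` and `Y` surface as `estimateCost` and `suggestModel`. `Z` takes a model config plus input/output token counts; `Y` filters by `taskType`, `contextLength`, and `features`, sorts cheapest-first when `budget` is truthy, and returns at most five candidates. A sketch using those exported names (token counts are illustrative):

```js
import { estimateCost, getOpenAIModel, suggestModel, OpenAIModelType } from "ohlcv-ai";

const model = getOpenAIModel(OpenAIModelType.GPT4_OMNI_MINI);
const quote = estimateCost(model, 2000, 500);
// -> { inputTokens, outputTokens, inputCost, outputCost, totalCost }

const picks = suggestModel({ taskType: "chat", contextLength: 32e3, budget: true });
// Up to 5 chat-capable models with >= 32k context, cheapest input cost first.
```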
@@ -1560,7 +1609,7 @@ class L {
   constructor(e) {
     if (this.apiKey = e.apiKey, this.modelType = e.modelType || f.GPT3_5_TURBO, this.timeout = e.timeout || 3e4, this.organization = e.organization, this.baseURL = e.baseURL || "https://api.openai.com/v1", !this.apiKey)
       throw new Error("API Key cannot be empty");
-    if (!
+    if (!h.get(this.modelType))
       throw new Error(`Unsupported model type: ${this.modelType}`);
   }
   /**
@@ -1569,17 +1618,17 @@ class L {
    * @param options - Chat options
    * @returns AI response
    */
-  async chat(e,
+  async chat(e, o) {
     const s = [];
-
+    o?.systemPrompt && s.push({ role: "system", content: o.systemPrompt }), s.push({ role: "user", content: e });
     const a = await this.chatCompletion(s, {
-      temperature:
-      maxTokens:
+      temperature: o?.temperature,
+      maxTokens: o?.maxTokens,
       stream: !1,
-      topP:
-      frequencyPenalty:
-      presencePenalty:
-      stop:
+      topP: o?.topP,
+      frequencyPenalty: o?.frequencyPenalty,
+      presencePenalty: o?.presencePenalty,
+      stop: o?.stop
     });
     return this.extractContent(a);
   }
@@ -1589,17 +1638,17 @@ class L {
    * @param options - Chat options
    * @returns Complete API response
    */
-  async chatCompletion(e,
-    const s =
+  async chatCompletion(e, o) {
+    const s = o?.modelType || this.modelType, a = h.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
-    const i =
+    const i = o?.temperature ?? 0.7, n = o?.maxTokens ?? 1e3, r = o?.stream ?? !1, m = a.endpoint, c = this.buildOpenAIRequest(
       a.name,
       e,
       i,
       n,
       r,
-
+      o
     );
     try {
       return await this.makeRequest(m, c, r);
@@ -1613,8 +1662,8 @@ class L {
    * @param callback - Streaming callback function
    * @param options - Chat options
    */
-  async chatStream(e,
-    const a = s?.modelType || this.modelType, i =
+  async chatStream(e, o, s) {
+    const a = s?.modelType || this.modelType, i = h.get(a);
     if (!i)
       throw new Error(`Unsupported model type: ${a}`);
     const n = s?.temperature ?? 0.7, r = s?.maxTokens ?? 1e3, m = this.buildOpenAIRequest(
@@ -1626,7 +1675,7 @@ class L {
       s
     );
     try {
-      await this.makeStreamRequest(i.endpoint, m,
+      await this.makeStreamRequest(i.endpoint, m, o);
     } catch (c) {
       throw new Error(`Streaming request failed: ${c.message}`);
     }
@@ -1637,27 +1686,27 @@ class L {
    * @param options - Image generation options
    * @returns Generated image URLs
    */
-  async generateImage(e,
-    const s =
+  async generateImage(e, o) {
+    const s = o?.modelType || f.DALL_E_3;
     if (s !== f.DALL_E_2 && s !== f.DALL_E_3)
       throw new Error("Image generation only supports DALL-E models");
-    const a =
+    const a = h.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
     const i = {
       model: a.name,
       prompt: e,
-      n:
-      size:
-      quality:
-      style:
-      response_format:
+      n: o?.n || 1,
+      size: o?.size || "1024x1024",
+      quality: o?.quality || "standard",
+      style: o?.style || "vivid",
+      response_format: o?.responseFormat || "url"
     };
     try {
       const n = await this.makeRequest(a.endpoint, i, !1);
       if (n.data && Array.isArray(n.data))
         return n.data.map(
-          (r) =>
+          (r) => o?.responseFormat === "b64_json" ? r.b64_json : r.url
         );
       throw new Error("Invalid response format from image generation");
     } catch (n) {
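`generateImage` previously appeared with its option defaults truncated in this view; the 1.0.9 lines spell them out. A hypothetical call, assuming an `openai` client from `createOpenAI` — the option names mirror the request fields in the hunk, with `responseFormat` mapping to `response_format`:

```js
const urls = await openai.generateImage("candlestick chart, oil painting", {
  // modelType defaults to f.DALL_E_3 when omitted
  n: 1,
  size: "1024x1024",
  quality: "standard",
  style: "vivid",
  responseFormat: "url", // "b64_json" returns base64 payloads instead
});
```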
@@ -1670,15 +1719,15 @@ class L {
    * @param options - Embedding options
    * @returns Embedding vectors
    */
-  async createEmbeddings(e,
-    const s =
+  async createEmbeddings(e, o) {
+    const s = o?.modelType || f.TEXT_EMBEDDING_ADA_002, a = h.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
     const i = {
       model: a.name,
       input: e
     };
-
+    o?.dimensions && a.name === f.TEXT_EMBEDDING_3_SMALL && (i.dimensions = o.dimensions);
     try {
       const n = await this.makeRequest(a.endpoint, i, !1);
       if (n.data && Array.isArray(n.data))
@@ -1694,14 +1743,14 @@ class L {
    * @param options - Transcription options
    * @returns Transcribed text
    */
-  async transcribeAudio(e,
-    const s =
+  async transcribeAudio(e, o) {
+    const s = o?.modelType || f.WHISPER_1, a = h.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
     const i = new FormData();
     if (typeof e == "string")
       throw new Error("File path/Base64 support requires additional implementation");
-    i.append("file", e), i.append("model", a.name),
+    i.append("file", e), i.append("model", a.name), o?.language && i.append("language", o.language), o?.prompt && i.append("prompt", o.prompt), o?.responseFormat && i.append("response_format", o.responseFormat), o?.temperature !== void 0 && i.append("temperature", o.temperature.toString());
     try {
       const n = await this.makeFormDataRequest(a.endpoint, i, !1);
       return n.text || n.transcription || "";
@@ -1715,16 +1764,16 @@ class L {
    * @param options - TTS options
    * @returns Audio data (base64 or blob)
    */
-  async textToSpeech(e,
-    const s =
+  async textToSpeech(e, o) {
+    const s = o?.modelType || f.TTS_1_HD, a = h.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
     const i = {
       model: a.name,
       input: e,
-      voice:
-      response_format:
-      speed:
+      voice: o?.voice || "alloy",
+      response_format: o?.responseFormat || "mp3",
+      speed: o?.speed || 1
     };
     try {
       return await this.makeRequest(a.endpoint, i, !1, !0);
@@ -1738,8 +1787,8 @@ class L {
    * @param options - Moderation options
    * @returns Moderation results
    */
-  async moderateContent(e,
-    const s =
+  async moderateContent(e, o) {
+    const s = o?.modelType || f.MODERATION_LATEST, a = h.get(s);
     if (!a)
       throw new Error(`Unsupported model type: ${s}`);
     const i = {
@@ -1757,7 +1806,7 @@ class L {
    * @param modelType - New model type
    */
   setModel(e) {
-    if (!
+    if (!h.get(e))
       throw new Error(`Unsupported model type: ${e}`);
     this.modelType = e;
   }
@@ -1765,7 +1814,7 @@ class L {
    * Get current model configuration
    */
   getCurrentModel() {
-    const e =
+    const e = h.get(this.modelType);
     if (!e)
       throw new Error(`Model configuration does not exist: ${this.modelType}`);
     return {
@@ -1801,28 +1850,28 @@ class L {
    * @param modelType - Model type (optional, uses current if not provided)
    * @returns Cost estimate
    */
-  estimateCost(e,
-    const a = s || this.modelType, i =
+  estimateCost(e, o = 0, s) {
+    const a = s || this.modelType, i = h.get(a);
     if (!i)
       throw new Error(`Unsupported model type: ${a}`);
-    const n = (i.inputCostPer1KTokens || 0) / 1e3 * e, r = (i.outputCostPer1KTokens || 0) / 1e3 *
+    const n = (i.inputCostPer1KTokens || 0) / 1e3 * e, r = (i.outputCostPer1KTokens || 0) / 1e3 * o;
     return {
       inputCost: n,
       outputCost: r,
       totalCost: n + r
     };
   }
-  buildOpenAIRequest(e,
+  buildOpenAIRequest(e, o, s, a, i, n) {
     const r = {
       model: e,
-      messages:
+      messages: o,
       temperature: s,
       max_tokens: a,
       stream: i
     };
     return n?.topP !== void 0 && (r.top_p = n.topP), n?.frequencyPenalty !== void 0 && (r.frequency_penalty = n.frequencyPenalty), n?.presencePenalty !== void 0 && (r.presence_penalty = n.presencePenalty), n?.stop !== void 0 && (r.stop = n.stop), r;
   }
-  async makeRequest(e,
+  async makeRequest(e, o, s, a = !1) {
     const i = new AbortController(), n = setTimeout(() => i.abort(), this.timeout);
     try {
       const r = e.startsWith("http") ? e : `${this.baseURL}${e}`, m = {
@@ -1833,7 +1882,7 @@ class L {
       const c = await fetch(r, {
         method: "POST",
         headers: m,
-        body: JSON.stringify(
+        body: JSON.stringify(o),
         signal: i.signal
       });
       if (clearTimeout(n), !c.ok) {
@@ -1850,7 +1899,7 @@ class L {
       throw clearTimeout(n), r.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : r;
     }
   }
-  async makeFormDataRequest(e,
+  async makeFormDataRequest(e, o, s) {
     const a = new AbortController(), i = setTimeout(() => a.abort(), this.timeout);
     try {
       const n = e.startsWith("http") ? e : `${this.baseURL}${e}`, r = {
@@ -1860,7 +1909,7 @@ class L {
       const m = await fetch(n, {
         method: "POST",
         headers: r,
-        body:
+        body: o,
         signal: a.signal
       });
       if (clearTimeout(i), !m.ok) {
@@ -1877,8 +1926,8 @@ class L {
       throw clearTimeout(i), n.name === "AbortError" ? new Error(`Request timeout (${this.timeout}ms)`) : n;
     }
   }
-  async makeStreamRequest(e,
-    const a = await this.makeRequest(e,
+  async makeStreamRequest(e, o, s) {
+    const a = await this.makeRequest(e, o, !0);
     if (!a)
       throw new Error("Failed to get streaming response");
     const i = a.getReader(), n = new TextDecoder("utf-8");
@@ -1902,8 +1951,8 @@ class L {
           return;
         }
         try {
-          const
-
+          const u = JSON.parse(l);
+          u.choices?.[0]?.delta?.content && s(u.choices[0].delta.content, !1);
         } catch {
         }
       }
@@ -1931,8 +1980,8 @@ class L {
    * @param options - Chat options
    * @returns Predicted OHLCV array
    */
-  async analyzeOHLCV(e,
-    const i =
+  async analyzeOHLCV(e, o, s, a) {
+    const i = o || "Based on these OHLCV data, predict the next period", n = s || 1;
     if (!Number.isInteger(n) || n <= 0)
       throw new Error(`Invalid count parameter: ${n}. Must be a positive integer.`);
     const r = 50;
@@ -1963,21 +2012,21 @@ Please process this data according to the system instructions. Remember to retur
       { role: "user", content: d }
     ];
     try {
-      const
+      const u = n * 50 + 100, x = Math.max(a?.maxTokens || 1e3, u), v = await this.chatCompletion(l, {
         temperature: a?.temperature || 0.3,
-        maxTokens:
+        maxTokens: x,
         stream: !1,
         modelType: a?.modelType,
         topP: a?.topP,
         frequencyPenalty: a?.frequencyPenalty,
         presencePenalty: a?.presencePenalty,
         stop: a?.stop
-      }),
+      }), E = this.extractContent(v), g = this.parseOHLCVResponse(E);
       if (g.length !== n)
         throw new Error(`AI returned ${g.length} OHLCV objects, but expected ${n}.`);
       return g;
-    } catch (
-      throw new Error(`OHLCV analysis failed: ${
+    } catch (u) {
+      throw new Error(`OHLCV analysis failed: ${u.message}`);
     }
   }
   /**
@@ -1986,10 +2035,10 @@ Please process this data according to the system instructions. Remember to retur
    */
   parseOHLCVResponse(e) {
     try {
-      const
-      if (!Array.isArray(
+      const o = JSON.parse(e);
+      if (!Array.isArray(o))
         throw new Error("Response is not in array format");
-      return
+      return o.map((a, i) => {
         if (typeof a != "object" || a === null)
           throw new Error(`Element ${i} is not a valid object`);
         const { open: n, high: r, low: m, close: c, volume: p } = a, d = ["open", "high", "low", "close", "volume"];
@@ -2008,44 +2057,55 @@ Please process this data according to the system instructions. Remember to retur
           volume: Number(p)
         };
       });
-    } catch (
+    } catch (o) {
       const s = e.match(/\[[\s\S]*\]/);
       if (s)
         return this.parseOHLCVResponse(s[0]);
-      throw new Error(`Unable to parse AI returned OHLCV data: ${
+      throw new Error(`Unable to parse AI returned OHLCV data: ${o}
 Original content: ${e.substring(0, 200)}...`);
     }
   }
 }
-function
-  return new L({ apiKey:
+function te(t, e) {
+  return new L({ apiKey: t, modelType: e });
 }
 export {
-
-
+  b as ALIYUN_MODELS,
+  T as AliYunModelType,
   _ as AliyunAI,
-
+  y as DEEPSEEK_MODELS,
   C as DeepSeekAI,
-
-
+  k as DeepSeekModelType,
+  h as OPENAI_MODELS,
   L as OpenAI,
   f as OpenAIModelType,
-
-
-
-
+  R as createAliyunAI,
+  H as createDeepSeekAI,
+  te as createOpenAI,
+  Z as estimateCost,
+  A as getAllDeepSeekModels,
+  P as getAllModels,
   w as getAllOpenAIModels,
-
-
-
-
-
-
-
-
-
-
-
-
-
+  G as getAudioModelsOpenAI,
+  O as getAvailableAliYunModelTypes,
+  B as getAvailableDeepSeekModelTypes,
+  U as getAvailableOpenAIModelTypes,
+  Q as getChatModels,
+  j as getCompletionModels,
+  M as getCostEfficientModels,
+  S as getDeepSeekModel,
+  D as getDeepSeekModelByName,
+  W as getEmbeddingModels,
+  J as getHighContextModels,
+  z as getLatestModels,
+  N as getModel,
+  q as getModelByName,
+  X as getMultimodalModelsOpenAI,
+  K as getOpenAIModel,
+  V as getOpenAIModelByName,
+  F as getVisionModelsOpenAI,
+  $ as stringToAliYunModelType,
+  I as stringToDeepSeekModelType,
+  ee as stringToOpenAIModelType,
+  Y as suggestModel
 };
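The export block above is the clearest summary of the release: 1.0.9 keeps the same classes under the same public names while adding the full helper surface. A closing sketch exercising only names listed in that block — the key and logging are illustrative:

```js
import {
  createOpenAI,
  OpenAIModelType,
  getChatModels,
  getCostEfficientModels,
} from "ohlcv-ai";

const openai = createOpenAI(process.env.OPENAI_API_KEY, OpenAIModelType.GPT4_OMNI_MINI);
console.log(getChatModels().length, "chat-capable OpenAI models");
console.log(getCostEfficientModels()[0]?.name, "has the lowest input cost");
```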