@digilogiclabs/platform-core 1.15.0 → 1.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{agents-Cd2eEX5M.d.mts → agents-Cc65YUoW.d.ts} +59 -2
- package/dist/{agents-CntmA45w.d.ts → agents-DGciJI27.d.mts} +59 -2
- package/dist/agents.d.mts +1 -1
- package/dist/agents.d.ts +1 -1
- package/dist/agents.js +334 -0
- package/dist/agents.js.map +1 -1
- package/dist/agents.mjs +333 -0
- package/dist/agents.mjs.map +1 -1
- package/dist/auth.d.mts +52 -4
- package/dist/auth.d.ts +52 -4
- package/dist/auth.js +49 -10
- package/dist/auth.js.map +1 -1
- package/dist/auth.mjs +49 -10
- package/dist/auth.mjs.map +1 -1
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +300 -0
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +299 -0
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/agents.mjs
CHANGED
@@ -438,8 +438,341 @@ function createTracedAI(options) {
     }
   };
 }
+
+// src/interfaces/IAI.ts
+var AIErrorMessages = {
+  invalid_request: "The request was invalid or malformed",
+  authentication_error: "Authentication failed - check your API key",
+  rate_limit_exceeded: "Rate limit exceeded - try again later",
+  quota_exceeded: "Usage quota exceeded for this billing period",
+  model_not_found: "The specified model was not found",
+  context_length_exceeded: "Input exceeds the model context length",
+  content_filter: "Content was filtered due to policy violations",
+  server_error: "The AI provider encountered an internal error",
+  timeout: "The request timed out",
+  network_error: "Network error connecting to AI provider",
+  provider_unavailable: "The AI provider is currently unavailable",
+  unknown: "An unknown error occurred"
+};
+function createAIError(code, message, options) {
+  const error = new Error(message || AIErrorMessages[code]);
+  error.name = "AIError";
+  error.code = code;
+  error.provider = options?.provider;
+  error.model = options?.model;
+  error.statusCode = options?.statusCode;
+  error.retryable = options?.retryable ?? [
+    "rate_limit_exceeded",
+    "server_error",
+    "timeout",
+    "network_error"
+  ].includes(code);
+  error.retryAfterMs = options?.retryAfterMs;
+  if (options?.cause) {
+    error.cause = options.cause;
+  }
+  return error;
+}
+
+// src/adapters/ollama/OllamaAdapter.ts
+var OllamaAdapter = class {
+  baseUrl;
+  defaultModel;
+  defaultEmbeddingModel;
+  timeoutMs;
+  constructor(config) {
+    this.baseUrl = (config?.baseUrl ?? "http://localhost:11434").replace(
+      /\/$/,
+      ""
+    );
+    this.defaultModel = config?.defaultModel ?? "qwen2.5:3b";
+    this.defaultEmbeddingModel = config?.defaultEmbeddingModel ?? "nomic-embed-text";
+    this.timeoutMs = config?.timeoutMs ?? 12e4;
+  }
+  async chat(request) {
+    const model = request.model ?? this.defaultModel;
+    const ollamaReq = {
+      model,
+      messages: request.messages.map((m) => ({
+        role: m.role === "tool" ? "assistant" : m.role,
+        content: m.content
+      })),
+      stream: false,
+      options: {
+        temperature: request.temperature,
+        num_predict: request.maxTokens,
+        top_p: request.topP,
+        stop: request.stop ? Array.isArray(request.stop) ? request.stop : [request.stop] : void 0
+      }
+    };
+    const response = await this.fetch(
+      "/api/chat",
+      ollamaReq
+    );
+    const usage = {
+      promptTokens: response.prompt_eval_count ?? 0,
+      completionTokens: response.eval_count ?? 0,
+      totalTokens: (response.prompt_eval_count ?? 0) + (response.eval_count ?? 0),
+      estimatedCostUsd: 0
+      // Local — zero cost
+    };
+    return {
+      id: `ollama-${Date.now()}`,
+      model: response.model ?? model,
+      provider: "custom",
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: "assistant",
+            content: response.message.content
+          },
+          finishReason: "stop"
+        }
+      ],
+      usage,
+      created: /* @__PURE__ */ new Date(),
+      finishReason: "stop"
+    };
+  }
+  async *chatStream(request) {
+    const model = request.model ?? this.defaultModel;
+    const ollamaReq = {
+      model,
+      messages: request.messages.map((m) => ({
+        role: m.role === "tool" ? "assistant" : m.role,
+        content: m.content
+      })),
+      stream: true,
+      options: {
+        temperature: request.temperature,
+        num_predict: request.maxTokens,
+        top_p: request.topP
+      }
+    };
+    const controller = new AbortController();
+    const timer = setTimeout(() => controller.abort(), this.timeoutMs);
+    try {
+      const res = await fetch(`${this.baseUrl}/api/chat`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify(ollamaReq),
+        signal: controller.signal
+      });
+      if (!res.ok) {
+        throw createAIError("server_error", `Ollama error: ${res.status}`);
+      }
+      const reader = res.body?.getReader();
+      if (!reader) return;
+      const decoder = new TextDecoder();
+      let buffer = "";
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          if (!line.trim()) continue;
+          const chunk = JSON.parse(line);
+          yield {
+            id: `ollama-stream-${Date.now()}`,
+            model,
+            provider: "custom",
+            delta: {
+              content: chunk.message?.content ?? ""
+            },
+            finishReason: chunk.done ? "stop" : void 0
+          };
+        }
+      }
+    } finally {
+      clearTimeout(timer);
+    }
+  }
+  async chatWithCallback(request, callback) {
+    let fullContent = "";
+    for await (const chunk of this.chatStream(request)) {
+      await callback(chunk);
+      if (chunk.delta.content) {
+        fullContent += chunk.delta.content;
+      }
+    }
+    const model = request.model ?? this.defaultModel;
+    return {
+      id: `ollama-${Date.now()}`,
+      model,
+      provider: "custom",
+      choices: [
+        {
+          index: 0,
+          message: { role: "assistant", content: fullContent },
+          finishReason: "stop"
+        }
+      ],
+      usage: {
+        promptTokens: 0,
+        completionTokens: 0,
+        totalTokens: 0,
+        estimatedCostUsd: 0
+      },
+      created: /* @__PURE__ */ new Date(),
+      finishReason: "stop"
+    };
+  }
+  async complete(request) {
+    const response = await this.chat({
+      messages: [{ role: "user", content: request.prompt }],
+      model: request.model,
+      temperature: request.temperature,
+      maxTokens: request.maxTokens
+    });
+    return {
+      id: response.id,
+      model: response.model,
+      provider: "custom",
+      text: response.choices[0]?.message.content ?? "",
+      usage: response.usage,
+      created: response.created,
+      finishReason: response.finishReason
+    };
+  }
+  async *completeStream(request) {
+    yield* this.chatStream({
+      messages: [{ role: "user", content: request.prompt }],
+      model: request.model,
+      temperature: request.temperature,
+      maxTokens: request.maxTokens
+    });
+  }
+  async embed(request) {
+    const model = request.model ?? this.defaultEmbeddingModel;
+    const response = await this.fetch("/api/embed", {
+      model,
+      input: request.input
+    });
+    return {
+      id: `ollama-emb-${Date.now()}`,
+      model,
+      provider: "custom",
+      embeddings: response.embeddings,
+      usage: {
+        promptTokens: 0,
+        completionTokens: 0,
+        totalTokens: 0,
+        estimatedCostUsd: 0
+      },
+      created: /* @__PURE__ */ new Date()
+    };
+  }
+  async similarity(text1, text2, model) {
+    const response = await this.embed({ input: [text1, text2], model });
+    const [a, b] = response.embeddings;
+    if (!a || !b) return 0;
+    let dot = 0, normA = 0, normB = 0;
+    for (let i = 0; i < a.length; i++) {
+      dot += a[i] * b[i];
+      normA += a[i] * a[i];
+      normB += b[i] * b[i];
+    }
+    return dot / (Math.sqrt(normA) * Math.sqrt(normB));
+  }
+  async listModels() {
+    try {
+      const response = await this.fetch(
+        "/api/tags",
+        null,
+        "GET"
+      );
+      return response.models.map((m) => ({
+        modelId: m.name,
+        provider: "custom",
+        capabilities: ["chat", "completion"],
+        maxContextTokens: 4096,
+        maxOutputTokens: 2048,
+        inputCostPer1K: 0,
+        outputCostPer1K: 0,
+        supportsStreaming: true,
+        supportsTools: false,
+        supportsVision: false
+      }));
+    } catch {
+      return [];
+    }
+  }
+  async getModel(modelId) {
+    const models = await this.listModels();
+    return models.find((m) => m.modelId === modelId) ?? null;
+  }
+  async supportsCapability(modelId, capability) {
+    const model = await this.getModel(modelId);
+    return model?.capabilities.includes(capability) ?? false;
+  }
+  async estimateTokens(text, _model) {
+    return Math.ceil(text.length / 4);
+  }
+  async estimateCost() {
+    return 0;
+  }
+  async healthCheck() {
+    try {
+      const start = Date.now();
+      await this.fetch("/api/tags", null, "GET");
+      const latency = Date.now() - start;
+      return {
+        healthy: true,
+        providers: {
+          custom: { available: true, latencyMs: latency },
+          openai: { available: false, error: "Not configured" },
+          anthropic: { available: false, error: "Not configured" },
+          google: { available: false, error: "Not configured" },
+          azure: { available: false, error: "Not configured" },
+          bedrock: { available: false, error: "Not configured" }
+        }
+      };
+    } catch (err) {
+      return {
+        healthy: false,
+        providers: {
+          custom: {
+            available: false,
+            error: err instanceof Error ? err.message : "Ollama unavailable"
+          },
+          openai: { available: false, error: "Not configured" },
+          anthropic: { available: false, error: "Not configured" },
+          google: { available: false, error: "Not configured" },
+          azure: { available: false, error: "Not configured" },
+          bedrock: { available: false, error: "Not configured" }
+        }
+      };
+    }
+  }
+  async fetch(path, body, method = "POST") {
+    const controller = new AbortController();
+    const timer = setTimeout(() => controller.abort(), this.timeoutMs);
+    try {
+      const res = await fetch(`${this.baseUrl}${path}`, {
+        method,
+        headers: body ? { "Content-Type": "application/json" } : void 0,
+        body: body ? JSON.stringify(body) : void 0,
+        signal: controller.signal
+      });
+      if (!res.ok) {
+        const text = await res.text().catch(() => "");
+        throw createAIError(
+          "server_error",
+          `Ollama ${method} ${path}: ${res.status} ${text}`
+        );
+      }
+      return await res.json();
+    } finally {
+      clearTimeout(timer);
+    }
+  }
+};
 export {
   DEFAULT_AGENT_LOOP_OPTIONS,
+  OllamaAdapter,
   createAgentTracer,
   createAgentUsageTracker,
   createTracedAI,