@llm-jury/core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +378 -0
  2. package/dist/calibration/index.d.ts +1 -0
  3. package/dist/calibration/index.js +1 -0
  4. package/dist/calibration/optimizer.d.ts +26 -0
  5. package/dist/calibration/optimizer.js +61 -0
  6. package/dist/classifiers/base.d.ts +11 -0
  7. package/dist/classifiers/base.js +7 -0
  8. package/dist/classifiers/functionAdapter.d.ts +8 -0
  9. package/dist/classifiers/functionAdapter.js +20 -0
  10. package/dist/classifiers/huggingFaceAdapter.d.ts +20 -0
  11. package/dist/classifiers/huggingFaceAdapter.js +52 -0
  12. package/dist/classifiers/index.d.ts +5 -0
  13. package/dist/classifiers/index.js +5 -0
  14. package/dist/classifiers/llmClassifier.d.ts +19 -0
  15. package/dist/classifiers/llmClassifier.js +47 -0
  16. package/dist/classifiers/sklearnAdapter.d.ts +14 -0
  17. package/dist/classifiers/sklearnAdapter.js +29 -0
  18. package/dist/cli/index.d.ts +1 -0
  19. package/dist/cli/index.js +1 -0
  20. package/dist/cli/main.d.ts +4 -0
  21. package/dist/cli/main.js +261 -0
  22. package/dist/debate/engine.d.ts +48 -0
  23. package/dist/debate/engine.js +309 -0
  24. package/dist/debate/index.d.ts +1 -0
  25. package/dist/debate/index.js +1 -0
  26. package/dist/index.d.ts +7 -0
  27. package/dist/index.js +7 -0
  28. package/dist/judges/base.d.ts +16 -0
  29. package/dist/judges/base.js +1 -0
  30. package/dist/judges/bayesian.d.ts +8 -0
  31. package/dist/judges/bayesian.js +52 -0
  32. package/dist/judges/index.d.ts +5 -0
  33. package/dist/judges/index.js +5 -0
  34. package/dist/judges/llmJudge.d.ts +19 -0
  35. package/dist/judges/llmJudge.js +86 -0
  36. package/dist/judges/majorityVote.d.ts +5 -0
  37. package/dist/judges/majorityVote.js +45 -0
  38. package/dist/judges/weightedVote.d.ts +5 -0
  39. package/dist/judges/weightedVote.js +42 -0
  40. package/dist/jury/core.d.ts +43 -0
  41. package/dist/jury/core.js +113 -0
  42. package/dist/jury/index.d.ts +1 -0
  43. package/dist/jury/index.js +1 -0
  44. package/dist/llm/client.d.ts +23 -0
  45. package/dist/llm/client.js +85 -0
  46. package/dist/llm/index.d.ts +1 -0
  47. package/dist/llm/index.js +1 -0
  48. package/dist/personas/base.d.ts +19 -0
  49. package/dist/personas/base.js +1 -0
  50. package/dist/personas/index.d.ts +2 -0
  51. package/dist/personas/index.js +2 -0
  52. package/dist/personas/registry.d.ts +8 -0
  53. package/dist/personas/registry.js +83 -0
  54. package/dist/utils.d.ts +2 -0
  55. package/dist/utils.js +23 -0
  56. package/package.json +43 -0
@@ -0,0 +1,7 @@
1
+ export * from "./calibration/index.ts";
2
+ export * from "./classifiers/index.ts";
3
+ export * from "./debate/index.ts";
4
+ export * from "./judges/index.ts";
5
+ export * from "./jury/index.ts";
6
+ export * from "./llm/index.ts";
7
+ export * from "./personas/index.ts";
package/dist/index.js ADDED
@@ -0,0 +1,7 @@
1
// Public runtime entry point for @llm-jury/core: re-exports every submodule.
export * from "./calibration/index.js";
export * from "./classifiers/index.js";
export * from "./debate/index.js";
export * from "./judges/index.js";
export * from "./jury/index.js";
export * from "./llm/index.js";
export * from "./personas/index.js";
@@ -0,0 +1,16 @@
1
+ import type { ClassificationResult } from "../classifiers/base.ts";
2
+ import type { DebateTranscript } from "../debate/engine.ts";
3
+ export type Verdict = {
4
+ label: string;
5
+ confidence: number;
6
+ reasoning: string;
7
+ wasEscalated: boolean;
8
+ primaryResult: ClassificationResult;
9
+ debateTranscript: DebateTranscript | null;
10
+ judgeStrategy: string;
11
+ totalDurationMs: number;
12
+ totalCostUsd: number | null;
13
+ };
14
+ export interface JudgeStrategy {
15
+ judge(transcript: DebateTranscript, labels: string[]): Promise<Verdict>;
16
+ }
@@ -0,0 +1 @@
1
// judges/base contains only type declarations; the empty export marks the
// compiled file as an ES module with no runtime exports.
export {};
@@ -0,0 +1,8 @@
1
+ import type { DebateTranscript } from "../debate/engine.ts";
2
+ import type { JudgeStrategy, Verdict } from "./base.ts";
3
+ export type PersonaPriors = Record<string, Record<string, number>>;
4
+ export declare class BayesianJudge implements JudgeStrategy {
5
+ private priors;
6
+ constructor(priors?: PersonaPriors);
7
+ judge(transcript: DebateTranscript, labels: string[]): Promise<Verdict>;
8
+ }
@@ -0,0 +1,52 @@
1
/**
 * Aggregates the final debate round with a naive-Bayes style product of
 * persona priors and response confidences, normalized over the label set.
 */
export class BayesianJudge {
    priors;
    /**
     * @param priors Optional personaName -> label -> prior weight map.
     *               Missing entries fall back to a uniform 1/labels.length.
     */
    constructor(priors = {}) {
        this.priors = priors;
    }
    async judge(transcript, labels) {
        const lastRound = transcript.rounds[transcript.rounds.length - 1] ?? [];
        // Without persona responses there is nothing to aggregate, so the
        // primary classifier's answer is passed through unchanged.
        if (lastRound.length === 0) {
            return {
                label: transcript.primaryResult.label,
                confidence: transcript.primaryResult.confidence,
                reasoning: "No persona responses available. Falling back to primary result.",
                wasEscalated: true,
                primaryResult: transcript.primaryResult,
                debateTranscript: transcript,
                judgeStrategy: "bayesian",
                totalDurationMs: transcript.durationMs,
                totalCostUsd: transcript.totalCostUsd,
            };
        }
        // Every label starts at 1; one multiplicative factor per (vote, label).
        const scores = new Map(labels.map((label) => [label, 1]));
        const uniformPrior = 1 / Math.max(1, labels.length);
        for (const vote of lastRound) {
            for (const label of labels) {
                const personaPrior = this.priors[vote.personaName]?.[label] ?? uniformPrior;
                const agreement = vote.label === label
                    ? Number(vote.confidence)
                    : 1 - Number(vote.confidence);
                // Floor each factor so a single zero-confidence vote cannot
                // permanently zero out a label.
                const factor = Math.max(1e-6, personaPrior * agreement);
                scores.set(label, (scores.get(label) ?? 1) * factor);
            }
        }
        const mass = Array.from(scores.values()).reduce((acc, value) => acc + value, 0) || 1;
        let bestLabel = labels[0] ?? transcript.primaryResult.label;
        let bestPosterior = -1;
        for (const [label, score] of scores.entries()) {
            const posterior = score / mass;
            if (posterior > bestPosterior) {
                bestLabel = label;
                bestPosterior = posterior;
            }
        }
        return {
            label: bestLabel,
            confidence: bestPosterior,
            reasoning: "Bayesian aggregation across persona responses.",
            wasEscalated: true,
            primaryResult: transcript.primaryResult,
            debateTranscript: transcript,
            judgeStrategy: "bayesian",
            totalDurationMs: transcript.durationMs,
            totalCostUsd: transcript.totalCostUsd,
        };
    }
}
@@ -0,0 +1,5 @@
1
+ export * from "./base.ts";
2
+ export * from "./bayesian.ts";
3
+ export * from "./llmJudge.ts";
4
+ export * from "./majorityVote.ts";
5
+ export * from "./weightedVote.ts";
@@ -0,0 +1,5 @@
1
// Barrel file re-exporting every judge strategy implementation.
export * from "./base.js";
export * from "./bayesian.js";
export * from "./llmJudge.js";
export * from "./majorityVote.js";
export * from "./weightedVote.js";
@@ -0,0 +1,19 @@
1
+ import type { LLMClient } from "../llm/client.ts";
2
+ import type { DebateTranscript } from "../debate/engine.ts";
3
+ import type { JudgeStrategy, Verdict } from "./base.ts";
4
+ export type LLMJudgeOptions = {
5
+ model?: string;
6
+ systemPrompt?: string;
7
+ temperature?: number;
8
+ llmClient?: LLMClient;
9
+ };
10
+ export declare class LLMJudge implements JudgeStrategy {
11
+ static readonly DEFAULT_SYSTEM_PROMPT: string;
12
+ private model;
13
+ private systemPrompt;
14
+ private temperature;
15
+ private llmClient;
16
+ constructor(options?: LLMJudgeOptions);
17
+ judge(transcript: DebateTranscript, labels: string[]): Promise<Verdict>;
18
+ buildPrompt(transcript: DebateTranscript, labels: string[]): string;
19
+ }
@@ -0,0 +1,86 @@
1
+ import { LiteLLMClient } from "../llm/client.js";
2
+ import { stripMarkdown, safeJsonObject } from "../utils.js";
3
/**
 * Judge that forwards the whole debate to a single LLM and parses its JSON
 * verdict. Falls back to the primary classifier result when the response is
 * not valid JSON.
 */
export class LLMJudge {
    // This prompt is part of observable behavior; the text must stay stable.
    static DEFAULT_SYSTEM_PROMPT = [
        "You are the presiding judge in an expert panel. ",
        "You have received assessments from multiple domain experts on a classification task.\n\n",
        "Your role is to:\n",
        "1. Weigh each expert's reasoning on its merits\n",
        "2. Consider the strength of evidence each expert cites\n",
        "3. Note where experts agree and disagree\n",
        "4. Factor in each expert's known perspective/bias\n",
        "5. If a debate summary is provided, use it to identify the decisive arguments\n",
        "6. Deliver a final classification with clear reasoning\n\n",
        "Respond ONLY with valid JSON:\n",
        "{\n",
        ' "label": "<final classification>",\n',
        ' "confidence": <0.0-1.0>,\n',
        ' "reasoning": "<your synthesis of the debate>",\n',
        ' "key_agreements": ["<points all experts agreed on>"],\n',
        ' "key_disagreements": ["<points of contention>"],\n',
        ' "decisive_factor": "<what tipped the decision>"\n',
        "}",
    ].join("");
    model;
    systemPrompt;
    temperature;
    llmClient;
    /**
     * @param options Overrides; defaults are model "gpt-5-mini",
     *                DEFAULT_SYSTEM_PROMPT, temperature 0, a LiteLLMClient.
     */
    constructor(options = {}) {
        const { model, systemPrompt, temperature, llmClient } = options;
        this.model = model ?? "gpt-5-mini";
        this.systemPrompt = systemPrompt ?? LLMJudge.DEFAULT_SYSTEM_PROMPT;
        this.temperature = temperature ?? 0;
        this.llmClient = llmClient ?? new LiteLLMClient();
    }
    async judge(transcript, labels) {
        const completion = await this.llmClient.complete(this.model, this.systemPrompt, this.buildPrompt(transcript, labels), this.temperature);
        const verdictJson = safeJsonObject(stripMarkdown(completion.content));
        // Debate cost plus this judge call's cost (either may be unreported).
        const spentUsd = Number(transcript.totalCostUsd ?? 0) + Number(completion.costUsd ?? 0);
        if (!verdictJson) {
            // Unparseable judge output: degrade gracefully to the primary result.
            return {
                label: String(transcript.primaryResult.label),
                confidence: Number(transcript.primaryResult.confidence),
                reasoning: "LLM judge response was not valid JSON. Falling back to primary result.",
                wasEscalated: true,
                primaryResult: transcript.primaryResult,
                debateTranscript: transcript,
                judgeStrategy: "llm_judge_fallback_invalid_json",
                totalDurationMs: transcript.durationMs,
                totalCostUsd: spentUsd,
            };
        }
        return {
            label: String(verdictJson.label ?? transcript.primaryResult.label),
            confidence: Number(verdictJson.confidence ?? transcript.primaryResult.confidence),
            reasoning: String(verdictJson.reasoning ?? "LLM judge response."),
            wasEscalated: true,
            primaryResult: transcript.primaryResult,
            debateTranscript: transcript,
            judgeStrategy: "llm_judge",
            totalDurationMs: transcript.durationMs,
            totalCostUsd: spentUsd,
        };
    }
    /** Renders input, labels, every round, and the optional summary as the user prompt. */
    buildPrompt(transcript, labels) {
        const out = [
            `Input: ${transcript.inputText}`,
            `Available labels: ${labels.join(", ")}`,
            `Primary result: ${transcript.primaryResult.label} (${Number(transcript.primaryResult.confidence).toFixed(2)})`,
            "Debate transcript:",
        ];
        transcript.rounds.forEach((round, index) => {
            out.push(index === 0 ? "Initial Expert Opinions:" : `Revised Opinions (Round ${index + 1}):`);
            for (const response of round) {
                out.push(`- ${response.personaName}: ${response.label} (${Number(response.confidence).toFixed(2)}) | Reasoning: ${response.reasoning}`);
            }
        });
        if (transcript.summary) {
            out.push("", "Debate Summary:", transcript.summary);
        }
        out.push("Respond ONLY with JSON containing: label, confidence, reasoning, key_agreements, key_disagreements, decisive_factor.");
        return out.join("\n");
    }
}
@@ -0,0 +1,5 @@
1
+ import type { DebateTranscript } from "../debate/engine.ts";
2
+ import type { JudgeStrategy, Verdict } from "./base.ts";
3
+ export declare class MajorityVoteJudge implements JudgeStrategy {
4
+ judge(transcript: DebateTranscript, _labels: string[]): Promise<Verdict>;
5
+ }
@@ -0,0 +1,45 @@
1
/**
 * Picks the label chosen by the most personas in the last debate round.
 * Confidence is the winning vote share; reasoning concatenates the winners'
 * explanations.
 */
export class MajorityVoteJudge {
    async judge(transcript, _labels) {
        const lastRound = transcript.rounds[transcript.rounds.length - 1] ?? [];
        // Nothing to tally: pass the primary classifier's answer through.
        if (lastRound.length === 0) {
            return {
                label: transcript.primaryResult.label,
                confidence: transcript.primaryResult.confidence,
                reasoning: "No persona responses available. Falling back to primary result.",
                wasEscalated: true,
                primaryResult: transcript.primaryResult,
                debateTranscript: transcript,
                judgeStrategy: "majority_vote",
                totalDurationMs: transcript.durationMs,
                totalCostUsd: transcript.totalCostUsd,
            };
        }
        const tally = new Map();
        for (const vote of lastRound) {
            tally.set(vote.label, (tally.get(vote.label) ?? 0) + 1);
        }
        // Ties go to the label seen first (Map preserves insertion order).
        let winningLabel = "";
        let winningVotes = -1;
        for (const [label, votes] of tally.entries()) {
            if (votes > winningVotes) {
                winningLabel = label;
                winningVotes = votes;
            }
        }
        const combinedReasoning = lastRound
            .filter((vote) => vote.label === winningLabel)
            .map((vote) => vote.reasoning)
            .join(" ");
        return {
            label: winningLabel,
            confidence: winningVotes / lastRound.length,
            reasoning: combinedReasoning || "Majority vote selected the winner.",
            wasEscalated: true,
            primaryResult: transcript.primaryResult,
            debateTranscript: transcript,
            judgeStrategy: "majority_vote",
            totalDurationMs: transcript.durationMs,
            totalCostUsd: transcript.totalCostUsd,
        };
    }
}
@@ -0,0 +1,5 @@
1
+ import type { DebateTranscript } from "../debate/engine.ts";
2
+ import type { JudgeStrategy, Verdict } from "./base.ts";
3
+ export declare class WeightedVoteJudge implements JudgeStrategy {
4
+ judge(transcript: DebateTranscript, _labels: string[]): Promise<Verdict>;
5
+ }
@@ -0,0 +1,42 @@
1
/**
 * Sums each label's persona confidences over the final round; the heaviest
 * label wins, with confidence equal to its share of the total weight.
 */
export class WeightedVoteJudge {
    async judge(transcript, _labels) {
        const lastRound = transcript.rounds[transcript.rounds.length - 1] ?? [];
        // Nothing to weigh: pass the primary classifier's answer through.
        if (lastRound.length === 0) {
            return {
                label: transcript.primaryResult.label,
                confidence: transcript.primaryResult.confidence,
                reasoning: "No persona responses available. Falling back to primary result.",
                wasEscalated: true,
                primaryResult: transcript.primaryResult,
                debateTranscript: transcript,
                judgeStrategy: "weighted_vote",
                totalDurationMs: transcript.durationMs,
                totalCostUsd: transcript.totalCostUsd,
            };
        }
        const weights = new Map();
        for (const vote of lastRound) {
            const previous = weights.get(vote.label) ?? 0;
            weights.set(vote.label, previous + Number(vote.confidence));
        }
        let topLabel = "";
        let topWeight = -1;
        for (const [label, weight] of weights.entries()) {
            if (weight > topWeight) {
                topLabel = label;
                topWeight = weight;
            }
        }
        // Guard against a zero total so the division below stays finite.
        const totalWeight = Array.from(weights.values()).reduce((acc, weight) => acc + weight, 0) || 1;
        return {
            label: topLabel,
            confidence: topWeight / totalWeight,
            reasoning: "Weighted vote based on persona confidence scores.",
            wasEscalated: true,
            primaryResult: transcript.primaryResult,
            debateTranscript: transcript,
            judgeStrategy: "weighted_vote",
            totalDurationMs: transcript.durationMs,
            totalCostUsd: transcript.totalCostUsd,
        };
    }
}
@@ -0,0 +1,43 @@
1
+ import type { ClassificationResult, Classifier } from "../classifiers/base.ts";
2
+ import { DebateConfig, DebateEngine } from "../debate/engine.ts";
3
+ import type { LLMClient } from "../llm/client.ts";
4
+ import type { Persona } from "../personas/base.ts";
5
+ import type { JudgeStrategy, Verdict } from "../judges/base.ts";
6
+ export type JuryOptions = {
7
+ classifier: Classifier;
8
+ personas: Persona[];
9
+ confidenceThreshold?: number;
10
+ debateConcurrency?: number;
11
+ judge?: JudgeStrategy;
12
+ debateConfig?: DebateConfig;
13
+ escalationOverride?: (result: ClassificationResult) => boolean;
14
+ maxDebateCostUsd?: number;
15
+ onEscalation?: (text: string, result: ClassificationResult) => void;
16
+ onVerdict?: (verdict: Verdict) => void;
17
+ llmClient?: LLMClient;
18
+ };
19
+ export declare class JuryStats {
20
+ total: number;
21
+ fastPath: number;
22
+ escalated: number;
23
+ get escalationRate(): number;
24
+ get costSavingsVsAlwaysEscalate(): number;
25
+ }
26
+ export declare class Jury {
27
+ classifier: Classifier;
28
+ personas: Persona[];
29
+ threshold: number;
30
+ judge: JudgeStrategy;
31
+ debateConfig: DebateConfig;
32
+ debateEngine: DebateEngine;
33
+ escalationOverride?: (result: ClassificationResult) => boolean;
34
+ maxDebateCostUsd?: number;
35
+ onEscalation?: (text: string, result: ClassificationResult) => void;
36
+ onVerdict?: (verdict: Verdict) => void;
37
+ private _stats;
38
+ constructor(options: JuryOptions);
39
+ classify(text: string): Promise<Verdict>;
40
+ classifyBatch(texts: string[], concurrency?: number): Promise<Verdict[]>;
41
+ shouldEscalate(result: ClassificationResult): boolean;
42
+ get stats(): JuryStats;
43
+ }
@@ -0,0 +1,113 @@
1
+ import { DebateConfig, DebateEngine } from "../debate/engine.js";
2
+ import { LiteLLMClient } from "../llm/client.js";
3
+ import { LLMJudge } from "../judges/llmJudge.js";
4
/** Running counters describing how often classifications escalated. */
export class JuryStats {
    // Total classify() calls observed.
    total = 0;
    // Calls answered by the primary classifier alone.
    fastPath = 0;
    // Calls that triggered a persona debate.
    escalated = 0;
    /** Fraction of calls that escalated; 0 before any call. */
    get escalationRate() {
        if (this.total > 0) {
            return this.escalated / this.total;
        }
        return 0;
    }
    /** Fraction of calls resolved without a debate; 0 before any call. */
    get costSavingsVsAlwaysEscalate() {
        if (this.total > 0) {
            return this.fastPath / this.total;
        }
        return 0;
    }
}
15
// Orchestrates the fast path (primary classifier) with escalation to a
// persona debate plus a judge when confidence is low.
export class Jury {
    // Primary classifier; must expose classify(text) and a labels list.
    classifier;
    // Personas consulted when a result escalates; empty disables debates.
    personas;
    // Confidence threshold below which results escalate (default 0.7).
    threshold;
    // Strategy that converts a debate transcript into a final Verdict.
    judge;
    debateConfig;
    debateEngine;
    // Optional predicate that fully replaces the threshold check.
    escalationOverride;
    // Optional hard cap on debate spend in USD.
    maxDebateCostUsd;
    // Observability hooks; see notes in classify() about when each fires.
    onEscalation;
    onVerdict;
    _stats;
    constructor(options) {
        this.classifier = options.classifier;
        this.personas = options.personas;
        this.threshold = options.confidenceThreshold ?? 0.7;
        // One shared transport for the default judge and the debate engine.
        const llmClient = options.llmClient ?? new LiteLLMClient();
        this.judge = options.judge ?? new LLMJudge({ llmClient });
        this.debateConfig = options.debateConfig ?? new DebateConfig();
        this.debateEngine = new DebateEngine(this.personas, this.debateConfig, llmClient, Math.max(1, options.debateConcurrency ?? 5));
        this.escalationOverride = options.escalationOverride;
        this.maxDebateCostUsd = options.maxDebateCostUsd;
        this.onEscalation = options.onEscalation;
        this.onVerdict = options.onVerdict;
        this._stats = new JuryStats();
    }
    // Classifies one text. Fast path returns the primary result directly;
    // otherwise runs a debate and asks the judge for a verdict.
    // Note: onVerdict fires only for judge-produced verdicts, not for the
    // fast-path or cost-guard returns below.
    async classify(text) {
        const start = Date.now();
        const primary = await this.classifier.classify(text);
        this._stats.total += 1;
        // A debate needs at least one persona, regardless of confidence.
        const shouldEscalate = this.shouldEscalate(primary) && this.personas.length > 0;
        if (!shouldEscalate) {
            this._stats.fastPath += 1;
            return {
                label: primary.label,
                confidence: primary.confidence,
                reasoning: "Classified by primary classifier with sufficient confidence.",
                wasEscalated: false,
                primaryResult: primary,
                debateTranscript: null,
                judgeStrategy: "primary_classifier",
                totalDurationMs: Date.now() - start,
                totalCostUsd: 0,
            };
        }
        this._stats.escalated += 1;
        this.onEscalation?.(text, primary);
        const transcript = await this.debateEngine.debate(text, primary, this.classifier.labels, this.maxDebateCostUsd ?? null);
        // Cost guard: if the debate overran the budget, skip the judge call
        // and fall back to the primary result (transcript is still attached).
        if (this.maxDebateCostUsd != null && transcript.totalCostUsd != null && transcript.totalCostUsd > this.maxDebateCostUsd) {
            return {
                label: primary.label,
                confidence: primary.confidence,
                reasoning: "Debate exceeded maxDebateCostUsd. Returning primary classifier result.",
                wasEscalated: true,
                primaryResult: primary,
                debateTranscript: transcript,
                judgeStrategy: "cost_guard_primary_fallback",
                totalDurationMs: Date.now() - start,
                totalCostUsd: transcript.totalCostUsd,
            };
        }
        const verdict = await this.judge.judge(transcript, this.classifier.labels);
        // Normalize judge output so bookkeeping fields are always consistent,
        // whatever the judge implementation returned.
        verdict.wasEscalated = true;
        verdict.primaryResult = primary;
        verdict.debateTranscript = transcript;
        verdict.totalDurationMs = Date.now() - start;
        if (verdict.totalCostUsd == null) {
            verdict.totalCostUsd = transcript.totalCostUsd;
        }
        this.onVerdict?.(verdict);
        return verdict;
    }
    // Classifies texts with a bounded worker pool; results keep input order.
    async classifyBatch(texts, concurrency = 10) {
        const limit = Math.max(1, concurrency);
        const results = new Array(texts.length);
        let cursor = 0;
        const workers = new Array(Math.min(limit, texts.length)).fill(null).map(async () => {
            while (true) {
                // Safe without locking: no await between read and increment,
                // and the JS event loop runs these statements atomically.
                const index = cursor;
                cursor += 1;
                if (index >= texts.length) {
                    break;
                }
                results[index] = await this.classify(texts[index]);
            }
        });
        await Promise.all(workers);
        return results;
    }
    // escalationOverride, when set, completely replaces the threshold check.
    shouldEscalate(result) {
        if (this.escalationOverride) {
            return Boolean(this.escalationOverride(result));
        }
        return result.confidence < this.threshold;
    }
    get stats() {
        return this._stats;
    }
}
@@ -0,0 +1 @@
1
+ export * from "./core.ts";
@@ -0,0 +1 @@
1
// Barrel file for the jury module.
export * from "./core.js";
@@ -0,0 +1,23 @@
1
/** Minimal chat-completion transport used by judges and the debate engine. */
export interface LLMClient {
    complete(model: string, systemPrompt: string, prompt: string, temperature?: number): Promise<{
        content: string;
        tokens?: number;
        costUsd?: number;
    }>;
}
/** Options for LiteLLMClient; unset fields fall back to environment variables. */
export type LiteLLMClientOptions = {
    baseUrl?: string;
    apiKey?: string;
    timeoutMs?: number;
};
/**
 * Default LLMClient speaking the OpenAI-compatible /chat/completions protocol
 * (usable against a LiteLLM proxy or api.openai.com directly).
 */
export declare class LiteLLMClient implements LLMClient {
    private baseUrl;
    private apiKey;
    private timeoutMs;
    constructor(options?: LiteLLMClientOptions);
    complete(model: string, systemPrompt: string, prompt: string, temperature?: number): Promise<{
        content: string;
        tokens?: number;
        costUsd?: number;
    }>;
}
@@ -0,0 +1,85 @@
1
// Retries fn on transient failures — network TypeErrors, AbortErrors, and
// errors whose message mentions an HTTP 5xx/429 status — with exponential
// backoff. Non-transient errors, and the final failed attempt, rethrow.
async function withRetry(fn, maxAttempts = 3, baseDelayMs = 1000) {
    let failure;
    let attempt = 0;
    while (attempt < maxAttempts) {
        attempt += 1;
        try {
            return await fn();
        }
        catch (err) {
            failure = err;
            const abortLike = err instanceof Error && err.name === "AbortError";
            const statusLike = err instanceof Error && /5\d{2}|429/.test(err.message);
            const transient = err instanceof TypeError || abortLike || statusLike;
            if (!transient || attempt >= maxAttempts) {
                throw err;
            }
            // Exponential backoff: base, 2x base, 4x base, ...
            const delayMs = baseDelayMs * 2 ** (attempt - 1);
            await new Promise((resume) => setTimeout(resume, delayMs));
        }
    }
    // Unreachable in practice (the loop returns or throws), kept as a guard.
    throw failure;
}
21
+ export class LiteLLMClient {
22
+ baseUrl;
23
+ apiKey;
24
+ timeoutMs;
25
+ constructor(options = {}) {
26
+ this.baseUrl = (options.baseUrl ?? process.env.LITELLM_BASE_URL ?? process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1").replace(/\/$/, "");
27
+ this.apiKey = options.apiKey ?? process.env.LITELLM_API_KEY ?? process.env.OPENAI_API_KEY ?? null;
28
+ this.timeoutMs = options.timeoutMs ?? 60000;
29
+ }
30
+ async complete(model, systemPrompt, prompt, temperature = 0) {
31
+ if (!this.apiKey) {
32
+ throw new Error("No API key configured. Set LITELLM_API_KEY or OPENAI_API_KEY, or inject a custom llmClient.");
33
+ }
34
+ const body = {
35
+ model,
36
+ messages: [
37
+ { role: "system", content: systemPrompt },
38
+ { role: "user", content: prompt },
39
+ ],
40
+ };
41
+ if (shouldSendTemperature(model, temperature)) {
42
+ body.temperature = temperature;
43
+ }
44
+ return withRetry(async () => {
45
+ const controller = new AbortController();
46
+ const timeout = setTimeout(() => controller.abort(), this.timeoutMs);
47
+ try {
48
+ const response = await fetch(`${this.baseUrl}/chat/completions`, {
49
+ method: "POST",
50
+ headers: {
51
+ "Content-Type": "application/json",
52
+ Authorization: `Bearer ${this.apiKey}`,
53
+ },
54
+ body: JSON.stringify(body),
55
+ signal: controller.signal,
56
+ });
57
+ if (!response.ok) {
58
+ const detail = await response.text();
59
+ throw new Error(`LLM request failed (${response.status}): ${detail}`);
60
+ }
61
+ const payload = (await response.json());
62
+ const content = payload.choices?.[0]?.message?.content;
63
+ if (typeof content !== "string") {
64
+ throw new Error("LLM response did not include choices[0].message.content");
65
+ }
66
+ return {
67
+ content,
68
+ tokens: Number(payload.usage?.total_tokens ?? 0),
69
+ costUsd: undefined,
70
+ };
71
+ }
72
+ finally {
73
+ clearTimeout(timeout);
74
+ }
75
+ });
76
+ }
77
+ }
78
+ function shouldSendTemperature(model, temperature) {
79
+ if (typeof temperature !== "number" || Number.isNaN(temperature)) {
80
+ return false;
81
+ }
82
+ const lower = model.toLowerCase();
83
+ const noTempPrefixes = ["o1", "o3", "gpt-5"];
84
+ return !noTempPrefixes.some((prefix) => lower.startsWith(prefix));
85
+ }
@@ -0,0 +1 @@
1
+ export * from "./client.ts";
@@ -0,0 +1 @@
1
// Barrel file for the llm module.
export * from "./client.js";
@@ -0,0 +1,19 @@
1
/** A debating expert: its identity plus the LLM settings used to run it. */
export type Persona = {
    name: string;
    role: string;
    systemPrompt: string;
    model: string;
    temperature: number;
    /** Acknowledged perspective/bias that judges may factor in. */
    knownBias?: string;
};
/** One persona's answer within a single debate round. */
export type PersonaResponse = {
    personaName: string;
    label: string;
    /** Self-reported confidence, expected in [0, 1]. */
    confidence: number;
    reasoning: string;
    keyFactors: string[];
    /** Optional note explaining disagreement with the other personas. */
    dissentNotes?: string;
    /** Unparsed model output, kept for debugging. */
    rawResponse?: string;
    tokensUsed?: number;
    costUsd?: number;
};
@@ -0,0 +1 @@
1
// personas/base contains only type declarations; the empty export marks the
// compiled file as an ES module with no runtime exports.
export {};
@@ -0,0 +1,2 @@
1
+ export * from "./base.ts";
2
+ export * from "./registry.ts";
@@ -0,0 +1,2 @@
1
// Barrel file for the personas module.
export * from "./base.js";
export * from "./registry.js";
@@ -0,0 +1,8 @@
1
+ import type { Persona } from "./base.ts";
2
+ export declare class PersonaRegistry {
3
+ static contentModeration(): Persona[];
4
+ static legalCompliance(): Persona[];
5
+ static medicalTriage(): Persona[];
6
+ static financialCompliance(): Persona[];
7
+ static custom(personas: Persona[]): Persona[];
8
+ }