@mzhub/promptc 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +224 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +111 -0
- package/dist/cli.js.map +1 -0
- package/dist/compiler/BootstrapFewShot.d.ts +13 -0
- package/dist/compiler/BootstrapFewShot.d.ts.map +1 -0
- package/dist/compiler/BootstrapFewShot.js +93 -0
- package/dist/compiler/BootstrapFewShot.js.map +1 -0
- package/dist/compiler/CandidatePool.d.ts +10 -0
- package/dist/compiler/CandidatePool.d.ts.map +1 -0
- package/dist/compiler/CandidatePool.js +29 -0
- package/dist/compiler/CandidatePool.js.map +1 -0
- package/dist/compiler/CompiledProgram.d.ts +43 -0
- package/dist/compiler/CompiledProgram.d.ts.map +1 -0
- package/dist/compiler/CompiledProgram.js +41 -0
- package/dist/compiler/CompiledProgram.js.map +1 -0
- package/dist/compiler/InstructionRewrite.d.ts +19 -0
- package/dist/compiler/InstructionRewrite.d.ts.map +1 -0
- package/dist/compiler/InstructionRewrite.js +117 -0
- package/dist/compiler/InstructionRewrite.js.map +1 -0
- package/dist/compiler/index.d.ts +8 -0
- package/dist/compiler/index.d.ts.map +1 -0
- package/dist/compiler/index.js +5 -0
- package/dist/compiler/index.js.map +1 -0
- package/dist/compiler/types.d.ts +41 -0
- package/dist/compiler/types.d.ts.map +1 -0
- package/dist/compiler/types.js +2 -0
- package/dist/compiler/types.js.map +1 -0
- package/dist/eval/exactMatch.d.ts +5 -0
- package/dist/eval/exactMatch.d.ts.map +1 -0
- package/dist/eval/exactMatch.js +58 -0
- package/dist/eval/exactMatch.js.map +1 -0
- package/dist/eval/index.d.ts +5 -0
- package/dist/eval/index.d.ts.map +1 -0
- package/dist/eval/index.js +3 -0
- package/dist/eval/index.js.map +1 -0
- package/dist/eval/llmJudge.d.ts +9 -0
- package/dist/eval/llmJudge.d.ts.map +1 -0
- package/dist/eval/llmJudge.js +33 -0
- package/dist/eval/llmJudge.js.map +1 -0
- package/dist/eval/types.d.ts +2 -0
- package/dist/eval/types.d.ts.map +1 -0
- package/dist/eval/types.js +2 -0
- package/dist/eval/types.js.map +1 -0
- package/dist/index.d.ts +14 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +8 -0
- package/dist/index.js.map +1 -0
- package/dist/program/ChainOfThought.d.ts +6 -0
- package/dist/program/ChainOfThought.d.ts.map +1 -0
- package/dist/program/ChainOfThought.js +44 -0
- package/dist/program/ChainOfThought.js.map +1 -0
- package/dist/program/Predict.d.ts +6 -0
- package/dist/program/Predict.d.ts.map +1 -0
- package/dist/program/Predict.js +33 -0
- package/dist/program/Predict.js.map +1 -0
- package/dist/program/Program.d.ts +33 -0
- package/dist/program/Program.d.ts.map +1 -0
- package/dist/program/Program.js +28 -0
- package/dist/program/Program.js.map +1 -0
- package/dist/program/index.d.ts +5 -0
- package/dist/program/index.d.ts.map +1 -0
- package/dist/program/index.js +4 -0
- package/dist/program/index.js.map +1 -0
- package/dist/providers/anthropic.d.ts +10 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +40 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/cerebras.d.ts +10 -0
- package/dist/providers/cerebras.d.ts.map +1 -0
- package/dist/providers/cerebras.js +39 -0
- package/dist/providers/cerebras.js.map +1 -0
- package/dist/providers/google.d.ts +10 -0
- package/dist/providers/google.d.ts.map +1 -0
- package/dist/providers/google.js +42 -0
- package/dist/providers/google.js.map +1 -0
- package/dist/providers/groq.d.ts +10 -0
- package/dist/providers/groq.d.ts.map +1 -0
- package/dist/providers/groq.js +42 -0
- package/dist/providers/groq.js.map +1 -0
- package/dist/providers/index.d.ts +11 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +31 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/ollama.d.ts +9 -0
- package/dist/providers/ollama.d.ts.map +1 -0
- package/dist/providers/ollama.js +39 -0
- package/dist/providers/ollama.js.map +1 -0
- package/dist/providers/openai.d.ts +10 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/providers/openai.js +42 -0
- package/dist/providers/openai.js.map +1 -0
- package/dist/providers/types.d.ts +25 -0
- package/dist/providers/types.d.ts.map +1 -0
- package/dist/providers/types.js +2 -0
- package/dist/providers/types.js.map +1 -0
- package/dist/runtime/cache.d.ts +18 -0
- package/dist/runtime/cache.d.ts.map +1 -0
- package/dist/runtime/cache.js +45 -0
- package/dist/runtime/cache.js.map +1 -0
- package/dist/runtime/concurrency.d.ts +7 -0
- package/dist/runtime/concurrency.d.ts.map +1 -0
- package/dist/runtime/concurrency.js +14 -0
- package/dist/runtime/concurrency.js.map +1 -0
- package/dist/runtime/costTracker.d.ts +24 -0
- package/dist/runtime/costTracker.d.ts.map +1 -0
- package/dist/runtime/costTracker.js +37 -0
- package/dist/runtime/costTracker.js.map +1 -0
- package/dist/runtime/index.d.ts +9 -0
- package/dist/runtime/index.d.ts.map +1 -0
- package/dist/runtime/index.js +5 -0
- package/dist/runtime/index.js.map +1 -0
- package/dist/runtime/retry.d.ts +10 -0
- package/dist/runtime/retry.d.ts.map +1 -0
- package/dist/runtime/retry.js +39 -0
- package/dist/runtime/retry.js.map +1 -0
- package/dist/schema/defineSchema.d.ts +18 -0
- package/dist/schema/defineSchema.d.ts.map +1 -0
- package/dist/schema/defineSchema.js +27 -0
- package/dist/schema/defineSchema.js.map +1 -0
- package/dist/schema/index.d.ts +3 -0
- package/dist/schema/index.d.ts.map +1 -0
- package/dist/schema/index.js +2 -0
- package/dist/schema/index.js.map +1 -0
- package/examples/README.md +42 -0
- package/examples/load-compiled.ts +62 -0
- package/examples/multi-provider.ts +77 -0
- package/examples/name-extractor.ts +113 -0
- package/examples/qa-system.ts +98 -0
- package/package.json +62 -0
- package/src/cli.ts +122 -0
- package/src/compiler/BootstrapFewShot.ts +149 -0
- package/src/compiler/CandidatePool.ts +39 -0
- package/src/compiler/CompiledProgram.ts +112 -0
- package/src/compiler/InstructionRewrite.ts +200 -0
- package/src/compiler/index.ts +19 -0
- package/src/compiler/types.ts +46 -0
- package/src/eval/exactMatch.ts +65 -0
- package/src/eval/index.ts +4 -0
- package/src/eval/llmJudge.ts +45 -0
- package/src/eval/types.ts +4 -0
- package/src/index.ts +71 -0
- package/src/program/ChainOfThought.ts +59 -0
- package/src/program/Predict.ts +47 -0
- package/src/program/Program.ts +64 -0
- package/src/program/index.ts +4 -0
- package/src/providers/anthropic.ts +55 -0
- package/src/providers/cerebras.ts +53 -0
- package/src/providers/google.ts +57 -0
- package/src/providers/groq.ts +57 -0
- package/src/providers/index.ts +50 -0
- package/src/providers/ollama.ts +54 -0
- package/src/providers/openai.ts +57 -0
- package/src/providers/types.ts +27 -0
- package/src/runtime/cache.ts +65 -0
- package/src/runtime/concurrency.ts +21 -0
- package/src/runtime/costTracker.ts +58 -0
- package/src/runtime/index.ts +8 -0
- package/src/runtime/retry.ts +59 -0
- package/src/schema/defineSchema.ts +44 -0
- package/src/schema/index.ts +2 -0
- package/tests/candidatePool.test.ts +46 -0
- package/tests/evaluators.test.ts +69 -0
- package/tests/runtime.test.ts +106 -0
- package/tests/schema.test.ts +59 -0
- package/tsconfig.json +24 -0
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import type { Evaluator } from "./types.js";
|
|
2
|
+
|
|
3
|
+
function deepEqual(a: unknown, b: unknown): boolean {
|
|
4
|
+
if (a === b) return true;
|
|
5
|
+
if (typeof a !== typeof b) return false;
|
|
6
|
+
if (a === null || b === null) return a === b;
|
|
7
|
+
|
|
8
|
+
if (Array.isArray(a) && Array.isArray(b)) {
|
|
9
|
+
if (a.length !== b.length) return false;
|
|
10
|
+
return a.every((item, i) => deepEqual(item, b[i]));
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
if (typeof a === "object" && typeof b === "object") {
|
|
14
|
+
const aObj = a as Record<string, unknown>;
|
|
15
|
+
const bObj = b as Record<string, unknown>;
|
|
16
|
+
const aKeys = Object.keys(aObj);
|
|
17
|
+
const bKeys = Object.keys(bObj);
|
|
18
|
+
|
|
19
|
+
if (aKeys.length !== bKeys.length) return false;
|
|
20
|
+
return aKeys.every((key) => deepEqual(aObj[key], bObj[key]));
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
return false;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
export function exactMatch<O>(): Evaluator<O> {
|
|
27
|
+
return (prediction: O, groundTruth: O): number => {
|
|
28
|
+
return deepEqual(prediction, groundTruth) ? 1.0 : 0.0;
|
|
29
|
+
};
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
export function partialMatch<
|
|
33
|
+
O extends Record<string, unknown>
|
|
34
|
+
>(): Evaluator<O> {
|
|
35
|
+
return (prediction: O, groundTruth: O): number => {
|
|
36
|
+
const keys = Object.keys(groundTruth);
|
|
37
|
+
if (keys.length === 0) return 1.0;
|
|
38
|
+
|
|
39
|
+
let matchCount = 0;
|
|
40
|
+
for (const key of keys) {
|
|
41
|
+
if (deepEqual(prediction[key], groundTruth[key])) {
|
|
42
|
+
matchCount++;
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
return matchCount / keys.length;
|
|
47
|
+
};
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
export function arrayOverlap<T>(): Evaluator<T[]> {
|
|
51
|
+
return (prediction: T[], groundTruth: T[]): number => {
|
|
52
|
+
if (groundTruth.length === 0) return prediction.length === 0 ? 1.0 : 0.0;
|
|
53
|
+
|
|
54
|
+
const predSet = new Set(prediction.map((x) => JSON.stringify(x)));
|
|
55
|
+
const truthSet = new Set(groundTruth.map((x) => JSON.stringify(x)));
|
|
56
|
+
|
|
57
|
+
let intersection = 0;
|
|
58
|
+
for (const item of predSet) {
|
|
59
|
+
if (truthSet.has(item)) intersection++;
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
const union = new Set([...predSet, ...truthSet]).size;
|
|
63
|
+
return union > 0 ? intersection / union : 0;
|
|
64
|
+
};
|
|
65
|
+
}
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import type { LLMProvider } from "../providers/types.js";
|
|
2
|
+
import type { Evaluator } from "./types.js";
|
|
3
|
+
|
|
4
|
+
export interface LLMJudgeConfig {
|
|
5
|
+
provider: LLMProvider;
|
|
6
|
+
criteria?: string;
|
|
7
|
+
model?: string;
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
export function llmJudge<O>(config: LLMJudgeConfig): Evaluator<O> {
|
|
11
|
+
const { provider, criteria = "accuracy and completeness", model } = config;
|
|
12
|
+
|
|
13
|
+
return async (prediction: O, groundTruth: O): Promise<number> => {
|
|
14
|
+
const prompt = `You are evaluating the quality of an AI response.
|
|
15
|
+
|
|
16
|
+
EXPECTED OUTPUT:
|
|
17
|
+
${JSON.stringify(groundTruth, null, 2)}
|
|
18
|
+
|
|
19
|
+
ACTUAL OUTPUT:
|
|
20
|
+
${JSON.stringify(prediction, null, 2)}
|
|
21
|
+
|
|
22
|
+
EVALUATION CRITERIA: ${criteria}
|
|
23
|
+
|
|
24
|
+
Rate the actual output on a scale from 0.0 to 1.0, where:
|
|
25
|
+
- 1.0 = Perfect match or equivalent
|
|
26
|
+
- 0.5 = Partially correct
|
|
27
|
+
- 0.0 = Completely wrong
|
|
28
|
+
|
|
29
|
+
Respond with ONLY a number between 0.0 and 1.0, nothing else.`;
|
|
30
|
+
|
|
31
|
+
const response = await provider.complete({
|
|
32
|
+
prompt,
|
|
33
|
+
model,
|
|
34
|
+
temperature: 0,
|
|
35
|
+
maxTokens: 10,
|
|
36
|
+
});
|
|
37
|
+
|
|
38
|
+
const score = parseFloat(response.content.trim());
|
|
39
|
+
if (isNaN(score) || score < 0 || score > 1) {
|
|
40
|
+
return 0;
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
return score;
|
|
44
|
+
};
|
|
45
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
// Public entry point for the package: re-exports the whole API surface.

// Re-export zod so consumers can build schemas without a direct dependency.
export { z } from "zod";

// --- Schema definition ---
export { Schema, defineSchema } from "./schema/index.js";
export type { SchemaDefinition } from "./schema/index.js";

// --- LLM providers ---
export {
  createProvider,
  OpenAIProvider,
  AnthropicProvider,
  GoogleProvider,
  OllamaProvider,
  GroqProvider,
  CerebrasProvider,
} from "./providers/index.js";
export type {
  LLMProvider,
  CompletionParams,
  CompletionResult,
  ProviderConfig,
  ProviderName,
} from "./providers/index.js";

// --- Programs (prompt execution strategies) ---
export { Program, Predict, ChainOfThought } from "./program/index.js";
export type {
  ProgramConfig,
  ProgramOutput,
  ProgramTrace,
} from "./program/index.js";

// --- Compiler (prompt optimization) ---
export {
  BootstrapFewShot,
  InstructionRewrite,
  CandidatePool,
  createCompiledProgram,
  loadCompiledProgram,
} from "./compiler/index.js";
export type {
  Evaluator,
  Example,
  CompilationResult,
  CompileOptions,
  CompiledProgram,
  CompiledProgramMeta,
  SerializedCompiledProgram,
  InstructionRewriteOptions,
} from "./compiler/index.js";

// --- Evaluators ---
export {
  exactMatch,
  partialMatch,
  arrayOverlap,
  llmJudge,
} from "./eval/index.js";
export type { LLMJudgeConfig } from "./eval/index.js";

// --- Runtime utilities (concurrency, cost, retry, cache) ---
export {
  createConcurrencyManager,
  CostTracker,
  estimateCost,
  withRetry,
  createRetryWrapper,
  PromptCache,
  createCache,
} from "./runtime/index.js";
export type {
  ConcurrencyManager,
  TokenUsage,
  CostEstimate,
  RetryOptions,
  CacheOptions,
} from "./runtime/index.js";
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { z, ZodRawShape, ZodObject } from "zod";
|
|
2
|
+
import { Program, ProgramConfig, ProgramOutput } from "./Program.js";
|
|
3
|
+
|
|
4
|
+
/**
 * Chain-of-thought program: like `Predict`, but instructs the model to
 * emit an explicit "reasoning" field before the schema's output fields,
 * then strips that field out before output validation.
 */
export class ChainOfThought<
  I extends ZodRawShape,
  O extends ZodRawShape
> extends Program<I, O> {
  /**
   * Run the program once.
   *
   * @param input  Raw input; validated against the schema's input shape.
   * @param config Optional instruction override and few-shot examples.
   * @returns Validated output plus a trace (prompt, model reasoning,
   *          token usage, latency).
   * @throws If the provider call fails, no JSON is found in the response,
   *         or the parsed JSON fails output-schema validation.
   */
  async run(
    input: z.infer<ZodObject<I>>,
    config: ProgramConfig = {}
  ): Promise<ProgramOutput<z.infer<ZodObject<O>>>> {
    const startTime = Date.now();
    const validatedInput = this.schema.validateInput(input);

    // NOTE(review): `||` means an empty-string instruction override falls
    // back to the schema description — confirm that is intended.
    const instructions = config.instructions || this.schema.description;
    const fewShotSection = this.buildFewShotString(config.fewShotExamples);
    const outputKeys = this.schema.getOutputKeys();
    // Keys only, no surrounding braces: the JSON skeleton below prepends
    // a "reasoning" entry, so the base buildOutputFormat() helper (which
    // adds braces) is not usable here.
    const outputFormat = outputKeys
      .map((k) => `"${k}": <value>`)
      .join(",\n  ");

    const prompt = `${instructions}

${
  fewShotSection ? `Here are some examples:\n\n${fewShotSection}\n\n` : ""
}Think through this step-by-step before providing your answer.

Input: ${JSON.stringify(validatedInput)}

Respond with valid JSON in this exact format:
{
  "reasoning": "Your step-by-step thinking process...",
  ${outputFormat}
}

Respond ONLY with valid JSON, no additional text.`;

    const response = await this.provider.complete({
      prompt,
      responseFormat: "json",
    });

    const parsed = this.parseJsonResponse(response.content);
    // Pull the model's free-text reasoning out before schema validation:
    // "reasoning" is not part of the declared output shape.
    const reasoning = parsed.reasoning as string | undefined;
    delete parsed.reasoning;

    const result = this.schema.validateOutput(parsed);

    return {
      result,
      trace: {
        promptUsed: prompt,
        reasoning,
        usage: response.usage,
        latencyMs: Date.now() - startTime,
      },
    };
  }
}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { z, ZodRawShape, ZodObject } from "zod";
|
|
2
|
+
import { Program, ProgramConfig, ProgramOutput } from "./Program.js";
|
|
3
|
+
|
|
4
|
+
export class Predict<
|
|
5
|
+
I extends ZodRawShape,
|
|
6
|
+
O extends ZodRawShape
|
|
7
|
+
> extends Program<I, O> {
|
|
8
|
+
async run(
|
|
9
|
+
input: z.infer<ZodObject<I>>,
|
|
10
|
+
config: ProgramConfig = {}
|
|
11
|
+
): Promise<ProgramOutput<z.infer<ZodObject<O>>>> {
|
|
12
|
+
const startTime = Date.now();
|
|
13
|
+
const validatedInput = this.schema.validateInput(input);
|
|
14
|
+
|
|
15
|
+
const instructions = config.instructions || this.schema.description;
|
|
16
|
+
const fewShotSection = this.buildFewShotString(config.fewShotExamples);
|
|
17
|
+
const outputFormat = this.buildOutputFormat();
|
|
18
|
+
|
|
19
|
+
const prompt = `${instructions}
|
|
20
|
+
|
|
21
|
+
${
|
|
22
|
+
fewShotSection ? `Here are some examples:\n\n${fewShotSection}\n\n` : ""
|
|
23
|
+
}Now process this input and respond with valid JSON matching this format:
|
|
24
|
+
${outputFormat}
|
|
25
|
+
|
|
26
|
+
Input: ${JSON.stringify(validatedInput)}
|
|
27
|
+
|
|
28
|
+
Respond ONLY with valid JSON, no additional text.`;
|
|
29
|
+
|
|
30
|
+
const response = await this.provider.complete({
|
|
31
|
+
prompt,
|
|
32
|
+
responseFormat: "json",
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
const parsed = this.parseJsonResponse(response.content);
|
|
36
|
+
const result = this.schema.validateOutput(parsed);
|
|
37
|
+
|
|
38
|
+
return {
|
|
39
|
+
result,
|
|
40
|
+
trace: {
|
|
41
|
+
promptUsed: prompt,
|
|
42
|
+
usage: response.usage,
|
|
43
|
+
latencyMs: Date.now() - startTime,
|
|
44
|
+
},
|
|
45
|
+
};
|
|
46
|
+
}
|
|
47
|
+
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import { z, ZodRawShape, ZodObject } from "zod";
|
|
2
|
+
import { Schema } from "../schema/defineSchema.js";
|
|
3
|
+
import type { LLMProvider } from "../providers/types.js";
|
|
4
|
+
|
|
5
|
+
/** Per-call configuration shared by all program types. */
export interface ProgramConfig {
  // Overrides the schema's description as the prompt's instruction block.
  instructions?: string;
  // Demonstration pairs serialized verbatim into the prompt.
  fewShotExamples?: Array<{
    input: Record<string, unknown>;
    output: Record<string, unknown>;
  }>;
}

/** Diagnostic record attached to every program run. */
export interface ProgramTrace {
  // The exact prompt sent to the provider.
  promptUsed: string;
  // Present only for programs that elicit explicit reasoning
  // (e.g. ChainOfThought).
  reasoning?: string;
  usage: { inputTokens: number; outputTokens: number };
  latencyMs: number;
}

/** A validated result together with its execution trace. */
export interface ProgramOutput<O> {
  result: O;
  trace: ProgramTrace;
}

/**
 * Base class for prompt programs. Holds the schema (input/output shapes
 * plus task description) and the LLM provider, and supplies prompt-building
 * and response-parsing helpers to subclasses.
 */
export abstract class Program<I extends ZodRawShape, O extends ZodRawShape> {
  constructor(
    protected schema: Schema<I, O>,
    protected provider: LLMProvider
  ) {}

  /** Execute the program once against `input`. */
  abstract run(
    input: z.infer<ZodObject<I>>,
    config?: ProgramConfig
  ): Promise<ProgramOutput<z.infer<ZodObject<O>>>>;

  /**
   * Render few-shot examples as "Example N: Input/Output" paragraphs,
   * or "" when no examples are supplied.
   */
  protected buildFewShotString(
    examples: ProgramConfig["fewShotExamples"]
  ): string {
    if (!examples || examples.length === 0) return "";

    return examples
      .map(
        (ex, i) =>
          `Example ${i + 1}:\nInput: ${JSON.stringify(
            ex.input
          )}\nOutput: ${JSON.stringify(ex.output)}`
      )
      .join("\n\n");
  }

  /** JSON skeleton listing every output key as `"key": <value>`. */
  protected buildOutputFormat(): string {
    const keys = this.schema.getOutputKeys();
    const format = keys.map((k) => `"${k}": <value>`).join(",\n  ");
    return `{\n  ${format}\n}`;
  }

  /**
   * Extract and parse the brace-delimited span of `content`.
   *
   * NOTE(review): the greedy /\{[\s\S]*\}/ grabs from the FIRST "{" to the
   * LAST "}" in the response, so trailing prose containing "}" would
   * corrupt the parse — confirm providers reliably emit JSON-only output.
   *
   * @throws Error when no braces are found; SyntaxError from JSON.parse
   *         when the span is not valid JSON.
   */
  protected parseJsonResponse(content: string): Record<string, unknown> {
    const jsonMatch = content.match(/\{[\s\S]*\}/);
    if (!jsonMatch) {
      throw new Error("No JSON found in response");
    }
    return JSON.parse(jsonMatch[0]);
  }
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
LLMProvider,
|
|
3
|
+
CompletionParams,
|
|
4
|
+
CompletionResult,
|
|
5
|
+
ProviderConfig,
|
|
6
|
+
} from "./types.js";
|
|
7
|
+
|
|
8
|
+
export class AnthropicProvider implements LLMProvider {
|
|
9
|
+
name = "anthropic";
|
|
10
|
+
defaultModel: string;
|
|
11
|
+
private apiKey: string;
|
|
12
|
+
private baseUrl: string;
|
|
13
|
+
|
|
14
|
+
constructor(config: ProviderConfig) {
|
|
15
|
+
this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY || "";
|
|
16
|
+
this.baseUrl = config.baseUrl || "https://api.anthropic.com/v1";
|
|
17
|
+
this.defaultModel = config.defaultModel || "claude-3-5-sonnet-20241022";
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
async complete(params: CompletionParams): Promise<CompletionResult> {
|
|
21
|
+
const response = await fetch(`${this.baseUrl}/messages`, {
|
|
22
|
+
method: "POST",
|
|
23
|
+
headers: {
|
|
24
|
+
"Content-Type": "application/json",
|
|
25
|
+
"x-api-key": this.apiKey,
|
|
26
|
+
"anthropic-version": "2023-06-01",
|
|
27
|
+
},
|
|
28
|
+
body: JSON.stringify({
|
|
29
|
+
model: params.model || this.defaultModel,
|
|
30
|
+
max_tokens: params.maxTokens ?? 1024,
|
|
31
|
+
messages: [{ role: "user", content: params.prompt }],
|
|
32
|
+
}),
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
if (!response.ok) {
|
|
36
|
+
const error = await response.text();
|
|
37
|
+
throw new Error(`Anthropic API error: ${response.status} - ${error}`);
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
const data = (await response.json()) as {
|
|
41
|
+
content: Array<{ type: string; text: string }>;
|
|
42
|
+
usage: { input_tokens: number; output_tokens: number };
|
|
43
|
+
};
|
|
44
|
+
|
|
45
|
+
const textContent = data.content.find((c) => c.type === "text");
|
|
46
|
+
|
|
47
|
+
return {
|
|
48
|
+
content: textContent?.text || "",
|
|
49
|
+
usage: {
|
|
50
|
+
inputTokens: data.usage.input_tokens,
|
|
51
|
+
outputTokens: data.usage.output_tokens,
|
|
52
|
+
},
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
LLMProvider,
|
|
3
|
+
CompletionParams,
|
|
4
|
+
CompletionResult,
|
|
5
|
+
ProviderConfig,
|
|
6
|
+
} from "./types.js";
|
|
7
|
+
|
|
8
|
+
export class CerebrasProvider implements LLMProvider {
|
|
9
|
+
name = "cerebras";
|
|
10
|
+
defaultModel: string;
|
|
11
|
+
private apiKey: string;
|
|
12
|
+
private baseUrl: string;
|
|
13
|
+
|
|
14
|
+
constructor(config: ProviderConfig) {
|
|
15
|
+
this.apiKey = config.apiKey || process.env.CEREBRAS_API_KEY || "";
|
|
16
|
+
this.baseUrl = config.baseUrl || "https://api.cerebras.ai/v1";
|
|
17
|
+
this.defaultModel = config.defaultModel || "llama3.1-8b";
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
async complete(params: CompletionParams): Promise<CompletionResult> {
|
|
21
|
+
const response = await fetch(`${this.baseUrl}/chat/completions`, {
|
|
22
|
+
method: "POST",
|
|
23
|
+
headers: {
|
|
24
|
+
"Content-Type": "application/json",
|
|
25
|
+
Authorization: `Bearer ${this.apiKey}`,
|
|
26
|
+
},
|
|
27
|
+
body: JSON.stringify({
|
|
28
|
+
model: params.model || this.defaultModel,
|
|
29
|
+
messages: [{ role: "user", content: params.prompt }],
|
|
30
|
+
temperature: params.temperature ?? 0.7,
|
|
31
|
+
max_tokens: params.maxTokens ?? 1024,
|
|
32
|
+
}),
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
if (!response.ok) {
|
|
36
|
+
const error = await response.text();
|
|
37
|
+
throw new Error(`Cerebras API error: ${response.status} - ${error}`);
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
const data = (await response.json()) as {
|
|
41
|
+
choices: Array<{ message: { content: string } }>;
|
|
42
|
+
usage: { prompt_tokens: number; completion_tokens: number };
|
|
43
|
+
};
|
|
44
|
+
|
|
45
|
+
return {
|
|
46
|
+
content: data.choices[0].message.content,
|
|
47
|
+
usage: {
|
|
48
|
+
inputTokens: data.usage.prompt_tokens,
|
|
49
|
+
outputTokens: data.usage.completion_tokens,
|
|
50
|
+
},
|
|
51
|
+
};
|
|
52
|
+
}
|
|
53
|
+
}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
LLMProvider,
|
|
3
|
+
CompletionParams,
|
|
4
|
+
CompletionResult,
|
|
5
|
+
ProviderConfig,
|
|
6
|
+
} from "./types.js";
|
|
7
|
+
|
|
8
|
+
export class GoogleProvider implements LLMProvider {
|
|
9
|
+
name = "google";
|
|
10
|
+
defaultModel: string;
|
|
11
|
+
private apiKey: string;
|
|
12
|
+
private baseUrl: string;
|
|
13
|
+
|
|
14
|
+
constructor(config: ProviderConfig) {
|
|
15
|
+
this.apiKey = config.apiKey || process.env.GOOGLE_API_KEY || "";
|
|
16
|
+
this.baseUrl =
|
|
17
|
+
config.baseUrl || "https://generativelanguage.googleapis.com/v1beta";
|
|
18
|
+
this.defaultModel = config.defaultModel || "gemini-2.0-flash";
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
async complete(params: CompletionParams): Promise<CompletionResult> {
|
|
22
|
+
const model = params.model || this.defaultModel;
|
|
23
|
+
const url = `${this.baseUrl}/models/${model}:generateContent?key=${this.apiKey}`;
|
|
24
|
+
|
|
25
|
+
const response = await fetch(url, {
|
|
26
|
+
method: "POST",
|
|
27
|
+
headers: {
|
|
28
|
+
"Content-Type": "application/json",
|
|
29
|
+
},
|
|
30
|
+
body: JSON.stringify({
|
|
31
|
+
contents: [{ parts: [{ text: params.prompt }] }],
|
|
32
|
+
generationConfig: {
|
|
33
|
+
temperature: params.temperature ?? 0.7,
|
|
34
|
+
maxOutputTokens: params.maxTokens ?? 1024,
|
|
35
|
+
},
|
|
36
|
+
}),
|
|
37
|
+
});
|
|
38
|
+
|
|
39
|
+
if (!response.ok) {
|
|
40
|
+
const error = await response.text();
|
|
41
|
+
throw new Error(`Google API error: ${response.status} - ${error}`);
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const data = (await response.json()) as {
|
|
45
|
+
candidates: Array<{ content: { parts: Array<{ text: string }> } }>;
|
|
46
|
+
usageMetadata: { promptTokenCount: number; candidatesTokenCount: number };
|
|
47
|
+
};
|
|
48
|
+
|
|
49
|
+
return {
|
|
50
|
+
content: data.candidates[0]?.content?.parts[0]?.text || "",
|
|
51
|
+
usage: {
|
|
52
|
+
inputTokens: data.usageMetadata?.promptTokenCount || 0,
|
|
53
|
+
outputTokens: data.usageMetadata?.candidatesTokenCount || 0,
|
|
54
|
+
},
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
LLMProvider,
|
|
3
|
+
CompletionParams,
|
|
4
|
+
CompletionResult,
|
|
5
|
+
ProviderConfig,
|
|
6
|
+
} from "./types.js";
|
|
7
|
+
|
|
8
|
+
export class GroqProvider implements LLMProvider {
|
|
9
|
+
name = "groq";
|
|
10
|
+
defaultModel: string;
|
|
11
|
+
private apiKey: string;
|
|
12
|
+
private baseUrl: string;
|
|
13
|
+
|
|
14
|
+
constructor(config: ProviderConfig) {
|
|
15
|
+
this.apiKey = config.apiKey || process.env.GROQ_API_KEY || "";
|
|
16
|
+
this.baseUrl = config.baseUrl || "https://api.groq.com/openai/v1";
|
|
17
|
+
this.defaultModel = config.defaultModel || "llama-3.3-70b-versatile";
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
async complete(params: CompletionParams): Promise<CompletionResult> {
|
|
21
|
+
const response = await fetch(`${this.baseUrl}/chat/completions`, {
|
|
22
|
+
method: "POST",
|
|
23
|
+
headers: {
|
|
24
|
+
"Content-Type": "application/json",
|
|
25
|
+
Authorization: `Bearer ${this.apiKey}`,
|
|
26
|
+
},
|
|
27
|
+
body: JSON.stringify({
|
|
28
|
+
model: params.model || this.defaultModel,
|
|
29
|
+
messages: [{ role: "user", content: params.prompt }],
|
|
30
|
+
temperature: params.temperature ?? 0.7,
|
|
31
|
+
max_tokens: params.maxTokens ?? 1024,
|
|
32
|
+
response_format:
|
|
33
|
+
params.responseFormat === "json"
|
|
34
|
+
? { type: "json_object" }
|
|
35
|
+
: undefined,
|
|
36
|
+
}),
|
|
37
|
+
});
|
|
38
|
+
|
|
39
|
+
if (!response.ok) {
|
|
40
|
+
const error = await response.text();
|
|
41
|
+
throw new Error(`Groq API error: ${response.status} - ${error}`);
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const data = (await response.json()) as {
|
|
45
|
+
choices: Array<{ message: { content: string } }>;
|
|
46
|
+
usage: { prompt_tokens: number; completion_tokens: number };
|
|
47
|
+
};
|
|
48
|
+
|
|
49
|
+
return {
|
|
50
|
+
content: data.choices[0].message.content,
|
|
51
|
+
usage: {
|
|
52
|
+
inputTokens: data.usage.prompt_tokens,
|
|
53
|
+
outputTokens: data.usage.completion_tokens,
|
|
54
|
+
},
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import { OpenAIProvider } from "./openai.js";
|
|
2
|
+
import { AnthropicProvider } from "./anthropic.js";
|
|
3
|
+
import { GoogleProvider } from "./google.js";
|
|
4
|
+
import { OllamaProvider } from "./ollama.js";
|
|
5
|
+
import { GroqProvider } from "./groq.js";
|
|
6
|
+
import { CerebrasProvider } from "./cerebras.js";
|
|
7
|
+
import type { LLMProvider, ProviderConfig } from "./types.js";
|
|
8
|
+
|
|
9
|
+
export type ProviderName =
|
|
10
|
+
| "openai"
|
|
11
|
+
| "anthropic"
|
|
12
|
+
| "google"
|
|
13
|
+
| "ollama"
|
|
14
|
+
| "groq"
|
|
15
|
+
| "cerebras";
|
|
16
|
+
|
|
17
|
+
export function createProvider(
|
|
18
|
+
name: ProviderName,
|
|
19
|
+
config: ProviderConfig = {}
|
|
20
|
+
): LLMProvider {
|
|
21
|
+
switch (name) {
|
|
22
|
+
case "openai":
|
|
23
|
+
return new OpenAIProvider(config);
|
|
24
|
+
case "anthropic":
|
|
25
|
+
return new AnthropicProvider(config);
|
|
26
|
+
case "google":
|
|
27
|
+
return new GoogleProvider(config);
|
|
28
|
+
case "ollama":
|
|
29
|
+
return new OllamaProvider(config);
|
|
30
|
+
case "groq":
|
|
31
|
+
return new GroqProvider(config);
|
|
32
|
+
case "cerebras":
|
|
33
|
+
return new CerebrasProvider(config);
|
|
34
|
+
default:
|
|
35
|
+
throw new Error(`Unknown provider: ${name}`);
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
export { OpenAIProvider } from "./openai.js";
|
|
40
|
+
export { AnthropicProvider } from "./anthropic.js";
|
|
41
|
+
export { GoogleProvider } from "./google.js";
|
|
42
|
+
export { OllamaProvider } from "./ollama.js";
|
|
43
|
+
export { GroqProvider } from "./groq.js";
|
|
44
|
+
export { CerebrasProvider } from "./cerebras.js";
|
|
45
|
+
export type {
|
|
46
|
+
LLMProvider,
|
|
47
|
+
CompletionParams,
|
|
48
|
+
CompletionResult,
|
|
49
|
+
ProviderConfig,
|
|
50
|
+
} from "./types.js";
|