@cbuff/llm 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Charles Buffington
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,110 @@
+ # @cbuff/llm
+
+ Type-safe LLM wrapper built on [Vercel AI SDK](https://sdk.vercel.ai/) with provider/model registry and optional cost tracking.
+
+ ## Installation
+
+ ```bash
+ bun add @cbuff/llm ai
+ ```
+
+ Install the provider SDKs you need:
+
+ ```bash
+ bun add @ai-sdk/openai @ai-sdk/anthropic @ai-sdk/google
+ ```
+
+ ## Usage
+
+ ```typescript
+ import { createLLM } from "@cbuff/llm";
+ import { createOpenAI } from "@ai-sdk/openai";
+
+ const llm = createLLM({
+   providers: {
+     openai: () => createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+     anthropic: async () => {
+       const { createAnthropic } = await import("@ai-sdk/anthropic");
+       return createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
+     },
+   },
+   models: {
+     fast: { provider: "openai", id: "gpt-4o-mini" },
+     smart: { provider: "openai", id: "gpt-4o", costs: { input: 2.5, output: 10 } },
+     claude: { provider: "anthropic", id: "claude-sonnet-4-20250514", costs: { input: 3, output: 15 } },
+   },
+ });
+
+ // Full autocomplete on model names
+ const { data, metadata } = await llm.generate({
+   model: "fast",
+   prompt: "Hello, world!",
+ });
+
+ console.log(data);
+ console.log(metadata.totalCostUsd); // undefined if costs not configured
+ ```
+
+ ## Features
+
+ - **Type-safe model selection** — Full autocomplete on model aliases, provider references validated at compile time
+ - **Lazy provider loading** — Providers can be sync or async, loaded on first use and cached
+ - **Optional cost tracking** — Define `costs: { input, output }` per model (USD per 1M tokens), skip if you don't need it
+ - **Unified config** — Single `createLLM()` call with providers and models in one object
+
+ ## API
+
+ ### `createLLM(config)`
+
+ Creates a typed LLM client.
+
+ ```typescript
+ const llm = createLLM({
+   providers: {
+     [key: string]: () => Provider | Promise<Provider>
+   },
+   models: {
+     [alias: string]: {
+       provider: string; // must match a key in providers
+       id: string; // actual model ID sent to provider
+       costs?: { input: number; output: number }; // USD per 1M tokens
+     }
+   }
+ });
+ ```
+
+ ### `llm.generate(params)`
+
+ Generate text or structured output.
+
+ ```typescript
+ const { data, metadata } = await llm.generate({
+   model: "fast", // required, autocompletes to your model aliases
+   prompt: "Hello", // required
+   system: "Be helpful", // optional
+   temperature: 0.7, // optional
+   maxOutputTokens: 1000, // optional
+   output: schema, // optional, for structured output
+   logKey: "my-request", // optional, logs timing and cost
+ });
+ ```
+
+ **Returns:**
+
+ ```typescript
+ {
+   data: string | T, // text or structured output
+   metadata: {
+     responseTimeMs: number,
+     inputTokens: number,
+     outputTokens: number,
+     inputCostUsd?: number, // undefined if costs not configured
+     outputCostUsd?: number,
+     totalCostUsd?: number,
+   }
+ }
+ ```
+
+ ## License
+
+ MIT
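The README documents `output: schema` but never shows a structured call. A minimal sketch of what that parameter looks like in practice, assuming the `ai` package's experimental `Output.object` helper and a `zod` schema (neither is documented by this package; `llm` is the client from the Usage example above):

```typescript
import { Output } from "ai";
import { z } from "zod";

// Hypothetical structured-output call against the README's `llm` client.
// `output` takes an AI SDK Output spec, which the implementation forwards
// to generateText as `experimental_output` (see dist/index.js below).
const { data, metadata } = await llm.generate({
  model: "smart",
  prompt: "Extract the city and country from: 'I live in Paris, France.'",
  output: Output.object({
    schema: z.object({ city: z.string(), country: z.string() }),
  }),
  logKey: "extract-location",
});

// `data` is inferred from the schema: { city: string; country: string }
console.log(data.city, data.country);
```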
package/dist/index.d.ts ADDED
@@ -0,0 +1,31 @@
+ import { type Output } from "ai";
+ import type { GenerateParams, GenerateResponse, ModelEntry, ProviderFactory } from "./types.js";
+ export type { GenerateMetadata, GenerateParams, GenerateResponse, LanguageModelProvider, ModelEntry, ProviderFactory } from "./types.js";
+ /**
+  * Creates a type-safe LLM client with the given providers and models.
+  *
+  * @example
+  * ```typescript
+  * import { createLLM } from "@cbuff/llm";
+  * import { createOpenAI } from "@ai-sdk/openai";
+  *
+  * const llm = createLLM({
+  *   providers: {
+  *     openai: () => createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+  *   },
+  *   models: {
+  *     fast: { provider: "openai", id: "gpt-4o-mini" },
+  *     smart: { provider: "openai", id: "gpt-4o", costs: { input: 2.5, output: 10 } },
+  *   },
+  * });
+  *
+  * const { data } = await llm.generate({ model: "fast", prompt: "Hello" });
+  * ```
+  */
+ export declare function createLLM<TProviders extends Record<string, ProviderFactory>, TModels extends Record<string, ModelEntry<keyof TProviders & string>>>(config: {
+     providers: TProviders;
+     models: TModels;
+ }): {
+     generate: <TOutput extends Output.Output = Output.Output<string, string>>(params: GenerateParams<keyof TModels & string, TOutput>) => Promise<GenerateResponse<TOutput>>;
+ };
+ //# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAoC,KAAK,MAAM,EAAE,MAAM,IAAI,CAAC;AACnE,OAAO,KAAK,EAEV,cAAc,EACd,gBAAgB,EAEhB,UAAU,EACV,eAAe,EAChB,MAAM,YAAY,CAAC;AAGpB,YAAY,EACV,gBAAgB,EAAE,cAAc,EAChC,gBAAgB,EAAE,qBAAqB,EAAE,UAAU,EAAE,eAAe,EACrE,MAAM,YAAY,CAAC;AAgBpB;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,wBAAgB,SAAS,CACvB,UAAU,SAAS,MAAM,CAAC,MAAM,EAAE,eAAe,CAAC,EAClD,OAAO,SAAS,MAAM,CAAC,MAAM,EAAE,UAAU,CAAC,MAAM,UAAU,GAAG,MAAM,CAAC,CAAC,EACrE,MAAM,EAAE;IAAE,SAAS,EAAE,UAAU,CAAC;IAAC,MAAM,EAAE,OAAO,CAAA;CAAE;eAuD1B,OAAO,SAAS,MAAM,CAAC,MAAM,0CAC3C,cAAc,CAAC,MAAM,OAAO,GAAG,MAAM,EAAE,OAAO,CAAC,KACtD,OAAO,CAAC,gBAAgB,CAAC,OAAO,CAAC,CAAC;EAyCtC"}
package/dist/index.js ADDED
@@ -0,0 +1,112 @@
+ import { generateText } from "ai";
+ // ############################################################################
+ // Cost Formatter
+ // ############################################################################
+ const costFormatter = new Intl.NumberFormat("en-US", {
+     style: "currency",
+     currency: "USD",
+     minimumFractionDigits: 4,
+ });
+ // ############################################################################
+ // createLLM Factory
+ // ############################################################################
+ /**
+  * Creates a type-safe LLM client with the given providers and models.
+  *
+  * @example
+  * ```typescript
+  * import { createLLM } from "@cbuff/llm";
+  * import { createOpenAI } from "@ai-sdk/openai";
+  *
+  * const llm = createLLM({
+  *   providers: {
+  *     openai: () => createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+  *   },
+  *   models: {
+  *     fast: { provider: "openai", id: "gpt-4o-mini" },
+  *     smart: { provider: "openai", id: "gpt-4o", costs: { input: 2.5, output: 10 } },
+  *   },
+  * });
+  *
+  * const { data } = await llm.generate({ model: "fast", prompt: "Hello" });
+  * ```
+  */
+ export function createLLM(config) {
+     // Provider cache: lazily resolved and stored
+     const providerCache = new Map();
+     /**
+      * Resolves a provider by key, using cache if available.
+      */
+     async function getProvider(providerKey) {
+         const cached = providerCache.get(providerKey);
+         if (cached)
+             return cached;
+         const factory = config.providers[providerKey];
+         const provider = await Promise.resolve(factory());
+         providerCache.set(providerKey, provider);
+         return provider;
+     }
+     /**
+      * Gets a language model instance for the given model alias.
+      */
+     async function getModel(modelKey) {
+         const modelConfig = config.models[modelKey];
+         const provider = await getProvider(modelConfig.provider);
+         return provider(modelConfig.id);
+     }
+     /**
+      * Calculates costs for the given model and token usage.
+      * Returns undefined values if costs are not configured.
+      */
+     function calculateCosts(modelKey, inputTokens, outputTokens) {
+         const modelConfig = config.models[modelKey];
+         if (!modelConfig.costs) {
+             return {
+                 inputCostUsd: undefined,
+                 outputCostUsd: undefined,
+                 totalCostUsd: undefined,
+             };
+         }
+         const inputCostUsd = (inputTokens / 1_000_000) * modelConfig.costs.input;
+         const outputCostUsd = (outputTokens / 1_000_000) * modelConfig.costs.output;
+         const totalCostUsd = inputCostUsd + outputCostUsd;
+         return { inputCostUsd, outputCostUsd, totalCostUsd };
+     }
+     /**
+      * Generate text or structured output using the configured models.
+      */
+     async function generate(params) {
+         const model = await getModel(params.model);
+         const startTime = Date.now();
+         const result = await generateText({
+             model,
+             prompt: params.prompt,
+             system: params.system,
+             temperature: params.temperature,
+             maxOutputTokens: params.maxOutputTokens,
+             experimental_output: params.output,
+         });
+         const endTime = Date.now();
+         const responseTimeMs = endTime - startTime;
+         const inputTokens = result.usage?.inputTokens ?? 0;
+         const outputTokens = result.usage?.outputTokens ?? 0;
+         const costs = calculateCosts(params.model, inputTokens, outputTokens);
+         // Log if requested
+         if (params.logKey) {
+             const costStr = costs.totalCostUsd !== undefined
+                 ? ` cost: ${costFormatter.format(costs.totalCostUsd)} (in: ${costFormatter.format(costs.inputCostUsd)}, out: ${costFormatter.format(costs.outputCostUsd)})`
+                 : "";
+             console.log(`[LLM][${params.logKey}] ${(responseTimeMs / 1000).toFixed(2)}s using ${String(params.model)}${costStr}`);
+         }
+         return {
+             data: (params.output ? result.experimental_output : result.text),
+             metadata: {
+                 responseTimeMs,
+                 inputTokens,
+                 outputTokens,
+                 ...costs,
+             },
+         };
+     }
+     return { generate };
+ }
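For concreteness, the per-1M-token pricing in `calculateCosts` works out as below. The token counts are hypothetical; the rates are the README's `smart` model (2.5 in / 10 out, USD per 1M tokens):

```typescript
// Worked example of the cost formula from calculateCosts above.
// Token counts are made up; rates match the README's "smart" model.
const costs = { input: 2.5, output: 10 }; // USD per 1M tokens
const inputTokens = 1_200;
const outputTokens = 350;

const inputCostUsd = (inputTokens / 1_000_000) * costs.input;    // 0.003
const outputCostUsd = (outputTokens / 1_000_000) * costs.output; // 0.0035
const totalCostUsd = inputCostUsd + outputCostUsd;               // 0.0065

// With logKey set, generate() would print something like:
// [LLM][my-request] 1.23s using smart cost: $0.0065 (in: $0.0030, out: $0.0035)
```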
package/dist/types.d.ts ADDED
@@ -0,0 +1,83 @@
+ import type { generateText, LanguageModel, Output, InferGenerateOutput } from "ai";
+ /**
+  * A provider instance that can create language models.
+  * This is the return type of provider factories like `createOpenAI()`.
+  */
+ export type LanguageModelProvider = (modelId: string) => LanguageModel;
+ /**
+  * A factory function that creates a provider instance.
+  * Can be sync or async (for lazy-loading provider SDKs).
+  */
+ export type ProviderFactory = () => LanguageModelProvider | Promise<LanguageModelProvider>;
+ /**
+  * Configuration for a single model.
+  * @template TProviders - Union of available provider keys
+  */
+ export type ModelEntry<TProviders extends string> = {
+     /** The provider key to use for this model */
+     provider: TProviders;
+     /** The actual model ID to pass to the provider */
+     id: string;
+     /** Optional cost tracking (per 1M tokens in USD) */
+     costs?: {
+         input: number;
+         output: number;
+     };
+ };
+ /**
+  * Full configuration object for createLLM.
+  * @template TProviders - Record of provider factories
+  * @template TModels - Record of model configurations
+  */
+ export type LLMConfig<TProviders extends Record<string, ProviderFactory>, TModels extends Record<string, ModelEntry<keyof TProviders & string>>> = {
+     providers: TProviders;
+     models: TModels;
+ };
+ type GenerateTextParams = Parameters<typeof generateText>[0];
+ type DefaultOutput = Output.Output<string, string>;
+ /**
+  * Parameters for the generate function.
+  * @template TModels - Union of available model keys
+  * @template TOutput - Output schema type
+  */
+ export type GenerateParams<TModels extends string, TOutput extends Output.Output = DefaultOutput> = {
+     /** The model alias to use */
+     model: TModels;
+     /** The user prompt */
+     prompt: string;
+     /** Optional system prompt */
+     system?: string;
+     /** Optional output schema for structured generation */
+     output?: TOutput;
+     /** Optional key for logging */
+     logKey?: string;
+ } & Pick<GenerateTextParams, "temperature" | "maxOutputTokens">;
+ /**
+  * Response metadata from a generate call.
+  */
+ export type GenerateMetadata = {
+     /** Response time in milliseconds */
+     responseTimeMs: number;
+     /** Number of input tokens used */
+     inputTokens: number;
+     /** Number of output tokens generated */
+     outputTokens: number;
+     /** Cost of input tokens in USD (undefined if costs not configured) */
+     inputCostUsd?: number;
+     /** Cost of output tokens in USD (undefined if costs not configured) */
+     outputCostUsd?: number;
+     /** Total cost in USD (undefined if costs not configured) */
+     totalCostUsd?: number;
+ };
+ /**
+  * Response from a generate call.
+  * @template TOutput - Output schema type
+  */
+ export type GenerateResponse<TOutput extends Output.Output = DefaultOutput> = {
+     /** The generated data */
+     data: InferGenerateOutput<TOutput>;
+     /** Metadata about the generation */
+     metadata: GenerateMetadata;
+ };
+ export {};
+ //# sourceMappingURL=types.d.ts.map
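These generics are what make the client type-safe: `ModelEntry<keyof TProviders & string>` rejects a model entry that names an unknown provider, and `GenerateParams<keyof TModels & string, ...>` narrows `model` to the configured aliases. A compile-time sketch (the aliases below are hypothetical):

```typescript
import { createLLM } from "@cbuff/llm";
import { createOpenAI } from "@ai-sdk/openai";

const llm = createLLM({
  providers: {
    openai: () => createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
  },
  models: {
    fast: { provider: "openai", id: "gpt-4o-mini" },
    // provider: "anthropic" would fail to compile here:
    // ModelEntry<"openai"> only accepts keys of `providers`.
  },
});

await llm.generate({ model: "fast", prompt: "ping" }); // OK
// await llm.generate({ model: "slow", prompt: "ping" });
// ^ type error: "slow" is not a key of `models`
```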
package/dist/types.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,aAAa,EAAE,MAAM,EAAE,mBAAmB,EAAE,MAAM,IAAI,CAAC;AAMnF;;;GAGG;AACH,MAAM,MAAM,qBAAqB,GAAG,CAAC,OAAO,EAAE,MAAM,KAAK,aAAa,CAAC;AAEvE;;;GAGG;AACH,MAAM,MAAM,eAAe,GAAG,MAAM,qBAAqB,GAAG,OAAO,CAAC,qBAAqB,CAAC,CAAC;AAM3F;;;GAGG;AACH,MAAM,MAAM,UAAU,CAAC,UAAU,SAAS,MAAM,IAAI;IAClD,6CAA6C;IAC7C,QAAQ,EAAE,UAAU,CAAC;IACrB,kDAAkD;IAClD,EAAE,EAAE,MAAM,CAAC;IACX,oDAAoD;IACpD,KAAK,CAAC,EAAE;QACN,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;KAChB,CAAC;CACH,CAAC;AAMF;;;;GAIG;AACH,MAAM,MAAM,SAAS,CACnB,UAAU,SAAS,MAAM,CAAC,MAAM,EAAE,eAAe,CAAC,EAClD,OAAO,SAAS,MAAM,CAAC,MAAM,EAAE,UAAU,CAAC,MAAM,UAAU,GAAG,MAAM,CAAC,CAAC,IACnE;IACF,SAAS,EAAE,UAAU,CAAC;IACtB,MAAM,EAAE,OAAO,CAAC;CACjB,CAAC;AAMF,KAAK,kBAAkB,GAAG,UAAU,CAAC,OAAO,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC;AAE7D,KAAK,aAAa,GAAG,MAAM,CAAC,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;AAEnD;;;;GAIG;AACH,MAAM,MAAM,cAAc,CACxB,OAAO,SAAS,MAAM,EACtB,OAAO,SAAS,MAAM,CAAC,MAAM,GAAG,aAAa,IAC3C;IACF,6BAA6B;IAC7B,KAAK,EAAE,OAAO,CAAC;IACf,sBAAsB;IACtB,MAAM,EAAE,MAAM,CAAC;IACf,6BAA6B;IAC7B,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,uDAAuD;IACvD,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,+BAA+B;IAC/B,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB,GAAG,IAAI,CAAC,kBAAkB,EAAE,aAAa,GAAG,iBAAiB,CAAC,CAAC;AAEhE;;GAEG;AACH,MAAM,MAAM,gBAAgB,GAAG;IAC7B,oCAAoC;IACpC,cAAc,EAAE,MAAM,CAAC;IACvB,kCAAkC;IAClC,WAAW,EAAE,MAAM,CAAC;IACpB,wCAAwC;IACxC,YAAY,EAAE,MAAM,CAAC;IACrB,sEAAsE;IACtE,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,uEAAuE;IACvE,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,4DAA4D;IAC5D,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB,CAAC;AAEF;;;GAGG;AACH,MAAM,MAAM,gBAAgB,CAAC,OAAO,SAAS,MAAM,CAAC,MAAM,GAAG,aAAa,IAAI;IAC5E,yBAAyB;IACzB,IAAI,EAAE,mBAAmB,CAAC,OAAO,CAAC,CAAC;IACnC,oCAAoC;IACpC,QAAQ,EAAE,gBAAgB,CAAC;CAC5B,CAAC"}
package/dist/types.js ADDED
@@ -0,0 +1 @@
+ export {};
package/package.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "name": "@cbuff/llm",
+   "version": "1.0.0",
+   "author": "Charles Buffington",
+   "description": "Type-safe LLM wrapper with provider/model registry and optional cost tracking",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/C41M50N/llm.git"
+   },
+   "type": "module",
+   "main": "dist/index.js",
+   "types": "dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.js"
+     }
+   },
+   "files": [
+     "dist"
+   ],
+   "scripts": {
+     "build": "tsc -p tsconfig.build.json",
+     "dev": "tsc -p tsconfig.build.json --watch",
+     "typecheck": "tsc --noEmit"
+   },
+   "devDependencies": {
+     "@types/bun": "latest"
+   },
+   "peerDependencies": {
+     "ai": "^6.0.39",
+     "typescript": "^5"
+   },
+   "peerDependenciesMeta": {
+     "@ai-sdk/openai": {
+       "optional": true
+     },
+     "@ai-sdk/anthropic": {
+       "optional": true
+     },
+     "@ai-sdk/google": {
+       "optional": true
+     },
+     "@openrouter/ai-sdk-provider": {
+       "optional": true
+     }
+   },
+   "keywords": [
+     "llm",
+     "ai",
+     "ai-sdk",
+     "openai",
+     "anthropic",
+     "google",
+     "vercel-ai-sdk",
+     "typescript"
+   ],
+   "license": "MIT"
+ }
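The optional peer dependencies pair with async provider factories: a provider SDK only needs to be installed (and is only imported) if one of its models is actually resolved. A sketch for the OpenRouter entry, assuming `@openrouter/ai-sdk-provider` exports a `createOpenRouter` factory and using an example model ID (neither is documented by this package):

```typescript
import { createLLM } from "@cbuff/llm";

const llm = createLLM({
  providers: {
    // Lazy factory: the optional peer dep is loaded on first use and then
    // cached by createLLM's provider cache (see dist/index.js above).
    // Assumes @openrouter/ai-sdk-provider exports createOpenRouter.
    openrouter: async () => {
      const { createOpenRouter } = await import("@openrouter/ai-sdk-provider");
      return createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY });
    },
  },
  models: {
    cheap: { provider: "openrouter", id: "meta-llama/llama-3.1-8b-instruct" },
  },
});
```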