smoltalk 0.0.53 → 0.0.54

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # Smoltalk
2
2
 
3
- Smoltalk is a package that exposes a common interface across different LLM providers. It exists because I think it's important to have an npm package that allows users to try out different kinds of LLMs, and prevents vendor lock-in. Using a different LLM should be as simple as switching out a model name.
3
+ Smoltalk exposes a common API to different LLM providers. There are other packages that do this, but Smoltalk also lets you build strategies on top of it. Here is a simple hello-world example of the baseline functionality that other packages also provide.
4
4
 
5
5
  ## Install
6
6
 
@@ -8,7 +8,7 @@ Smoltalk is a package that exposes a common interface across different LLM provi
8
8
  pnpm install smoltalk
9
9
  ```
10
10
 
11
- ## Quickstart
11
+ ## Hello world example
12
12
 
13
13
  ```typescript
14
14
  import { getClient } from "smoltalk";
package/dist/client.js CHANGED
@@ -26,29 +26,38 @@ export function getClient(config) {
26
26
  }
27
27
  provider = model.provider;
28
28
  }
29
- const clientConfig = { ...config, model: modelName };
29
+ const resolvedKeys = {
30
+ openAiApiKey: config.openAiApiKey || process.env.OPENAI_API_KEY,
31
+ googleApiKey: config.googleApiKey || process.env.GEMINI_API_KEY,
32
+ anthropicApiKey: config.anthropicApiKey || process.env.ANTHROPIC_API_KEY,
33
+ };
34
+ const clientConfig = {
35
+ ...config,
36
+ ...resolvedKeys,
37
+ model: modelName,
38
+ };
30
39
  switch (provider) {
31
40
  case "anthropic":
32
- if (!config.anthropicApiKey) {
33
- throw new SmolError("No Anthropic API key provided. Please provide an Anthropic API key in the config using anthropicApiKey.");
41
+ if (!resolvedKeys.anthropicApiKey) {
42
+ throw new SmolError("No Anthropic API key provided. Please provide an Anthropic API key in the config using anthropicApiKey, or set the ANTHROPIC_API_KEY environment variable.");
34
43
  }
35
44
  return new SmolAnthropic({
36
45
  ...clientConfig,
37
- anthropicApiKey: config.anthropicApiKey,
46
+ anthropicApiKey: resolvedKeys.anthropicApiKey,
38
47
  });
39
48
  case "openai":
40
- if (!config.openAiApiKey) {
41
- throw new SmolError("No OpenAI API key provided. Please provide an OpenAI API key in the config using openAiApiKey.");
49
+ if (!resolvedKeys.openAiApiKey) {
50
+ throw new SmolError("No OpenAI API key provided. Please provide an OpenAI API key in the config using openAiApiKey, or set the OPENAI_API_KEY environment variable.");
42
51
  }
43
52
  return new SmolOpenAi(clientConfig);
44
53
  case "openai-responses":
45
- if (!config.openAiApiKey) {
46
- throw new SmolError("No OpenAI API key provided. Please provide an OpenAI API key in the config using openAiApiKey.");
54
+ if (!resolvedKeys.openAiApiKey) {
55
+ throw new SmolError("No OpenAI API key provided. Please provide an OpenAI API key in the config using openAiApiKey, or set the OPENAI_API_KEY environment variable.");
47
56
  }
48
57
  return new SmolOpenAiResponses(clientConfig);
49
58
  case "google":
50
- if (!config.googleApiKey) {
51
- throw new SmolError("No Google API key provided. Please provide a Google API key in the config using googleApiKey.");
59
+ if (!resolvedKeys.googleApiKey) {
60
+ throw new SmolError("No Google API key provided. Please provide a Google API key in the config using googleApiKey, or set the GEMINI_API_KEY environment variable.");
52
61
  }
53
62
  return new SmolGoogle(clientConfig);
54
63
  case "ollama":
package/dist/model.js CHANGED
@@ -1,4 +1,4 @@
1
- import { getModel, isTextModel, textModels, } from "./models.js";
1
+ import { getModel, isTextModel, textModels, registeredTextModels, } from "./models.js";
2
2
  import { SmolError } from "./smolError.js";
3
3
  import { ModelConfigSchema, ModelNameAndProviderSchema, ModelNameSchema, } from "./strategies/types.js";
4
4
  import { round } from "./util.js";
@@ -41,7 +41,7 @@ export class Model {
41
41
  }
42
42
  return undefined;
43
43
  }
44
- resolveModel(models = textModels) {
44
+ resolveModel(models = [...registeredTextModels, ...textModels]) {
45
45
  if (ModelNameSchema.safeParse(this.model).success) {
46
46
  return this.model;
47
47
  }
@@ -120,6 +120,8 @@ export class Model {
120
120
  return (m.inputTokenCost ?? 0) + (m.outputTokenCost ?? 0);
121
121
  case "large-context":
122
122
  return m.maxInputTokens;
123
+ default:
124
+ throw new SmolError(`Unknown optimization: ${optimization}`);
123
125
  }
124
126
  }
125
127
  isLowerBetter(optimization) {
package/dist/models.d.ts CHANGED
@@ -13,7 +13,7 @@ export declare const ProviderSchema: z.ZodEnum<{
13
13
  export type Provider = z.infer<typeof ProviderSchema>;
14
14
  export type BaseModel = {
15
15
  modelName: string;
16
- provider: Provider;
16
+ provider: string;
17
17
  description?: string;
18
18
  inputTokenCost?: number;
19
19
  cachedInputTokenCost?: number;
@@ -32,7 +32,6 @@ export type ImageModel = BaseModel & {
32
32
  };
33
33
  export type TextModel = BaseModel & {
34
34
  type: "text";
35
- modelName: string;
36
35
  maxInputTokens: number;
37
36
  maxOutputTokens: number;
38
37
  outputTokensPerSecond?: number;
@@ -676,7 +675,11 @@ export type ImageModelName = (typeof imageModels)[number]["modelName"];
676
675
  export type SpeechToTextModelName = (typeof speechToTextModels)[number]["modelName"];
677
676
  export type EmbeddingsModelName = (typeof embeddingsModels)[number]["modelName"];
678
677
  export type ModelName = TextModelName | ImageModelName | SpeechToTextModelName;
679
- export declare function getModel(modelName: ModelName): {
678
+ export declare const registeredTextModels: TextModel[];
679
+ export declare function registerTextModel(model: Omit<TextModel, "type"> & {
680
+ type?: "text";
681
+ }): void;
682
+ export declare function getModel(modelName: ModelName): TextModel | {
680
683
  readonly type: "speech-to-text";
681
684
  readonly modelName: "whisper-local";
682
685
  readonly provider: "local";
package/dist/models.js CHANGED
@@ -715,8 +715,17 @@ export const imageModels = [
715
715
  export const embeddingsModels = [
716
716
  { type: "embeddings", modelName: "text-embedding-3-small", tokenCost: 0.02 },
717
717
  ];
718
+ export const registeredTextModels = [];
719
+ export function registerTextModel(model) {
720
+ registeredTextModels.push({ ...model, type: "text" });
721
+ }
718
722
  export function getModel(modelName) {
719
- const allModels = [...textModels, ...imageModels, ...speechToTextModels];
723
+ const allModels = [
724
+ ...textModels,
725
+ ...imageModels,
726
+ ...speechToTextModels,
727
+ ...registeredTextModels,
728
+ ];
720
729
  return allModels.find((model) => model.modelName === modelName);
721
730
  }
722
731
  export function isImageModel(model) {
package/dist/types.d.ts CHANGED
@@ -4,7 +4,7 @@ import z, { ZodType } from "zod";
4
4
  import { Message } from "./classes/message/index.js";
5
5
  import { ToolCall } from "./classes/ToolCall.js";
6
6
  import { Model } from "./model.js";
7
- import { ModelName, Provider } from "./models.js";
7
+ import { ModelName } from "./models.js";
8
8
  import { ModelConfig, ModelNameAndProvider, Strategy, StrategyJSON } from "./strategies/types.js";
9
9
  import { Result } from "./types/result.js";
10
10
  export type ThinkingBlock = {
@@ -72,8 +72,95 @@ export type SmolConfig = {
72
72
  anthropicApiKey?: string;
73
73
  ollamaApiKey?: string;
74
74
  ollamaHost?: string;
75
+ /**
76
+ The given model determines both
77
+ - what client is used
78
+ - what strategy is executed.
79
+
80
+ ## 1. Specifying a model directly
81
+ The simplest case is to specify the name of a model from lib/models.ts.
82
+ Example:
83
+
84
+ ```
85
+ model: "claude-sonnet-4-6"
86
+ ```
87
+
88
+ ## 2. Specifying a model config (letting Smoltalk pick the model)
89
+ You can instead also choose to let Smoltalk pick the model that it thinks
90
+ will be best for certain parameters. For example:
91
+ ```
92
+ model: {
93
+ // find the fastest model
94
+ optimizeFor: ["speed"],
95
+
96
+ // from either Anthropic or Google, whichever is faster
97
+ providers: ["anthropic", "google"],
98
+ limit: {
99
+ // 1 mil input tokens + 1 mil output tokens together
100
+ // should cost less than $10 for the models being considered
101
+ cost: 10,
102
+ },
103
+ }
104
+ ```
105
+
106
+ This can be a good option because as better models come out,
107
+ you won't need to update your code. You can just update Smoltalk
108
+ and it will pick the best model automatically.
109
+
110
+ ## 3. Specifying a strategy
111
+ Finally, you can instead specify a strategy to execute. For example:
112
+
113
+ ```
114
+ model: {
115
+ type: "race",
116
+ params: {
117
+ strategies: ["gemini-2.5-flash-lite", "gemini-2.5-pro"],
118
+ },
119
+ }
120
+ ```
121
+
122
+ In this case, Smoltalk will run your request using both LLMs simultaneously,
123
+ and take the response that finishes first.
124
+
125
+ You can also choose to specify fallbacks in case the first model
126
+ returns an error for some reason. This can be a good way to try something
127
+ with a fast model and then use a slower but more powerful model if the first one fails.
128
+
129
+ ```
130
+ model: {
131
+ type: "fallback",
132
+ params: {
133
+ primaryStrategy: "gemini-2.5-flash-lite",
134
+ config: {
135
+ error: ["gemini-2.5-pro"],
136
+ },
137
+ },
138
+ }
139
+ ```
140
+
141
+ You can of course combine strategies together to create more complex behavior:
142
+
143
+ ```
144
+ const geminiLiteWithFallback = {
145
+ type: "fallback",
146
+ params: {
147
+ primaryStrategy: "gemini-2.5-flash-lite",
148
+ config: {
149
+ error: ["gemini-2.5-pro"],
150
+ },
151
+ },
152
+ };
153
+
154
+ model: {
155
+ type: "race",
156
+ params: {
157
+ strategies: ["gemini-2.5-pro", geminiLiteWithFallback],
158
+ },
159
+ }
160
+ ```
161
+ */
75
162
  model: ModelParam;
76
- provider?: Provider;
163
+ provider?: string;
77
164
  logLevel?: LogLevel;
78
165
  statelog?: Partial<{
79
166
  host: string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "smoltalk",
3
- "version": "0.0.53",
3
+ "version": "0.0.54",
4
4
  "description": "A common interface for LLM APIs",
5
5
  "homepage": "https://github.com/egonSchiele/smoltalk",
6
6
  "scripts": {
@@ -33,7 +33,6 @@
33
33
  "devDependencies": {
34
34
  "@types/node": "^25.0.3",
35
35
  "prettier": "^3.7.4",
36
- "termcolors": "github:egonSchiele/termcolors",
37
36
  "typedoc": "^0.28.15",
38
37
  "typescript": "^5.9.3",
39
38
  "vitest": "^4.0.16"