@aliou/pi-synthetic 0.0.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,11 @@
1
+ {
2
+ "$schema": "https://unpkg.com/@changesets/config@3.1.2/schema.json",
3
+ "changelog": "@changesets/cli/changelog",
4
+ "commit": false,
5
+ "fixed": [],
6
+ "linked": [],
7
+ "access": "public",
8
+ "baseBranch": "main",
9
+ "updateInternalDependencies": "patch",
10
+ "ignore": []
11
+ }
@@ -0,0 +1,32 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+
8
+ concurrency:
9
+ group: ${{ github.workflow }}-${{ github.ref }}
10
+ cancel-in-progress: true
11
+
12
+ jobs:
13
+ check:
14
+ runs-on: ubuntu-latest
15
+ steps:
16
+ - uses: actions/checkout@v4
17
+
18
+ - uses: pnpm/action-setup@v4
19
+
20
+ - uses: actions/setup-node@v4
21
+ with:
22
+ node-version: "22"
23
+ cache: "pnpm"
24
+
25
+ - name: Install dependencies
26
+ run: pnpm install --frozen-lockfile
27
+
28
+ - name: Lint
29
+ run: pnpm lint
30
+
31
+ - name: Typecheck
32
+ run: pnpm typecheck
@@ -0,0 +1,143 @@
1
+ name: Publish
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+
8
+ concurrency:
9
+ group: ${{ github.workflow }}-${{ github.ref }}
10
+ cancel-in-progress: true
11
+
12
+ jobs:
13
+ check:
14
+ runs-on: ubuntu-latest
15
+ steps:
16
+ - uses: actions/checkout@v4
17
+
18
+ - uses: pnpm/action-setup@v4
19
+
20
+ - uses: actions/setup-node@v4
21
+ with:
22
+ node-version: "22"
23
+ cache: "pnpm"
24
+
25
+ - name: Install dependencies
26
+ run: pnpm install --frozen-lockfile
27
+
28
+ - name: Lint
29
+ run: pnpm lint
30
+
31
+ - name: Typecheck
32
+ run: pnpm typecheck
33
+
34
+ publish:
35
+ name: Publish
36
+ needs: check
37
+ runs-on: ubuntu-latest
38
+ permissions:
39
+ contents: write
40
+ packages: write
41
+ pull-requests: write
42
+ id-token: write
43
+
44
+ steps:
45
+ - name: Checkout
46
+ uses: actions/checkout@v4
47
+ with:
48
+ fetch-depth: 0
49
+
50
+ - name: Setup pnpm
51
+ uses: pnpm/action-setup@v4
52
+
53
+ - name: Setup Node.js
54
+ uses: actions/setup-node@v4
55
+ with:
56
+ node-version: "22"
57
+ registry-url: "https://registry.npmjs.org"
58
+ scope: "@aliou"
59
+ cache: "pnpm"
60
+
61
+ - name: Upgrade npm for OIDC support
62
+ run: npm install -g npm@latest
63
+
64
+ - name: Install dependencies
65
+ run: pnpm install --frozen-lockfile
66
+
67
+ - name: Get release info
68
+ id: release-info
69
+ run: |
70
+ pnpm changeset status --output=release.json 2>/dev/null || echo '{"releases":[]}' > release.json
71
+ node <<NODE
72
+ const fs = require('fs');
73
+ const release = JSON.parse(fs.readFileSync('release.json', 'utf8'));
74
+ const releases = release.releases?.filter(r => r.type !== 'none') || [];
75
+
76
+ let title = 'Version Packages';
77
+ let commit = 'Version Packages';
78
+ if (releases.length === 1) {
79
+ const { name, newVersion } = releases[0];
80
+ title = 'Updating ' + name + ' to version ' + newVersion;
81
+ commit = name + '@' + newVersion;
82
+ } else if (releases.length > 1) {
83
+ const summary = releases.map(r => r.name + '@' + r.newVersion).join(', ');
84
+ title = 'Updating ' + summary;
85
+ commit = summary;
86
+ }
87
+
88
+ fs.appendFileSync(process.env.GITHUB_OUTPUT, 'title=' + title + '\n');
89
+ fs.appendFileSync(process.env.GITHUB_OUTPUT, 'commit=' + commit + '\n');
90
+ NODE
91
+ rm -f release.json
92
+ continue-on-error: true
93
+
94
+ - name: Create Release PR or Publish
95
+ id: changesets
96
+ uses: changesets/action@v1
97
+ with:
98
+ version: pnpm changeset version
99
+ publish: pnpm changeset publish
100
+ title: ${{ steps.release-info.outputs.title || 'Version Packages' }}
101
+ commit: ${{ steps.release-info.outputs.commit || 'Version Packages' }}
102
+ env:
103
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
104
+ NPM_CONFIG_PROVENANCE: true
105
+
106
+ - name: Create GitHub releases
107
+ if: steps.changesets.outputs.published == 'true'
108
+ env:
109
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
110
+ PUBLISHED_PACKAGES: ${{ steps.changesets.outputs.publishedPackages }}
111
+ run: |
112
+ node <<'NODE'
113
+ const { execSync } = require("node:child_process");
114
+
115
+ const published = JSON.parse(process.env.PUBLISHED_PACKAGES || "[]");
116
+
117
+ for (const pkg of published) {
118
+ const shortName = pkg.name.replace(/^@[^/]+\//, "");
119
+ const tag = `${shortName}@${pkg.version}`;
120
+
121
+ const existing = execSync(`git tag --list ${tag}`, { encoding: "utf8" }).trim();
122
+ if (!existing) {
123
+ execSync(`git tag ${tag}`);
124
+ execSync(`git push origin ${tag}`);
125
+ }
126
+
127
+ let hasRelease = false;
128
+ try {
129
+ const output = execSync(`gh release view ${tag} --json tagName --jq .tagName`, {
130
+ stdio: ["ignore", "pipe", "ignore"],
131
+ }).toString().trim();
132
+ hasRelease = output.length > 0;
133
+ } catch {
134
+ hasRelease = false;
135
+ }
136
+
137
+ if (!hasRelease) {
138
+ execSync(`gh release create ${tag} --title ${tag} --notes "Release ${tag}"`, {
139
+ stdio: "inherit",
140
+ });
141
+ }
142
+ }
143
+ NODE
@@ -0,0 +1,3 @@
1
+ pnpm run typecheck
2
+ pnpm run lint
3
+ pnpm run format
package/AGENTS.md ADDED
@@ -0,0 +1,69 @@
1
+ # pi-synthetic
2
+
3
+ Public Pi extension providing open-source language models via Synthetic's API. People could be using this, so consider backwards compatibility when making changes.
4
+
5
+ Pi is pre-1.0.0, so breaking changes can happen between Pi versions. This extension must stay up to date with Pi or things will break.
6
+
7
+ ## Stack
8
+
9
+ - TypeScript (strict mode)
10
+ - pnpm 10.26.1
11
+ - Biome for linting/formatting
12
+ - Changesets for versioning
13
+
14
+ ## Scripts
15
+
16
+ ```bash
17
+ pnpm typecheck # Type check
18
+ pnpm lint # Lint (runs on pre-commit)
19
+ pnpm format # Format
20
+ pnpm changeset # Create changeset for versioning
21
+ ```
22
+
23
+ ## Structure
24
+
25
+ ```
26
+ src/
27
+ index.ts # Extension entry, registers provider
28
+ providers/
29
+ index.ts # Provider registration
30
+ models.ts # Hardcoded model definitions
31
+ ```
32
+
33
+ ## Conventions
34
+
35
+ - API key comes from environment (`SYNTHETIC_API_KEY`)
36
+ - Uses Anthropic-compatible API at `https://api.synthetic.new/anthropic`
37
+ - Models are hardcoded in `src/providers/models.ts`
38
+ - Update model list when Synthetic adds new models
39
+
40
+ ## Adding Models
41
+
42
+ Edit `src/providers/models.ts`:
43
+
44
+ ```typescript
45
+ {
46
+ id: "hf:vendor/model-name",
47
+ name: "vendor/model-name",
48
+ reasoning: true/false,
49
+ input: ["text"] or ["text", "image"],
50
+ cost: {
51
+ input: 0.55, // $ per million tokens
52
+ output: 2.19,
53
+ cacheRead: 0.55,
54
+ cacheWrite: 0
55
+ },
56
+ contextWindow: 202752,
57
+ maxTokens: 65536
58
+ }
59
+ ```
60
+
61
+ Get pricing from `https://api.synthetic.new/openai/v1/models`.
62
+
63
+ ## Versioning
64
+
65
+ Uses changesets. Run `pnpm changeset` before committing user-facing changes.
66
+
67
+ - `patch`: bug fixes, model updates
68
+ - `minor`: new models, features
69
+ - `major`: breaking changes
package/CHANGELOG.md ADDED
@@ -0,0 +1,21 @@
1
+ # @aliou/pi-synthetic
2
+
3
+ ## 0.2.0
4
+
5
+ ### Minor Changes
6
+
7
+ - 58d21ca: Fix model configurations from Synthetic API
8
+
9
+ - Update maxTokens for all Synthetic models using values from models.dev (synthetic provider)
10
+ - Fix Kimi-K2-Instruct-0905 reasoning flag to false
11
+
12
+ ## 0.1.0
13
+
14
+ ### Minor Changes
15
+
16
+ - 4a32d18: Initial release with 19 open-source models
17
+
18
+ - Add Synthetic provider with Anthropic-compatible API
19
+ - Support for DeepSeek, Qwen, MiniMax, Kimi, Llama, GLM models
20
+ - Vision and reasoning capabilities where available
21
+ - Hardcoded model definitions with per-token pricing
package/README.md ADDED
@@ -0,0 +1,108 @@
1
+ # Pi Synthetic Extension
2
+
3
+ A Pi extension that adds [Synthetic](https://synthetic.new) as a model provider, giving you access to open-source models through an Anthropic-compatible API.
4
+
5
+ ## Installation
6
+
7
+ ### Get API Key
8
+
9
+ Sign up at [synthetic.new](https://synthetic.new/?referral=NDWw1u3UDWiFyDR) to get an API key (referral link).
10
+
11
+ ### Set Environment Variable
12
+
13
+ ```bash
14
+ export SYNTHETIC_API_KEY="your-api-key-here"
15
+ ```
16
+
17
+ Add to shell profile for persistence:
18
+
19
+ ```bash
20
+ echo 'export SYNTHETIC_API_KEY="your-api-key-here"' >> ~/.zshrc
21
+ ```
22
+
23
+ ### Install Extension
24
+
25
+ ```bash
26
+ # From npm
27
+ pi install npm:@aliou/pi-synthetic
28
+
29
+ # From git
30
+ pi install git:github.com/aliou/pi-synthetic
31
+
32
+ # Local development
33
+ pi -e ./src/index.ts
34
+ ```
35
+
36
+ ## Usage
37
+
38
+ Once installed, select `synthetic` as your provider and choose from available models:
39
+
40
+ ```
41
+ /model synthetic hf:moonshotai/Kimi-K2.5
42
+ ```
43
+
44
+ ## Adding or Updating Models
45
+
46
+ Models are hardcoded in `src/providers/models.ts`. To add or update models:
47
+
48
+ 1. Edit `src/providers/models.ts`
49
+ 2. Add the model configuration following the `SyntheticModelConfig` interface
50
+ 3. Run `pnpm run typecheck` to verify
51
+
52
+ ## Development
53
+
54
+ ### Setup
55
+
56
+ ```bash
57
+ git clone https://github.com/aliou/pi-synthetic.git
58
+ cd pi-synthetic
59
+
60
+ # Install dependencies, then set up pre-commit hooks
61
+ pnpm install && pnpm exec husky
62
+ ```
63
+
64
+ Pre-commit hooks run on every commit:
65
+ - TypeScript type checking
66
+ - Biome linting
67
+ - Biome formatting with auto-fix
68
+
69
+ ### Commands
70
+
71
+ ```bash
72
+ # Type check
73
+ pnpm run typecheck
74
+
75
+ # Lint
76
+ pnpm run lint
77
+
78
+ # Format
79
+ pnpm run format
80
+ ```
81
+
82
+ ### Test Locally
83
+
84
+ ```bash
85
+ pi -e ./src/index.ts
86
+ ```
87
+
88
+ ## Release
89
+
90
+ This repository uses [Changesets](https://github.com/changesets/changesets) for versioning.
91
+
92
+ **Note:** Publishing to npm is automated by the Changesets action in `.github/workflows/publish.yml` on pushes to `main`:
93
+
94
+ 1. Create a changeset: `pnpm changeset`
95
+ 2. Merge to `main`; the workflow opens a release PR that versions packages
96
+ 3. Merge the release PR; the workflow publishes to npm and creates GitHub releases
97
+
98
+ ## Requirements
99
+
100
+ - Pi coding agent v0.50.0+
101
+ - SYNTHETIC_API_KEY environment variable
102
+
103
+ ## Links
104
+
105
+ - [Synthetic](https://synthetic.new)
106
+ - [Synthetic Models](https://synthetic.new/models)
107
+ - [Synthetic API Docs](https://dev.synthetic.new/docs/api/overview)
108
+ - [Pi Documentation](https://buildwithpi.ai/)
package/biome.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "$schema": "https://biomejs.dev/schemas/2.3.13/schema.json",
3
+ "vcs": {
4
+ "enabled": true,
5
+ "clientKind": "git",
6
+ "useIgnoreFile": true
7
+ },
8
+ "files": {
9
+ "includes": ["**/*.ts", "**/*.json"],
10
+ "ignoreUnknown": true
11
+ },
12
+ "assist": {
13
+ "actions": {
14
+ "source": {
15
+ "organizeImports": "on"
16
+ }
17
+ }
18
+ },
19
+ "linter": {
20
+ "enabled": true,
21
+ "rules": {
22
+ "recommended": true
23
+ }
24
+ },
25
+ "formatter": {
26
+ "enabled": true,
27
+ "indentStyle": "space",
28
+ "indentWidth": 2
29
+ }
30
+ }
package/package.json CHANGED
@@ -1,9 +1,33 @@
1
1
  {
2
2
  "name": "@aliou/pi-synthetic",
3
- "version": "0.0.1",
4
- "private": false,
3
+ "version": "0.2.0",
4
+ "repository": {
5
+ "type": "git",
6
+ "url": "https://github.com/aliou/pi-synthetic"
7
+ },
5
8
  "keywords": [
6
9
  "pi-package"
7
- ]
8
- }
9
-
10
+ ],
11
+ "pi": {
12
+ "extensions": [
13
+ "./src/index.ts"
14
+ ]
15
+ },
16
+ "devDependencies": {
17
+ "@biomejs/biome": "^2.3.13",
18
+ "@changesets/cli": "^2.27.11",
19
+ "@mariozechner/pi-coding-agent": "^0.50.1",
20
+ "@mariozechner/pi-tui": "^0.50.1",
21
+ "@types/node": "^25.0.10",
22
+ "husky": "^9.1.7",
23
+ "typescript": "^5.9.3"
24
+ },
25
+ "scripts": {
26
+ "typecheck": "tsc --noEmit",
27
+ "lint": "biome check",
28
+ "format": "biome check --write",
29
+ "changeset": "changeset",
30
+ "version": "changeset version",
31
+ "release": "pnpm changeset publish"
32
+ }
33
+ }
package/shell.nix ADDED
@@ -0,0 +1,10 @@
1
+ {
2
+ pkgs ? import <nixpkgs> { },
3
+ }:
4
+
5
+ pkgs.mkShell {
6
+ buildInputs = with pkgs; [
7
+ nodejs
8
+ pnpm_10
9
+ ];
10
+ }
package/src/index.ts ADDED
@@ -0,0 +1,6 @@
1
+ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
2
+ import { registerSyntheticProvider } from "./providers/index.js";
3
+
4
+ export default function (pi: ExtensionAPI) {
5
+ registerSyntheticProvider(pi);
6
+ }
@@ -0,0 +1,19 @@
1
+ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
2
+ import { SYNTHETIC_MODELS } from "./models.js";
3
+
4
+ export function registerSyntheticProvider(pi: ExtensionAPI): void {
5
+ pi.registerProvider("synthetic", {
6
+ baseUrl: "https://api.synthetic.new/anthropic",
7
+ apiKey: "SYNTHETIC_API_KEY",
8
+ api: "anthropic-messages",
9
+ models: SYNTHETIC_MODELS.map((model) => ({
10
+ id: model.id,
11
+ name: model.name,
12
+ reasoning: model.reasoning,
13
+ input: model.input,
14
+ cost: model.cost,
15
+ contextWindow: model.contextWindow,
16
+ maxTokens: model.maxTokens,
17
+ })),
18
+ });
19
+ }
@@ -0,0 +1,306 @@
1
+ // Hardcoded models from Synthetic API
2
+ // Source: https://api.synthetic.new/openai/v1/models
3
+ // maxTokens sourced from https://models.dev/api.json (synthetic provider)
4
+
5
+ export interface SyntheticModelConfig {
6
+ id: string;
7
+ name: string;
8
+ reasoning: boolean;
9
+ input: ("text" | "image")[];
10
+ cost: {
11
+ input: number;
12
+ output: number;
13
+ cacheRead: number;
14
+ cacheWrite: number;
15
+ };
16
+ contextWindow: number;
17
+ maxTokens: number;
18
+ }
19
+
20
+ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
21
+ // models.dev: synthetic/hf:zai-org/GLM-4.7 → ctx=200000, out=64000
22
+ {
23
+ id: "hf:zai-org/GLM-4.7",
24
+ name: "zai-org/GLM-4.7",
25
+ reasoning: true,
26
+ input: ["text"],
27
+ cost: {
28
+ input: 0.55,
29
+ output: 2.19,
30
+ cacheRead: 0.55,
31
+ cacheWrite: 0,
32
+ },
33
+ contextWindow: 202752,
34
+ maxTokens: 64000,
35
+ },
36
+ // models.dev: synthetic/hf:MiniMaxAI/MiniMax-M2.1 → ctx=204800, out=131072
37
+ {
38
+ id: "hf:MiniMaxAI/MiniMax-M2.1",
39
+ name: "MiniMaxAI/MiniMax-M2.1",
40
+ reasoning: true,
41
+ input: ["text"],
42
+ cost: {
43
+ input: 0.55,
44
+ output: 2.19,
45
+ cacheRead: 0.55,
46
+ cacheWrite: 0,
47
+ },
48
+ contextWindow: 196608,
49
+ maxTokens: 131072,
50
+ },
51
+ // models.dev: synthetic/hf:meta-llama/Llama-3.3-70B-Instruct → ctx=128000, out=32768
52
+ {
53
+ id: "hf:meta-llama/Llama-3.3-70B-Instruct",
54
+ name: "meta-llama/Llama-3.3-70B-Instruct",
55
+ reasoning: false,
56
+ input: ["text"],
57
+ cost: {
58
+ input: 0.9,
59
+ output: 0.9,
60
+ cacheRead: 0.9,
61
+ cacheWrite: 0,
62
+ },
63
+ contextWindow: 131072,
64
+ maxTokens: 32768,
65
+ },
66
+ // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3-0324 → ctx=128000, out=128000
67
+ {
68
+ id: "hf:deepseek-ai/DeepSeek-V3-0324",
69
+ name: "deepseek-ai/DeepSeek-V3-0324",
70
+ reasoning: false,
71
+ input: ["text"],
72
+ cost: {
73
+ input: 1.2,
74
+ output: 1.2,
75
+ cacheRead: 1.2,
76
+ cacheWrite: 0,
77
+ },
78
+ contextWindow: 131072,
79
+ maxTokens: 128000,
80
+ },
81
+ // models.dev: synthetic/hf:deepseek-ai/DeepSeek-R1-0528 → ctx=128000, out=128000
82
+ {
83
+ id: "hf:deepseek-ai/DeepSeek-R1-0528",
84
+ name: "deepseek-ai/DeepSeek-R1-0528",
85
+ reasoning: true,
86
+ input: ["text"],
87
+ cost: {
88
+ input: 3,
89
+ output: 8,
90
+ cacheRead: 3,
91
+ cacheWrite: 0,
92
+ },
93
+ contextWindow: 131072,
94
+ maxTokens: 128000,
95
+ },
96
+ // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.1 → ctx=128000, out=128000
97
+ {
98
+ id: "hf:deepseek-ai/DeepSeek-V3.1",
99
+ name: "deepseek-ai/DeepSeek-V3.1",
100
+ reasoning: false,
101
+ input: ["text"],
102
+ cost: {
103
+ input: 0.56,
104
+ output: 1.68,
105
+ cacheRead: 0.56,
106
+ cacheWrite: 0,
107
+ },
108
+ contextWindow: 131072,
109
+ maxTokens: 128000,
110
+ },
111
+ // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.1-Terminus → ctx=128000, out=128000
112
+ {
113
+ id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
114
+ name: "deepseek-ai/DeepSeek-V3.1-Terminus",
115
+ reasoning: false,
116
+ input: ["text"],
117
+ cost: {
118
+ input: 1.2,
119
+ output: 1.2,
120
+ cacheRead: 1.2,
121
+ cacheWrite: 0,
122
+ },
123
+ contextWindow: 131072,
124
+ maxTokens: 128000,
125
+ },
126
+ // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.2 → ctx=162816, out=8000
127
+ {
128
+ id: "hf:deepseek-ai/DeepSeek-V3.2",
129
+ name: "deepseek-ai/DeepSeek-V3.2",
130
+ reasoning: false,
131
+ input: ["text"],
132
+ cost: {
133
+ input: 0.56,
134
+ output: 1.68,
135
+ cacheRead: 0.56,
136
+ cacheWrite: 0,
137
+ },
138
+ contextWindow: 162816,
139
+ maxTokens: 8000,
140
+ },
141
+ // NOTE: not present in models.dev synthetic provider; maxTokens unchanged
142
+ {
143
+ id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
144
+ name: "Qwen/Qwen3-VL-235B-A22B-Instruct",
145
+ reasoning: true,
146
+ input: ["text", "image"],
147
+ cost: {
148
+ input: 0.22,
149
+ output: 0.88,
150
+ cacheRead: 0.22,
151
+ cacheWrite: 0,
152
+ },
153
+ contextWindow: 256000,
154
+ maxTokens: 4096,
155
+ },
156
+ // models.dev: synthetic/hf:moonshotai/Kimi-K2-Instruct-0905 → ctx=262144, out=32768
157
+ {
158
+ id: "hf:moonshotai/Kimi-K2-Instruct-0905",
159
+ name: "moonshotai/Kimi-K2-Instruct-0905",
160
+ reasoning: false,
161
+ input: ["text"],
162
+ cost: {
163
+ input: 1.2,
164
+ output: 1.2,
165
+ cacheRead: 1.2,
166
+ cacheWrite: 0,
167
+ },
168
+ contextWindow: 262144,
169
+ maxTokens: 32768,
170
+ },
171
+ // models.dev: synthetic/hf:moonshotai/Kimi-K2-Thinking → ctx=262144, out=262144
172
+ {
173
+ id: "hf:moonshotai/Kimi-K2-Thinking",
174
+ name: "moonshotai/Kimi-K2-Thinking",
175
+ reasoning: true,
176
+ input: ["text"],
177
+ cost: {
178
+ input: 0.6,
179
+ output: 2.5,
180
+ cacheRead: 0.6,
181
+ cacheWrite: 0,
182
+ },
183
+ contextWindow: 262144,
184
+ maxTokens: 262144,
185
+ },
186
+ // models.dev: synthetic/hf:openai/gpt-oss-120b → ctx=128000, out=32768
187
+ {
188
+ id: "hf:openai/gpt-oss-120b",
189
+ name: "openai/gpt-oss-120b",
190
+ reasoning: false,
191
+ input: ["text"],
192
+ cost: {
193
+ input: 0.1,
194
+ output: 0.1,
195
+ cacheRead: 0.1,
196
+ cacheWrite: 0,
197
+ },
198
+ contextWindow: 131072,
199
+ maxTokens: 32768,
200
+ },
201
+ // models.dev: synthetic/hf:Qwen/Qwen3-Coder-480B-A35B-Instruct → ctx=256000, out=32000
202
+ {
203
+ id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
204
+ name: "Qwen/Qwen3-Coder-480B-A35B-Instruct",
205
+ reasoning: false,
206
+ input: ["text"],
207
+ cost: {
208
+ input: 0.45,
209
+ output: 1.8,
210
+ cacheRead: 0.45,
211
+ cacheWrite: 0,
212
+ },
213
+ contextWindow: 262144,
214
+ maxTokens: 32000,
215
+ },
216
+ // models.dev: synthetic/hf:Qwen/Qwen3-235B-A22B-Instruct-2507 → ctx=256000, out=32000
217
+ {
218
+ id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
219
+ name: "Qwen/Qwen3-235B-A22B-Instruct-2507",
220
+ reasoning: false,
221
+ input: ["text"],
222
+ cost: {
223
+ input: 0.22,
224
+ output: 0.88,
225
+ cacheRead: 0.22,
226
+ cacheWrite: 0,
227
+ },
228
+ contextWindow: 262144,
229
+ maxTokens: 32000,
230
+ },
231
+ // models.dev: synthetic/hf:zai-org/GLM-4.6 → ctx=200000, out=64000
232
+ {
233
+ id: "hf:zai-org/GLM-4.6",
234
+ name: "zai-org/GLM-4.6",
235
+ reasoning: true,
236
+ input: ["text"],
237
+ cost: {
238
+ input: 0.55,
239
+ output: 2.19,
240
+ cacheRead: 0.55,
241
+ cacheWrite: 0,
242
+ },
243
+ contextWindow: 202752,
244
+ maxTokens: 64000,
245
+ },
246
+ // models.dev: synthetic/hf:MiniMaxAI/MiniMax-M2 → ctx=196608, out=131000
247
+ {
248
+ id: "hf:MiniMaxAI/MiniMax-M2",
249
+ name: "MiniMaxAI/MiniMax-M2",
250
+ reasoning: true,
251
+ input: ["text"],
252
+ cost: {
253
+ input: 0.3,
254
+ output: 1.2,
255
+ cacheRead: 0.3,
256
+ cacheWrite: 0,
257
+ },
258
+ contextWindow: 196608,
259
+ maxTokens: 131000,
260
+ },
261
+ // models.dev: synthetic/hf:moonshotai/Kimi-K2.5 → ctx=262144, out=65536
262
+ {
263
+ id: "hf:moonshotai/Kimi-K2.5",
264
+ name: "moonshotai/Kimi-K2.5",
265
+ reasoning: true,
266
+ input: ["text", "image"],
267
+ cost: {
268
+ input: 1.2,
269
+ output: 1.2,
270
+ cacheRead: 1.2,
271
+ cacheWrite: 0,
272
+ },
273
+ contextWindow: 262144,
274
+ maxTokens: 65536,
275
+ },
276
+ // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3 → ctx=128000, out=128000
277
+ {
278
+ id: "hf:deepseek-ai/DeepSeek-V3",
279
+ name: "deepseek-ai/DeepSeek-V3",
280
+ reasoning: false,
281
+ input: ["text"],
282
+ cost: {
283
+ input: 1.25,
284
+ output: 1.25,
285
+ cacheRead: 1.25,
286
+ cacheWrite: 0,
287
+ },
288
+ contextWindow: 131072,
289
+ maxTokens: 128000,
290
+ },
291
+ // models.dev: synthetic/hf:Qwen/Qwen3-235B-A22B-Thinking-2507 → ctx=256000, out=32000
292
+ {
293
+ id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
294
+ name: "Qwen/Qwen3-235B-A22B-Thinking-2507",
295
+ reasoning: true,
296
+ input: ["text"],
297
+ cost: {
298
+ input: 0.65,
299
+ output: 3,
300
+ cacheRead: 0.65,
301
+ cacheWrite: 0,
302
+ },
303
+ contextWindow: 262144,
304
+ maxTokens: 32000,
305
+ },
306
+ ];
package/tsconfig.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2022",
4
+ "module": "ESNext",
5
+ "moduleResolution": "bundler",
6
+ "strict": true,
7
+ "esModuleInterop": true,
8
+ "skipLibCheck": true,
9
+ "forceConsistentCasingInFileNames": true,
10
+ "resolveJsonModule": true,
11
+ "noEmit": true
12
+ },
13
+ "include": ["src/**/*"],
14
+ "exclude": ["node_modules"]
15
+ }