@aliou/pi-synthetic 0.1.0 → 0.3.0

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
package/AGENTS.md CHANGED
@@ -33,7 +33,7 @@ src/
  ## Conventions
 
  - API key comes from environment (`SYNTHETIC_API_KEY`)
- - Uses Anthropic-compatible API at `https://api.synthetic.new/anthropic`
+ - Uses OpenAI-compatible API at `https://api.synthetic.new/openai/v1`
  - Models are hardcoded in `src/providers/models.ts`
  - Update model list when Synthetic adds new models
 
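The updated convention points the extension at Synthetic's OpenAI-compatible surface. As a quick smoke test (a sketch, not part of the package: the `/models` route comes from the `Source:` comment in `models.ts` later in this diff, and Bearer auth is assumed since that is the OpenAI-compatible norm):

```ts
// Sketch: list Synthetic's models from the OpenAI-compatible endpoint.
// Assumes Bearer auth with SYNTHETIC_API_KEY; run with modern Node (ESM, fetch built in).
const res = await fetch("https://api.synthetic.new/openai/v1/models", {
  headers: { Authorization: `Bearer ${process.env.SYNTHETIC_API_KEY}` },
});
if (!res.ok) throw new Error(`request failed: ${res.status}`);
console.log(await res.json()); // an OpenAI-style { data: [{ id, ... }] } list is expected
```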
package/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
  # @aliou/pi-synthetic
 
+ ## 0.3.0
+
+ ### Minor Changes
+
+ - 5f67daf: Switch from Anthropic to OpenAI API endpoints
+
+   - Change API endpoint from `/anthropic` to `/openai/v1`
+   - Update from `anthropic-messages` to `openai-completions` API
+   - Add compatibility flags for proper role handling (`supportsDeveloperRole: false`)
+   - Use standard `max_tokens` field instead of `max_completion_tokens`
+
+ ## 0.2.0
+
+ ### Minor Changes
+
+ - 58d21ca: Fix model configurations from Synthetic API
+
+   - Update maxTokens for all Synthetic models using values from models.dev (synthetic provider)
+   - Fix Kimi-K2-Instruct-0905 reasoning flag to false
+
  ## 0.1.0
 
  ### Minor Changes
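The 0.3.0 entry's compatibility flags change the shape of each chat-completions request rather than the models themselves. As a purely illustrative sketch (Pi's actual request builder is not part of this diff), honoring `supportsDeveloperRole: false` and `maxTokensField: "max_tokens"` could look like:

```ts
// Illustration only: how a client might honor the compat flags named in the
// 0.3.0 changelog entry. Pi's real request-building code is not in this diff.
type Msg = { role: "system" | "developer" | "user" | "assistant"; content: string };

function buildRequestBody(
  messages: Msg[],
  maxTokens: number,
  compat: { supportsDeveloperRole: boolean; maxTokensField: "max_tokens" | "max_completion_tokens" },
): Record<string, unknown> {
  // supportsDeveloperRole: false → downgrade "developer" messages to "system"
  const mapped = messages.map((m) =>
    m.role === "developer" && !compat.supportsDeveloperRole ? { ...m, role: "system" as const } : m,
  );
  // maxTokensField selects the classic "max_tokens" field rather than the
  // newer "max_completion_tokens"
  return { messages: mapped, [compat.maxTokensField]: maxTokens };
}
```

The flag names mirror the `compat` block added to the provider registration later in this diff.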
package/README.md CHANGED
@@ -1,6 +1,6 @@
  # Pi Synthetic Extension
 
- A Pi extension that adds [Synthetic](https://synthetic.new) as a model provider, giving you access to open-source models through an Anthropic-compatible API.
+ A Pi extension that adds [Synthetic](https://synthetic.new) as a model provider, giving you access to open-source models through an OpenAI-compatible API.
 
  ## Installation
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@aliou/pi-synthetic",
-   "version": "0.1.0",
+   "version": "0.3.0",
    "repository": {
      "type": "git",
      "url": "https://github.com/aliou/pi-synthetic"
@@ -3,9 +3,9 @@ import { SYNTHETIC_MODELS } from "./models.js";
 
  export function registerSyntheticProvider(pi: ExtensionAPI): void {
    pi.registerProvider("synthetic", {
-     baseUrl: "https://api.synthetic.new/anthropic",
+     baseUrl: "https://api.synthetic.new/openai/v1",
      apiKey: "SYNTHETIC_API_KEY",
-     api: "anthropic-messages",
+     api: "openai-completions",
      models: SYNTHETIC_MODELS.map((model) => ({
        id: model.id,
        name: model.name,
@@ -14,6 +14,10 @@ export function registerSyntheticProvider(pi: ExtensionAPI): void {
        cost: model.cost,
        contextWindow: model.contextWindow,
        maxTokens: model.maxTokens,
+       compat: {
+         supportsDeveloperRole: false,
+         maxTokensField: "max_tokens",
+       },
      })),
    });
  }
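The two hunks above come from the provider-registration module (its file header is absent from this diff; it imports the model list as `./models.js`). Assembled, the post-0.3.0 registration reads roughly as the sketch below; the `ExtensionAPI` import path and the fields elided between the hunks are assumptions, while the option values are taken verbatim from the diff:

```ts
// Consolidated sketch of registerSyntheticProvider after 0.3.0, assembled
// from the two hunks above.
import type { ExtensionAPI } from "pi"; // placeholder: real module name not shown in this diff
import { SYNTHETIC_MODELS } from "./models.js";

export function registerSyntheticProvider(pi: ExtensionAPI): void {
  pi.registerProvider("synthetic", {
    baseUrl: "https://api.synthetic.new/openai/v1", // was .../anthropic
    apiKey: "SYNTHETIC_API_KEY", // env var name, not a literal key
    api: "openai-completions", // was "anthropic-messages"
    models: SYNTHETIC_MODELS.map((model) => ({
      id: model.id,
      name: model.name,
      reasoning: model.reasoning, // assumed: these two fields fall between the hunks
      input: model.input,
      cost: model.cost,
      contextWindow: model.contextWindow,
      maxTokens: model.maxTokens,
      compat: {
        supportsDeveloperRole: false, // provider lacks the "developer" role
        maxTokensField: "max_tokens", // not "max_completion_tokens"
      },
    })),
  });
}
```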
package/src/providers/models.ts CHANGED
@@ -1,5 +1,6 @@
  // Hardcoded models from Synthetic API
  // Source: https://api.synthetic.new/openai/v1/models
+ // maxTokens sourced from https://models.dev/api.json (synthetic provider)
 
  export interface SyntheticModelConfig {
    id: string;
@@ -14,9 +15,17 @@ export interface SyntheticModelConfig {
    };
    contextWindow: number;
    maxTokens: number;
+   compat?: {
+     supportsDeveloperRole?: boolean;
+     supportsReasoningEffort?: boolean;
+     maxTokensField?: "max_completion_tokens" | "max_tokens";
+     requiresToolResultName?: boolean;
+     requiresMistralToolIds?: boolean;
+   };
  }
 
  export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
+   // models.dev: synthetic/hf:zai-org/GLM-4.7 → ctx=200000, out=64000
    {
      id: "hf:zai-org/GLM-4.7",
      name: "zai-org/GLM-4.7",
@@ -29,8 +38,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 202752,
-     maxTokens: 65536,
+     maxTokens: 64000,
    },
+   // models.dev: synthetic/hf:MiniMaxAI/MiniMax-M2.1 → ctx=204800, out=131072
    {
      id: "hf:MiniMaxAI/MiniMax-M2.1",
      name: "MiniMaxAI/MiniMax-M2.1",
@@ -43,8 +53,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 196608,
-     maxTokens: 65536,
+     maxTokens: 131072,
    },
+   // models.dev: synthetic/hf:meta-llama/Llama-3.3-70B-Instruct → ctx=128000, out=32768
    {
      id: "hf:meta-llama/Llama-3.3-70B-Instruct",
      name: "meta-llama/Llama-3.3-70B-Instruct",
@@ -57,8 +68,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 131072,
-     maxTokens: 4096,
+     maxTokens: 32768,
    },
+   // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3-0324 → ctx=128000, out=128000
    {
      id: "hf:deepseek-ai/DeepSeek-V3-0324",
      name: "deepseek-ai/DeepSeek-V3-0324",
@@ -71,8 +83,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 131072,
-     maxTokens: 4096,
+     maxTokens: 128000,
    },
+   // models.dev: synthetic/hf:deepseek-ai/DeepSeek-R1-0528 → ctx=128000, out=128000
    {
      id: "hf:deepseek-ai/DeepSeek-R1-0528",
      name: "deepseek-ai/DeepSeek-R1-0528",
@@ -85,8 +98,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 131072,
-     maxTokens: 4096,
+     maxTokens: 128000,
    },
+   // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.1 → ctx=128000, out=128000
    {
      id: "hf:deepseek-ai/DeepSeek-V3.1",
      name: "deepseek-ai/DeepSeek-V3.1",
@@ -99,8 +113,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 131072,
-     maxTokens: 4096,
+     maxTokens: 128000,
    },
+   // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.1-Terminus → ctx=128000, out=128000
    {
      id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
      name: "deepseek-ai/DeepSeek-V3.1-Terminus",
@@ -113,8 +128,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 131072,
-     maxTokens: 4096,
+     maxTokens: 128000,
    },
+   // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.2 → ctx=162816, out=8000
    {
      id: "hf:deepseek-ai/DeepSeek-V3.2",
      name: "deepseek-ai/DeepSeek-V3.2",
@@ -127,8 +143,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 162816,
-     maxTokens: 4096,
+     maxTokens: 8000,
    },
+   // NOTE: not present in models.dev synthetic provider; maxTokens unchanged
    {
      id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
      name: "Qwen/Qwen3-VL-235B-A22B-Instruct",
@@ -143,10 +160,11 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
      contextWindow: 256000,
      maxTokens: 4096,
    },
+   // models.dev: synthetic/hf:moonshotai/Kimi-K2-Instruct-0905 → ctx=262144, out=32768
    {
      id: "hf:moonshotai/Kimi-K2-Instruct-0905",
      name: "moonshotai/Kimi-K2-Instruct-0905",
-     reasoning: true,
+     reasoning: false,
      input: ["text"],
      cost: {
        input: 1.2,
@@ -155,8 +173,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 262144,
-     maxTokens: 4096,
+     maxTokens: 32768,
    },
+   // models.dev: synthetic/hf:moonshotai/Kimi-K2-Thinking → ctx=262144, out=262144
    {
      id: "hf:moonshotai/Kimi-K2-Thinking",
      name: "moonshotai/Kimi-K2-Thinking",
@@ -169,8 +188,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 262144,
-     maxTokens: 4096,
+     maxTokens: 262144,
    },
+   // models.dev: synthetic/hf:openai/gpt-oss-120b → ctx=128000, out=32768
    {
      id: "hf:openai/gpt-oss-120b",
      name: "openai/gpt-oss-120b",
@@ -183,8 +203,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 131072,
-     maxTokens: 4096,
+     maxTokens: 32768,
    },
+   // models.dev: synthetic/hf:Qwen/Qwen3-Coder-480B-A35B-Instruct → ctx=256000, out=32000
    {
      id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
      name: "Qwen/Qwen3-Coder-480B-A35B-Instruct",
@@ -197,8 +218,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 262144,
-     maxTokens: 4096,
+     maxTokens: 32000,
    },
+   // models.dev: synthetic/hf:Qwen/Qwen3-235B-A22B-Instruct-2507 → ctx=256000, out=32000
    {
      id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
      name: "Qwen/Qwen3-235B-A22B-Instruct-2507",
@@ -211,8 +233,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 262144,
-     maxTokens: 4096,
+     maxTokens: 32000,
    },
+   // models.dev: synthetic/hf:zai-org/GLM-4.6 → ctx=200000, out=64000
    {
      id: "hf:zai-org/GLM-4.6",
      name: "zai-org/GLM-4.6",
@@ -225,8 +248,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 202752,
-     maxTokens: 4096,
+     maxTokens: 64000,
    },
+   // models.dev: synthetic/hf:MiniMaxAI/MiniMax-M2 → ctx=196608, out=131000
    {
      id: "hf:MiniMaxAI/MiniMax-M2",
      name: "MiniMaxAI/MiniMax-M2",
@@ -239,8 +263,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 196608,
-     maxTokens: 4096,
+     maxTokens: 131000,
    },
+   // models.dev: synthetic/hf:moonshotai/Kimi-K2.5 → ctx=262144, out=65536
    {
      id: "hf:moonshotai/Kimi-K2.5",
      name: "moonshotai/Kimi-K2.5",
@@ -253,8 +278,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 262144,
-     maxTokens: 4096,
+     maxTokens: 65536,
    },
+   // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3 → ctx=128000, out=128000
    {
      id: "hf:deepseek-ai/DeepSeek-V3",
      name: "deepseek-ai/DeepSeek-V3",
@@ -267,8 +293,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 131072,
-     maxTokens: 4096,
+     maxTokens: 128000,
    },
+   // models.dev: synthetic/hf:Qwen/Qwen3-235B-A22B-Thinking-2507 → ctx=256000, out=32000
    {
      id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
      name: "Qwen/Qwen3-235B-A22B-Thinking-2507",
@@ -281,6 +308,6 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
        cacheWrite: 0,
      },
      contextWindow: 262144,
-     maxTokens: 4096,
+     maxTokens: 32000,
    },
  ];
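The 0.2.0 `maxTokens` values annotated above come from the models.dev catalog. A minimal sketch of how one might pull those limits, assuming models.dev's `api.json` is keyed by provider id and each model carries a `limit: { context, output }` object (a schema inferred from the `ctx=`/`out=` annotations in the comments, not verified here):

```ts
// Hedged sketch: fetch per-model limits for the "synthetic" provider from
// models.dev. The api.json schema (provider -> models -> limit) is assumed.
interface ModelsDevModel {
  limit?: { context?: number; output?: number };
}
interface ModelsDevProvider {
  models: Record<string, ModelsDevModel>;
}

async function fetchSyntheticLimits(): Promise<Map<string, { ctx?: number; out?: number }>> {
  const res = await fetch("https://models.dev/api.json");
  if (!res.ok) throw new Error(`models.dev request failed: ${res.status}`);
  const catalog = (await res.json()) as Record<string, ModelsDevProvider>;
  const limits = new Map<string, { ctx?: number; out?: number }>();
  for (const [id, model] of Object.entries(catalog["synthetic"]?.models ?? {})) {
    limits.set(id, { ctx: model.limit?.context, out: model.limit?.output });
  }
  return limits;
}

// Example: look up the output limit used for maxTokens in 0.2.0.
// fetchSyntheticLimits().then((m) => console.log(m.get("hf:zai-org/GLM-4.7")));
// → { ctx: 200000, out: 64000 } per the annotations above
```

If the real schema differs, the `ctx=`/`out=` comments in the diff still record the values that were actually applied.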