@aliou/pi-synthetic 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
 # @aliou/pi-synthetic
 
+## 0.2.0
+
+### Minor Changes
+
+- 58d21ca: Fix model configurations from Synthetic API
+
+  - Update maxTokens for all Synthetic models using values from models.dev (synthetic provider)
+  - Fix Kimi-K2-Instruct-0905 reasoning flag to false
+
 ## 0.1.0
 
 ### Minor Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aliou/pi-synthetic",
-  "version": "0.1.0",
+  "version": "0.2.0",
   "repository": {
     "type": "git",
     "url": "https://github.com/aliou/pi-synthetic"
@@ -1,5 +1,6 @@
 // Hardcoded models from Synthetic API
 // Source: https://api.synthetic.new/openai/v1/models
+// maxTokens sourced from https://models.dev/api.json (synthetic provider)
 
 export interface SyntheticModelConfig {
   id: string;
@@ -17,6 +18,7 @@ export interface SyntheticModelConfig {
 }
 
 export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
+  // models.dev: synthetic/hf:zai-org/GLM-4.7 → ctx=200000, out=64000
   {
     id: "hf:zai-org/GLM-4.7",
     name: "zai-org/GLM-4.7",
@@ -29,8 +31,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 202752,
-    maxTokens: 65536,
+    maxTokens: 64000,
   },
+  // models.dev: synthetic/hf:MiniMaxAI/MiniMax-M2.1 → ctx=204800, out=131072
   {
     id: "hf:MiniMaxAI/MiniMax-M2.1",
     name: "MiniMaxAI/MiniMax-M2.1",
@@ -43,8 +46,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 196608,
-    maxTokens: 65536,
+    maxTokens: 131072,
   },
+  // models.dev: synthetic/hf:meta-llama/Llama-3.3-70B-Instruct → ctx=128000, out=32768
   {
     id: "hf:meta-llama/Llama-3.3-70B-Instruct",
     name: "meta-llama/Llama-3.3-70B-Instruct",
@@ -57,8 +61,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 32768,
   },
+  // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3-0324 → ctx=128000, out=128000
   {
     id: "hf:deepseek-ai/DeepSeek-V3-0324",
     name: "deepseek-ai/DeepSeek-V3-0324",
@@ -71,8 +76,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 128000,
   },
+  // models.dev: synthetic/hf:deepseek-ai/DeepSeek-R1-0528 → ctx=128000, out=128000
   {
     id: "hf:deepseek-ai/DeepSeek-R1-0528",
     name: "deepseek-ai/DeepSeek-R1-0528",
@@ -85,8 +91,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 128000,
   },
+  // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.1 → ctx=128000, out=128000
   {
     id: "hf:deepseek-ai/DeepSeek-V3.1",
     name: "deepseek-ai/DeepSeek-V3.1",
@@ -99,8 +106,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 128000,
   },
+  // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.1-Terminus → ctx=128000, out=128000
   {
     id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
     name: "deepseek-ai/DeepSeek-V3.1-Terminus",
@@ -113,8 +121,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 128000,
   },
+  // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3.2 → ctx=162816, out=8000
   {
     id: "hf:deepseek-ai/DeepSeek-V3.2",
     name: "deepseek-ai/DeepSeek-V3.2",
@@ -127,8 +136,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 162816,
-    maxTokens: 4096,
+    maxTokens: 8000,
   },
+  // NOTE: not present in models.dev synthetic provider; maxTokens unchanged
   {
     id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
     name: "Qwen/Qwen3-VL-235B-A22B-Instruct",
@@ -143,10 +153,11 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
     contextWindow: 256000,
     maxTokens: 4096,
   },
+  // models.dev: synthetic/hf:moonshotai/Kimi-K2-Instruct-0905 → ctx=262144, out=32768
   {
     id: "hf:moonshotai/Kimi-K2-Instruct-0905",
     name: "moonshotai/Kimi-K2-Instruct-0905",
-    reasoning: true,
+    reasoning: false,
     input: ["text"],
     cost: {
       input: 1.2,
@@ -155,8 +166,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 262144,
-    maxTokens: 4096,
+    maxTokens: 32768,
   },
+  // models.dev: synthetic/hf:moonshotai/Kimi-K2-Thinking → ctx=262144, out=262144
   {
     id: "hf:moonshotai/Kimi-K2-Thinking",
     name: "moonshotai/Kimi-K2-Thinking",
@@ -169,8 +181,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
      cacheWrite: 0,
     },
     contextWindow: 262144,
-    maxTokens: 4096,
+    maxTokens: 262144,
   },
+  // models.dev: synthetic/hf:openai/gpt-oss-120b → ctx=128000, out=32768
   {
     id: "hf:openai/gpt-oss-120b",
     name: "openai/gpt-oss-120b",
@@ -183,8 +196,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 32768,
   },
+  // models.dev: synthetic/hf:Qwen/Qwen3-Coder-480B-A35B-Instruct → ctx=256000, out=32000
   {
     id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
     name: "Qwen/Qwen3-Coder-480B-A35B-Instruct",
@@ -197,8 +211,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 262144,
-    maxTokens: 4096,
+    maxTokens: 32000,
   },
+  // models.dev: synthetic/hf:Qwen/Qwen3-235B-A22B-Instruct-2507 → ctx=256000, out=32000
   {
     id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
     name: "Qwen/Qwen3-235B-A22B-Instruct-2507",
@@ -211,8 +226,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 262144,
-    maxTokens: 4096,
+    maxTokens: 32000,
   },
+  // models.dev: synthetic/hf:zai-org/GLM-4.6 → ctx=200000, out=64000
   {
     id: "hf:zai-org/GLM-4.6",
     name: "zai-org/GLM-4.6",
@@ -225,8 +241,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 202752,
-    maxTokens: 4096,
+    maxTokens: 64000,
   },
+  // models.dev: synthetic/hf:MiniMaxAI/MiniMax-M2 → ctx=196608, out=131000
   {
     id: "hf:MiniMaxAI/MiniMax-M2",
     name: "MiniMaxAI/MiniMax-M2",
@@ -239,8 +256,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 196608,
-    maxTokens: 4096,
+    maxTokens: 131000,
   },
+  // models.dev: synthetic/hf:moonshotai/Kimi-K2.5 → ctx=262144, out=65536
   {
     id: "hf:moonshotai/Kimi-K2.5",
     name: "moonshotai/Kimi-K2.5",
@@ -253,8 +271,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 262144,
-    maxTokens: 4096,
+    maxTokens: 65536,
   },
+  // models.dev: synthetic/hf:deepseek-ai/DeepSeek-V3 → ctx=128000, out=128000
   {
     id: "hf:deepseek-ai/DeepSeek-V3",
     name: "deepseek-ai/DeepSeek-V3",
@@ -267,8 +286,9 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 128000,
   },
+  // models.dev: synthetic/hf:Qwen/Qwen3-235B-A22B-Thinking-2507 → ctx=256000, out=32000
   {
     id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
     name: "Qwen/Qwen3-235B-A22B-Thinking-2507",
@@ -281,6 +301,6 @@ export const SYNTHETIC_MODELS: SyntheticModelConfig[] = [
       cacheWrite: 0,
     },
     contextWindow: 262144,
-    maxTokens: 4096,
+    maxTokens: 32000,
   },
 ];
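
The per-model comments above cite models.dev as the source of each maxTokens value, so the table can be cross-checked programmatically. Below is a minimal sketch, not part of this package, that fetches https://models.dev/api.json and compares each SYNTHETIC_MODELS entry against the synthetic provider's output limit. It assumes the api.json payload keys providers at the top level, each exposing a models map with limit.context and limit.output fields, and that models.dev keys synthetic models with the same "hf:" prefix used here (the comments' "synthetic/hf:..." paths suggest this); the "./models" import path is hypothetical.

```ts
// Sketch: cross-check maxTokens in SYNTHETIC_MODELS against models.dev.
// Assumed api.json shape (unverified):
//   { [providerId]: { models: { [modelId]: { limit: { context: number; output: number } } } } }
import { SYNTHETIC_MODELS } from "./models"; // hypothetical path to the file diffed above

interface ModelsDevModel {
  limit?: { context?: number; output?: number };
}
interface ModelsDevProvider {
  models?: Record<string, ModelsDevModel>;
}

async function checkMaxTokens(): Promise<void> {
  const res = await fetch("https://models.dev/api.json");
  const data = (await res.json()) as Record<string, ModelsDevProvider>;
  const synthetic = data["synthetic"]?.models ?? {};

  for (const model of SYNTHETIC_MODELS) {
    const entry = synthetic[model.id]; // ids like "hf:zai-org/GLM-4.7"
    if (entry?.limit?.output === undefined) {
      // Matches the NOTE in the diff: models absent from models.dev
      // (e.g. Qwen3-VL-235B-A22B-Instruct) are reported, not modified.
      console.log(`${model.id}: not in models.dev; keeping maxTokens=${model.maxTokens}`);
      continue;
    }
    if (entry.limit.output !== model.maxTokens) {
      console.log(
        `${model.id}: maxTokens=${model.maxTokens}, models.dev output=${entry.limit.output}`,
      );
    }
  }
}

checkMaxTokens().catch(console.error);
```

Reporting mismatches instead of rewriting the file keeps the hardcoded list as the single source of truth, with models.dev used only as an audit reference, which mirrors how this release applied the values by hand.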