backend-manager 5.0.165 → 5.0.167
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/package.json +1 -1
- package/src/manager/libraries/openai.js +69 -13
package/CHANGELOG.md
CHANGED
|
@@ -14,6 +14,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
|
|
|
14
14
|
- `Fixed` for any bug fixes.
|
|
15
15
|
- `Security` in case of vulnerabilities.
|
|
16
16
|
|
|
17
|
+
# [5.0.167] - 2026-03-20
|
|
18
|
+
### Changed
|
|
19
|
+
- Extracted `resolveTemperature()` helper for consistency with `resolveFormatting()` and `resolveReasoning()`
|
|
20
|
+
|
|
21
|
+
# [5.0.166] - 2026-03-20
|
|
22
|
+
### Added
|
|
23
|
+
- `reasoning: true` feature flag to GPT-5.x and o-series models in MODEL_TABLE
|
|
24
|
+
- New GPT-5.4-mini and GPT-5.4-nano model entries with pricing
|
|
25
|
+
|
|
26
|
+
### Changed
|
|
27
|
+
- Reasoning parameter is now conditionally included in API requests only when the model supports it
|
|
28
|
+
- `resolveReasoning()` validates model support and warns when reasoning is requested for unsupported models
|
|
29
|
+
|
|
17
30
|
# [5.0.165] - 2026-03-20
|
|
18
31
|
### Changed
|
|
19
32
|
- Serve command now reads hosting port from `firebase.json` emulator config before falling back to default 5000
|
package/src/manager/libraries/openai.js
CHANGED
|
@@ -22,6 +22,7 @@ const MODEL_TABLE = {
|
|
|
22
22
|
features: {
|
|
23
23
|
json: true,
|
|
24
24
|
temperature: false,
|
|
25
|
+
reasoning: true,
|
|
25
26
|
},
|
|
26
27
|
},
|
|
27
28
|
'gpt-5.2': {
|
|
@@ -31,6 +32,7 @@ const MODEL_TABLE = {
|
|
|
31
32
|
features: {
|
|
32
33
|
json: true,
|
|
33
34
|
temperature: false,
|
|
35
|
+
reasoning: true,
|
|
34
36
|
},
|
|
35
37
|
},
|
|
36
38
|
'gpt-5.1': {
|
|
@@ -40,6 +42,7 @@ const MODEL_TABLE = {
|
|
|
40
42
|
features: {
|
|
41
43
|
json: true,
|
|
42
44
|
temperature: false,
|
|
45
|
+
reasoning: true,
|
|
43
46
|
},
|
|
44
47
|
},
|
|
45
48
|
'gpt-5': {
|
|
@@ -49,6 +52,7 @@ const MODEL_TABLE = {
|
|
|
49
52
|
features: {
|
|
50
53
|
json: true,
|
|
51
54
|
temperature: false,
|
|
55
|
+
reasoning: true,
|
|
52
56
|
},
|
|
53
57
|
},
|
|
54
58
|
'gpt-5-mini': {
|
|
@@ -58,6 +62,7 @@ const MODEL_TABLE = {
|
|
|
58
62
|
features: {
|
|
59
63
|
json: true,
|
|
60
64
|
temperature: false,
|
|
65
|
+
reasoning: true,
|
|
61
66
|
},
|
|
62
67
|
},
|
|
63
68
|
'gpt-5-nano': {
|
|
@@ -67,6 +72,29 @@ const MODEL_TABLE = {
|
|
|
67
72
|
features: {
|
|
68
73
|
json: true,
|
|
69
74
|
temperature: false,
|
|
75
|
+
reasoning: true,
|
|
76
|
+
},
|
|
77
|
+
},
|
|
78
|
+
// Mar 20, 2026
|
|
79
|
+
// GPT-5.4 mini/nano family
|
|
80
|
+
'gpt-5.4-mini': {
|
|
81
|
+
input: 0.75,
|
|
82
|
+
output: 4.50,
|
|
83
|
+
provider: 'openai',
|
|
84
|
+
features: {
|
|
85
|
+
json: true,
|
|
86
|
+
temperature: false,
|
|
87
|
+
reasoning: true,
|
|
88
|
+
},
|
|
89
|
+
},
|
|
90
|
+
'gpt-5.4-nano': {
|
|
91
|
+
input: 0.20,
|
|
92
|
+
output: 1.25,
|
|
93
|
+
provider: 'openai',
|
|
94
|
+
features: {
|
|
95
|
+
json: true,
|
|
96
|
+
temperature: false,
|
|
97
|
+
reasoning: true,
|
|
70
98
|
},
|
|
71
99
|
},
|
|
72
100
|
// GPT-4.5
|
|
@@ -127,6 +155,7 @@ const MODEL_TABLE = {
|
|
|
127
155
|
provider: 'openai',
|
|
128
156
|
features: {
|
|
129
157
|
json: true,
|
|
158
|
+
reasoning: true,
|
|
130
159
|
},
|
|
131
160
|
},
|
|
132
161
|
'o3-pro': {
|
|
@@ -135,6 +164,7 @@ const MODEL_TABLE = {
|
|
|
135
164
|
provider: 'openai',
|
|
136
165
|
features: {
|
|
137
166
|
json: true,
|
|
167
|
+
reasoning: true,
|
|
138
168
|
},
|
|
139
169
|
},
|
|
140
170
|
'o3': {
|
|
@@ -143,6 +173,7 @@ const MODEL_TABLE = {
|
|
|
143
173
|
provider: 'openai',
|
|
144
174
|
features: {
|
|
145
175
|
json: true,
|
|
176
|
+
reasoning: true,
|
|
146
177
|
},
|
|
147
178
|
},
|
|
148
179
|
'o3-mini': {
|
|
@@ -151,6 +182,7 @@ const MODEL_TABLE = {
|
|
|
151
182
|
provider: 'openai',
|
|
152
183
|
features: {
|
|
153
184
|
json: true,
|
|
185
|
+
reasoning: true,
|
|
154
186
|
},
|
|
155
187
|
},
|
|
156
188
|
'o1-pro': {
|
|
@@ -159,6 +191,7 @@ const MODEL_TABLE = {
|
|
|
159
191
|
provider: 'openai',
|
|
160
192
|
features: {
|
|
161
193
|
json: true,
|
|
194
|
+
reasoning: true,
|
|
162
195
|
},
|
|
163
196
|
},
|
|
164
197
|
'o1': {
|
|
@@ -167,6 +200,7 @@ const MODEL_TABLE = {
|
|
|
167
200
|
provider: 'openai',
|
|
168
201
|
features: {
|
|
169
202
|
json: true,
|
|
203
|
+
reasoning: true,
|
|
170
204
|
},
|
|
171
205
|
},
|
|
172
206
|
'o1-preview': {
|
|
@@ -175,6 +209,7 @@ const MODEL_TABLE = {
|
|
|
175
209
|
provider: 'openai',
|
|
176
210
|
features: {
|
|
177
211
|
json: true,
|
|
212
|
+
reasoning: true,
|
|
178
213
|
},
|
|
179
214
|
},
|
|
180
215
|
'o1-mini': {
|
|
@@ -183,6 +218,7 @@ const MODEL_TABLE = {
|
|
|
183
218
|
provider: 'openai',
|
|
184
219
|
features: {
|
|
185
220
|
json: true,
|
|
221
|
+
reasoning: true,
|
|
186
222
|
},
|
|
187
223
|
},
|
|
188
224
|
'gpt-4-turbo': {
|
|
@@ -785,8 +821,6 @@ function makeRequest(mode, options, self, prompt, message, user, _log) {
|
|
|
785
821
|
const history = formatHistory(options, prompt, message, _log);
|
|
786
822
|
|
|
787
823
|
// Set request
|
|
788
|
-
const modelConfig = getModelConfig(options.model);
|
|
789
|
-
|
|
790
824
|
request.url = 'https://api.openai.com/v1/responses';
|
|
791
825
|
request.body = {
|
|
792
826
|
model: options.model,
|
|
@@ -794,12 +828,18 @@ function makeRequest(mode, options, self, prompt, message, user, _log) {
|
|
|
794
828
|
user: user,
|
|
795
829
|
max_output_tokens: options.maxTokens,
|
|
796
830
|
text: resolveFormatting(options),
|
|
797
|
-
reasoning: resolveReasoning(options),
|
|
798
831
|
}
|
|
799
832
|
|
|
800
833
|
// Only include temperature if the model supports it
|
|
801
|
-
|
|
802
|
-
|
|
834
|
+
const temperature = resolveTemperature(options);
|
|
835
|
+
if (temperature !== undefined) {
|
|
836
|
+
request.body.temperature = temperature;
|
|
837
|
+
}
|
|
838
|
+
|
|
839
|
+
// Only include reasoning if the model supports it
|
|
840
|
+
const reasoning = resolveReasoning(options);
|
|
841
|
+
if (reasoning) {
|
|
842
|
+
request.body.reasoning = reasoning;
|
|
803
843
|
}
|
|
804
844
|
}
|
|
805
845
|
|
|
@@ -860,17 +900,33 @@ function resolveFormatting(options) {
|
|
|
860
900
|
return undefined;
|
|
861
901
|
}
|
|
862
902
|
|
|
903
|
+
function resolveTemperature(options) {
|
|
904
|
+
// Check if the model supports temperature
|
|
905
|
+
const modelConfig = getModelConfig(options.model);
|
|
906
|
+
if (modelConfig.features?.temperature === false) {
|
|
907
|
+
return undefined;
|
|
908
|
+
}
|
|
909
|
+
|
|
910
|
+
return options.temperature;
|
|
911
|
+
}
|
|
912
|
+
|
|
863
913
|
function resolveReasoning(options) {
|
|
864
|
-
// If reasoning is
|
|
865
|
-
if (options.reasoning) {
|
|
866
|
-
return {
|
|
867
|
-
effort: options.reasoning.effort || 'medium',
|
|
868
|
-
// summary: options.reasoning.summary || 'concise',
|
|
869
|
-
};
|
|
914
|
+
// If reasoning is not requested, return undefined
|
|
915
|
+
if (!options.reasoning) {
|
|
916
|
+
return undefined;
|
|
870
917
|
}
|
|
871
918
|
|
|
872
|
-
//
|
|
873
|
-
|
|
919
|
+
// Check if the model supports reasoning
|
|
920
|
+
const modelConfig = getModelConfig(options.model);
|
|
921
|
+
if (!modelConfig.features?.reasoning) {
|
|
922
|
+
console.warn(`Reasoning not supported for model: ${options.model}, ignoring reasoning option`);
|
|
923
|
+
return undefined;
|
|
924
|
+
}
|
|
925
|
+
|
|
926
|
+
return {
|
|
927
|
+
effort: options.reasoning.effort || 'medium',
|
|
928
|
+
// summary: options.reasoning.summary || 'concise',
|
|
929
|
+
};
|
|
874
930
|
}
|
|
875
931
|
|
|
876
932
|
module.exports = OpenAI;
|