@ryanfw/prompt-orchestration-pipeline 0.13.4 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/src/llm/index.js +38 -4
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ryanfw/prompt-orchestration-pipeline",
-  "version": "0.13.4",
+  "version": "0.14.0",
   "description": "A Prompt-orchestration pipeline (POP) is a framework for building, running, and experimenting with complex chains of LLM tasks.",
   "type": "module",
   "main": "src/ui/server.js",
package/src/llm/index.js CHANGED
@@ -89,6 +89,18 @@ export function calculateCost(provider, model, usage) {
   return promptCost + completionCost;
 }

+// Helper function to detect if messages indicate JSON response is needed
+function shouldInferJsonFormat(messages) {
+  // Check first two messages for JSON keyword (case-insensitive)
+  const messagesToCheck = messages.slice(0, 2);
+  for (const msg of messagesToCheck) {
+    if (typeof msg?.content === "string" && /json/i.test(msg.content)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 // Core chat function - no metrics handling needed!
 export async function chat(options) {
   console.log("[llm] chat() called with options:", {
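For context, here is a hypothetical harness showing how the new heuristic behaves. The function is module-private in the released file, so the calls below assume it is reachable (e.g. from a unit test inside the same module); the message arrays are illustrative, not from the package:

// shouldInferJsonFormat only scans the first two messages (slice(0, 2))
const jsonPrompt = [
  { role: "system", content: "Respond with a JSON object." },
  { role: "user", content: "List three colors." },
];
const plainPrompt = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "List three colors." },
];
const latePrompt = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "List three colors." },
  { role: "user", content: "Return JSON." }, // third message: never scanned
];

shouldInferJsonFormat(jsonPrompt);  // true  (/json/i matches "JSON object")
shouldInferJsonFormat(plainPrompt); // false (no "json" in first two messages)
shouldInferJsonFormat(latePrompt);  // false (keyword appears after slice(0, 2))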
@@ -199,6 +211,17 @@ export async function chat(options) {
     };
   } else if (provider === "openai") {
     console.log("[llm] Using OpenAI provider");
+
+    // Infer JSON format if not explicitly provided
+    const effectiveResponseFormat =
+      responseFormat === undefined ||
+      responseFormat === null ||
+      responseFormat === ""
+        ? shouldInferJsonFormat(messages)
+          ? "json_object"
+          : undefined
+        : responseFormat;
+
     const openaiArgs = {
       messages,
       model: model || "gpt-5-chat-latest",
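The nested ternary above resolves in three ways. This if/else sketch is an equivalent reading added for clarity, not code from the package:

let effectiveResponseFormat;
if (responseFormat === undefined || responseFormat === null || responseFormat === "") {
  // No usable caller value (undefined, null, or empty string):
  // fall back to the keyword heuristic.
  effectiveResponseFormat = shouldInferJsonFormat(messages) ? "json_object" : undefined;
} else {
  // Any other caller-supplied format wins unchanged.
  effectiveResponseFormat = responseFormat;
}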
@@ -211,8 +234,8 @@ export async function chat(options) {
       hasMessages: !!openaiArgs.messages,
       messageCount: openaiArgs.messages?.length,
     });
-    if (responseFormat !== undefined) {
-      openaiArgs.responseFormat = responseFormat;
+    if (effectiveResponseFormat !== undefined) {
+      openaiArgs.responseFormat = effectiveResponseFormat;
     }
     if (topP !== undefined) openaiArgs.topP = topP;
     if (frequencyPenalty !== undefined)
@@ -255,6 +278,17 @@ export async function chat(options) {
     }
   } else if (provider === "deepseek") {
     console.log("[llm] Using DeepSeek provider");
+
+    // Infer JSON format if not explicitly provided
+    const effectiveResponseFormat =
+      responseFormat === undefined ||
+      responseFormat === null ||
+      responseFormat === ""
+        ? shouldInferJsonFormat(messages)
+          ? "json_object"
+          : undefined
+        : responseFormat;
+
     const deepseekArgs = {
       messages,
       model: model || MODEL_CONFIG[DEFAULT_MODEL_BY_PROVIDER.deepseek].model,
@@ -274,8 +308,8 @@ export async function chat(options) {
     if (presencePenalty !== undefined)
       deepseekArgs.presencePenalty = presencePenalty;
     if (stop !== undefined) deepseekArgs.stop = stop;
-    if (responseFormat !== undefined) {
-      deepseekArgs.responseFormat = responseFormat;
+    if (effectiveResponseFormat !== undefined) {
+      deepseekArgs.responseFormat = effectiveResponseFormat;
     }

     console.log("[llm] Calling deepseekChat()...");
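Taken together, 0.14.0 changes what chat() does when no responseFormat is passed for the OpenAI and DeepSeek providers (the same inference block is duplicated verbatim in both branches). A usage sketch, assuming chat() reads provider, messages, and responseFormat from its options object as the names in the diff suggest, and that the llm module is importable at this subpath (both assumptions, not confirmed by the diff):

import { chat } from "@ryanfw/prompt-orchestration-pipeline/src/llm/index.js"; // hypothetical import path

// No responseFormat given: the prompt mentions "JSON" in its first two
// messages, so 0.14.0 now sends responseFormat: "json_object" automatically.
const inferred = await chat({
  provider: "openai",
  messages: [
    { role: "system", content: 'Reply with a JSON object: {"colors": [...]}' },
    { role: "user", content: "Name three colors." },
  ],
});

// An explicit, non-empty responseFormat bypasses the heuristic entirely,
// matching the pre-0.14.0 behavior. "text" is a hypothetical value here;
// the diff itself only shows "json_object".
const explicit = await chat({
  provider: "deepseek",
  responseFormat: "text",
  messages: [{ role: "user", content: "Return JSON if you like." }],
});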