@discomedia/utils 1.0.9 → 1.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -9387,21 +9387,66 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * Makes a call to the OpenAI Responses API for advanced use cases with built-in tools.
  *
  * @param input The text prompt to send to the model (e.g., "What's in this image?")
- * @param options The options for the Responses API call, including optional image data.
+ * @param options The options for the Responses API call, including optional image data and context.
  * @return A promise that resolves to the response from the Responses API.
+ *
+ * @example
+ * // With conversation context
+ * const response = await makeLLMCall("What did I ask about earlier?", {
+ *   context: [
+ *     { role: 'user', content: 'What is the capital of France?' },
+ *     { role: 'assistant', content: 'The capital of France is Paris.' }
+ *   ]
+ * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL$1, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high' } = options;
+    const { apiKey, model = DEFAULT_MODEL$1, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
         throw new Error(`Unsupported model: ${normalizedModel}. Please use one of the supported models.`);
     }
-    // Process input for image analysis
+    // Process input for conversation context and image analysis
     let processedInput;
-    if (imageBase64) {
-        // Combine text prompt with image for multi-modal input
-        // Based on OpenAI's sample code structure
+    if (context && context.length > 0) {
+        // Build conversation array with context
+        const conversationMessages = [];
+        // Add context messages
+        for (const contextMsg of context) {
+            conversationMessages.push({
+                role: contextMsg.role,
+                content: contextMsg.content,
+                type: 'message'
+            });
+        }
+        // Add current input message
+        if (imageBase64) {
+            // Current message includes both text and image
+            conversationMessages.push({
+                role: 'user',
+                content: [
+                    { type: 'input_text', text: input },
+                    {
+                        type: 'input_image',
+                        detail: imageDetail,
+                        image_url: imageBase64.startsWith('data:') ? imageBase64 : `data:image/webp;base64,${imageBase64}`,
+                    }
+                ],
+                type: 'message'
+            });
+        }
+        else {
+            // Current message is just text
+            conversationMessages.push({
+                role: 'user',
+                content: input,
+                type: 'message'
+            });
+        }
+        processedInput = conversationMessages;
+    }
+    else if (imageBase64) {
+        // No context, but has image - use the original image logic
         processedInput = [
             {
                 role: 'user',
@@ -9412,11 +9457,13 @@ async function makeLLMCall(input, options = {}) {
                         detail: imageDetail,
                         image_url: imageBase64.startsWith('data:') ? imageBase64 : `data:image/webp;base64,${imageBase64}`,
                     }
-                ]
+                ],
+                type: 'message'
             }
         ];
     }
     else {
+        // No context, no image - simple string input
         processedInput = input;
     }
     // Build the options object for makeResponsesAPICall
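
For reference, the change above adds an optional context array of prior { role, content } turns to makeLLMCall, which composes with the existing imageBase64/imageDetail options. A minimal usage sketch, assuming makeLLMCall is exported from the package entry point and OPENAI_API_KEY is set in the environment (the image path is a placeholder):

const { makeLLMCall } = require('@discomedia/utils'); // assumed export
const fs = require('node:fs');

(async () => {
    // A bare base64 string is accepted: per the diff, the function prepends
    // `data:image/webp;base64,` when no `data:` URL prefix is present.
    const imageBase64 = fs.readFileSync('./chart.webp').toString('base64'); // placeholder file

    // Prior turns are passed through as { role, content } messages;
    // the current prompt becomes the final user message.
    const response = await makeLLMCall('How does this chart relate to my earlier question?', {
        apiKey: process.env.OPENAI_API_KEY,
        context: [
            { role: 'user', content: 'What were Q3 revenues?' },
            { role: 'assistant', content: 'Q3 revenue grew 12% quarter over quarter.' }
        ],
        imageBase64,
        imageDetail: 'high'
    });
    console.log(response);
})();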