@discomedia/utils 1.0.8 → 1.0.10

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -7376,21 +7376,66 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * Makes a call to the OpenAI Responses API for advanced use cases with built-in tools.
  *
  * @param input The text prompt to send to the model (e.g., "What's in this image?")
- * @param options The options for the Responses API call, including optional image data.
+ * @param options The options for the Responses API call, including optional image data and context.
  * @return A promise that resolves to the response from the Responses API.
+ *
+ * @example
+ * // With conversation context
+ * const response = await makeLLMCall("What did I ask about earlier?", {
+ *   context: [
+ *     { role: 'user', content: 'What is the capital of France?' },
+ *     { role: 'assistant', content: 'The capital of France is Paris.' }
+ *   ]
+ * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL$1, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high' } = options;
+    const { apiKey, model = DEFAULT_MODEL$1, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
         throw new Error(`Unsupported model: ${normalizedModel}. Please use one of the supported models.`);
     }
-    // Process input for image analysis
+    // Process input for conversation context and image analysis
     let processedInput;
-    if (imageBase64) {
-        // Combine text prompt with image for multi-modal input
-        // Based on OpenAI's sample code structure
+    if (context && context.length > 0) {
+        // Build conversation array with context
+        const conversationMessages = [];
+        // Add context messages
+        for (const contextMsg of context) {
+            conversationMessages.push({
+                role: contextMsg.role,
+                content: contextMsg.content,
+                type: 'message'
+            });
+        }
+        // Add current input message
+        if (imageBase64) {
+            // Current message includes both text and image
+            conversationMessages.push({
+                role: 'user',
+                content: [
+                    { type: 'input_text', text: input },
+                    {
+                        type: 'input_image',
+                        detail: imageDetail,
+                        image_url: imageBase64.startsWith('data:') ? imageBase64 : `data:image/webp;base64,${imageBase64}`,
+                    }
+                ],
+                type: 'message'
+            });
+        }
+        else {
+            // Current message is just text
+            conversationMessages.push({
+                role: 'user',
+                content: input,
+                type: 'message'
+            });
+        }
+        processedInput = conversationMessages;
+    }
+    else if (imageBase64) {
+        // No context, but has image - use the original image logic
         processedInput = [
             {
                 role: 'user',
@@ -7401,11 +7446,13 @@ async function makeLLMCall(input, options = {}) {
                     detail: imageDetail,
                     image_url: imageBase64.startsWith('data:') ? imageBase64 : `data:image/webp;base64,${imageBase64}`,
                 }
-            ]
+            ],
+            type: 'message'
         }
     ];
     }
     else {
+        // No context, no image - simple string input
        processedInput = input;
     }
     // Build the options object for makeResponsesAPICall
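
The new context branch above forwards prior turns, plus the current user turn, to the Responses API as a single message array instead of a bare string; when imageBase64 is also set, the current turn becomes a two-part content array (input_text plus input_image). A minimal usage sketch follows, exercising both options together. It assumes makeLLMCall is exported from @discomedia/utils and that an OpenAI API key is available in the environment; neither detail is shown in this diff.

// Usage sketch (not from the package docs). Assumptions: makeLLMCall is
// exported by @discomedia/utils, and OPENAI_API_KEY is set in the environment.
import { makeLLMCall } from '@discomedia/utils';
import { readFileSync } from 'node:fs';

// Raw base64 is fine here: the function wraps it as data:image/webp;base64,...
const imageBase64 = readFileSync('photo.webp').toString('base64');

const response = await makeLLMCall('How does this photo relate to my earlier question?', {
    apiKey: process.env.OPENAI_API_KEY,
    imageBase64,
    imageDetail: 'high',
    context: [
        { role: 'user', content: 'What is the capital of France?' },
        { role: 'assistant', content: 'The capital of France is Paris.' }
    ]
});
console.log(response);

Note that each context message is pushed with type: 'message' and its original role, so the model sees the prior exchange verbatim before the new multimodal turn.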