@discomedia/utils 1.0.9 → 1.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -9385,21 +9385,66 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * Makes a call to the OpenAI Responses API for advanced use cases with built-in tools.
  *
  * @param input The text prompt to send to the model (e.g., "What's in this image?")
- * @param options The options for the Responses API call, including optional image data.
+ * @param options The options for the Responses API call, including optional image data and context.
  * @return A promise that resolves to the response from the Responses API.
+ *
+ * @example
+ * // With conversation context
+ * const response = await makeLLMCall("What did I ask about earlier?", {
+ *   context: [
+ *     { role: 'user', content: 'What is the capital of France?' },
+ *     { role: 'assistant', content: 'The capital of France is Paris.' }
+ *   ]
+ * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL$1, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high' } = options;
+    const { apiKey, model = DEFAULT_MODEL$1, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
         throw new Error(`Unsupported model: ${normalizedModel}. Please use one of the supported models.`);
     }
-    // Process input for image analysis
+    // Process input for conversation context and image analysis
     let processedInput;
-    if (imageBase64) {
-        // Combine text prompt with image for multi-modal input
-        // Based on OpenAI's sample code structure
+    if (context && context.length > 0) {
+        // Build conversation array with context
+        const conversationMessages = [];
+        // Add context messages
+        for (const contextMsg of context) {
+            conversationMessages.push({
+                role: contextMsg.role,
+                content: contextMsg.content,
+                type: 'message'
+            });
+        }
+        // Add current input message
+        if (imageBase64) {
+            // Current message includes both text and image
+            conversationMessages.push({
+                role: 'user',
+                content: [
+                    { type: 'input_text', text: input },
+                    {
+                        type: 'input_image',
+                        detail: imageDetail,
+                        image_url: imageBase64.startsWith('data:') ? imageBase64 : `data:image/webp;base64,${imageBase64}`,
+                    }
+                ],
+                type: 'message'
+            });
+        }
+        else {
+            // Current message is just text
+            conversationMessages.push({
+                role: 'user',
+                content: input,
+                type: 'message'
+            });
+        }
+        processedInput = conversationMessages;
+    }
+    else if (imageBase64) {
+        // No context, but has image - use the original image logic
         processedInput = [
             {
                 role: 'user',
@@ -9410,11 +9455,13 @@ async function makeLLMCall(input, options = {}) {
                         detail: imageDetail,
                         image_url: imageBase64.startsWith('data:') ? imageBase64 : `data:image/webp;base64,${imageBase64}`,
                     }
-                ]
+                ],
+                type: 'message'
             }
         ];
     }
     else {
+        // No context, no image - simple string input
         processedInput = input;
     }
     // Build the options object for makeResponsesAPICall
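The net effect of this release is that makeLLMCall now accepts an optional context array of prior conversation turns, which it replays ahead of the current input as Responses API message objects. A minimal usage sketch based on the @example added above; the import path from the package root and the environment-variable API key are assumptions, not confirmed by this diff:

// Sketch of the new context option (JavaScript, ESM).
// Assumptions: makeLLMCall is exported from the package entry point,
// and the key is passed via the existing apiKey option.
import { makeLLMCall } from '@discomedia/utils';

const response = await makeLLMCall('What did I ask about earlier?', {
  apiKey: process.env.OPENAI_API_KEY,
  context: [
    { role: 'user', content: 'What is the capital of France?' },
    { role: 'assistant', content: 'The capital of France is Paris.' }
  ]
});
console.log(response);

If imageBase64 is also supplied, the current turn is sent as a multi-part message (input_text plus input_image) while the context turns remain plain text, as the new branch above shows.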