@fre4x/openai 1.0.57 → 1.0.60

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +1 -0
  2. package/dist/index.js +92 -13
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -42,6 +42,7 @@ This B1TE bridges your agent directly to OpenAI's advanced models — enabling i
42
42
  ## Features
43
43
 
44
44
  - **Protocol Compliance**: Implements the split `content` (Markdown) and `structuredContent` (Raw Data) model for high-fidelity agent interaction.
45
+ - **Agent-Sufficient Output**: `content.text` carries enough detail for autonomous follow-up, including model IDs, generated image URLs, revised prompts, saved paths, transcript metadata, and speech metadata.
45
46
  - **Strict Validation**: All tools utilize `outputSchema` for reliable data parsing and reduced hallucinations.
46
47
  - **Privacy & Security**: Aggressively masks implementation details (HTTP codes, library names) in error messages.
47
48
  - **Token Efficiency**: Mandatory pagination for model listing.
package/dist/index.js CHANGED
@@ -43514,6 +43514,73 @@ var IMAGE_MIME_TYPES = {
43514
43514
  ".png": "image/png",
43515
43515
  ".webp": "image/webp"
43516
43516
  };
43517
/**
 * Builds the Markdown `content.text` summary for a media-analysis result.
 *
 * @param {string} analysis - Analysis text returned by the model.
 * @param {{model: string, prompt: string, image_url: string, detail: string}} args -
 *   Tool arguments echoed back so the agent can reproduce the call.
 * @param {{input_tokens: number, output_tokens: number, total_tokens: number}} [usage] -
 *   Optional token-usage stats; the Usage line is omitted when absent.
 * @returns {string} Markdown summary, capped by truncateToLimit.
 */
function summarizeAnalyzeMediaText(analysis, args, usage) {
  return truncateToLimit(
    [
      "# Media Analysis",
      "",
      `- Model: ${args.model}`,
      `- Prompt: ${args.prompt}`,
      `- Source: ${args.image_url}`,
      `- Detail: ${args.detail}`,
      usage ? `- Usage: ${usage.input_tokens} input / ${usage.output_tokens} output / ${usage.total_tokens} total tokens` : null,
      "",
      analysis
    ]
      // Drop only the conditional entries that resolved to null.
      // `.filter(Boolean)` would also strip the intentional "" blank
      // separator lines (and an empty analysis), collapsing the layout.
      .filter((line) => line !== null)
      .join("\n")
  );
}
43532
/**
 * Renders the paginated model-listing summary used as `content.text`.
 *
 * @param {Array<{id: string, owned_by?: string}>} models - Models on this page.
 * @param {number} total - Total number of models available.
 * @param {number} offset - Offset of the first model shown.
 * @param {boolean} hasMore - Whether further pages exist.
 * @returns {string} Human-readable summary with one bullet per model ID.
 */
function summarizeModelListText(models, total, offset, hasMore) {
  const hint = hasMore ? " Use offset for more." : "";
  const header = `Found ${total} models. Showing ${models.length} starting from offset ${offset}.${hint}`;
  if (models.length === 0) {
    return `${header}\nNo models returned.`;
  }
  const entries = [];
  for (const model of models) {
    // Fall back to "unknown" when the owner field is missing or empty.
    entries.push(`- ${model.id} (owner: ${model.owned_by || "unknown"})`);
  }
  return `${header}\nModel IDs:\n${entries.join("\n")}`;
}
43542
/**
 * Builds the `content.text` summary for an image-generation result.
 *
 * @param {Array<{url?: string, revised_prompt?: string, savedPath?: string}>} images -
 *   Generated images; optional fields produce optional detail lines.
 * @returns {string} Success message followed by per-image detail blocks.
 */
function summarizeGeneratedImagesText(images) {
  if (images.length === 0) {
    return "Image generated successfully.\nNo images returned.";
  }
  const blocks = images.map((image, index) => {
    const rows = [
      `Image ${index + 1}:`,
      // Empty/missing URLs are reported explicitly rather than left blank.
      `- URL: ${image.url || "not returned"}`
    ];
    if (image.revised_prompt) {
      rows.push(`- Revised Prompt: ${image.revised_prompt}`);
    }
    if (image.savedPath) {
      rows.push(`- Saved Path: ${image.savedPath}`);
    }
    return rows.join("\n");
  });
  return ["Image generated successfully.", ...blocks].join("\n");
}
43556
/**
 * Builds the Markdown `content.text` summary for an audio-transcription result.
 *
 * @param {{text: string, language?: string, duration?: number}} output -
 *   Transcription result; language/duration lines appear only when present
 *   (duration 0 is still shown — checked against undefined, not truthiness).
 * @param {{model: string, file_path: string}} args - Tool arguments echoed back.
 * @returns {string} Markdown summary, capped by truncateToLimit.
 */
function summarizeTranscriptionText(output, args) {
  return truncateToLimit(
    [
      "# Audio Transcript",
      "",
      `- Model: ${args.model}`,
      `- File: ${args.file_path}`,
      output.language ? `- Language: ${output.language}` : null,
      output.duration !== void 0 ? `- Duration: ${output.duration}s` : null,
      "",
      output.text
    ]
      // Drop only the conditional entries that resolved to null.
      // `.filter(Boolean)` would also strip the intentional "" blank
      // separator lines and an empty transcript text.
      .filter((line) => line !== null)
      .join("\n")
  );
}
43570
/**
 * Builds the Markdown `content.text` summary for a speech-generation result.
 *
 * @param {{response_format: string, speed: number}} args - Request parameters.
 * @param {{model: string, voice: string, mimeType: string, savedPath?: string}} output -
 *   Generation result; savedPath is reported as "not saved" when absent.
 * @returns {string} Markdown summary; the audio itself travels as a separate
 *   MCP content item, which the closing line points the agent to.
 */
function summarizeSpeechText(args, output) {
  const savedLine = output.savedPath
    ? `- Saved Path: ${output.savedPath}`
    : "- Saved Path: not saved";
  const lines = ["# Speech Generated", ""];
  lines.push(`- Model: ${output.model}`);
  lines.push(`- Voice: ${output.voice}`);
  lines.push(`- Format: ${args.response_format}`);
  lines.push(`- MIME Type: ${output.mimeType}`);
  lines.push(`- Speed: ${args.speed}`);
  lines.push(savedLine, "", "Audio content is attached in the MCP response.");
  return lines.join("\n");
}
43517
43584
  var ToolInputError = class extends Error {
43518
43585
  constructor(field, message) {
43519
43586
  super(message);
@@ -43720,7 +43787,12 @@ async function handleAnalyzeMedia(args) {
43720
43787
  };
43721
43788
  await resolveVisionImageUrl(args.image_url);
43722
43789
  return {
43723
- content: [{ type: "text", text: truncateToLimit(analysis2) }],
43790
+ content: [
43791
+ {
43792
+ type: "text",
43793
+ text: summarizeAnalyzeMediaText(analysis2, args)
43794
+ }
43795
+ ],
43724
43796
  structuredContent: structuredContent2
43725
43797
  };
43726
43798
  }
@@ -43754,7 +43826,12 @@ async function handleAnalyzeMedia(args) {
43754
43826
  usage
43755
43827
  };
43756
43828
  return {
43757
- content: [{ type: "text", text: truncateToLimit(analysis) }],
43829
+ content: [
43830
+ {
43831
+ type: "text",
43832
+ text: summarizeAnalyzeMediaText(analysis, args, usage)
43833
+ }
43834
+ ],
43758
43835
  structuredContent
43759
43836
  };
43760
43837
  } catch (error48) {
@@ -43786,7 +43863,12 @@ async function handleListModels(args) {
43786
43863
  content: [
43787
43864
  {
43788
43865
  type: "text",
43789
- text: `Found ${paginated.total} models. Showing ${paginated.items.length} starting from offset ${paginated.offset}.${paginated.hasMore ? " Use offset for more." : ""}`
43866
+ text: summarizeModelListText(
43867
+ paginated.items,
43868
+ paginated.total,
43869
+ paginated.offset,
43870
+ paginated.hasMore
43871
+ )
43790
43872
  }
43791
43873
  ],
43792
43874
  structuredContent
@@ -43828,9 +43910,6 @@ async function handleGenerateImage(args) {
43828
43910
  savedPath
43829
43911
  });
43830
43912
  }
43831
- const savedPaths = images.flatMap(
43832
- (image) => image.savedPath ? [image.savedPath] : []
43833
- );
43834
43913
  const structuredContent = {
43835
43914
  images,
43836
43915
  prompt: args.prompt
@@ -43839,9 +43918,7 @@ async function handleGenerateImage(args) {
43839
43918
  content: [
43840
43919
  {
43841
43920
  type: "text",
43842
- text: `Image generated successfully.${savedPaths.length > 0 ? `
43843
- Saved to:
43844
- ${savedPaths.join("\n")}` : ""}`
43921
+ text: truncateToLimit(summarizeGeneratedImagesText(images))
43845
43922
  }
43846
43923
  ],
43847
43924
  structuredContent
@@ -43860,7 +43937,10 @@ async function handleTranscribeAudio(args) {
43860
43937
  content: [
43861
43938
  {
43862
43939
  type: "text",
43863
- text: truncateToLimit(structuredContent2.text)
43940
+ text: summarizeTranscriptionText(
43941
+ structuredContent2,
43942
+ args
43943
+ )
43864
43944
  }
43865
43945
  ],
43866
43946
  structuredContent: structuredContent2
@@ -43885,7 +43965,7 @@ async function handleTranscribeAudio(args) {
43885
43965
  content: [
43886
43966
  {
43887
43967
  type: "text",
43888
- text: truncateToLimit(response.text)
43968
+ text: summarizeTranscriptionText(structuredContent, args)
43889
43969
  }
43890
43970
  ],
43891
43971
  structuredContent
@@ -43926,8 +44006,7 @@ async function handleGenerateSpeech(args) {
43926
44006
  content: [
43927
44007
  {
43928
44008
  type: "text",
43929
- text: `Speech generated successfully using voice \`${args.voice}\`.${savedPath ? `
43930
- Saved to: ${savedPath}` : ""}`
44009
+ text: summarizeSpeechText(args, structuredContent)
43931
44010
  },
43932
44011
  audioContent
43933
44012
  ],
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@fre4x/openai",
3
- "version": "1.0.57",
3
+ "version": "1.0.60",
4
4
  "description": "OpenAI MCP server providing multimodal analysis, image generation, and transcription.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",