@fre4x/gemini 1.0.57 → 1.0.60

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +5 -3
  2. package/dist/index.js +324 -119
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -11,9 +11,9 @@ Google's Gemini is not a chatbot. It is a multimodal reasoning engine. This B1TE
11
11
  | Tool | Capability |
12
12
  |------|-----------|
13
13
  | `analyze_media` | Analyze an image or audio file via URL or `file://` path. Prompt is optional. |
14
- | `list_models` | List available Imagen and Veo models. Filter by `image`, `video`, or `all`. |
14
+ | `list_models` | List supported models for `analyze_media`, `generate_image`, and `generate_video`. Filter by `analyze`, `image`, `video`, or `all`. |
15
15
  | `generate_image` | Image synthesis via Imagen 4 (`imagen-4.0-generate-001`). Optionally save to `output_dir`. |
16
- | `generate_video` | Async video generation via Veo (`veo-2.0-generate-001`). Returns `operation_name` to poll. |
16
+ | `generate_video` | Async video generation via Veo (`veo-3.1-generate-preview`). Returns `operation_name` to poll. |
17
17
  | `get_video_status` | Poll video generation status. Returns URLs when complete. Optionally saves to `output_dir`. |
18
18
 
19
19
  ## Requirements
@@ -48,8 +48,10 @@ MOCK=true npx @fre4x/gemini
48
48
 
49
49
  - **Image generation** requires Imagen access — not available on all free-tier API keys. Check [AI Studio](https://aistudio.google.com/).
50
50
  - **Video generation** is async. Call `generate_video` → poll `get_video_status` every ~30s until `done: true`.
51
- - `analyze_media` accepts `file://` paths for local files and auto-detects MIME type from URL extension (jpg, png, mp4, mp3, wav, pdf, etc.). If you omit `prompt`, it defaults to "Describe this media.".
51
+ - `analyze_media` accepts `file://` paths for local files and auto-detects MIME type from the URL extension (jpg, png, mp4, mp3, wav, pdf, etc.). If you omit `prompt`, it defaults to "Describe this media."; the default `model` is `gemini-2.5-flash`.
52
+ - `analyze_media` rejects retired text-model aliases such as `gemini-1.5-*` and `gemini-2.0-*`. If you override `model`, use a currently supported `generateContent` model.
52
53
  - `output_dir` on `generate_image` and `get_video_status` saves files to disk. Directory is created if it doesn't exist.
54
+ - Model defaults are intentionally pinned to the latest verified official Gemini docs for the current date. Do not replace them with older names from stale model knowledge.
53
55
 
54
56
  ## Development
55
57
 
package/dist/index.js CHANGED
@@ -67519,9 +67519,24 @@ var zodCompat2 = zod_exports;
67519
67519
  var z3 = zodCompat2.z ?? zodCompat2.default?.z ?? zodCompat2.default ?? zodCompat2;
67520
67520
  var IS_MOCK = process.env.GEMINI_MOCK === "true" || process.env.MOCK === "true";
67521
67521
  var PACKAGE_VERSION = getPackageVersion(import.meta.url);
67522
- var DEFAULT_TEXT_MODEL = "gemini-2.0-flash";
67523
- var DEFAULT_IMAGE_MODEL = "imagen-3.0-generate-001";
67524
- var DEFAULT_VIDEO_MODEL = "veo-2.0-generate-001";
67522
+ var DEFAULT_TEXT_MODEL = "gemini-2.5-flash";
67523
+ var DEFAULT_IMAGE_MODEL = "imagen-4.0-generate-001";
67524
+ var DEFAULT_VIDEO_MODEL = "veo-3.1-generate-preview";
67525
+ var FALLBACK_ANALYZE_MEDIA_MODELS = [
67526
+ DEFAULT_TEXT_MODEL,
67527
+ "gemini-2.5-pro",
67528
+ "gemini-2.5-flash-lite"
67529
+ ];
67530
+ var FALLBACK_IMAGE_MODELS = [
67531
+ DEFAULT_IMAGE_MODEL,
67532
+ "imagen-4.0-ultra-generate-001",
67533
+ "imagen-4.0-fast-generate-001"
67534
+ ];
67535
+ var FALLBACK_VIDEO_MODELS = [
67536
+ DEFAULT_VIDEO_MODEL,
67537
+ "veo-3.1-lite-generate-preview"
67538
+ ];
67539
+ var MODEL_CATALOG_CACHE_TTL_MS = 5 * 60 * 1e3;
67525
67540
  var ALLOWED_MEDIA_MIME_TYPES = /* @__PURE__ */ new Set([
67526
67541
  "image/png",
67527
67542
  "image/jpeg",
@@ -67542,6 +67557,7 @@ var ALLOWED_MEDIA_MIME_TYPES = /* @__PURE__ */ new Set([
67542
67557
  "application/pdf"
67543
67558
  ]);
67544
67559
  var ai = null;
67560
+ var modelCatalogCache = null;
67545
67561
  function getApiKey() {
67546
67562
  const apiKey = process.env.GEMINI_API_KEY;
67547
67563
  if (!apiKey) {
@@ -67616,6 +67632,7 @@ var AnalyzeMediaOutputSchema = z3.object({
67616
67632
  savedPath: z3.string().optional()
67617
67633
  });
67618
67634
  var ListModelsOutputSchema = z3.object({
67635
+ analyzeModels: z3.array(z3.string()),
67619
67636
  imageModels: z3.array(z3.string()),
67620
67637
  videoModels: z3.array(z3.string())
67621
67638
  });
@@ -67644,48 +67661,14 @@ var VideoStatusOutputSchema = z3.object({
67644
67661
  ),
67645
67662
  error: z3.string().optional()
67646
67663
  });
67647
- var ANALYZE_MEDIA_TOOL = {
67648
- name: "analyze_media",
67649
- description: "Analyze an image or audio file from a URL or file path.",
67650
- inputSchema: z3.object({
67651
- prompt: z3.string().min(1).default("Describe this media.").describe("Optional question or instruction for the media."),
67652
- media_source: z3.string().min(1).describe("URL, file:// path, or local file path."),
67653
- mime_type: z3.string().optional().describe("Optional MIME type override."),
67654
- model: z3.string().optional().default(DEFAULT_TEXT_MODEL).describe("Gemini model to use for analysis."),
67655
- output_dir: z3.string().optional().describe(
67656
- "Optional directory to save the analysis result as a .txt file."
67657
- )
67658
- }).strict(),
67659
- outputSchema: AnalyzeMediaOutputSchema
67660
- };
67661
67664
  var LIST_MODELS_TOOL = {
67662
67665
  name: "list_models",
67663
- description: "List available Gemini image and video models.",
67666
+ description: "List available models for Gemini media tools.",
67664
67667
  inputSchema: z3.object({
67665
- capability: z3.enum(["all", "image", "video"]).default("all").describe("Filter by capability: image, video, or all.")
67668
+ capability: z3.enum(["all", "analyze", "image", "video"]).default("all").describe("Filter by analyze, image, video, or all.")
67666
67669
  }).strict(),
67667
67670
  outputSchema: ListModelsOutputSchema
67668
67671
  };
67669
- var GENERATE_IMAGE_TOOL = {
67670
- name: "generate_image",
67671
- description: "Generate an image using Imagen and optionally save it to disk.",
67672
- inputSchema: z3.object({
67673
- prompt: z3.string().min(1).describe("Description of the image to generate."),
67674
- aspect_ratio: z3.enum(["1:1", "16:9", "9:16"]).default("1:1").describe("Aspect ratio for the image."),
67675
- model: z3.string().default(DEFAULT_IMAGE_MODEL).describe("Imagen model to use."),
67676
- output_dir: z3.string().optional().describe("Directory to save the image file.")
67677
- }).strict(),
67678
- outputSchema: GenerateImageOutputSchema
67679
- };
67680
- var GENERATE_VIDEO_TOOL = {
67681
- name: "generate_video",
67682
- description: "Start an async video generation request using Veo.",
67683
- inputSchema: z3.object({
67684
- prompt: z3.string().min(1).describe("Description of the video to generate."),
67685
- model: z3.string().default(DEFAULT_VIDEO_MODEL).describe("Veo model to use.")
67686
- }).strict(),
67687
- outputSchema: GenerateVideoOutputSchema
67688
- };
67689
67672
  var GET_VIDEO_STATUS_TOOL = {
67690
67673
  name: "get_video_status",
67691
67674
  description: "Check the status of a video generation request.",
@@ -67695,16 +67678,9 @@ var GET_VIDEO_STATUS_TOOL = {
67695
67678
  }).strict(),
67696
67679
  outputSchema: VideoStatusOutputSchema
67697
67680
  };
67698
- var TOOL_DEFINITIONS = [
67699
- ANALYZE_MEDIA_TOOL,
67700
- LIST_MODELS_TOOL,
67701
- GENERATE_IMAGE_TOOL,
67702
- GENERATE_VIDEO_TOOL,
67703
- GET_VIDEO_STATUS_TOOL
67704
- ];
67705
67681
  var server = new Server(
67706
67682
  { name: "gemini-mcp", version: PACKAGE_VERSION },
67707
- { capabilities: { tools: {} } }
67683
+ { capabilities: { tools: { listChanged: true } } }
67708
67684
  );
67709
67685
  function inferMimeTypeFromSource(source) {
67710
67686
  const withoutQuery = source.split("?")[0].split("#")[0];
@@ -67769,8 +67745,212 @@ function textResult(text, structuredContent) {
67769
67745
  function formatModelLines(models, title) {
67770
67746
  return [title, ...models.map((model) => `- ${model}`), ""];
67771
67747
  }
67748
+ function hasApiKey() {
67749
+ return typeof process.env.GEMINI_API_KEY === "string" && process.env.GEMINI_API_KEY.length > 0;
67750
+ }
67751
+ function dedupeModels(models) {
67752
+ return [...new Set(models.filter(Boolean))];
67753
+ }
67754
+ function normalizeModelOptions(models, defaultModel) {
67755
+ return dedupeModels([defaultModel, ...models]);
67756
+ }
67757
+ function getFallbackModelCatalog() {
67758
+ return {
67759
+ analyzeModels: [...FALLBACK_ANALYZE_MEDIA_MODELS],
67760
+ imageModels: [...FALLBACK_IMAGE_MODELS],
67761
+ videoModels: [...FALLBACK_VIDEO_MODELS]
67762
+ };
67763
+ }
67764
+ function isAnalyzeModel(model) {
67765
+ const name = (model.name ?? "").replace("models/", "");
67766
+ const methods = model.supportedGenerationMethods ?? [];
67767
+ return name.startsWith("gemini-") && methods.includes("generateContent") && !name.includes("image") && !name.includes("embedding") && !name.includes("tts") && !name.includes("live");
67768
+ }
67769
+ function isImageGenerationModel(model) {
67770
+ const name = (model.name ?? "").replace("models/", "");
67771
+ return name.startsWith("imagen-");
67772
+ }
67773
+ function isVideoGenerationModel(model) {
67774
+ const name = (model.name ?? "").replace("models/", "");
67775
+ return name.startsWith("veo-");
67776
+ }
67777
+ async function loadLiveModelCatalog() {
67778
+ if (modelCatalogCache && modelCatalogCache.expiresAt > Date.now()) {
67779
+ return modelCatalogCache.catalog;
67780
+ }
67781
+ const resp = await fetch(
67782
+ `https://generativelanguage.googleapis.com/v1beta/models?key=${getApiKey()}&pageSize=200`
67783
+ );
67784
+ if (!resp.ok) {
67785
+ throw withStatusCode(
67786
+ new Error("The model listing service is unavailable."),
67787
+ resp.status
67788
+ );
67789
+ }
67790
+ const data = await resp.json();
67791
+ const models = data.models ?? [];
67792
+ const catalog = {
67793
+ analyzeModels: normalizeModelOptions(
67794
+ models.filter(isAnalyzeModel).map((model) => model.name.replace("models/", "")),
67795
+ DEFAULT_TEXT_MODEL
67796
+ ),
67797
+ imageModels: normalizeModelOptions(
67798
+ models.filter(isImageGenerationModel).map((model) => model.name.replace("models/", "")),
67799
+ DEFAULT_IMAGE_MODEL
67800
+ ),
67801
+ videoModels: normalizeModelOptions(
67802
+ models.filter(isVideoGenerationModel).map((model) => model.name.replace("models/", "")),
67803
+ DEFAULT_VIDEO_MODEL
67804
+ )
67805
+ };
67806
+ modelCatalogCache = {
67807
+ expiresAt: Date.now() + MODEL_CATALOG_CACHE_TTL_MS,
67808
+ catalog
67809
+ };
67810
+ return catalog;
67811
+ }
67812
+ async function getToolModelCatalog() {
67813
+ if (IS_MOCK || !hasApiKey()) {
67814
+ return getFallbackModelCatalog();
67815
+ }
67816
+ try {
67817
+ return await loadLiveModelCatalog();
67818
+ } catch {
67819
+ return getFallbackModelCatalog();
67820
+ }
67821
+ }
67822
+ async function getValidationModelCatalog() {
67823
+ if (IS_MOCK) {
67824
+ return getFallbackModelCatalog();
67825
+ }
67826
+ if (!hasApiKey()) {
67827
+ return null;
67828
+ }
67829
+ try {
67830
+ return await loadLiveModelCatalog();
67831
+ } catch {
67832
+ return null;
67833
+ }
67834
+ }
67835
+ function createModelSchema(models, defaultModel, description) {
67836
+ const options = normalizeModelOptions(models, defaultModel);
67837
+ return z3.enum(options).default(defaultModel).describe(description);
67838
+ }
67839
+ function buildAnalyzeMediaTool(catalog) {
67840
+ return {
67841
+ name: "analyze_media",
67842
+ description: "Analyze an image or audio file from a URL or file path.",
67843
+ inputSchema: z3.object({
67844
+ prompt: z3.string().min(1).default("Describe this media.").describe(
67845
+ "Optional question or instruction for the media."
67846
+ ),
67847
+ media_source: z3.string().min(1).describe("URL, file:// path, or local file path."),
67848
+ mime_type: z3.string().optional().describe("Optional MIME type override."),
67849
+ model: createModelSchema(
67850
+ catalog.analyzeModels,
67851
+ DEFAULT_TEXT_MODEL,
67852
+ "Supported analyze_media model."
67853
+ ),
67854
+ output_dir: z3.string().optional().describe(
67855
+ "Optional directory to save the analysis result as a .txt file."
67856
+ )
67857
+ }).strict(),
67858
+ outputSchema: AnalyzeMediaOutputSchema
67859
+ };
67860
+ }
67861
+ function buildGenerateImageTool(catalog) {
67862
+ return {
67863
+ name: "generate_image",
67864
+ description: "Generate an image using Imagen and optionally save it to disk.",
67865
+ inputSchema: z3.object({
67866
+ prompt: z3.string().min(1).describe("Description of the image to generate."),
67867
+ aspect_ratio: z3.enum(["1:1", "16:9", "9:16"]).default("1:1").describe("Aspect ratio for the image."),
67868
+ model: createModelSchema(
67869
+ catalog.imageModels,
67870
+ DEFAULT_IMAGE_MODEL,
67871
+ "Supported generate_image model."
67872
+ ),
67873
+ output_dir: z3.string().optional().describe("Directory to save the image file.")
67874
+ }).strict(),
67875
+ outputSchema: GenerateImageOutputSchema
67876
+ };
67877
+ }
67878
+ function buildGenerateVideoTool(catalog) {
67879
+ return {
67880
+ name: "generate_video",
67881
+ description: "Start an async video generation request using Veo.",
67882
+ inputSchema: z3.object({
67883
+ prompt: z3.string().min(1).describe("Description of the video to generate."),
67884
+ model: createModelSchema(
67885
+ catalog.videoModels,
67886
+ DEFAULT_VIDEO_MODEL,
67887
+ "Supported generate_video model."
67888
+ )
67889
+ }).strict(),
67890
+ outputSchema: GenerateVideoOutputSchema
67891
+ };
67892
+ }
67893
+ var TOOL_DEFINITIONS = [
67894
+ buildAnalyzeMediaTool(getFallbackModelCatalog()),
67895
+ LIST_MODELS_TOOL,
67896
+ buildGenerateImageTool(getFallbackModelCatalog()),
67897
+ buildGenerateVideoTool(getFallbackModelCatalog()),
67898
+ GET_VIDEO_STATUS_TOOL
67899
+ ];
67900
+ async function getToolDefinitions() {
67901
+ const catalog = await getToolModelCatalog();
67902
+ return [
67903
+ buildAnalyzeMediaTool(catalog),
67904
+ LIST_MODELS_TOOL,
67905
+ buildGenerateImageTool(catalog),
67906
+ buildGenerateVideoTool(catalog),
67907
+ GET_VIDEO_STATUS_TOOL
67908
+ ];
67909
+ }
67910
+ function createModelValidationError(toolName, model, supportedModels) {
67911
+ return createValidationError(
67912
+ "model",
67913
+ `"${model}" is not supported by ${toolName}. Verify today's date first (for example: Get-Date -Format o), then use list_models to inspect current models for this tool. Supported values: ${supportedModels.join(", ")}`
67914
+ );
67915
+ }
67916
+ async function validateRequestedModel(toolName, requestedModel, supportedModels) {
67917
+ if (toolName === "analyze_media") {
67918
+ if (requestedModel.startsWith("gemini-1.5-") || requestedModel.startsWith("gemini-2.0-")) {
67919
+ return createModelValidationError(
67920
+ toolName,
67921
+ requestedModel,
67922
+ supportedModels
67923
+ );
67924
+ }
67925
+ }
67926
+ const catalog = await getValidationModelCatalog();
67927
+ if (!catalog) {
67928
+ return null;
67929
+ }
67930
+ if (!supportedModels.includes(requestedModel)) {
67931
+ return createModelValidationError(
67932
+ toolName,
67933
+ requestedModel,
67934
+ supportedModels
67935
+ );
67936
+ }
67937
+ return null;
67938
+ }
67939
+ function isMissingGenerateContentModelError(message, model) {
67940
+ return message.includes(`models/${model} is not found`) || message.includes("not supported for generateContent");
67941
+ }
67772
67942
  async function analyzeMedia(args) {
67773
67943
  const { prompt, media_source, mime_type, model, output_dir } = args;
67944
+ const catalog = await getToolModelCatalog();
67945
+ const requestedModel = model || DEFAULT_TEXT_MODEL;
67946
+ const validationError = await validateRequestedModel(
67947
+ "analyze_media",
67948
+ requestedModel,
67949
+ catalog.analyzeModels
67950
+ );
67951
+ if (validationError) {
67952
+ return validationError;
67953
+ }
67774
67954
  let media;
67775
67955
  try {
67776
67956
  media = await resolveMediaSource(media_source, mime_type);
@@ -67785,28 +67965,43 @@ async function analyzeMedia(args) {
67785
67965
  responseText = `[Mock] Analyzed media (${media.mimeType})
67786
67966
  Source: ${media.label}
67787
67967
  Prompt: "${prompt}"
67788
- Model: ${model}
67968
+ Model: ${requestedModel}
67789
67969
 
67790
67970
  Mock response \u2014 no API call made.`;
67791
67971
  } else {
67792
- const response = await getAi().models.generateContent({
67793
- model: model || DEFAULT_TEXT_MODEL,
67794
- contents: [
67795
- {
67796
- role: "user",
67797
- parts: [
67798
- { text: prompt },
67799
- {
67800
- inlineData: {
67801
- data: media.data,
67802
- mimeType: media.mimeType
67972
+ try {
67973
+ const response = await getAi().models.generateContent({
67974
+ model: requestedModel,
67975
+ contents: [
67976
+ {
67977
+ role: "user",
67978
+ parts: [
67979
+ { text: prompt },
67980
+ {
67981
+ inlineData: {
67982
+ data: media.data,
67983
+ mimeType: media.mimeType
67984
+ }
67803
67985
  }
67804
- }
67805
- ]
67806
- }
67807
- ]
67808
- });
67809
- responseText = response.text || "No analysis generated.";
67986
+ ]
67987
+ }
67988
+ ]
67989
+ });
67990
+ responseText = response.text || "No analysis generated.";
67991
+ } catch (error48) {
67992
+ const errorMessage = getErrorMessage(error48);
67993
+ if (isMissingGenerateContentModelError(errorMessage, requestedModel)) {
67994
+ return createModelValidationError(
67995
+ "analyze_media",
67996
+ requestedModel,
67997
+ catalog.analyzeModels
67998
+ );
67999
+ }
68000
+ if (isErrorWithStatusCode(error48)) {
68001
+ return createApiError(error48.message, error48.statusCode);
68002
+ }
68003
+ return createInternalError(error48);
68004
+ }
67810
68005
  }
67811
68006
  let savedPath;
67812
68007
  if (output_dir) {
@@ -67842,59 +68037,53 @@ Mock response \u2014 no API call made.`;
67842
68037
  }
67843
68038
  async function listModels(args) {
67844
68039
  const { capability } = args;
67845
- if (IS_MOCK) {
67846
- const imageModels2 = [
67847
- DEFAULT_IMAGE_MODEL,
67848
- "imagen-3.0-ultra-generate-001",
67849
- "imagen-3.0-fast-generate-001"
67850
- ];
67851
- const videoModels2 = [DEFAULT_VIDEO_MODEL];
67852
- const lines2 = ["## Available Gemini Models", ""];
67853
- if (capability === "all" || capability === "image") {
67854
- lines2.push(
67855
- ...formatModelLines(imageModels2, "### \u{1F5BC}\uFE0F Image Generation")
67856
- );
67857
- }
67858
- if (capability === "all" || capability === "video") {
67859
- lines2.push(
67860
- ...formatModelLines(videoModels2, "### \u{1F3AC} Video Generation")
67861
- );
67862
- }
67863
- return textResult(lines2.join("\n").trim(), {
67864
- imageModels: capability === "video" ? [] : imageModels2,
67865
- videoModels: capability === "image" ? [] : videoModels2
67866
- });
67867
- }
67868
- const resp = await fetch(
67869
- `https://generativelanguage.googleapis.com/v1beta/models?key=${getApiKey()}&pageSize=200`
67870
- );
67871
- if (!resp.ok) {
67872
- return createApiError(
67873
- "The model listing service is unavailable.",
67874
- resp.status
68040
+ const catalog = await getToolModelCatalog();
68041
+ const analyzeModels = capability === "image" || capability === "video" ? [] : catalog.analyzeModels;
68042
+ const imageModels = capability === "analyze" || capability === "video" ? [] : catalog.imageModels;
68043
+ const videoModels = capability === "analyze" || capability === "image" ? [] : catalog.videoModels;
68044
+ const lines = ["## Available Gemini Models", ""];
68045
+ if (capability === "all" || capability === "analyze") {
68046
+ lines.push(
68047
+ ...formatModelLines(
68048
+ analyzeModels,
68049
+ "### \u{1F3A7} analyze_media (`generateContent`)"
68050
+ )
67875
68051
  );
67876
68052
  }
67877
- const data = await resp.json();
67878
- const models = (data.models ?? []).map((model) => ({
67879
- name: (model.name ?? "").replace("models/", ""),
67880
- methods: model.supportedGenerationMethods ?? []
67881
- }));
67882
- const imageModels = models.filter((model) => model.methods.includes("predict")).map((model) => model.name).filter(Boolean);
67883
- const videoModels = models.filter((model) => model.name.startsWith("veo")).map((model) => model.name).filter(Boolean);
67884
- const lines = ["## Available Gemini Models", ""];
67885
68053
  if (capability === "all" || capability === "image") {
67886
- lines.push(...formatModelLines(imageModels, "### \u{1F5BC}\uFE0F Image Generation"));
68054
+ lines.push(
68055
+ ...formatModelLines(
68056
+ imageModels,
68057
+ "### \u{1F5BC}\uFE0F generate_image (`generateImages`)"
68058
+ )
68059
+ );
67887
68060
  }
67888
68061
  if (capability === "all" || capability === "video") {
67889
- lines.push(...formatModelLines(videoModels, "### \u{1F3AC} Video Generation"));
68062
+ lines.push(
68063
+ ...formatModelLines(
68064
+ videoModels,
68065
+ "### \u{1F3AC} generate_video (`generateVideos`)"
68066
+ )
68067
+ );
67890
68068
  }
67891
68069
  return textResult(lines.join("\n").trim() || "No models found.", {
67892
- imageModels: capability === "video" ? [] : imageModels,
67893
- videoModels: capability === "image" ? [] : videoModels
68070
+ analyzeModels,
68071
+ imageModels,
68072
+ videoModels
67894
68073
  });
67895
68074
  }
67896
68075
  async function generateImage(args) {
67897
68076
  const { prompt, aspect_ratio, model, output_dir } = args;
68077
+ const catalog = await getToolModelCatalog();
68078
+ const requestedModel = model || DEFAULT_IMAGE_MODEL;
68079
+ const validationError = await validateRequestedModel(
68080
+ "generate_image",
68081
+ requestedModel,
68082
+ catalog.imageModels
68083
+ );
68084
+ if (validationError) {
68085
+ return validationError;
68086
+ }
67898
68087
  if (IS_MOCK) {
67899
68088
  const mockBase64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==";
67900
68089
  let savedPath;
@@ -67947,7 +68136,7 @@ Saved to: ${savedPath}` : ""}`
67947
68136
  };
67948
68137
  }
67949
68138
  const response = await getAi().models.generateImages({
67950
- model: model || DEFAULT_IMAGE_MODEL,
68139
+ model: requestedModel,
67951
68140
  prompt,
67952
68141
  config: { numberOfImages: 1, aspectRatio: aspect_ratio }
67953
68142
  });
@@ -68001,6 +68190,16 @@ ${savedPaths.join("\n")}` : ""}`
68001
68190
  }
68002
68191
  async function generateVideo(args) {
68003
68192
  const { prompt, model } = args;
68193
+ const catalog = await getToolModelCatalog();
68194
+ const requestedModel = model || DEFAULT_VIDEO_MODEL;
68195
+ const validationError = await validateRequestedModel(
68196
+ "generate_video",
68197
+ requestedModel,
68198
+ catalog.videoModels
68199
+ );
68200
+ if (validationError) {
68201
+ return validationError;
68202
+ }
68004
68203
  if (IS_MOCK) {
68005
68204
  const opName2 = "mock-operations/123456";
68006
68205
  return textResult(
@@ -68013,7 +68212,7 @@ Call get_video_status to check progress.`,
68013
68212
  );
68014
68213
  }
68015
68214
  const operation = await getAi().models.generateVideos({
68016
- model: model || DEFAULT_VIDEO_MODEL,
68215
+ model: requestedModel,
68017
68216
  prompt,
68018
68217
  config: { numberOfVideos: 1 }
68019
68218
  });
@@ -68140,26 +68339,30 @@ ${savedPaths.join("\n")}`);
68140
68339
  }
68141
68340
  async function handleToolCall(name, args) {
68142
68341
  try {
68342
+ const toolDefinitions = await getToolDefinitions();
68343
+ const analyzeMediaTool = toolDefinitions[0];
68344
+ const listModelsTool = toolDefinitions[1];
68345
+ const generateImageTool = toolDefinitions[2];
68346
+ const generateVideoTool = toolDefinitions[3];
68347
+ const getVideoStatusTool = toolDefinitions[4];
68143
68348
  switch (name) {
68144
68349
  case "analyze_media":
68145
68350
  return await analyzeMedia(
68146
- ANALYZE_MEDIA_TOOL.inputSchema.parse(args)
68351
+ analyzeMediaTool.inputSchema.parse(args)
68147
68352
  );
68148
68353
  case "list_models":
68149
- return await listModels(
68150
- LIST_MODELS_TOOL.inputSchema.parse(args)
68151
- );
68354
+ return await listModels(listModelsTool.inputSchema.parse(args));
68152
68355
  case "generate_image":
68153
68356
  return await generateImage(
68154
- GENERATE_IMAGE_TOOL.inputSchema.parse(args)
68357
+ generateImageTool.inputSchema.parse(args)
68155
68358
  );
68156
68359
  case "generate_video":
68157
68360
  return await generateVideo(
68158
- GENERATE_VIDEO_TOOL.inputSchema.parse(args)
68361
+ generateVideoTool.inputSchema.parse(args)
68159
68362
  );
68160
68363
  case "get_video_status":
68161
68364
  return await getVideoStatus(
68162
- GET_VIDEO_STATUS_TOOL.inputSchema.parse(args)
68365
+ getVideoStatusTool.inputSchema.parse(args)
68163
68366
  );
68164
68367
  default:
68165
68368
  throw new Error(`Tool not found: ${name}`);
@@ -68177,9 +68380,10 @@ async function handleToolCall(name, args) {
68177
68380
  return createInternalError(error48);
68178
68381
  }
68179
68382
  }
68180
- server.setRequestHandler(ListToolsRequestSchema, async () => ({
68181
- tools: [...TOOL_DEFINITIONS]
68182
- }));
68383
+ server.setRequestHandler(ListToolsRequestSchema, async () => {
68384
+ const tools = await getToolDefinitions();
68385
+ return { tools: [...tools] };
68386
+ });
68183
68387
  server.setRequestHandler(CallToolRequestSchema, async (request) => {
68184
68388
  const { name, arguments: args } = request.params;
68185
68389
  return handleToolCall(name, args);
@@ -68208,6 +68412,7 @@ if (isMainModule(import.meta.url)) {
68208
68412
  }
68209
68413
  export {
68210
68414
  TOOL_DEFINITIONS,
68415
+ getToolDefinitions,
68211
68416
  handleToolCall,
68212
68417
  runServer
68213
68418
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@fre4x/gemini",
3
- "version": "1.0.57",
3
+ "version": "1.0.60",
4
4
  "description": "A Gemini MCP server providing multimodal analysis and image/video generation.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",