gemini-deep-research-mcp 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gemini_deep_research_mcp/__init__.py +4 -0
- gemini_deep_research_mcp/__main__.py +5 -0
- gemini_deep_research_mcp/config.py +34 -0
- gemini_deep_research_mcp/extract.py +69 -0
- gemini_deep_research_mcp/gemini.py +57 -0
- gemini_deep_research_mcp/resolve.py +90 -0
- gemini_deep_research_mcp/server.py +136 -0
- gemini_deep_research_mcp-0.1.0.dist-info/METADATA +127 -0
- gemini_deep_research_mcp-0.1.0.dist-info/RECORD +12 -0
- gemini_deep_research_mcp-0.1.0.dist-info/WHEEL +5 -0
- gemini_deep_research_mcp-0.1.0.dist-info/entry_points.txt +2 -0
- gemini_deep_research_mcp-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
|
|
6
|
+
from dotenv import load_dotenv
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass(frozen=True)
class Settings:
    """Immutable runtime configuration for the MCP server.

    Built by :func:`load_settings` from environment variables (optionally
    loaded from a ``.env`` file).
    """

    # Gemini API key (GEMINI_API_KEY, falling back to GOOGLE_API_KEY; may be "").
    api_key: str
    # Model name (GEMINI_MODEL).
    model: str
    # Deep Research agent identifier (GEMINI_DEEP_RESEARCH_AGENT).
    deep_research_agent: str
    # Seconds between status polls while waiting for a research job.
    poll_interval_seconds: float = 10.0
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def load_settings() -> Settings:
    """Read configuration from the process environment (plus .env, if any)."""

    # Pull in .env values without clobbering variables already exported.
    load_dotenv(override=False)

    key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY") or ""
    agent = os.getenv(
        "GEMINI_DEEP_RESEARCH_AGENT", "deep-research-pro-preview-12-2025"
    )

    return Settings(
        api_key=key,
        model=os.getenv("GEMINI_MODEL", "gemini-3-pro-preview"),
        deep_research_agent=agent,
    )
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from typing import Any, Iterable, List, Optional
|
|
5
|
+
|
|
6
|
+
from .resolve import resolve_sources_in_text
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _strip_duplicate_references(text: str) -> str:
|
|
10
|
+
"""Remove the redundant 'References' section while keeping 'Sources'.
|
|
11
|
+
|
|
12
|
+
Gemini Deep Research reports contain:
|
|
13
|
+
1. Inline [cite: X] markers throughout the text
|
|
14
|
+
2. A 'References' section with brief citation titles (REDUNDANT)
|
|
15
|
+
3. A 'Sources:' section at the end with full URLs (KEEP THIS)
|
|
16
|
+
|
|
17
|
+
We remove the References section since:
|
|
18
|
+
- The inline [cite: X] markers already show where info comes from
|
|
19
|
+
- The Sources section has the actual clickable URLs
|
|
20
|
+
- The References section just has brief titles without URLs
|
|
21
|
+
|
|
22
|
+
This typically saves ~1-2KB per report.
|
|
23
|
+
"""
|
|
24
|
+
# Match "### References" or "References" section with cite entries
|
|
25
|
+
# Format: [cite: X] Title. Description.
|
|
26
|
+
pattern = r'\n+(?:#{1,3}\s*)?References\s*\n(?:\[cite:\s*\d+\][^\n]*\n?)+'
|
|
27
|
+
cleaned = re.sub(pattern, '\n', text, flags=re.IGNORECASE)
|
|
28
|
+
return cleaned.strip()
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _get(obj: Any, key: str, default: Any = None) -> Any:
|
|
32
|
+
if obj is None:
|
|
33
|
+
return default
|
|
34
|
+
if isinstance(obj, dict):
|
|
35
|
+
return obj.get(key, default)
|
|
36
|
+
return getattr(obj, key, default)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def outputs_to_text(outputs: Optional[Iterable[Any]], *, include_citations: bool = True) -> str:
    """Best-effort flattening of Interaction.outputs into one readable string.

    Entries without a non-blank string ``text`` field are skipped. The
    redundant References section is stripped, and grounding redirect URLs
    are resolved when *include_citations* is true.
    """

    if not outputs:
        return ""

    chunks: List[str] = [
        candidate
        for item in outputs
        if isinstance(candidate := _get(item, "text"), str) and candidate.strip()
    ]

    report = _strip_duplicate_references("\n\n".join(chunks).strip())

    if include_citations:
        # Swap Gemini grounding redirect links for their real destinations.
        report = resolve_sources_in_text(report)

    return report
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def interaction_to_result(interaction: Any, *, include_citations: bool = True) -> dict[str, Any]:
    """Summarize an Interaction object as a JSON-serializable dict."""

    body = outputs_to_text(
        _get(interaction, "outputs"), include_citations=include_citations
    )
    return {"status": _get(interaction, "status"), "text": body}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
from typing import Any, Optional
|
|
5
|
+
|
|
6
|
+
from google import genai
|
|
7
|
+
|
|
8
|
+
from .config import Settings
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def create_client(settings: Settings) -> genai.Client:
    """Build a Gemini client from settings, failing fast on a missing key."""
    # The SDK also honors the GOOGLE_API_KEY env var, but passing the key
    # explicitly keeps configuration in one place.
    if settings.api_key:
        return genai.Client(api_key=settings.api_key)
    raise ValueError(
        "Missing GEMINI_API_KEY (or GOOGLE_API_KEY fallback). "
        "Set it in your environment or .env."
    )
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def start_deep_research(client: genai.Client, *, prompt: str, agent: str) -> Any:
    """Kick off a background Deep Research interaction; returns the job object."""
    # Deep Research runs in the background, and background=True requires store=True.
    request = {
        "input": prompt,
        "agent": agent,
        "background": True,
        "store": True,
    }
    return client.interactions.create(**request)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def get_interaction(client: genai.Client, job_id: str) -> Any:
    """Fetch the current state of the interaction identified by *job_id*."""
    return client.interactions.get(job_id)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def poll_until_terminal(
    client: genai.Client,
    *,
    job_id: str,
    timeout_seconds: float,
    poll_interval_seconds: float,
) -> Any:
    """Poll an interaction until it reaches a terminal state or times out.

    At least one fetch is always performed. On timeout, the most recent
    (possibly still non-terminal) interaction snapshot is returned rather
    than raising — the caller inspects ``status`` to tell the difference.
    """
    terminal_states = {"completed", "failed", "cancelled"}
    deadline = time.monotonic() + max(0.0, timeout_seconds)

    while True:
        snapshot = get_interaction(client, job_id)

        # The SDK may hand back either an object or a plain dict.
        if isinstance(snapshot, dict):
            status = snapshot.get("status")
        else:
            status = getattr(snapshot, "status", None)

        if status in terminal_states:
            return snapshot
        if time.monotonic() >= deadline:
            return snapshot

        time.sleep(poll_interval_seconds)
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
"""Resolve Gemini grounding redirect URLs to actual source URLs."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import logging
|
|
5
|
+
import re
|
|
6
|
+
from functools import lru_cache
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
import httpx
|
|
10
|
+
|
|
11
|
+
# Module logger; messages surface through the host application's config.
logger = logging.getLogger(__name__)

# Pattern to match Gemini grounding redirect URLs, e.g.
# https://vertexaisearch.cloud.google.com/grounding-api-redirect/<token>
REDIRECT_URL_PATTERN = re.compile(
    r'https://vertexaisearch\.cloud\.google\.com/grounding-api-redirect/[A-Za-z0-9_-]+'
)

# HTTP timeout for resolving redirects (seconds)
RESOLVE_TIMEOUT = 10.0
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@lru_cache(maxsize=256)
def resolve_redirect_url(url: str) -> Optional[str]:
    """Follow a grounding redirect URL and return its final destination.

    Results are memoized via ``lru_cache`` — note this includes failures
    (cached as None), so each distinct URL is fetched at most once per
    process.

    Args:
        url: A Gemini grounding redirect URL

    Returns:
        The resolved destination URL, or None if resolution fails
    """
    if not url or 'grounding-api-redirect' not in url:
        return None

    try:
        # HEAD with follow_redirects=False lets us capture the Location
        # header of the first 3xx hop without downloading any body.
        with httpx.Client(timeout=RESOLVE_TIMEOUT, follow_redirects=False) as client:
            response = client.head(url)

            # The redirect URL normally answers with a 3xx + Location header.
            if response.status_code in (301, 302, 303, 307, 308):
                location = response.headers.get('location')
                if location:
                    # Lazy %-args: the message is only formatted when DEBUG
                    # logging is actually enabled.
                    logger.debug("Resolved %s... -> %s", url[:60], location)
                    return location

            # Some servers ignore HEAD; fall back to a GET that follows the
            # redirect chain all the way to its end.
            response = client.get(url, follow_redirects=True)
            final_url = str(response.url)

            # Only report success if we actually left the redirect service.
            if final_url != url and 'grounding-api-redirect' not in final_url:
                logger.debug("Resolved %s... -> %s", url[:60], final_url)
                return final_url

    except httpx.TimeoutException:
        logger.warning("Timeout resolving URL: %s...", url[:80])
    except httpx.HTTPError as e:
        logger.warning("HTTP error resolving URL: %s", e)
    except Exception as e:
        # Broad catch is deliberate: URL resolution is best-effort and must
        # never break report post-processing.
        logger.warning("Unexpected error resolving URL: %s", e)

    return None
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def resolve_sources_in_text(text: str) -> str:
    """Replace grounding redirect URLs in *text* with their destinations.

    Scans for Gemini grounding redirect URLs and substitutes the resolved
    destination URL wherever resolution succeeds; unresolvable URLs are
    left untouched.

    Args:
        text: The text containing potential redirect URLs

    Returns:
        Text with redirect URLs replaced by resolved URLs where possible
    """
    # Cheap containment check skips the regex pass on citation-free text.
    if not text or 'grounding-api-redirect' not in text:
        return text

    def _swap(match: re.Match) -> str:
        redirect = match.group(0)
        target = resolve_redirect_url(redirect)
        # Fall back to the original URL when resolution failed.
        return target or redirect

    return REDIRECT_URL_PATTERN.sub(_swap, text)
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import sys
|
|
5
|
+
from typing import Annotated, Any, TypedDict
|
|
6
|
+
|
|
7
|
+
from mcp.server.fastmcp import FastMCP
|
|
8
|
+
from mcp.types import CallToolResult, ToolAnnotations
|
|
9
|
+
|
|
10
|
+
from .config import Settings, load_settings
|
|
11
|
+
from .extract import interaction_to_result
|
|
12
|
+
from .gemini import create_client, poll_until_terminal, start_deep_research
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Module logger; all output is routed to stderr by _configure_logging().
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class DeepResearchOutput(TypedDict):
    """Structured payload returned by the `gemini_deep_research` tool."""

    # Terminal job state: "completed", "failed", "cancelled", or "unknown".
    status: str
    # The synthesized research report text ("" when unavailable).
    report_text: str
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _configure_logging() -> None:
|
|
24
|
+
# IMPORTANT: stdout is reserved for MCP protocol.
|
|
25
|
+
logging.basicConfig(
|
|
26
|
+
level=logging.INFO,
|
|
27
|
+
format="%(asctime)s %(levelname)s %(name)s - %(message)s",
|
|
28
|
+
stream=sys.stderr,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# Configure logging at import time so nothing writes to stdout before the
# server starts speaking MCP.
_configure_logging()


mcp = FastMCP("Gemini Deep Research MCP")
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _get_client_and_settings() -> tuple[Any, Settings]:
    """Load settings and build a Gemini client in one step."""
    settings = load_settings()
    return create_client(settings), settings
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _require_nonempty(value: Optional[str], *, field: str) -> str:
|
|
45
|
+
if value is None:
|
|
46
|
+
raise ValueError(f"`{field}` is required")
|
|
47
|
+
value = str(value)
|
|
48
|
+
if not value.strip():
|
|
49
|
+
raise ValueError(f"`{field}` is required")
|
|
50
|
+
return value
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
# Default timeout for research
|
|
54
|
+
_DEFAULT_TIMEOUT_SECONDS = 1200.0
|
|
55
|
+
|
|
56
|
+
_DEEP_RESEARCH_DESCRIPTION = """
|
|
57
|
+
Conduct comprehensive web research using Gemini's Deep Research Agent.
|
|
58
|
+
|
|
59
|
+
When to use this tool:
|
|
60
|
+
- Researching complex topics requiring multi-source analysis
|
|
61
|
+
- Need synthesized information from the web
|
|
62
|
+
- Require fact-checking and cross-referencing of information
|
|
63
|
+
|
|
64
|
+
Parameters:
|
|
65
|
+
- `prompt`: Your research question or topic (required)
|
|
66
|
+
- `include_citations`: Whether to include source URLs in the report (default: true)
|
|
67
|
+
|
|
68
|
+
Returns:
|
|
69
|
+
- `status`: Final state (completed, failed, cancelled)
|
|
70
|
+
- `report_text`: The synthesized research report with findings
|
|
71
|
+
- `sources`: List of sources used in the research (if enabled)
|
|
72
|
+
|
|
73
|
+
Notes:
|
|
74
|
+
- This tool blocks until research completes (typically 10-20 minutes)
|
|
75
|
+
""".strip()
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
@mcp.tool(
    title="Gemini Deep Research",
    description=_DEEP_RESEARCH_DESCRIPTION,
    annotations=ToolAnnotations(
        openWorldHint=True,
        readOnlyHint=False,
        idempotentHint=False,
    ),
    structured_output=True,
)
def gemini_deep_research(
    prompt: str,
    include_citations: bool = True,
) -> Annotated[CallToolResult, DeepResearchOutput]:
    """Conduct deep research on a topic and wait for the complete report.

    Blocks until the research job reaches a terminal state or the default
    timeout (_DEFAULT_TIMEOUT_SECONDS) elapses; on timeout, the job's last
    observed state is reported as-is (see poll_until_terminal).

    Args:
        prompt: The research question or topic. Must be non-blank.
        include_citations: When True, grounding redirect URLs in the report
            are resolved to their real destinations.

    Returns:
        A CallToolResult whose structuredContent holds a DeepResearchOutput
        ({"status": ..., "report_text": ...}).

    Raises:
        ValueError: If `prompt` is missing or blank.
        RuntimeError: If the SDK does not return a job id for the new job.
    """

    if not prompt or not prompt.strip():
        raise ValueError("`prompt` is required")

    client, settings = _get_client_and_settings()

    # Start the deep research job
    initial = start_deep_research(client, prompt=prompt.strip(), agent=settings.deep_research_agent)
    # The SDK may return either an object or a plain dict; handle both.
    job_id = getattr(initial, "id", None) or (
        initial.get("id") if isinstance(initial, dict) else None
    )
    if not job_id:
        raise RuntimeError("Gemini SDK did not return a research job id.")

    # Wait for completion
    interaction = poll_until_terminal(
        client,
        job_id=job_id,
        timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
        poll_interval_seconds=settings.poll_interval_seconds,
    )
    result = interaction_to_result(interaction, include_citations=include_citations)
    status = result.get("status")
    if status is None:
        # e.g. the job never reported a status before the timeout.
        status = "unknown"
    payload: DeepResearchOutput = {
        "status": str(status),
        "report_text": result.get("text", ""),
    }

    # IMPORTANT: when returning a dict from a structured tool, the MCP lowlevel server
    # will also serialize it to JSON text and include it in `content`, which some
    # clients then print in addition to `structuredContent` (leading to duplicate
    # outputs). Returning CallToolResult avoids that double-serialization.
    return CallToolResult(content=[], structuredContent=payload, isError=False)
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def main() -> None:
    """Console entry point: serve the MCP server over STDIO."""
    # Run over STDIO.
    logger.info("Starting Gemini Deep Research MCP server (stdio)")
    mcp.run()
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: gemini-deep-research-mcp
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: MCP server exposing Gemini Deep Research (Interactions API) tools
|
|
5
|
+
Author-email: Ayush <ayusin439@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/bharatvansh/gemini-deep-research-mcp
|
|
8
|
+
Project-URL: Repository, https://github.com/bharatvansh/gemini-deep-research-mcp
|
|
9
|
+
Project-URL: Issues, https://github.com/bharatvansh/gemini-deep-research-mcp/issues
|
|
10
|
+
Keywords: mcp,gemini,deep-research,ai,google,model-context-protocol,research,agent
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Operating System :: OS Independent
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Requires-Python: >=3.10
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
Requires-Dist: mcp>=1.2.0
|
|
23
|
+
Requires-Dist: google-genai>=0.6.0
|
|
24
|
+
Requires-Dist: python-dotenv>=1.0.1
|
|
25
|
+
Requires-Dist: httpx>=0.27.0
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest>=8.0.0; extra == "dev"
|
|
28
|
+
|
|
29
|
+
# Gemini Deep Research MCP
|
|
30
|
+
|
|
31
|
+
An MCP server that exposes Gemini's **Deep Research Agent** for comprehensive web research.
|
|
32
|
+
|
|
33
|
+
## Quick Start
|
|
34
|
+
|
|
35
|
+
```bash
|
|
36
|
+
pip install gemini-deep-research-mcp
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
Set your API key:
|
|
40
|
+
```bash
|
|
41
|
+
export GEMINI_API_KEY="your-api-key" # macOS/Linux
|
|
42
|
+
set GEMINI_API_KEY=your-api-key # Windows CMD
|
|
43
|
+
$env:GEMINI_API_KEY="your-api-key" # Windows PowerShell
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
## MCP Client Setup
|
|
47
|
+
|
|
48
|
+
### VS Code (Copilot)
|
|
49
|
+
|
|
50
|
+
Add to your VS Code settings or `.vscode/mcp.json`:
|
|
51
|
+
|
|
52
|
+
```json
|
|
53
|
+
{
|
|
54
|
+
"mcp": {
|
|
55
|
+
"servers": {
|
|
56
|
+
"gemini-deep-research": {
|
|
57
|
+
"command": "gemini-deep-research-mcp",
|
|
58
|
+
"env": {
|
|
59
|
+
"GEMINI_API_KEY": "your-api-key"
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Claude Desktop
|
|
68
|
+
|
|
69
|
+
Add to `claude_desktop_config.json`:
|
|
70
|
+
|
|
71
|
+
```json
|
|
72
|
+
{
|
|
73
|
+
"mcpServers": {
|
|
74
|
+
"gemini-deep-research": {
|
|
75
|
+
"command": "gemini-deep-research-mcp",
|
|
76
|
+
"env": {
|
|
77
|
+
"GEMINI_API_KEY": "your-api-key"
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
> **Windows**: If `gemini-deep-research-mcp` isn't in PATH, use full path: `C:\\Users\\YOU\\...\\python.exe` with args `["-m", "gemini_deep_research_mcp"]`
|
|
85
|
+
|
|
86
|
+
## Tool: `gemini_deep_research`
|
|
87
|
+
|
|
88
|
+
Conducts comprehensive web research using Gemini's Deep Research Agent. Blocks until research completes (typically 10-20 minutes).
|
|
89
|
+
|
|
90
|
+
**When to use:**
|
|
91
|
+
- Complex topics requiring multi-source analysis
|
|
92
|
+
- Synthesized information from the web
|
|
93
|
+
- Fact-checking and cross-referencing
|
|
94
|
+
|
|
95
|
+
**Parameters:**
|
|
96
|
+
|
|
97
|
+
| Parameter | Type | Required | Default | Description |
|
|
98
|
+
|-----------|------|----------|---------|-------------|
|
|
99
|
+
| `prompt` | string | ✓ | — | Your research question or topic |
|
|
100
|
+
| `include_citations` | boolean | | `true` | Include resolved source URLs |
|
|
101
|
+
|
|
102
|
+
**Output:**
|
|
103
|
+
|
|
104
|
+
| Field | Description |
|
|
105
|
+
|-------|-------------|
|
|
106
|
+
| `status` | `completed`, `failed`, or `cancelled` |
|
|
107
|
+
| `report_text` | Synthesized research report |
|
|
108
|
+
|
|
109
|
+
## Configuration
|
|
110
|
+
|
|
111
|
+
| Variable | Required | Default | Description |
|
|
112
|
+
|----------|----------|---------|-------------|
|
|
113
|
+
| `GEMINI_API_KEY` | ✓ | — | Your Gemini API key |
|
|
114
|
+
| `GEMINI_DEEP_RESEARCH_AGENT` | | `deep-research-pro-preview-12-2025` | Deep Research agent to use |
|
|
115
|
+
|
|
116
|
+
## Development
|
|
117
|
+
|
|
118
|
+
```bash
|
|
119
|
+
git clone https://github.com/bharatvansh/gemini-deep-research-mcp.git
|
|
120
|
+
cd gemini-deep-research-mcp
|
|
121
|
+
pip install -e .[dev]
|
|
122
|
+
pytest
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
## License
|
|
126
|
+
|
|
127
|
+
MIT
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
gemini_deep_research_mcp/__init__.py,sha256=b2shbKOn0cCFFhUSy85dPhwl6ERjORIn1-t6vRrPesA,96
|
|
2
|
+
gemini_deep_research_mcp/__main__.py,sha256=kXwGR0h4I_1BdqTv1YV6f86NLM1OwukbORECfPc9OPg,65
|
|
3
|
+
gemini_deep_research_mcp/config.py,sha256=7Cbk8EnW7VhaqkVwoX4sW2jekccQRdLraM89LoL11W4,827
|
|
4
|
+
gemini_deep_research_mcp/extract.py,sha256=lBFB2GfpHdPQXRszV6yulfSTa5cU3ApwU0KwwGg3450,2300
|
|
5
|
+
gemini_deep_research_mcp/gemini.py,sha256=-4wptf99qVcukl0vsohsQQDBwMPq-B7LzZHqjpPZSqg,1572
|
|
6
|
+
gemini_deep_research_mcp/resolve.py,sha256=LmSuTGOe1-qjToKdQL6pcpNlzoMQwn43_uzX1KSQvOo,3105
|
|
7
|
+
gemini_deep_research_mcp/server.py,sha256=lAolJPB-fV4oN9W0Ldq4VQYkXt86Zxd3SVInwT3QsNk,3991
|
|
8
|
+
gemini_deep_research_mcp-0.1.0.dist-info/METADATA,sha256=_CahiTs3Q1OqndH2hXmMuy7FYwqKcwtlCJlBNw6N4sc,3503
|
|
9
|
+
gemini_deep_research_mcp-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
10
|
+
gemini_deep_research_mcp-0.1.0.dist-info/entry_points.txt,sha256=nE5DP4kZ2RG5LB1Ied-hykIRmBei2GJw0wy_PijepRE,82
|
|
11
|
+
gemini_deep_research_mcp-0.1.0.dist-info/top_level.txt,sha256=sk90gOv-N7MHdyHhJE6N-fLVHrtFPfme8m1opjwXoMM,25
|
|
12
|
+
gemini_deep_research_mcp-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
gemini_deep_research_mcp
|