@amplify-studio/open-mcp 0.9.0 → 0.10.0

This diff shows the content changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/resources.js CHANGED
@@ -8,15 +8,13 @@ export function createConfigResource() {
  description: "MCP server for SearXNG integration via Gateway API"
  },
  environment: {
- gatewayUrl: process.env.GATEWAY_URL || "http://115.190.91.253:80 (default)",
+ gatewayUrl: process.env.GATEWAY_URL || "Not configured",
  hasAuth: !!(process.env.AUTH_USERNAME && process.env.AUTH_PASSWORD),
- hasProxy: !!(process.env.HTTP_PROXY || process.env.HTTPS_PROXY || process.env.http_proxy || process.env.https_proxy),
- hasNoProxy: !!(process.env.NO_PROXY || process.env.no_proxy),
  nodeVersion: process.version,
  currentLogLevel: getCurrentLogLevel()
  },
  capabilities: {
- tools: ["searxng_web_search", "web_url_read"],
+ tools: ["searxng_web_search", "web_url_read", "image_understand", "image_generate"],
  logging: true,
  resources: true,
  transports: process.env.MCP_HTTP_PORT ? ["stdio", "http"] : ["stdio"]
@@ -33,30 +31,46 @@ This is a Model Context Protocol (MCP) server that provides web search capabilit
  ## Available Tools

  ### 1. searxng_web_search
- Performs web searches using the configured Gateway API.
+ Performs web searches using the configured Gateway API (Firecrawl search).

  **Parameters:**
  - \`query\` (required): The search query string
- - \`pageno\` (optional): Page number (default: 1)
- - \`time_range\` (optional): Filter by time - "day", "month", or "year"
- - \`language\` (optional): Language code like "en", "fr", "de" (default: "all")
- - \`safesearch\` (optional): Safe search level - "0" (none), "1" (moderate), "2" (strict)
+ - \`limit\` (optional): Maximum number of results (default: 10, max: 100)

  ### 2. web_url_read
  Reads and converts web page content to Markdown format via Gateway API.

  **Parameters:**
  - \`url\` (required): The URL to fetch and convert
+ - \`startChar\` (optional): Starting character position (default: 0)
+ - \`maxLength\` (optional): Maximum number of characters to return
+ - \`section\` (optional): Extract content under a specific heading
+ - \`paragraphRange\` (optional): Return specific paragraph ranges (e.g., "1-5", "3", "10-")
+ - \`readHeadings\` (optional): Return only a list of headings
+
+ ### 3. image_understand
+ Analyze images, videos, and documents using Zhipu GLM-4.6V-Flash model.
+
+ **Parameters:**
+ - \`file\` (required): File path, URL, or base64 data
+ - \`prompt\` (required): Question or instruction for analysis
+ - \`thinking\` (optional): Enable deep thinking mode (default: true)
+
+ ### 4. image_generate
+ Generate images from text using Zhipu Cogview-3-Flash model.
+
+ **Parameters:**
+ - \`prompt\` (required): Text description of the image to generate
+ - \`size\` (optional): Image size (default: "1024x1024")

  ## Configuration

- ### Optional Environment Variables
- - \`GATEWAY_URL\`: URL of the Gateway API (default: http://115.190.91.253:80)
+ ### Required Environment Variables
+ - \`GATEWAY_URL\`: URL of the Gateway API

  ### Optional Environment Variables
+ - \`ZHIPUAI_API_KEY\`: API key for image tools (understand/generate)
  - \`AUTH_USERNAME\` & \`AUTH_PASSWORD\`: Basic authentication for Gateway
- - \`HTTP_PROXY\` / \`HTTPS_PROXY\`: Proxy server configuration
- - \`NO_PROXY\` / \`no_proxy\`: Comma-separated list of hosts to bypass proxy
  - \`MCP_HTTP_PORT\`: Enable HTTP transport on specified port

  ## Transport Modes
@@ -69,10 +83,10 @@ RESTful HTTP transport for web applications. Set \`MCP_HTTP_PORT\` to enable.

  ## Usage Examples

- ### Search for recent news
+ ### Search the web
  \`\`\`
  Tool: searxng_web_search
- Args: {"query": "latest AI developments", "time_range": "day"}
+ Args: {"query": "latest AI developments", "limit": 10}
  \`\`\`

  ### Read a specific article
@@ -81,15 +95,28 @@ Tool: web_url_read
  Args: {"url": "https://example.com/article"}
  \`\`\`

+ ### Analyze an image
+ \`\`\`
+ Tool: image_understand
+ Args: {"file": "https://example.com/image.jpg", "prompt": "Describe this image"}
+ \`\`\`
+
+ ### Generate an image
+ \`\`\`
+ Tool: image_generate
+ Args: {"prompt": "A sunset over mountains", "size": "1024x1024"}
+ \`\`\`
+
  ## Troubleshooting

  1. **Network errors**: Check if Gateway API is running and accessible
  2. **Empty results**: Try different search terms or check Gateway service
  3. **Timeout errors**: The server has a 10-second timeout for URL fetching
+ 4. **Image tools not working**: Ensure ZHIPUAI_API_KEY is set

  Use logging level "debug" for detailed request information.

  ## Current Configuration
- See the "Current Configuration" resource for live settings.
+ See the "config://server-config" resource for live settings.
  `;
  }
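
For orientation, here is a rough client-side sketch of exercising the two new tools and the renamed configuration resource. It assumes the standard `@modelcontextprotocol/sdk` client API; the launch command, entry-point path, and environment values are placeholders, not part of this diff.

```ts
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Hypothetical launch command; the package's actual bin/entry point is not shown in this diff.
const transport = new StdioClientTransport({
  command: "node",
  args: ["dist/index.js"],
  env: { GATEWAY_URL: "http://your-gateway.com:80", ZHIPUAI_API_KEY: "..." },
});

const client = new Client({ name: "example-client", version: "0.0.1" });
await client.connect(transport);

// Resource URI taken from the updated docs ("config://server-config").
const config = await client.readResource({ uri: "config://server-config" });

// New tools added in 0.10.0.
const analysis = await client.callTool({
  name: "image_understand",
  arguments: { file: "https://example.com/image.jpg", prompt: "Describe this image" },
});
const generated = await client.callTool({
  name: "image_generate",
  arguments: { prompt: "A sunset over mountains", size: "1024x1024" },
});

console.log(config, analysis, generated);
```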
package/dist/search.js CHANGED
@@ -1,19 +1,20 @@
- import { createProxyAgent } from "./proxy.js";
  import { logMessage } from "./logging.js";
- import { createConfigurationError, createNetworkError, createServerError } from "./error-handler.js";
+ import { createConfigurationError, createNetworkError, createServerError, createTimeoutError, GATEWAY_URL_REQUIRED_MESSAGE } from "./error-handler.js";
  export async function performWebSearch(server, query, limit = 10) {
  const startTime = Date.now();
  logMessage(server, "info", `Starting web search: "${query}" (limit: ${limit})`);
- const gatewayUrl = process.env.GATEWAY_URL || "http://115.190.91.253:80";
- // Validate gateway URL
- let parsedUrl;
+ const gatewayUrl = process.env.GATEWAY_URL;
+ if (!gatewayUrl) {
+ throw createConfigurationError(GATEWAY_URL_REQUIRED_MESSAGE);
+ }
+ // Build and validate the endpoint URL
+ let searchUrl;
  try {
- parsedUrl = new URL(gatewayUrl);
+ searchUrl = new URL('/api/firecrawl-search', gatewayUrl);
  }
- catch (error) {
- throw createConfigurationError(`Invalid GATEWAY_URL format: ${gatewayUrl}. Use format: http://115.190.91.253:80`);
+ catch {
+ throw createConfigurationError(`Invalid GATEWAY_URL format: ${gatewayUrl}. Use format: http://your-gateway.com:80`);
  }
- const url = new URL('/api/firecrawl-search', parsedUrl);
  // Prepare request body
  const requestBody = {
  query,
@@ -27,11 +28,6 @@ export async function performWebSearch(server, query, limit = 10) {
  },
  body: JSON.stringify(requestBody)
  };
- // Add proxy dispatcher if configured
- const proxyAgent = createProxyAgent(url.toString());
- if (proxyAgent) {
- requestOptions.dispatcher = proxyAgent;
- }
  // Add basic authentication if configured
  const username = process.env.AUTH_USERNAME;
  const password = process.env.AUTH_PASSWORD;
@@ -44,21 +40,25 @@ export async function performWebSearch(server, query, limit = 10) {
  if (userAgent) {
  requestOptions.headers['User-Agent'] = userAgent;
  }
+ // Add timeout to prevent hanging
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), 30000); // 30 second timeout
+ requestOptions.signal = controller.signal;
  // Fetch with error handling
  let response;
  try {
- logMessage(server, "info", `Making POST request to: ${url.toString()}`);
- response = await fetch(url.toString(), requestOptions);
+ logMessage(server, "info", `Making POST request to: ${searchUrl.toString()}`);
+ response = await fetch(searchUrl.toString(), requestOptions);
+ clearTimeout(timeoutId);
  }
  catch (error) {
- logMessage(server, "error", `Network error during search request: ${error.message}`, { query, url: url.toString() });
- const context = {
- url: url.toString(),
- gatewayUrl,
- proxyAgent: !!proxyAgent,
- username
- };
- throw createNetworkError(error, context);
+ clearTimeout(timeoutId);
+ if (error.name === 'AbortError') {
+ logMessage(server, "error", `Request timeout after 30s: ${searchUrl.toString()}`);
+ throw createTimeoutError(30000, searchUrl.toString());
+ }
+ logMessage(server, "error", `Network error during search request: ${error.message}`, { query, url: searchUrl.toString() });
+ throw createNetworkError(error, { url: searchUrl.toString(), gatewayUrl });
  }
  if (!response.ok) {
  let responseBody;
@@ -68,11 +68,7 @@ export async function performWebSearch(server, query, limit = 10) {
  catch {
  responseBody = '[Could not read response body]';
  }
- const context = {
- url: url.toString(),
- gatewayUrl
- };
- throw createServerError(response.status, response.statusText, responseBody, context);
+ throw createServerError(response.status, response.statusText, responseBody, { url: searchUrl.toString(), gatewayUrl });
  }
  // Parse JSON response
  let data;
@@ -87,7 +83,6 @@ export async function performWebSearch(server, query, limit = 10) {
  catch {
  responseText = '[Could not read response text]';
  }
- const context = { url: url.toString() };
  throw new Error(`Failed to parse JSON response: ${responseText}`);
  }
  // Handle Firecrawl API response format: {success: true, data: [...]}
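
A minimal sketch of calling the updated `performWebSearch` directly, assuming an MCP `Server` instance is already available for logging; the gateway host is a placeholder, and as of 0.10.0 the call throws a configuration error when `GATEWAY_URL` is unset.

```ts
import type { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { performWebSearch } from "./search.js";

declare const server: Server; // assumed to be created elsewhere

// GATEWAY_URL is now required; the hard-coded default endpoint was removed.
process.env.GATEWAY_URL = "http://your-gateway.com:80"; // placeholder host

// Requests that exceed 30 seconds are aborted via AbortController and surface as a timeout error.
const results = await performWebSearch(server, "latest AI developments", 10);
console.log(results);
```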
@@ -0,0 +1,5 @@
+ import type { ImageGenerateArgs } from '../types.js';
+ /**
+ * Main function: Generate image from text prompt
+ */
+ export declare function generateImage(args: ImageGenerateArgs): Promise<string>;
@@ -0,0 +1,29 @@
+ import { callImageGenAPI } from '../api/zhipu.js';
+ const VALID_SIZES = new Set([
+ '1024x1024', '768x1344', '864x1152', '1344x768',
+ '1152x864', '1440x720', '720x1440'
+ ]);
+ const MAX_PROMPT_LENGTH = 4000;
+ /**
+ * Validate image generation parameters
+ */
+ function validateImageGenArgs(args) {
+ const { prompt, size } = args;
+ if (!prompt?.trim()) {
+ throw new Error('Prompt is required');
+ }
+ if (prompt.length > MAX_PROMPT_LENGTH) {
+ throw new Error(`Prompt is too long (max ${MAX_PROMPT_LENGTH} characters)`);
+ }
+ if (size && !VALID_SIZES.has(size)) {
+ throw new Error(`Invalid size: ${size}. Must be one of: ${Array.from(VALID_SIZES).join(', ')}`);
+ }
+ }
+ /**
+ * Main function: Generate image from text prompt
+ */
+ export async function generateImage(args) {
+ validateImageGenArgs(args);
+ const { prompt, size = '1024x1024' } = args;
+ return callImageGenAPI(prompt, size);
+ }
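
Usage of the new generation helper is a one-liner; a small sketch follows. The import path is assumed (the added file's name is not shown in this diff), and the Zhipu call behind `callImageGenAPI` needs `ZHIPUAI_API_KEY` per the updated docs.

```ts
// Path is hypothetical; adjust to wherever the added module lives under dist/.
import { generateImage } from "./tools/image-generate.js";
import type { ImageGenerateArgs } from "./types.js";

const args: ImageGenerateArgs = { prompt: "A sunset over mountains", size: "768x1344" };

// Throws before any API call if the prompt is empty, longer than 4000 characters,
// or the size is not one of the seven values in VALID_SIZES.
const result = await generateImage(args);
console.log(result);
```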
@@ -0,0 +1,5 @@
+ import type { ImageOCRArgs } from '../types.js';
+ /**
+ * Main function: Extract text from image using OCR
+ */
+ export declare function extractTextFromImage(args: ImageOCRArgs): Promise<string>;
@@ -0,0 +1,102 @@
+ import { readFile } from 'node:fs/promises';
+ const SUPPORTED_FORMATS = new Set(['png', 'jpg', 'jpeg', 'bmp', 'gif']);
+ const MAX_IMAGE_SIZE = 10485760; // 10MB
+ const LOW_CONFIDENCE_THRESHOLD = 0.5;
+ const DEFAULT_CONFIDENCE = 0.9;
+ /**
+ * Check if path is a local file
+ */
+ function isLocalFile(imagePath) {
+ return imagePath.startsWith('/') ||
+ imagePath.startsWith('./') ||
+ imagePath.startsWith('../');
+ }
+ /**
+ * Get file extension from path
+ */
+ function getFileExtension(imagePath) {
+ return imagePath.split('.').pop()?.toLowerCase() ?? '';
+ }
+ /**
+ * Validate image file format and size
+ */
+ async function validateImage(imagePath) {
+ const ext = getFileExtension(imagePath);
+ if (!SUPPORTED_FORMATS.has(ext)) {
+ throw new Error(`Unsupported image format: ${ext}. Supported formats: ${Array.from(SUPPORTED_FORMATS).join(', ')}`);
+ }
+ if (!isLocalFile(imagePath)) {
+ return;
+ }
+ try {
+ const buffer = await readFile(imagePath);
+ const maxSize = Number(process.env.MAX_IMAGE_SIZE) || MAX_IMAGE_SIZE;
+ if (buffer.length > maxSize) {
+ throw new Error(`Image too large: ${buffer.length} bytes (max: ${maxSize} bytes)`);
+ }
+ }
+ catch (error) {
+ const errno = error;
+ if (errno.code === 'ENOENT') {
+ throw new Error(`Image file not found: ${imagePath}`);
+ }
+ throw error;
+ }
+ }
+ /**
+ * Call PaddleOCR service
+ */
+ async function callPaddleOCR(imagePath) {
+ const paddleocrUrl = process.env.PADDLEOCR_URL || 'http://localhost:8080';
+ const imageBuffer = await readFile(imagePath);
+ const base64Image = imageBuffer.toString('base64');
+ const response = await fetch(`${paddleocrUrl}/ocr`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ image: base64Image,
+ lang: 'auto'
+ }),
+ });
+ if (!response.ok) {
+ const errorText = await response.text().catch(() => 'Unknown error');
+ throw new Error(`OCR service error (${response.status}): ${errorText}`);
+ }
+ return response.json();
+ }
+ /**
+ * Main function: Extract text from image using OCR
+ */
+ export async function extractTextFromImage(args) {
+ const { imageFile } = args;
+ if (!imageFile?.trim()) {
+ throw new Error('Image file path is required');
+ }
+ const startTime = Date.now();
+ try {
+ await validateImage(imageFile);
+ const result = await callPaddleOCR(imageFile);
+ const processingTime = `${((Date.now() - startTime) / 1000).toFixed(2)}s`;
+ const confidence = result.confidence || DEFAULT_CONFIDENCE;
+ const warning = confidence < LOW_CONFIDENCE_THRESHOLD
+ ? 'Low confidence score, image quality may be poor'
+ : undefined;
+ const response = {
+ success: true,
+ text: result.text,
+ confidence,
+ language: result.language || 'unknown',
+ processingTime,
+ engine: 'paddleocr',
+ warning
+ };
+ return JSON.stringify(response, null, 2);
+ }
+ catch (error) {
+ const errno = error;
+ if (errno.code === 'ECONNREFUSED') {
+ throw new Error('OCR service is not running. Please start the PaddleOCR service.');
+ }
+ throw error;
+ }
+ }
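
The OCR module is not listed among the advertised tools in this version, but its entry point can be exercised directly. A sketch, assuming a PaddleOCR service reachable at the default `http://localhost:8080` and a hypothetical import path:

```ts
// Path is hypothetical; the added file's name is not shown in this diff.
import { extractTextFromImage } from "./tools/image-ocr.js";

// Optional overrides picked up by the module.
process.env.PADDLEOCR_URL = "http://localhost:8080";       // default if unset
process.env.MAX_IMAGE_SIZE = String(10 * 1024 * 1024);     // default 10MB cap for local files

// Only local .png/.jpg/.jpeg/.bmp/.gif paths are size-checked; ECONNREFUSED maps to a
// "service is not running" error, ENOENT to "file not found".
const json = await extractTextFromImage({ imageFile: "./scan.png" });
console.log(JSON.parse(json)); // { success, text, confidence, language, processingTime, engine, warning? }
```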
@@ -0,0 +1,5 @@
+ import type { ImageUnderstandArgs } from '../types.js';
+ /**
+ * Main function: Process image understanding request
+ */
+ export declare function understandImage(args: ImageUnderstandArgs): Promise<string>;
@@ -0,0 +1,54 @@
+ import { detectFileType, getMimeType, normalizeInput, readAsBase64, } from '../utils/file-helper.js';
+ import { callVisionAPI } from '../api/zhipu.js';
+ const DEFAULT_MIME_TYPES = {
+ image: 'image/png',
+ video: 'video/mp4',
+ };
+ /**
+ * Convert file input to data URL
+ */
+ async function fileToDataUrl(file) {
+ const normalized = normalizeInput(file);
+ if (normalized.type === 'local') {
+ const base64 = await readAsBase64(normalized.value);
+ const mimeType = getMimeType(normalized.value);
+ return `data:${mimeType};base64,${base64}`;
+ }
+ if (normalized.type === 'base64' && !normalized.value.startsWith('data:')) {
+ const fileType = detectFileType(normalized.value);
+ const mimeType = DEFAULT_MIME_TYPES[fileType] ?? 'application/octet-stream';
+ return `data:${mimeType};base64,${normalized.value}`;
+ }
+ return normalized.value;
+ }
+ /**
+ * Create content item based on file type
+ */
+ function createContentItem(dataUrl, file) {
+ const fileType = detectFileType(file.split('?')[0]);
+ if (fileType === 'image') {
+ return { type: 'image_url', image_url: { url: dataUrl } };
+ }
+ if (fileType === 'video') {
+ return { type: 'video_url', video_url: { url: dataUrl } };
+ }
+ return { type: 'file_url', file_url: { url: dataUrl } };
+ }
+ /**
+ * Main function: Process image understanding request
+ */
+ export async function understandImage(args) {
+ const { file, prompt, thinking = true } = args;
+ if (!file?.trim()) {
+ throw new Error('File is required');
+ }
+ if (!prompt?.trim()) {
+ throw new Error('Prompt is required');
+ }
+ const dataUrl = await fileToDataUrl(file);
+ const contentItem = createContentItem(dataUrl, file);
+ return callVisionAPI([{
+ role: 'user',
+ content: [contentItem, { type: 'text', text: prompt }]
+ }], thinking);
+ }
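
A corresponding sketch for the understanding path (again with a hypothetical import path): local files are read and inlined as base64 data URLs, while http(s) URLs are passed through to `callVisionAPI` as-is.

```ts
// Path is hypothetical; the added file's name is not shown in this diff.
import { understandImage } from "./tools/image-understand.js";

// A URL input is forwarded untouched; a local path would be converted to a data: URL first.
const answer = await understandImage({
  file: "https://example.com/image.jpg",
  prompt: "Describe this image",
  thinking: true, // default; set false to skip deep-thinking mode
});
console.log(answer);
```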
package/dist/types.d.ts CHANGED
@@ -1,14 +1,29 @@
  import { Tool } from "@modelcontextprotocol/sdk/types.js";
- export interface SearXNGWeb {
- results: Array<{
- title: string;
- content: string;
- url: string;
- }>;
- }
  export declare function isSearXNGWebSearchArgs(args: unknown): args is {
  query: string;
  limit?: number;
  };
  export declare const WEB_SEARCH_TOOL: Tool;
  export declare const READ_URL_TOOL: Tool;
+ export declare const IMAGE_UNDERSTAND_TOOL: Tool;
+ export declare const IMAGE_GENERATE_TOOL: Tool;
+ export interface ImageUnderstandArgs {
+ file: string;
+ prompt: string;
+ thinking?: boolean;
+ }
+ export interface ImageGenerateArgs {
+ prompt: string;
+ size?: string;
+ }
+ export declare function isImageUnderstandArgs(args: unknown): args is ImageUnderstandArgs;
+ export declare function isImageGenerateArgs(args: unknown): args is ImageGenerateArgs;
+ export declare function isWebUrlReadArgs(args: unknown): args is WebUrlReadArgs;
+ export interface WebUrlReadArgs {
+ url: string;
+ startChar?: number;
+ maxLength?: number;
+ section?: string;
+ paragraphRange?: string;
+ readHeadings?: boolean;
+ }
package/dist/types.js CHANGED
@@ -1,13 +1,20 @@
  export function isSearXNGWebSearchArgs(args) {
- return (typeof args === "object" &&
- args !== null &&
- "query" in args &&
- typeof args.query === "string");
+ if (typeof args !== "object" ||
+ args === null ||
+ !("query" in args) ||
+ typeof args.query !== "string") {
+ return false;
+ }
+ const searchArgs = args;
+ if (searchArgs.limit !== undefined && (typeof searchArgs.limit !== "number" || searchArgs.limit < 1 || searchArgs.limit > 100)) {
+ return false;
+ }
+ return true;
  }
  export const WEB_SEARCH_TOOL = {
  name: "searxng_web_search",
  description: "Performs web search using the Gateway API Firecrawl search. " +
- "Returns search results with title, content, URL, and relevance score. " +
+ "Returns search results with title, content, and URL. " +
  "Use this for general queries, news, articles, and online content.",
  inputSchema: {
  type: "object",
@@ -64,3 +71,103 @@ export const READ_URL_TOOL = {
  required: ["url"],
  },
  };
+ export const IMAGE_UNDERSTAND_TOOL = {
+ name: "image_understand",
+ description: "Understand and analyze images, videos, and documents using Zhipu GLM-4.6V-Flash model. " +
+ "Supports visual Q&A, content description, OCR, document parsing, video understanding, " +
+ "and frontend code replication from screenshots. " +
+ "Accepts file paths, URLs, or base64 data. " +
+ "Use this when you need to extract information from visual content or answer questions about images/videos.",
+ inputSchema: {
+ type: "object",
+ properties: {
+ file: {
+ type: "string",
+ description: "File path, URL, or base64 data (image, video, or PDF)",
+ },
+ prompt: {
+ type: "string",
+ description: "Question or instruction for the visual content analysis",
+ },
+ thinking: {
+ type: "boolean",
+ description: "Enable deep thinking mode for complex reasoning (default: true)",
+ default: true,
+ },
+ },
+ required: ["file", "prompt"],
+ },
+ };
+ export const IMAGE_GENERATE_TOOL = {
+ name: "image_generate",
+ description: "Generate images from text descriptions using Zhipu Cogview-3-Flash model. " +
+ "Supports multiple resolutions. " +
+ "Use this when you need to create visual content from text prompts.",
+ inputSchema: {
+ type: "object",
+ properties: {
+ prompt: {
+ type: "string",
+ description: "Text description of the image to generate",
+ },
+ size: {
+ type: "string",
+ enum: ["1024x1024", "768x1344", "864x1152", "1344x768", "1152x864", "1440x720", "720x1440"],
+ description: "Image size (default: 1024x1024)",
+ default: "1024x1024",
+ },
+ },
+ required: ["prompt"],
+ },
+ };
+ /**
+ * Generic type guard for checking if an object has a property of a specific type
+ */
+ function hasProperty(args, prop, type) {
+ return (typeof args === "object" &&
+ args !== null &&
+ prop in args &&
+ typeof args[prop] === type);
+ }
+ export function isImageUnderstandArgs(args) {
+ if (!hasProperty(args, 'prompt', 'string') || !hasProperty(args, 'file', 'string')) {
+ return false;
+ }
+ const typedArgs = args;
+ if (typedArgs.thinking !== undefined && typeof typedArgs.thinking !== 'boolean') {
+ return false;
+ }
+ return true;
+ }
+ export function isImageGenerateArgs(args) {
+ if (!hasProperty(args, 'prompt', 'string')) {
+ return false;
+ }
+ const typedArgs = args;
+ if (typedArgs.size !== undefined && typeof typedArgs.size !== 'string') {
+ return false;
+ }
+ return true;
+ }
+ export function isWebUrlReadArgs(args) {
+ if (!hasProperty(args, 'url', 'string')) {
+ return false;
+ }
+ const urlArgs = args;
+ if (urlArgs.startChar !== undefined && (typeof urlArgs.startChar !== 'number' || urlArgs.startChar < 0)) {
+ return false;
+ }
+ if (urlArgs.maxLength !== undefined && (typeof urlArgs.maxLength !== 'number' || urlArgs.maxLength < 1)) {
+ return false;
+ }
+ if (urlArgs.section !== undefined && typeof urlArgs.section !== 'string') {
+ return false;
+ }
+ if (urlArgs.paragraphRange !== undefined && typeof urlArgs.paragraphRange !== 'string') {
+ return false;
+ }
+ if (urlArgs.readHeadings !== undefined && typeof urlArgs.readHeadings !== 'boolean') {
+ return false;
+ }
+ return true;
+ }
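
The new guards slot naturally into a CallTool handler. A rough sketch of how the dispatcher (which lives in index.js and is not part of this diff) might route requests with them, assuming the standard MCP server API:

```ts
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { CallToolRequestSchema } from "@modelcontextprotocol/sdk/types.js";
import { isImageGenerateArgs, isImageUnderstandArgs } from "./types.js";

// `server`, `understandImage`, and `generateImage` are assumed to be wired up elsewhere.
declare const server: Server;
declare function understandImage(args: unknown): Promise<string>;
declare function generateImage(args: unknown): Promise<string>;

server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;
  if (name === "image_understand" && isImageUnderstandArgs(args)) {
    return { content: [{ type: "text", text: await understandImage(args) }] };
  }
  if (name === "image_generate" && isImageGenerateArgs(args)) {
    return { content: [{ type: "text", text: await generateImage(args) }] };
  }
  // searxng_web_search / web_url_read would be validated the same way with their guards.
  throw new Error(`Unknown tool or invalid arguments: ${name}`);
});
```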
@@ -6,5 +6,6 @@ interface PaginationOptions {
  paragraphRange?: string;
  readHeadings?: boolean;
  }
- export declare function fetchAndConvertToMarkdown(server: Server, url: string, timeoutMs?: number, paginationOptions?: PaginationOptions): Promise<string>;
+ export declare function fetchAndConvertToMarkdown(server: Server, url: string, timeoutMs?: number, // Increased default from 10s to 30s
+ paginationOptions?: PaginationOptions): Promise<string>;
  export {};
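
Finally, the reader's declaration now documents the larger default timeout. A sketch of calling it with the new pagination options; the module path and a pre-built `server` are assumptions:

```ts
// Module path is hypothetical; the .d.ts hunk above does not include the file name.
import { fetchAndConvertToMarkdown } from "./url-reader.js";
import type { Server } from "@modelcontextprotocol/sdk/server/index.js";

declare const server: Server; // assumed to exist elsewhere

// Explicit 30s timeout (now also the documented default) plus paragraph-range pagination.
const markdown = await fetchAndConvertToMarkdown(server, "https://example.com/article", 30000, {
  paragraphRange: "1-5",
  readHeadings: false,
});
console.log(markdown);
```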