universal-mcp 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff shows the contents of publicly released versions of this package as they appear in their respective public registries; it is provided for informational purposes only and reflects the changes between those versions.
- universal_mcp/applications/firecrawl/app.py +74 -190
- universal_mcp/applications/markitdown/app.py +17 -6
- universal_mcp/applications/notion/README.md +43 -20
- universal_mcp/applications/notion/app.py +122 -130
- universal_mcp/applications/perplexity/app.py +79 -0
- universal_mcp/cli.py +20 -13
- universal_mcp/integrations/integration.py +9 -3
- universal_mcp/logger.py +74 -0
- universal_mcp/servers/server.py +28 -21
- universal_mcp/utils/docgen.py +2 -2
- universal_mcp/utils/installation.py +15 -0
- universal_mcp/utils/openapi.py +22 -29
- universal_mcp-0.1.3.dist-info/METADATA +252 -0
- {universal_mcp-0.1.2.dist-info → universal_mcp-0.1.3.dist-info}/RECORD +16 -14
- universal_mcp-0.1.2.dist-info/METADATA +0 -208
- {universal_mcp-0.1.2.dist-info → universal_mcp-0.1.3.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.2.dist-info → universal_mcp-0.1.3.dist-info}/entry_points.txt +0 -0
universal_mcp/applications/firecrawl/app.py
@@ -1,10 +1,8 @@
 from typing import Any
 
 from firecrawl import FirecrawlApp as FirecrawlApiClient
-from loguru import logger
 
 from universal_mcp.applications.application import APIApplication
-from universal_mcp.exceptions import NotAuthorizedError
 from universal_mcp.integrations import Integration
 
 
@@ -12,57 +10,38 @@ class FirecrawlApp(APIApplication):
     """
     Application for interacting with the Firecrawl service (firecrawl.dev)
     to scrape web pages, perform searches, and manage crawl/batch scrape/extract jobs.
-    Requires a Firecrawl API key configured via integration
-    (e.g., FIRECRAWL_API_KEY environment variable).
+    Requires a Firecrawl API key configured via integration.
     """
 
     def __init__(self, integration: Integration | None = None) -> None:
         super().__init__(name="firecrawl", integration=integration)
         self.api_key: str | None = None
-
-
-
-
-        if
-
-
-
-
-
-
-
-            logger.warning(
-                "Firecrawl API Key not found in credentials during init. Will try again on first use."
-            )
-
-    def _get_firecrawl_client(self) -> FirecrawlApiClient:
-        """Ensures the API key is available and returns an initialized Firecrawl client."""
-        if not self.api_key:
-            logger.debug(
-                "Firecrawl API key not loaded, attempting retrieval via integration."
+
+    def _set_api_key(self):
+        """
+        Ensures the API key is loaded from the integration.
+        Raises ValueError if the integration or key is missing/misconfigured.
+        """
+        if self.api_key:
+            return
+
+        if not self.integration:
+            raise ValueError(
+                "Integration is None. Cannot retrieve Firecrawl API Key."
             )
-
-
-
-
-
-            logger.info("Firecrawl API Key successfully retrieved via integration.")
-        else:
-            action = (
-                self.integration.authorize()
-                if hasattr(self.integration, "authorize")
-                else "Configure API Key"
-            )
-            raise NotAuthorizedError(
-                f"Firecrawl API Key not found in provided integration credentials. Action required: {action}"
-            )
-
-        if not self.api_key:
-            raise NotAuthorizedError(
-                "Firecrawl API Key is missing or could not be loaded."
+
+        credentials = self.integration.get_credentials()
+        if not credentials:
+            raise ValueError(
+                f"Failed to retrieve Firecrawl API Key using integration '{self.integration.name}'. "
+                f"Check store configuration (e.g., ensure the correct source like environment variable is set)."
             )
 
+        self.api_key = credentials
+
+    def _get_client(self) -> FirecrawlApiClient:
+        """Initializes and returns the Firecrawl client after ensuring API key is set."""
+        self._set_api_key()
         return FirecrawlApiClient(api_key=self.api_key)
 
     def scrape_url(
@@ -74,21 +53,17 @@ class FirecrawlApp(APIApplication):
         Args:
             url: The URL of the web page to scrape.
             params: Optional dictionary of parameters to customize the scrape.
-                    Refer to Firecrawl documentation for 'pageOptions', 'extractorOptions', 'jsonOptions'.
-                    Example: {'pageOptions': {'onlyMainContent': True}}
 
         Returns:
-            A dictionary containing the scraped data
-
+            A dictionary containing the scraped data on success,
+            or a string containing an error message on failure.
         """
-        logger.info(f"Attempting to scrape URL: {url} with params: {params}")
         try:
-            client = self.
+            client = self._get_client()
             response_data = client.scrape_url(url=url, params=params)
-            logger.info(f"Successfully scraped URL: {url}")
             return response_data
+
         except Exception as e:
-            logger.error(f"Failed to scrape URL {url}: {type(e).__name__} - {e}")
             return f"Error scraping URL {url}: {type(e).__name__} - {e}"
 
     def search(
@@ -99,29 +74,19 @@ class FirecrawlApp(APIApplication):
 
         Args:
             query: The search query string.
-            params: Optional dictionary of search parameters
-                    Refer to Firecrawl documentation for details.
-                    Example: {'limit': 3, 'country': 'DE'}
+            params: Optional dictionary of search parameters.
 
         Returns:
-            A dictionary containing the search results
-
+            A dictionary containing the search results on success,
+            or a string containing an error message on failure.
         """
-        logger.info(f"Attempting search for query: '{query}' with params: {params}")
         try:
-            client = self.
-            # The library method returns the full response dictionary
+            client = self._get_client()
             response = client.search(query=query, params=params)
-            logger.info(f"Successfully performed search for query: '{query}'")
             return response
         except Exception as e:
-            logger.error(
-                f"Failed to perform search for '{query}': {type(e).__name__} - {e}"
-            )
             return f"Error performing search for '{query}': {type(e).__name__} - {e}"
 
-    # --- Asynchronous Job Pattern Tools ---
-
     def start_crawl(
         self,
         url: str,
@@ -130,68 +95,43 @@ class FirecrawlApp(APIApplication):
     ) -> dict[str, Any] | str:
         """
         Starts a crawl job for a given URL using Firecrawl. Returns the job ID immediately.
-        Use 'check_crawl_status' to monitor progress and retrieve results.
 
         Args:
             url: The starting URL for the crawl.
-            params: Optional dictionary of parameters to customize the crawl
-
-            idempotency_key: Optional unique key to prevent duplicate jobs if the request is retried.
+            params: Optional dictionary of parameters to customize the crawl.
+            idempotency_key: Optional unique key to prevent duplicate jobs.
 
         Returns:
-            A dictionary containing the job initiation response
-
+            A dictionary containing the job initiation response on success,
+            or a string containing an error message on failure.
         """
-        logger.info(
-            f"Attempting to start crawl job for URL: {url} with params: {params}"
-        )
         try:
-            client = self.
-            # Use the library's async method which returns the job ID response
+            client = self._get_client()
             response = client.async_crawl_url(
                 url=url, params=params, idempotency_key=idempotency_key
             )
-            if response.get("success"):
-                logger.info(
-                    f"Successfully started crawl job for URL: {url}. Job ID: {response.get('id')}"
-                )
-            else:
-                logger.error(
-                    f"Failed to start crawl job for URL {url}. Response: {response}"
-                )
             return response
+
         except Exception as e:
-            logger.error(
-                f"Failed to start crawl for URL {url}: {type(e).__name__} - {e}"
-            )
             return f"Error starting crawl for URL {url}: {type(e).__name__} - {e}"
 
     def check_crawl_status(self, job_id: str) -> dict[str, Any] | str:
         """
         Checks the status of a previously initiated Firecrawl crawl job.
-        If the job is completed, this retrieves the results (potentially paginated).
 
         Args:
             job_id: The ID of the crawl job to check.
 
         Returns:
-            A dictionary containing the job status details
-
-            Common status values: 'pending', 'queued', 'scraping', 'completed', 'failed'.
+            A dictionary containing the job status details on success,
+            or a string containing an error message on failure.
         """
-        logger.info(f"Attempting to check status for crawl job ID: {job_id}")
         try:
-            client = self.
-
-            status = client.check_crawl_status(id=job_id)
-            logger.info(
-                f"Successfully checked status for job ID: {job_id}. Status: {status.get('status', 'unknown')}"
-            )
+            client = self._get_client()
+            status = client.check_crawl_status(id=job_id)
             return status
+
         except Exception as e:
-            logger.error(
-                f"Failed to check crawl status for job ID {job_id}: {type(e).__name__} - {e}"
-            )
             return f"Error checking crawl status for job ID {job_id}: {type(e).__name__} - {e}"
 
     def cancel_crawl(self, job_id: str) -> dict[str, Any] | str:
@@ -202,23 +142,19 @@ class FirecrawlApp(APIApplication):
             job_id: The ID of the crawl job to cancel.
 
         Returns:
-            A dictionary confirming the cancellation status
-
+            A dictionary confirming the cancellation status on success,
+            or a string containing an error message on failure.
         """
-        logger.info(f"Attempting to cancel crawl job ID: {job_id}")
         try:
-            client = self.
+            client = self._get_client()
             response = client.cancel_crawl(id=job_id)
-
-                f"Successfully requested cancellation for job ID: {job_id}. Response: {response}"
-            )
+
             return response
-
-
-                f"Failed to cancel crawl job ID {job_id}: {type(e).__name__} - {e}"
-            )
+
+        except Exception as e:
             return f"Error cancelling crawl job ID {job_id}: {type(e).__name__} - {e}"
 
+
     def start_batch_scrape(
         self,
         urls: list[str],
@@ -226,67 +162,44 @@ class FirecrawlApp(APIApplication):
         idempotency_key: str | None = None,
     ) -> dict[str, Any] | str:
         """
-        Starts a batch scrape job for multiple URLs using Firecrawl.
-        Use 'check_batch_scrape_status' to monitor progress and retrieve results.
+        Starts a batch scrape job for multiple URLs using Firecrawl.
 
         Args:
             urls: A list of URLs to scrape.
-            params: Optional dictionary of parameters applied to all scrapes
-                    Refer to Firecrawl documentation for scrape parameters.
+            params: Optional dictionary of parameters applied to all scrapes.
             idempotency_key: Optional unique key to prevent duplicate jobs.
 
         Returns:
-            A dictionary containing the job initiation response
-
+            A dictionary containing the job initiation response on success,
+            or a string containing an error message on failure.
         """
-        url_count = len(urls)
-        logger.info(
-            f"Attempting to start batch scrape job for {url_count} URLs with params: {params}"
-        )
-        if not urls:
-            return "Error: No URLs provided for batch scrape."
         try:
-            client = self.
+            client = self._get_client()
             response = client.async_batch_scrape_urls(
                 urls=urls, params=params, idempotency_key=idempotency_key
             )
-            if response.get("success"):
-                logger.info(
-                    f"Successfully started batch scrape job for {url_count} URLs. Job ID: {response.get('id')}"
-                )
-            else:
-                logger.error(
-                    f"Failed to start batch scrape job for {url_count} URLs. Response: {response}"
-                )
             return response
+
         except Exception as e:
-            logger.error(f"Failed to start batch scrape: {type(e).__name__} - {e}")
             return f"Error starting batch scrape: {type(e).__name__} - {e}"
 
     def check_batch_scrape_status(self, job_id: str) -> dict[str, Any] | str:
         """
         Checks the status of a previously initiated Firecrawl batch scrape job.
-        If the job is completed, this retrieves the results for all URLs.
 
         Args:
             job_id: The ID of the batch scrape job to check.
 
         Returns:
-            A dictionary containing the job status details
-
+            A dictionary containing the job status details on success,
+            or a string containing an error message on failure.
         """
-        logger.info(f"Attempting to check status for batch scrape job ID: {job_id}")
         try:
-            client = self.
-            status = client.check_batch_scrape_status(id=job_id)
-            logger.info(
-                f"Successfully checked status for batch scrape job ID: {job_id}. Status: {status.get('status', 'unknown')}"
-            )
+            client = self._get_client()
+            status = client.check_batch_scrape_status(id=job_id)
             return status
+
         except Exception as e:
-            logger.error(
-                f"Failed to check batch scrape status for job ID {job_id}: {type(e).__name__} - {e}"
-            )
             return f"Error checking batch scrape status for job ID {job_id}: {type(e).__name__} - {e}"
 
     def start_extract(
@@ -296,74 +209,45 @@ class FirecrawlApp(APIApplication):
         idempotency_key: str | None = None,
     ) -> dict[str, Any] | str:
         """
-        Starts an extraction job for one or more URLs using Firecrawl.
-        Use 'check_extract_status' to monitor progress and retrieve results. Requires 'prompt' or 'schema' in params.
+        Starts an extraction job for one or more URLs using Firecrawl.
 
         Args:
            urls: A list of URLs to extract data from.
-            params: Dictionary of parameters. MUST include 'prompt'
-                    Optional: 'enableWebSearch', 'systemPrompt', etc. See Firecrawl docs.
-                    Example: {'prompt': 'Extract the main headlines'}
-                    Example: {'schema': {'type': 'object', 'properties': {'title': {'type': 'string'}}}}
+            params: Dictionary of parameters. MUST include 'prompt' or 'schema'.
            idempotency_key: Optional unique key to prevent duplicate jobs.
 
         Returns:
-            A dictionary containing the job initiation response
-
+            A dictionary containing the job initiation response on success,
+            or a string containing an error message on failure.
         """
-
-            f"Attempting to start extraction job for URLs: {urls} with params: {params}"
-        )
-        if not urls:
-            return "Error: No URLs provided for extraction."
-        if not params or (not params.get("prompt") and not params.get("schema")):
-            return "Error: 'params' dictionary must include either a 'prompt' string or a 'schema' definition."
+
         try:
-            client = self.
-            # Pass params directly; the library handles schema conversion if needed
+            client = self._get_client()
             response = client.async_extract(
                 urls=urls, params=params, idempotency_key=idempotency_key
             )
-            if response.get("success"):
-                logger.info(
-                    f"Successfully started extraction job for URLs. Job ID: {response.get('id')}"
-                )
-            else:
-                logger.error(
-                    f"Failed to start extraction job for URLs. Response: {response}"
-                )
             return response
+
         except Exception as e:
-            logger.error(f"Failed to start extraction: {type(e).__name__} - {e}")
             return f"Error starting extraction: {type(e).__name__} - {e}"
 
     def check_extract_status(self, job_id: str) -> dict[str, Any] | str:
         """
         Checks the status of a previously initiated Firecrawl extraction job.
-        If the job is completed, this retrieves the extracted data.
 
         Args:
            job_id: The ID of the extraction job to check.
 
         Returns:
-            A dictionary containing the job status details
-
-            Common status values: 'pending', 'processing', 'completed', 'failed'.
+            A dictionary containing the job status details on success,
+            or a string containing an error message on failure.
         """
-        logger.info(f"Attempting to check status for extraction job ID: {job_id}")
         try:
-            client = self.
-            status = client.get_extract_status(
-                job_id=job_id
-            )  # Correct library method name
-            logger.info(
-                f"Successfully checked status for extraction job ID: {job_id}. Status: {status.get('status', 'unknown')}"
-            )
+            client = self._get_client()
+            status = client.get_extract_status(job_id=job_id)
             return status
+
         except Exception as e:
-            logger.error(
-                f"Failed to check extraction status for job ID {job_id}: {type(e).__name__} - {e}"
-            )
             return f"Error checking extraction status for job ID {job_id}: {type(e).__name__} - {e}"
 
     def list_tools(self):
@@ -378,4 +262,4 @@ class FirecrawlApp(APIApplication):
             self.check_batch_scrape_status,
             self.start_extract,
             self.check_extract_status,
-        ]
+        ]
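Taken together, the firecrawl/app.py changes drop the loguru logging and NotAuthorizedError paths in favor of a lazy `_set_api_key()` helper that raises `ValueError`, and every tool now builds its client through `_get_client()` inside its own try/except, so configuration problems come back as error strings rather than exceptions. A minimal usage sketch of that behavior follows; `EnvKeyIntegration` is a hypothetical stand-in for any integration object exposing `name` and `get_credentials()`.

```python
from universal_mcp.applications.firecrawl.app import FirecrawlApp


class EnvKeyIntegration:
    """Hypothetical stand-in: any object exposing `name` and `get_credentials()` works here."""

    name = "firecrawl-env"

    def __init__(self, api_key: str | None) -> None:
        self._api_key = api_key

    def get_credentials(self) -> str | None:
        # An empty value simulates a missing API key in the credential store.
        return self._api_key


app = FirecrawlApp(integration=EnvKeyIntegration(api_key="fc-your-key"))

# The Firecrawl client is built lazily: _get_client() calls _set_api_key() on first use.
result = app.scrape_url("https://example.com")  # dict on success, error string on failure
print(type(result))

# Crawls use the start/poll pattern documented above.
job = app.start_crawl("https://example.com")
if isinstance(job, dict) and job.get("id"):
    print(app.check_crawl_status(job["id"]))

# With no integration configured, the ValueError from _set_api_key() is caught by the
# tool's try/except and returned as an error string instead of propagating.
print(FirecrawlApp(integration=None).scrape_url("https://example.com"))
```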
universal_mcp/applications/markitdown/app.py
@@ -9,16 +9,27 @@ class MarkitdownApp(Application):
         self.markitdown = MarkItDown()
 
     async def convert_to_markdown(self, uri: str) -> str:
-        """
+        """Fetches content from a URI and converts its primary textual representation into Markdown.
+
+        This tool aims to extract the main text content from various sources. It supports:
+        - Web Pages: General HTML, specific handlers for RSS/Atom feeds, Wikipedia articles (main content), YouTube (transcripts if available), Bing SERPs.
+        - Documents: PDF (attempts OCR), DOCX, XLSX, PPTX, XLS, EPUB, Outlook MSG, IPYNB notebooks.
+        - Plain Text files.
+        - Images: Extracts metadata and attempts OCR to get text.
+        - Audio: Extracts metadata and attempts transcription to get text.
+        - Archives: ZIP (extracts and attempts to convert supported files within, concatenating results).
+
+        Note: Conversion quality depends on the source format. Complex layouts, encrypted files, or missing transcripts/OCR data may limit output.
+        Enhanced PDF/Image processing via Azure Document Intelligence may be active if configured server-side.
 
         Args:
-            uri (str): The URI to
-
-
-
+            uri (str): The URI pointing to the resource. Supported schemes:
+                - http:// or https:// (Web pages, feeds, APIs)
+                - file:// (Local or accessible network files)
+                - data: (Embedded data)
 
         Returns:
-            str: The
+            str: The extracted content converted to Markdown format.
 
         Example:
             >>> await convert_to_markdown("https://example.com")
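The markitdown/app.py change is documentation only, but the expanded docstring spells out the call's contract clearly enough for a short sketch. This assumes `MarkitdownApp` can be constructed without arguments (its `__init__` signature is not shown in this hunk) and uses the URI schemes listed in the docstring.

```python
import asyncio

from universal_mcp.applications.markitdown.app import MarkitdownApp


async def main() -> None:
    # Assumption: a no-argument constructor; the hunk only shows `self.markitdown = MarkItDown()`.
    app = MarkitdownApp()

    # Web page -> Markdown, mirroring the docstring's own example.
    page_md = await app.convert_to_markdown("https://example.com")
    print(page_md[:200])

    # Local document via the file:// scheme (path is illustrative).
    # report_md = await app.convert_to_markdown("file:///path/to/report.pdf")


asyncio.run(main())
```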
universal_mcp/applications/notion/README.md
@@ -1,4 +1,15 @@
-
+
+# Notion MCP Server
+
+An MCP Server for the Notion API.
+
+## Supported Integrations
+
+- AgentR
+- API Key (Coming Soon)
+- OAuth (Coming Soon)
+
+## Tools
 
 This is automatically generated from OpenAPI schema for the Notion API.
 
@@ -10,23 +21,35 @@ This tool can be integrated with any service that supports HTTP requests.
 
 | Tool | Description |
 |------|-------------|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
-|
+| retrieve_a_user | Retrieves user details from the server using the specified user ID. |
+| list_a_ll_users | Fetches a list of all users from the API endpoint and returns the data as a dictionary. |
+| retrieve_your_token_sbot_user | Retrieves the authentication token for the current user from the SBOT service. |
+| retrieve_a_database | Retrieves database details from a specified endpoint using the provided database ID. |
+| update_a_database | Updates a database entry with the given ID using a PATCH request. |
+| query_a_database | Executes a query on a specified database using an identifier and an optional request body. |
+| create_a_database | Creates a new database on the server using the specified request body. |
+| create_a_page | Creates a new page by sending a POST request to the specified endpoint. |
+| retrieve_a_page | Retrieves a page by its unique identifier from a remote server. |
+| update_page_properties | Updates the properties of a page identified by its ID using the provided request body. |
+| retrieve_a_page_property_item | Retrieves the property item of a page using specified page and property identifiers. |
+| retrieve_block_children | Retrieves the children of a specified block using its unique identifier. |
+| append_block_children | Appends child elements to a block identified by its ID and returns the updated block data. |
+| retrieve_a_block | Retrieves a block of data from a given API endpoint using the specified block ID. |
+| delete_a_block | Deletes a block by its unique identifier and returns the server's response. |
+| update_a_block | Updates a block by sending a PATCH request to the specified endpoint. |
+| search | Executes a search query using the specified request body and returns the results. |
+| retrieve_comments | Fetches comments from a remote server for a specified block, with optional pagination. |
+| add_comment_to_page | Adds a comment to a page by sending a POST request with the provided request body. |
+
+
+
+## Usage
+
+- Login to AgentR
+- Follow the quickstart guide to setup MCP Server for your client
+- Visit Apps Store and enable the Notion app
+- Restart the MCP Server
+
+### Local Development
 
+- Follow the README to test with the local MCP Server