universal-mcp 0.1.13rc1__py3-none-any.whl → 0.1.13rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99) hide show
  1. {universal_mcp-0.1.13rc1.dist-info → universal_mcp-0.1.13rc2.dist-info}/METADATA +1 -1
  2. universal_mcp-0.1.13rc2.dist-info/RECORD +38 -0
  3. universal_mcp/applications/ahrefs/README.md +0 -76
  4. universal_mcp/applications/ahrefs/__init__.py +0 -0
  5. universal_mcp/applications/ahrefs/app.py +0 -2291
  6. universal_mcp/applications/cal_com_v2/README.md +0 -175
  7. universal_mcp/applications/cal_com_v2/__init__.py +0 -0
  8. universal_mcp/applications/cal_com_v2/app.py +0 -5390
  9. universal_mcp/applications/calendly/README.md +0 -78
  10. universal_mcp/applications/calendly/__init__.py +0 -0
  11. universal_mcp/applications/calendly/app.py +0 -1195
  12. universal_mcp/applications/clickup/README.md +0 -160
  13. universal_mcp/applications/clickup/__init__.py +0 -0
  14. universal_mcp/applications/clickup/app.py +0 -5009
  15. universal_mcp/applications/coda/README.md +0 -133
  16. universal_mcp/applications/coda/__init__.py +0 -0
  17. universal_mcp/applications/coda/app.py +0 -3671
  18. universal_mcp/applications/curstdata/README.md +0 -50
  19. universal_mcp/applications/curstdata/__init__.py +0 -0
  20. universal_mcp/applications/curstdata/app.py +0 -551
  21. universal_mcp/applications/e2b/README.md +0 -37
  22. universal_mcp/applications/e2b/app.py +0 -65
  23. universal_mcp/applications/elevenlabs/README.md +0 -84
  24. universal_mcp/applications/elevenlabs/__init__.py +0 -0
  25. universal_mcp/applications/elevenlabs/app.py +0 -1402
  26. universal_mcp/applications/falai/README.md +0 -42
  27. universal_mcp/applications/falai/__init__.py +0 -0
  28. universal_mcp/applications/falai/app.py +0 -332
  29. universal_mcp/applications/figma/README.md +0 -74
  30. universal_mcp/applications/figma/__init__.py +0 -0
  31. universal_mcp/applications/figma/app.py +0 -1261
  32. universal_mcp/applications/firecrawl/README.md +0 -45
  33. universal_mcp/applications/firecrawl/app.py +0 -268
  34. universal_mcp/applications/github/README.md +0 -47
  35. universal_mcp/applications/github/app.py +0 -429
  36. universal_mcp/applications/gong/README.md +0 -88
  37. universal_mcp/applications/gong/__init__.py +0 -0
  38. universal_mcp/applications/gong/app.py +0 -2297
  39. universal_mcp/applications/google_calendar/app.py +0 -442
  40. universal_mcp/applications/google_docs/README.md +0 -40
  41. universal_mcp/applications/google_docs/app.py +0 -88
  42. universal_mcp/applications/google_drive/README.md +0 -44
  43. universal_mcp/applications/google_drive/app.py +0 -286
  44. universal_mcp/applications/google_mail/README.md +0 -47
  45. universal_mcp/applications/google_mail/app.py +0 -664
  46. universal_mcp/applications/google_sheet/README.md +0 -42
  47. universal_mcp/applications/google_sheet/app.py +0 -150
  48. universal_mcp/applications/heygen/README.md +0 -69
  49. universal_mcp/applications/heygen/__init__.py +0 -0
  50. universal_mcp/applications/heygen/app.py +0 -956
  51. universal_mcp/applications/mailchimp/README.md +0 -306
  52. universal_mcp/applications/mailchimp/__init__.py +0 -0
  53. universal_mcp/applications/mailchimp/app.py +0 -10937
  54. universal_mcp/applications/markitdown/app.py +0 -44
  55. universal_mcp/applications/neon/README.md +0 -99
  56. universal_mcp/applications/neon/__init__.py +0 -0
  57. universal_mcp/applications/neon/app.py +0 -1924
  58. universal_mcp/applications/notion/README.md +0 -55
  59. universal_mcp/applications/notion/__init__.py +0 -0
  60. universal_mcp/applications/notion/app.py +0 -527
  61. universal_mcp/applications/perplexity/README.md +0 -37
  62. universal_mcp/applications/perplexity/app.py +0 -65
  63. universal_mcp/applications/reddit/README.md +0 -45
  64. universal_mcp/applications/reddit/app.py +0 -379
  65. universal_mcp/applications/replicate/README.md +0 -65
  66. universal_mcp/applications/replicate/__init__.py +0 -0
  67. universal_mcp/applications/replicate/app.py +0 -980
  68. universal_mcp/applications/resend/README.md +0 -38
  69. universal_mcp/applications/resend/app.py +0 -37
  70. universal_mcp/applications/retell_ai/README.md +0 -46
  71. universal_mcp/applications/retell_ai/__init__.py +0 -0
  72. universal_mcp/applications/retell_ai/app.py +0 -333
  73. universal_mcp/applications/rocketlane/README.md +0 -42
  74. universal_mcp/applications/rocketlane/__init__.py +0 -0
  75. universal_mcp/applications/rocketlane/app.py +0 -194
  76. universal_mcp/applications/serpapi/README.md +0 -37
  77. universal_mcp/applications/serpapi/app.py +0 -73
  78. universal_mcp/applications/shortcut/README.md +0 -153
  79. universal_mcp/applications/shortcut/__init__.py +0 -0
  80. universal_mcp/applications/shortcut/app.py +0 -3880
  81. universal_mcp/applications/spotify/README.md +0 -116
  82. universal_mcp/applications/spotify/__init__.py +0 -0
  83. universal_mcp/applications/spotify/app.py +0 -2526
  84. universal_mcp/applications/supabase/README.md +0 -112
  85. universal_mcp/applications/supabase/__init__.py +0 -0
  86. universal_mcp/applications/supabase/app.py +0 -2970
  87. universal_mcp/applications/tavily/README.md +0 -38
  88. universal_mcp/applications/tavily/app.py +0 -51
  89. universal_mcp/applications/wrike/README.md +0 -71
  90. universal_mcp/applications/wrike/__init__.py +0 -0
  91. universal_mcp/applications/wrike/app.py +0 -1372
  92. universal_mcp/applications/youtube/README.md +0 -82
  93. universal_mcp/applications/youtube/__init__.py +0 -0
  94. universal_mcp/applications/youtube/app.py +0 -1428
  95. universal_mcp/applications/zenquotes/README.md +0 -37
  96. universal_mcp/applications/zenquotes/app.py +0 -31
  97. universal_mcp-0.1.13rc1.dist-info/RECORD +0 -132
  98. {universal_mcp-0.1.13rc1.dist-info → universal_mcp-0.1.13rc2.dist-info}/WHEEL +0 -0
  99. {universal_mcp-0.1.13rc1.dist-info → universal_mcp-0.1.13rc2.dist-info}/entry_points.txt +0 -0
@@ -1,50 +0,0 @@
1
-
2
- # Crustdata MCP Server
3
-
4
- An MCP Server for the Crustdata API.
5
-
6
- ## Supported Integrations
7
-
8
- - AgentR
9
- - API Key (Coming Soon)
10
- - OAuth (Coming Soon)
11
-
12
- ## Tools
13
-
14
- This is automatically generated from OpenAPI schema for the Crustdata API.
15
-
16
- ## Supported Integrations
17
-
18
- This tool can be integrated with any service that supports HTTP requests.
19
-
20
- ## Tool List
21
-
22
- | Tool | Description |
23
- |------|-------------|
24
- | screen_companies | Screens companies based on specified metrics, filters, sorting, and pagination parameters, and returns the result as a JSON-compatible dictionary. |
25
- | get_headcount_timeseries | Retrieve headcount timeseries data from the data lab endpoint using the provided filters, pagination, and sorting options. |
26
- | get_headcount_by_facet_timeseries | Retrieves headcount timeseries data aggregated by specified facets using provided filters and sorting options. |
27
- | get_funding_milestone_timeseries | Retrieves a time series of funding milestone data based on specified filters, pagination, and sorting options. |
28
- | get_decision_makers | Retrieves decision makers based on specified filters and parameters. |
29
- | get_web_traffic | Retrieves web traffic data based on provided filters, pagination, and sorting criteria. |
30
- | get_investor_portfolio | Retrieves the investment portfolio information for a specified investor. |
31
- | get_job_listings | Retrieves job listings data based on specified parameters. |
32
- | search_persons | Submits a search request for persons associated with a given asynchronous job and returns the search results as a dictionary. |
33
- | search_companies | Searches for companies using specified filters and pagination parameters. |
34
- | enrich_person | Retrieves enriched person data from LinkedIn profile using the provided profile URL, enrichment mode, and requested fields. |
35
- | enrich_company | Retrieves enriched company data using the provided company domain and enrichment mode. |
36
- | get_linked_in_posts | Fetches LinkedIn posts for a specified company using its LinkedIn URL. |
37
- | search_linked_in_posts | Searches LinkedIn posts using the provided keyword and filters, returning the search results as a dictionary. |
38
-
39
-
40
-
41
- ## Usage
42
-
43
- - Login to AgentR
44
- - Follow the quickstart guide to setup MCP Server for your client
45
- Visit Apps Store and enable the Crustdata app
46
- - Restart the MCP Server
47
-
48
- ### Local Development
49
-
50
- - Follow the README to test with the local MCP Server
File without changes
@@ -1,551 +0,0 @@
1
- from typing import Any
2
-
3
- from universal_mcp.applications import APIApplication
4
- from universal_mcp.integrations import Integration
5
-
6
-
7
class CrustdataApp(APIApplication):
    """MCP application wrapping the Crustdata company/person data API.

    All tools call https://api.crustdata.com with token authentication taken
    from the configured integration's credentials. POST endpoints send a
    JSON-style body with None values removed; GET endpoints send None-filtered
    query parameters. Every tool raises ValueError for missing required
    arguments and requests.HTTPError (via raise_for_status) on API failure.
    """

    def __init__(self, integration: Integration = None, **kwargs) -> None:
        super().__init__(name='crustdata', integration=integration, **kwargs)
        self.base_url = "https://api.crustdata.com"

    def _get_headers(self) -> dict[str, Any]:
        # Crustdata uses "Token <key>" header auth; the key is read from the
        # integration's stored credentials on every request.
        api_key = self.integration.get_credentials().get("api_key")
        return {
            "Authorization": f"Token {api_key}",
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

    def _require(self, **params: Any) -> None:
        """Raise ValueError naming the first keyword whose value is None.

        Keyword argument order is preserved (PEP 468), so checks run in the
        order given by each caller — matching the original per-method
        validation order and error messages exactly.
        """
        for name, value in params.items():
            if value is None:
                raise ValueError(f"Missing required parameter '{name}'")

    def _post_json(self, path: str, body: dict[str, Any]) -> dict[str, Any]:
        """POST *body* (None values dropped) to base_url + *path*; return parsed JSON.

        Raises:
            requests.HTTPError: If the response has a non-success status code.
        """
        request_body = {k: v for k, v in body.items() if v is not None}
        response = self._post(f"{self.base_url}{path}", data=request_body, params={})
        response.raise_for_status()
        return response.json()

    def _get_json(self, path: str, params: dict[str, Any]) -> dict[str, Any]:
        """GET base_url + *path* with None-filtered query params; return parsed JSON.

        Raises:
            requests.HTTPError: If the response has a non-success status code.
        """
        query_params = {k: v for k, v in params.items() if v is not None}
        response = self._get(f"{self.base_url}{path}", params=query_params)
        response.raise_for_status()
        return response.json()

    def screen_companies(self, metrics, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Screens companies based on specified metrics, filters, sorting, and pagination parameters.

        Args:
            metrics: Metrics to screen companies by.
            filters: Filter criteria to apply to the screening.
            offset: Starting index for paginated results.
            count: Maximum number of results to return.
            sorts: Sorting criteria for the returned companies.

        Returns:
            dict[str, Any]: JSON response from the company screener API.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            screen, companies, api, filter, pagination
        """
        self._require(metrics=metrics, filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/screener/screen/", {
            'metrics': metrics,
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_headcount_timeseries(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieve headcount timeseries data from the data lab endpoint.

        Args:
            filters: Filter criteria for the headcount timeseries query.
            offset: Starting index for pagination.
            count: Number of records to retrieve.
            sorts: Sorting options for the returned timeseries data.

        Returns:
            dict[str, Any]: Parsed JSON response with headcount timeseries data.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, fetch, headcount, timeseries, data, api, management
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/headcount_timeseries/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_headcount_by_facet_timeseries(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves headcount timeseries data aggregated by specified facets.

        Args:
            filters: Filter criteria for the data retrieval.
            offset: Number of records to skip (pagination).
            count: Maximum number of records to return.
            sorts: Sorting instructions for the returned data.

        Returns:
            dict[str, Any]: Headcount timeseries data aggregated by facets.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, timeseries, headcount, data-retrieval, facets, management
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/headcount_by_facet_timeseries/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_funding_milestone_timeseries(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves a time series of funding milestone data.

        Args:
            filters: Criteria to filter the funding milestones.
            offset: Starting index for pagination.
            count: Maximum number of records to return.
            sorts: Sorting rules to apply to the results.

        Returns:
            dict[str, Any]: Funding milestone time series data.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, fetch, timeseries, funding, milestones, data-lab, api
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/funding_milestone_timeseries/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_decision_makers(self, filters, offset, count, sorts, decision_maker_titles) -> dict[str, Any]:
        """
        Retrieves decision makers based on specified filters and parameters.

        Args:
            filters: Filter criteria for decision makers.
            offset: Starting position for retrieving results.
            count: Number of results to retrieve.
            sorts: Sorting order of the results.
            decision_maker_titles: Titles to filter decision makers by.

        Returns:
            dict[str, Any]: Decision maker data from the API response.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the API request fails.

        Tags:
            retrieve, search, data, decision-makers, api, filtering
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts,
                      decision_maker_titles=decision_maker_titles)
        return self._post_json("/data_lab/decision_makers/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
            'decision_maker_titles': decision_maker_titles,
        })

    def get_web_traffic(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves web traffic data based on provided filters, pagination, and sorting criteria.

        Args:
            filters: Filtering options for the web traffic query.
            offset: Pagination offset (starting index).
            count: Number of records to retrieve.
            sorts: Sorting criteria for the results.

        Returns:
            dict[str, Any]: Web traffic data matching the specified criteria.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, web-traffic, data, fetch, api
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/webtraffic/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_investor_portfolio(self, investor_name) -> dict[str, Any]:
        """
        Retrieves the investment portfolio information for a specified investor.

        Args:
            investor_name: Name of the investor whose portfolio is fetched.

        Returns:
            dict[str, Any]: Portfolio data for the given investor.

        Raises:
            ValueError: If 'investor_name' is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, portfolio, investor, api
        """
        self._require(investor_name=investor_name)
        return self._get_json("/data_lab/investor_portfolio", {'investor_name': investor_name})

    def get_job_listings(self, tickers, dataset, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves job listings data based on specified parameters.

        Args:
            tickers: Ticker symbols to filter job listings.
            dataset: Specific dataset to query.
            filters: Criteria to filter the results.
            offset: Starting position for pagination.
            count: Number of results per page.
            sorts: Sorting criteria for the results.

        Returns:
            dict[str, Any]: Job listings data and associated metadata.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, retrieve, job-listings, data, pagination, filtering, important
        """
        self._require(tickers=tickers, dataset=dataset, filters=filters,
                      offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/job_listings/Table/", {
            'tickers': tickers,
            'dataset': dataset,
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def search_persons(self, job_id) -> dict[str, Any]:
        """
        Submits a search request for persons associated with a given asynchronous job.

        Args:
            job_id: Identifier of the asynchronous job.

        Returns:
            dict[str, Any]: Search results for persons related to the job.

        Raises:
            ValueError: If job_id is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            search, person, async-job, status, api, important
        """
        self._require(job_id=job_id)
        return self._post_json("/screener/person/search", {'job_id': job_id})

    def search_companies(self, filters, page) -> dict[str, Any]:
        """
        Searches for companies using specified filters and pagination parameters.

        Args:
            filters: Filter criteria for the company search.
            page: Pagination parameters (e.g., page number, size).

        Returns:
            dict[str, Any]: Search results with company data and pagination info.

        Raises:
            ValueError: If 'filters' or 'page' is None.
            requests.HTTPError: If the API request fails.

        Tags:
            search, company, filtering, pagination, api
        """
        self._require(filters=filters, page=page)
        return self._post_json("/screener/company/search", {
            'filters': filters,
            'page': page,
        })

    def enrich_person(self, linkedin_profile_url, enrich_realtime, fields) -> dict[str, Any]:
        """
        Retrieves enriched person data for a LinkedIn profile.

        Args:
            linkedin_profile_url: LinkedIn profile URL of the person to enrich.
            enrich_realtime: Whether to enrich in real time (True) or allow cached data.
            fields: Comma-separated list of fields to include.

        Returns:
            dict[str, Any]: Enriched person data from the enrichment API.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the API response is not successful.

        Tags:
            enrich, person, lookup, api
        """
        self._require(linkedin_profile_url=linkedin_profile_url,
                      enrich_realtime=enrich_realtime, fields=fields)
        return self._get_json("/screener/person/enrich", {
            'linkedin_profile_url': linkedin_profile_url,
            'enrich_realtime': enrich_realtime,
            'fields': fields,
        })

    def enrich_company(self, company_domain, enrich_realtime) -> dict[str, Any]:
        """
        Retrieves enriched company data by company domain.

        Args:
            company_domain: The company's domain name to enrich.
            enrich_realtime: Whether to perform real-time enrichment.

        Returns:
            dict[str, Any]: Enriched company information.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            enrich, company, ai
        """
        self._require(company_domain=company_domain, enrich_realtime=enrich_realtime)
        return self._get_json("/screener/company", {
            'company_domain': company_domain,
            'enrich_realtime': enrich_realtime,
        })

    def get_linked_in_posts(self, company_linkedin_url) -> dict[str, Any]:
        """
        Fetches LinkedIn posts for a specified company using its LinkedIn URL.

        Args:
            company_linkedin_url: Public LinkedIn URL of the target company.

        Returns:
            dict[str, Any]: LinkedIn posts and associated metadata.

        Raises:
            ValueError: If 'company_linkedin_url' is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            fetch, linkedin, posts, company, api
        """
        self._require(company_linkedin_url=company_linkedin_url)
        return self._get_json("/screener/linkedin_posts",
                              {'company_linkedin_url': company_linkedin_url})

    def search_linked_in_posts(self, keyword, page, sort_by, date_posted) -> dict[str, Any]:
        """
        Searches LinkedIn posts using the provided keyword and filters.

        Args:
            keyword: Keyword or phrase to search for.
            page: Results page number to retrieve.
            sort_by: Sorting method (e.g., 'relevance', 'date').
            date_posted: Date-range filter (e.g., 'past_24_hours', 'past_week').

        Returns:
            dict[str, Any]: LinkedIn post search results.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            search, linkedin, posts, api
        """
        self._require(keyword=keyword, page=page, sort_by=sort_by, date_posted=date_posted)
        return self._post_json("/screener/linkedin_posts/keyword_search/", {
            'keyword': keyword,
            'page': page,
            'sort_by': sort_by,
            'date_posted': date_posted,
        })

    def list_tools(self):
        """Return the list of callable tools exposed by this application."""
        return [
            self.screen_companies,
            self.get_headcount_timeseries,
            self.get_headcount_by_facet_timeseries,
            self.get_funding_milestone_timeseries,
            self.get_decision_makers,
            self.get_web_traffic,
            self.get_investor_portfolio,
            self.get_job_listings,
            self.search_persons,
            self.search_companies,
            self.enrich_person,
            self.enrich_company,
            self.get_linked_in_posts,
            self.search_linked_in_posts
        ]
@@ -1,37 +0,0 @@
1
-
2
- # E2B MCP Server
3
-
4
- An MCP Server for the E2B API.
5
-
6
- ## Supported Integrations
7
-
8
- - AgentR
9
- - API Key (Coming Soon)
10
- - OAuth (Coming Soon)
11
-
12
- ## Tools
13
-
14
- This is automatically generated from OpenAPI schema for the E2B API.
15
-
16
- ## Supported Integrations
17
-
18
- This tool can be integrated with any service that supports HTTP requests.
19
-
20
- ## Tool List
21
-
22
- | Tool | Description |
23
- |------|-------------|
24
- | execute_python_code | Executes Python code within a secure E2B cloud sandbox. |
25
-
26
-
27
-
28
- ## Usage
29
-
30
- - Login to AgentR
31
- - Follow the quickstart guide to setup MCP Server for your client
32
- - Visit Apps Store and enable the E2B app
33
- - Restart the MCP Server
34
-
35
- ### Local Development
36
-
37
- - Follow the README to test with the local MCP Server