universal-mcp 0.1.11rc3__py3-none-any.whl → 0.1.13rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. universal_mcp/applications/__init__.py +51 -7
  2. universal_mcp/applications/curstdata/README.md +50 -0
  3. universal_mcp/applications/curstdata/__init__.py +0 -0
  4. universal_mcp/applications/curstdata/app.py +551 -0
  5. universal_mcp/applications/falai/app.py +8 -8
  6. universal_mcp/applications/neon/README.md +99 -0
  7. universal_mcp/applications/neon/__init__.py +0 -0
  8. universal_mcp/applications/neon/app.py +1924 -0
  9. universal_mcp/applications/shortcut/README.md +153 -0
  10. universal_mcp/applications/shortcut/__init__.py +0 -0
  11. universal_mcp/applications/shortcut/app.py +3880 -0
  12. universal_mcp/cli.py +109 -17
  13. universal_mcp/integrations/__init__.py +1 -1
  14. universal_mcp/integrations/integration.py +79 -0
  15. universal_mcp/servers/README.md +79 -0
  16. universal_mcp/servers/server.py +17 -29
  17. universal_mcp/stores/README.md +74 -0
  18. universal_mcp/stores/store.py +0 -2
  19. universal_mcp/templates/README.md.j2 +93 -0
  20. universal_mcp/templates/api_client.py.j2 +27 -0
  21. universal_mcp/tools/README.md +86 -0
  22. universal_mcp/tools/tools.py +1 -1
  23. universal_mcp/utils/agentr.py +90 -0
  24. universal_mcp/utils/api_generator.py +166 -208
  25. universal_mcp/utils/openapi.py +221 -321
  26. universal_mcp/utils/singleton.py +23 -0
  27. {universal_mcp-0.1.11rc3.dist-info → universal_mcp-0.1.13rc1.dist-info}/METADATA +16 -41
  28. {universal_mcp-0.1.11rc3.dist-info → universal_mcp-0.1.13rc1.dist-info}/RECORD +30 -17
  29. universal_mcp/applications/hashnode/app.py +0 -81
  30. universal_mcp/applications/hashnode/prompt.md +0 -23
  31. universal_mcp/integrations/agentr.py +0 -112
  32. {universal_mcp-0.1.11rc3.dist-info → universal_mcp-0.1.13rc1.dist-info}/WHEEL +0 -0
  33. {universal_mcp-0.1.11rc3.dist-info → universal_mcp-0.1.13rc1.dist-info}/entry_points.txt +0 -0
@@ -7,21 +7,65 @@ from universal_mcp.applications.application import (
7
7
  BaseApplication,
8
8
  GraphQLApplication,
9
9
  )
10
+ import subprocess
10
11
 
11
12
  # Name are in the format of "app-name", eg, google-calendar
12
13
  # Folder name is "app_name", eg, google_calendar
13
14
  # Class name is NameApp, eg, GoogleCalendarApp
14
15
 
16
def _import_class(module_path: str, class_name: str):
    """Import and return `class_name` from `module_path`.

    Raises:
        ModuleNotFoundError: If the module cannot be imported, or if the
            module imports but does not define `class_name` (normalized to
            one exception type so callers handle a single failure mode).
    """
    try:
        module = importlib.import_module(module_path)
    except ModuleNotFoundError as e:
        logger.debug(f"Import failed for module '{module_path}': {e}")
        raise
    try:
        return getattr(module, class_name)
    except AttributeError as e:
        logger.error(f"Class '{class_name}' not found in module '{module_path}'")
        raise ModuleNotFoundError(
            f"Class '{class_name}' not found in module '{module_path}'"
        ) from e


def _install_package(slug_clean: str):
    """Install the app package for `slug_clean` from the universal-mcp GitHub org via `uv pip`.

    Raises:
        ModuleNotFoundError: If the installer is unavailable or the install
            fails, so callers can treat "cannot install" the same way as
            "cannot import".
    """
    repo_url = f"git+https://github.com/universal-mcp/{slug_clean}"
    cmd = ["uv", "pip", "install", repo_url]
    logger.info(f"Installing package '{slug_clean}' with command: {' '.join(cmd)}")
    try:
        subprocess.check_call(cmd)
    except FileNotFoundError as e:
        # The `uv` executable itself is missing from PATH; without this
        # handler the FileNotFoundError would escape the ModuleNotFoundError
        # contract that app_from_slug relies on.
        logger.error(f"'uv' executable not found while installing '{slug_clean}'")
        raise ModuleNotFoundError(
            f"Installation failed for package '{slug_clean}': 'uv' not found"
        ) from e
    except subprocess.CalledProcessError as e:
        logger.error(f"Installation failed for '{slug_clean}': {e}")
        raise ModuleNotFoundError(
            f"Installation failed for package '{slug_clean}'"
        ) from e
    logger.info(f"Package '{slug_clean}' installed successfully")


def app_from_slug(slug: str):
    """Dynamically resolve and return the application class for the given slug.

    Attempts installation from GitHub if the package is not found locally,
    then retries the import once.

    Raises:
        ModuleNotFoundError: If the module/class cannot be imported even
            after an installation attempt.
    """
    slug_clean = slug.strip().lower()
    class_name = "".join(part.capitalize() for part in slug_clean.split("-")) + "App"
    package_prefix = f"universal_mcp_{slug_clean.replace('-', '_')}"
    module_path = f"{package_prefix}.app"

    logger.info(f"Resolving app for slug '{slug}' → module '{module_path}', class '{class_name}'")
    try:
        return _import_class(module_path, class_name)
    except ModuleNotFoundError as orig_err:
        logger.warning(f"Module '{module_path}' not found locally: {orig_err}. Installing...")
        _install_package(slug_clean)
        # A package installed after interpreter start is invisible to the
        # import system until the finder caches are refreshed; without this
        # the retry below can fail even though the install succeeded.
        importlib.invalidate_caches()
        try:
            return _import_class(module_path, class_name)
        except ModuleNotFoundError as retry_err:
            logger.error(f"Still cannot import '{module_path}' after installation: {retry_err}")
            raise
25
69
 
26
70
  __all__ = [
27
71
  "app_from_slug",
@@ -0,0 +1,50 @@
1
+
2
+ # Crustdata MCP Server
3
+
4
+ An MCP Server for the Crustdata API.
5
+
6
+ ## Supported Integrations
7
+
8
+ - AgentR
9
+ - API Key (Coming Soon)
10
+ - OAuth (Coming Soon)
11
+
12
+ ## Tools
13
+
14
+ This is automatically generated from OpenAPI schema for the Crustdata API.
15
+
16
+ ## Compatibility
17
+
18
+ This tool can be integrated with any service that supports HTTP requests.
19
+
20
+ ## Tool List
21
+
22
+ | Tool | Description |
23
+ |------|-------------|
24
+ | screen_companies | Screens companies based on specified metrics, filters, sorting, and pagination parameters, and returns the result as a JSON-compatible dictionary. |
25
+ | get_headcount_timeseries | Retrieve headcount timeseries data from the data lab endpoint using the provided filters, pagination, and sorting options. |
26
+ | get_headcount_by_facet_timeseries | Retrieves headcount timeseries data aggregated by specified facets using provided filters and sorting options. |
27
+ | get_funding_milestone_timeseries | Retrieves a time series of funding milestone data based on specified filters, pagination, and sorting options. |
28
+ | get_decision_makers | Retrieves decision makers based on specified filters and parameters. |
29
+ | get_web_traffic | Retrieves web traffic data based on provided filters, pagination, and sorting criteria. |
30
+ | get_investor_portfolio | Retrieves the investment portfolio information for a specified investor. |
31
+ | get_job_listings | Retrieves job listings data based on specified parameters. |
32
+ | search_persons | Submits a search request for persons associated with a given asynchronous job and returns the search results as a dictionary. |
33
+ | search_companies | Searches for companies using specified filters and pagination parameters. |
34
+ | enrich_person | Retrieves enriched person data from LinkedIn profile using the provided profile URL, enrichment mode, and requested fields. |
35
+ | enrich_company | Retrieves enriched company data using the provided company domain and enrichment mode. |
36
+ | get_linked_in_posts | Fetches LinkedIn posts for a specified company using its LinkedIn URL. |
37
+ | search_linked_in_posts | Searches LinkedIn posts using the provided keyword and filters, returning the search results as a dictionary. |
38
+
39
+
40
+
41
+ ## Usage
42
+
43
+ - Login to AgentR
44
+ - Follow the quickstart guide to setup MCP Server for your client
45
+ - Visit Apps Store and enable the Crustdata app
46
+ - Restart the MCP Server
47
+
48
+ ### Local Development
49
+
50
+ - Follow the README to test with the local MCP Server
File without changes
@@ -0,0 +1,551 @@
1
from typing import Any

from universal_mcp.applications import APIApplication
from universal_mcp.integrations import Integration


class CrustdataApp(APIApplication):
    """MCP application for the Crustdata API (https://api.crustdata.com).

    Wraps Crustdata's screener and data-lab endpoints as callable tools.
    Every request authenticates with a token-style API key obtained from
    the configured integration.

    NOTE(review): the package folder is named 'curstdata' while this class
    is 'CrustdataApp' — confirm the slug→class naming convention used by
    the app loader resolves this class correctly.
    """

    def __init__(self, integration: Integration = None, **kwargs) -> None:
        super().__init__(name='crustdata', integration=integration, **kwargs)
        self.base_url = "https://api.crustdata.com"

    def _get_headers(self) -> dict[str, Any]:
        """Return request headers carrying the integration's API key.

        Raises:
            ValueError: If no integration is configured — an explicit error
                instead of an AttributeError in the middle of a request.
        """
        if self.integration is None:
            raise ValueError("Integration is not configured for CrustdataApp")
        api_key = self.integration.get_credentials().get("api_key")
        return {
            "Authorization": f"Token {api_key}",
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

    def _require(self, **params: Any) -> None:
        """Raise ValueError naming the first keyword argument whose value is None."""
        for name, value in params.items():
            if value is None:
                raise ValueError(f"Missing required parameter '{name}'")

    def _post_json(self, path: str, body: dict[str, Any]) -> dict[str, Any]:
        """POST `body` (with None values stripped) to `path`; return parsed JSON.

        Raises:
            requests.HTTPError: If the response carries an error status code.
        """
        payload = {k: v for k, v in body.items() if v is not None}
        # NOTE(review): assumes APIApplication._post sends `data` as a JSON
        # body (the headers declare application/json) — confirm in the base
        # class; if it form-encodes, this should pass json= instead.
        response = self._post(f"{self.base_url}{path}", data=payload, params={})
        response.raise_for_status()
        return response.json()

    def _get_json(self, path: str, params: dict[str, Any]) -> dict[str, Any]:
        """GET `path` with None-valued query params stripped; return parsed JSON.

        Raises:
            requests.HTTPError: If the response carries an error status code.
        """
        query = {k: v for k, v in params.items() if v is not None}
        response = self._get(f"{self.base_url}{path}", params=query)
        response.raise_for_status()
        return response.json()

    def screen_companies(self, metrics, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Screens companies based on specified metrics, filters, sorting, and pagination parameters, and returns the result as a JSON-compatible dictionary.

        Args:
            metrics: Metrics to screen companies by.
            filters: Filter criteria to apply to the screening.
            offset: Starting index for paginated results.
            count: Maximum number of results to return.
            sorts: Sorting criteria for the returned companies.

        Returns:
            Dictionary with the screened companies and associated data.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            screen, companies, api, filter, pagination
        """
        self._require(metrics=metrics, filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/screener/screen/", {
            'metrics': metrics,
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_headcount_timeseries(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieve headcount timeseries data from the data lab endpoint using the provided filters, pagination, and sorting options.

        Args:
            filters: Filter criteria for the headcount timeseries query.
            offset: Starting index for pagination.
            count: Number of records to retrieve.
            sorts: Sorting options for the returned data.

        Returns:
            Dictionary with the headcount timeseries data and metadata.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, fetch, headcount, timeseries, data, api, management
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/headcount_timeseries/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_headcount_by_facet_timeseries(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves headcount timeseries data aggregated by specified facets using provided filters and sorting options.

        Args:
            filters: Filter criteria for the query.
            offset: Number of records to skip (pagination).
            count: Maximum number of records to return.
            sorts: Sorting instructions for the returned data.

        Returns:
            Dictionary with headcount timeseries data aggregated by facets.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, timeseries, headcount, data-retrieval, facets, management
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/headcount_by_facet_timeseries/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_funding_milestone_timeseries(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves a time series of funding milestone data based on specified filters, pagination, and sorting options.

        Args:
            filters: Criteria to filter the funding milestones.
            offset: Starting index for pagination.
            count: Maximum number of records to return.
            sorts: Sorting rules for the results.

        Returns:
            Dictionary with the funding milestone time series data.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, fetch, timeseries, funding, milestones, data-lab, api
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/funding_milestone_timeseries/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_decision_makers(self, filters, offset, count, sorts, decision_maker_titles) -> dict[str, Any]:
        """
        Retrieves decision makers based on specified filters and parameters.

        Args:
            filters: Filter criteria for decision makers.
            offset: Starting position for results.
            count: Number of results to retrieve.
            sorts: Sorting order of the results.
            decision_maker_titles: Titles to filter decision makers by.

        Returns:
            Dictionary containing decision maker data.

        Raises:
            ValueError: If any required parameter is None.
            HTTPError: If the API request fails.

        Tags:
            retrieve, search, data, decision-makers, api, filtering
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts,
                      decision_maker_titles=decision_maker_titles)
        return self._post_json("/data_lab/decision_makers/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
            'decision_maker_titles': decision_maker_titles,
        })

    def get_web_traffic(self, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves web traffic data based on provided filters, pagination, and sorting criteria.

        Args:
            filters: Filtering options for the web traffic query.
            offset: Pagination offset.
            count: Pagination count.
            sorts: Sorting criteria for the results.

        Returns:
            Dictionary with the web traffic data matching the criteria.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            get, web-traffic, data, fetch, api
        """
        self._require(filters=filters, offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/webtraffic/", {
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def get_investor_portfolio(self, investor_name) -> dict[str, Any]:
        """
        Retrieves the investment portfolio information for a specified investor.

        Args:
            investor_name: Name of the investor whose portfolio is fetched.

        Returns:
            Dictionary containing portfolio data for the given investor.

        Raises:
            ValueError: If 'investor_name' is None.
            HTTPError: If the HTTP request fails.

        Tags:
            get, portfolio, investor, api
        """
        self._require(investor_name=investor_name)
        return self._get_json("/data_lab/investor_portfolio", {
            'investor_name': investor_name,
        })

    def get_job_listings(self, tickers, dataset, filters, offset, count, sorts) -> dict[str, Any]:
        """
        Retrieves job listings data based on specified parameters.

        Args:
            tickers: Ticker symbols to filter job listings.
            dataset: Specific dataset to query.
            filters: Criteria to filter the results.
            offset: Starting position for pagination.
            count: Number of results per page.
            sorts: Sorting criteria.

        Returns:
            Dictionary with job listings data and metadata.

        Raises:
            ValueError: If any required parameter is None.
            HTTPError: If the HTTP request fails.

        Tags:
            get, retrieve, job-listings, data, pagination, filtering, important
        """
        self._require(tickers=tickers, dataset=dataset, filters=filters,
                      offset=offset, count=count, sorts=sorts)
        return self._post_json("/data_lab/job_listings/Table/", {
            'tickers': tickers,
            'dataset': dataset,
            'filters': filters,
            'offset': offset,
            'count': count,
            'sorts': sorts,
        })

    def search_persons(self, job_id) -> dict[str, Any]:
        """
        Submits a search request for persons associated with a given asynchronous job and returns the search results as a dictionary.

        Args:
            job_id: Identifier of the asynchronous job.

        Returns:
            Dictionary with the person search results.

        Raises:
            ValueError: If job_id is None.
            HTTPError: If the HTTP request fails.

        Tags:
            search, person, async-job, status, api, important
        """
        self._require(job_id=job_id)
        return self._post_json("/screener/person/search", {
            'job_id': job_id,
        })

    def search_companies(self, filters, page) -> dict[str, Any]:
        """
        Searches for companies using specified filters and pagination parameters.

        Args:
            filters: Filter criteria for the company search.
            page: Pagination parameters.

        Returns:
            Dictionary with company data and pagination information.

        Raises:
            ValueError: If 'filters' or 'page' is None.
            HTTPError: If the API request fails.

        Tags:
            search, company, filtering, pagination, api
        """
        self._require(filters=filters, page=page)
        return self._post_json("/screener/company/search", {
            'filters': filters,
            'page': page,
        })

    def enrich_person(self, linkedin_profile_url, enrich_realtime, fields) -> dict[str, Any]:
        """
        Retrieves enriched person data from LinkedIn profile using the provided profile URL, enrichment mode, and requested fields.

        Args:
            linkedin_profile_url: LinkedIn profile URL of the person.
            enrich_realtime: Whether to enrich in real time vs cached.
            fields: Comma-separated list of fields to include.

        Returns:
            Dictionary with the enriched person data.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            enrich, person, lookup, api
        """
        self._require(linkedin_profile_url=linkedin_profile_url,
                      enrich_realtime=enrich_realtime, fields=fields)
        return self._get_json("/screener/person/enrich", {
            'linkedin_profile_url': linkedin_profile_url,
            'enrich_realtime': enrich_realtime,
            'fields': fields,
        })

    def enrich_company(self, company_domain, enrich_realtime) -> dict[str, Any]:
        """
        Retrieves enriched company data using the provided company domain and enrichment mode.

        Args:
            company_domain: The company's domain name to enrich.
            enrich_realtime: Whether to perform real-time enrichment.

        Returns:
            Dictionary with enriched company information.

        Raises:
            ValueError: If any required parameter is None.
            HTTPError: If the HTTP request fails.

        Tags:
            enrich, company, ai
        """
        self._require(company_domain=company_domain, enrich_realtime=enrich_realtime)
        return self._get_json("/screener/company", {
            'company_domain': company_domain,
            'enrich_realtime': enrich_realtime,
        })

    def get_linked_in_posts(self, company_linkedin_url) -> dict[str, Any]:
        """
        Fetches LinkedIn posts for a specified company using its LinkedIn URL.

        Args:
            company_linkedin_url: Public LinkedIn URL of the target company.

        Returns:
            Dictionary with LinkedIn posts and associated metadata.

        Raises:
            ValueError: If 'company_linkedin_url' is None.
            HTTPError: If the HTTP request fails.

        Tags:
            fetch, linkedin, posts, company, api
        """
        self._require(company_linkedin_url=company_linkedin_url)
        return self._get_json("/screener/linkedin_posts", {
            'company_linkedin_url': company_linkedin_url,
        })

    def search_linked_in_posts(self, keyword, page, sort_by, date_posted) -> dict[str, Any]:
        """
        Searches LinkedIn posts using the provided keyword and filters, returning the search results as a dictionary.

        Args:
            keyword: Keyword or phrase to search for.
            page: Results page number to retrieve.
            sort_by: Sorting method (e.g. 'relevance', 'date').
            date_posted: Date-range filter (e.g. 'past_24_hours', 'past_week').

        Returns:
            Dictionary with the LinkedIn post search results.

        Raises:
            ValueError: If any required parameter is None.
            requests.HTTPError: If the HTTP request fails.

        Tags:
            search, linkedin, posts, api
        """
        self._require(keyword=keyword, page=page, sort_by=sort_by, date_posted=date_posted)
        return self._post_json("/screener/linkedin_posts/keyword_search/", {
            'keyword': keyword,
            'page': page,
            'sort_by': sort_by,
            'date_posted': date_posted,
        })

    def list_tools(self):
        """Return the list of callable tools exposed by this application."""
        return [
            self.screen_companies,
            self.get_headcount_timeseries,
            self.get_headcount_by_facet_timeseries,
            self.get_funding_milestone_timeseries,
            self.get_decision_makers,
            self.get_web_traffic,
            self.get_investor_portfolio,
            self.get_job_listings,
            self.search_persons,
            self.search_companies,
            self.enrich_person,
            self.enrich_company,
            self.get_linked_in_posts,
            self.search_linked_in_posts
        ]