universal-mcp-applications 0.1.30rc1__py3-none-any.whl → 0.1.36rc1__py3-none-any.whl
This diff shows the content of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of universal-mcp-applications might be problematic.
- universal_mcp/applications/ahrefs/app.py +52 -198
- universal_mcp/applications/airtable/app.py +23 -122
- universal_mcp/applications/apollo/app.py +111 -464
- universal_mcp/applications/asana/app.py +417 -1567
- universal_mcp/applications/aws_s3/app.py +33 -100
- universal_mcp/applications/bill/app.py +546 -1957
- universal_mcp/applications/box/app.py +1068 -3981
- universal_mcp/applications/braze/app.py +364 -1430
- universal_mcp/applications/browser_use/app.py +2 -8
- universal_mcp/applications/cal_com_v2/app.py +207 -625
- universal_mcp/applications/calendly/app.py +61 -200
- universal_mcp/applications/canva/app.py +45 -110
- universal_mcp/applications/clickup/app.py +207 -674
- universal_mcp/applications/coda/app.py +146 -426
- universal_mcp/applications/confluence/app.py +310 -1098
- universal_mcp/applications/contentful/app.py +36 -151
- universal_mcp/applications/crustdata/app.py +28 -107
- universal_mcp/applications/dialpad/app.py +283 -756
- universal_mcp/applications/digitalocean/app.py +1766 -5777
- universal_mcp/applications/domain_checker/app.py +3 -54
- universal_mcp/applications/e2b/app.py +14 -64
- universal_mcp/applications/elevenlabs/app.py +9 -47
- universal_mcp/applications/exa/app.py +6 -17
- universal_mcp/applications/falai/app.py +23 -100
- universal_mcp/applications/figma/app.py +53 -137
- universal_mcp/applications/file_system/app.py +2 -13
- universal_mcp/applications/firecrawl/app.py +51 -152
- universal_mcp/applications/fireflies/app.py +59 -281
- universal_mcp/applications/fpl/app.py +91 -528
- universal_mcp/applications/fpl/utils/fixtures.py +15 -49
- universal_mcp/applications/fpl/utils/helper.py +25 -89
- universal_mcp/applications/fpl/utils/league_utils.py +20 -64
- universal_mcp/applications/ghost_content/app.py +52 -161
- universal_mcp/applications/github/app.py +19 -56
- universal_mcp/applications/gong/app.py +88 -248
- universal_mcp/applications/google_calendar/app.py +16 -68
- universal_mcp/applications/google_docs/app.py +88 -188
- universal_mcp/applications/google_drive/app.py +140 -462
- universal_mcp/applications/google_gemini/app.py +12 -64
- universal_mcp/applications/google_mail/app.py +28 -157
- universal_mcp/applications/google_searchconsole/app.py +15 -48
- universal_mcp/applications/google_sheet/app.py +101 -578
- universal_mcp/applications/google_sheet/helper.py +10 -37
- universal_mcp/applications/hashnode/app.py +57 -269
- universal_mcp/applications/heygen/app.py +44 -122
- universal_mcp/applications/http_tools/app.py +10 -32
- universal_mcp/applications/hubspot/api_segments/crm_api.py +460 -1573
- universal_mcp/applications/hubspot/api_segments/marketing_api.py +74 -262
- universal_mcp/applications/hubspot/app.py +23 -87
- universal_mcp/applications/jira/app.py +2071 -7986
- universal_mcp/applications/klaviyo/app.py +494 -1376
- universal_mcp/applications/linkedin/README.md +23 -4
- universal_mcp/applications/linkedin/app.py +392 -212
- universal_mcp/applications/mailchimp/app.py +450 -1605
- universal_mcp/applications/markitdown/app.py +8 -20
- universal_mcp/applications/miro/app.py +217 -699
- universal_mcp/applications/ms_teams/app.py +64 -186
- universal_mcp/applications/neon/app.py +86 -192
- universal_mcp/applications/notion/app.py +21 -36
- universal_mcp/applications/onedrive/app.py +14 -36
- universal_mcp/applications/openai/app.py +42 -165
- universal_mcp/applications/outlook/app.py +16 -76
- universal_mcp/applications/perplexity/app.py +4 -19
- universal_mcp/applications/pipedrive/app.py +832 -3142
- universal_mcp/applications/posthog/app.py +163 -432
- universal_mcp/applications/reddit/app.py +40 -139
- universal_mcp/applications/resend/app.py +41 -107
- universal_mcp/applications/retell/app.py +14 -41
- universal_mcp/applications/rocketlane/app.py +221 -934
- universal_mcp/applications/scraper/README.md +7 -4
- universal_mcp/applications/scraper/app.py +280 -93
- universal_mcp/applications/semanticscholar/app.py +22 -64
- universal_mcp/applications/semrush/app.py +43 -77
- universal_mcp/applications/sendgrid/app.py +512 -1262
- universal_mcp/applications/sentry/app.py +271 -906
- universal_mcp/applications/serpapi/app.py +40 -143
- universal_mcp/applications/sharepoint/app.py +15 -37
- universal_mcp/applications/shopify/app.py +1551 -4287
- universal_mcp/applications/shortcut/app.py +155 -417
- universal_mcp/applications/slack/app.py +50 -101
- universal_mcp/applications/spotify/app.py +126 -325
- universal_mcp/applications/supabase/app.py +104 -213
- universal_mcp/applications/tavily/app.py +1 -1
- universal_mcp/applications/trello/app.py +693 -2656
- universal_mcp/applications/twilio/app.py +14 -50
- universal_mcp/applications/twitter/api_segments/compliance_api.py +4 -14
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +6 -18
- universal_mcp/applications/twitter/api_segments/likes_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/lists_api.py +5 -15
- universal_mcp/applications/twitter/api_segments/trends_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/tweets_api.py +9 -31
- universal_mcp/applications/twitter/api_segments/usage_api.py +1 -5
- universal_mcp/applications/twitter/api_segments/users_api.py +14 -42
- universal_mcp/applications/whatsapp/app.py +35 -186
- universal_mcp/applications/whatsapp/audio.py +2 -6
- universal_mcp/applications/whatsapp/whatsapp.py +17 -51
- universal_mcp/applications/whatsapp_business/app.py +70 -283
- universal_mcp/applications/wrike/app.py +45 -118
- universal_mcp/applications/yahoo_finance/app.py +19 -65
- universal_mcp/applications/youtube/app.py +75 -261
- universal_mcp/applications/zenquotes/app.py +2 -2
- {universal_mcp_applications-0.1.30rc1.dist-info → universal_mcp_applications-0.1.36rc1.dist-info}/METADATA +2 -2
- {universal_mcp_applications-0.1.30rc1.dist-info → universal_mcp_applications-0.1.36rc1.dist-info}/RECORD +105 -106
- universal_mcp/applications/scraper/scraper_testers.py +0 -17
- {universal_mcp_applications-0.1.30rc1.dist-info → universal_mcp_applications-0.1.36rc1.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.30rc1.dist-info → universal_mcp_applications-0.1.36rc1.dist-info}/licenses/LICENSE +0 -0
universal_mcp/applications/scraper/README.md

@@ -9,7 +9,10 @@ This is automatically generated from OpenAPI schema for the ScraperApp API.
 
 | Tool | Description |
 |------|-------------|
-| `
-| `
-| `
-| `
+| `linkedin_list_profile_posts` | Fetches a paginated list of posts from a specific user or company profile using its provider ID. The `is_company` flag must specify the entity type. Unlike `linkedin_search_posts`, this function directly retrieves content from a known profile's feed instead of performing a global keyword search. |
+| `linkedin_retrieve_profile` | Fetches a specific LinkedIn user's profile using their public or internal ID. Unlike `linkedin_search_people`, which discovers multiple users via keywords, this function targets and retrieves detailed data for a single, known individual based on a direct identifier. |
+| `linkedin_list_post_comments` | Fetches a paginated list of comments for a specified LinkedIn post. It can retrieve either top-level comments or threaded replies if an optional `comment_id` is provided. This is a read-only operation, distinct from functions that search for posts or list user-specific content. |
+| `linkedin_search_people` | Performs a paginated search for people on LinkedIn, distinct from searches for companies or jobs. It filters results using keywords, location, industry, and company, internally converting filter names like 'United States' into their required API IDs before making the request. |
+| `linkedin_search_companies` | Executes a paginated LinkedIn search for companies, filtering by optional keywords, location, and industry. Unlike `linkedin_search_people` or `linkedin_search_jobs`, this function specifically sets the API search category to 'companies' to ensure that only company profiles are returned in the search results. |
+| `linkedin_search_posts` | Performs a keyword-based search for LinkedIn posts, allowing results to be filtered by date and sorted by relevance. This function specifically queries the 'posts' category, distinguishing it from other search methods in the class that target people, companies, or jobs, and returns relevant content. |
+| `linkedin_search_jobs` | Executes a LinkedIn search specifically for job listings using keywords and filters like region, industry, and minimum salary. Unlike other search functions targeting people or companies, this is specialized for job listings and converts friendly filter names (e.g., "United States") into their required API IDs. |
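For orientation, the sketch below shows one hypothetical way the new search tools could be called directly from Python once a Unipile-backed integration is configured. The stub integration class, account ID, DSN, and API key are placeholders, and the `ScraperApp(integration=...)` constructor shape is assumed from the diff rather than documented here.

```python
import asyncio
import os

from universal_mcp.applications.scraper.app import ScraperApp

# Placeholder Unipile settings; real values come from your Unipile workspace.
os.environ.setdefault("UNIPILE_DSN", "api1.unipile.com:13111")
os.environ.setdefault("UNIPILE_API_KEY", "replace-with-a-real-key")


class StubIntegration:
    """Hypothetical stand-in for universal_mcp.integrations.Integration."""

    def get_credentials(self) -> dict:
        # ScraperApp only reads "account_id" from the credentials dict.
        return {"account_id": "replace-with-unipile-account-id"}


async def main() -> None:
    app = ScraperApp(integration=StubIntegration())
    # Search people by keyword; "United States" is resolved to a LinkedIn
    # location ID internally before the request is sent.
    people = await app.linkedin_search_people(
        keywords="data engineer", location="United States", limit=10
    )
    print(people)


if __name__ == "__main__":
    asyncio.run(main())
```

In normal use these methods are not called directly; they are exposed as MCP tools through `list_tools()`, as shown at the end of the app.py diff below.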
universal_mcp/applications/scraper/app.py

@@ -1,15 +1,12 @@
+import os
 from dotenv import load_dotenv
 
 load_dotenv()
-
 from typing import Any, Literal
-
 from loguru import logger
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
 
-from universal_mcp.applications.linkedin import LinkedinApp
-
 
 class ScraperApp(APIApplication):
     """
@@ -22,143 +19,330 @@ class ScraperApp(APIApplication):
         if self.integration:
             credentials = self.integration.get_credentials()
             self.account_id = credentials.get("account_id")
-            self._unipile_app = LinkedinApp(integration=self.integration)
         else:
             logger.warning("Integration not found")
             self.account_id = None
-            self._unipile_app = None
 
-
+    @property
+    def base_url(self) -> str:
+        if not self._base_url:
+            unipile_dsn = os.getenv("UNIPILE_DSN")
+            if not unipile_dsn:
+                logger.error("UnipileApp: UNIPILE_DSN environment variable is not set.")
+                raise ValueError("UnipileApp: UNIPILE_DSN environment variable is required.")
+            self._base_url = f"https://{unipile_dsn}"
+        return self._base_url
+
+    @base_url.setter
+    def base_url(self, base_url: str) -> None:
+        self._base_url = base_url
+        logger.info(f"UnipileApp: Base URL set to {self._base_url}")
+
+    def _get_headers(self) -> dict[str, str]:
+        """
+        Get the headers for Unipile API requests.
+        Overrides the base class method to use X-Api-Key.
+        """
+        if not self.integration:
+            logger.warning("UnipileApp: No integration configured, returning empty headers.")
+            return {}
+        api_key = os.getenv("UNIPILE_API_KEY")
+        if not api_key:
+            logger.error("UnipileApp: API key not found in integration credentials for Unipile.")
+            return {"Content-Type": "application/json", "Cache-Control": "no-cache"}
+        logger.debug("UnipileApp: Using X-Api-Key for authentication.")
+        return {"x-api-key": api_key, "Content-Type": "application/json", "Cache-Control": "no-cache"}
+
+    def _get_search_parameter_id(self, param_type: str, keywords: str) -> str:
+        """
+        Retrieves the ID for a given LinkedIn search parameter by its name.
+
+        Args:
+            param_type: The type of parameter to search for (e.g., "LOCATION", "COMPANY").
+            keywords: The name of the parameter to find (e.g., "United States").
+
+        Returns:
+            The corresponding ID for the search parameter.
+
+        Raises:
+            ValueError: If no exact match for the keywords is found.
+            httpx.HTTPError: If the API request fails.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search/parameters"
+        params = {"account_id": self.account_id, "keywords": keywords, "type": param_type}
+        response = self._get(url, params=params)
+        results = self._handle_response(response)
+        items = results.get("items", [])
+        if items:
+            return items[0]["id"]
+        raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')
+
+    async def linkedin_list_profile_posts(
+        self, identifier: str, cursor: str | None = None, limit: int | None = None, is_company: bool | None = None
+    ) -> dict[str, Any]:
+        """
+        Fetches a paginated list of posts from a specific user or company profile using its provider ID. The `is_company` flag must specify the entity type. Unlike `linkedin_search_posts`, this function directly retrieves content from a known profile's feed instead of performing a global keyword search.
+
+        Args:
+            identifier: The entity's provider internal ID (LinkedIn ID).
+            cursor: Pagination cursor.
+            limit: Number of items to return (1-100, as per Unipile example, though spec allows up to 250).
+            is_company: Boolean indicating if the identifier is for a company.
+
+        Returns:
+            A dictionary containing a list of post objects and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+
+        Tags:
+            linkedin, post, list, user_posts, company_posts, content, api, important
+        """
+        url = f"{self.base_url}/api/v1/users/{identifier}/posts"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit:
+            params["limit"] = limit
+        if is_company is not None:
+            params["is_company"] = is_company
+        response = self._get(url, params=params)
+        return response.json()
+
+    async def linkedin_retrieve_profile(self, identifier: str) -> dict[str, Any]:
+        """
+        Fetches a specific LinkedIn user's profile using their public or internal ID. Unlike `linkedin_search_people`, which discovers multiple users via keywords, this function targets and retrieves detailed data for a single, known individual based on a direct identifier.
+
+        Args:
+            identifier: Can be the provider's internal id OR the provider's public id of the requested user.For example, for https://www.linkedin.com/in/manojbajaj95/, the identifier is "manojbajaj95".
+
+        Returns:
+            A dictionary containing the user's profile details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+
+        Tags:
+            linkedin, user, profile, retrieve, get, api, important
+        """
+        url = f"{self.base_url}/api/v1/users/{identifier}"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        response = self._get(url, params=params)
+        return self._handle_response(response)
+
+    async def linkedin_list_post_comments(
+        self, post_id: str, comment_id: str | None = None, cursor: str | None = None, limit: int | None = None
+    ) -> dict[str, Any]:
+        """
+        Fetches a paginated list of comments for a specified LinkedIn post. It can retrieve either top-level comments or threaded replies if an optional `comment_id` is provided. This is a read-only operation, distinct from functions that search for posts or list user-specific content.
+
+        Args:
+            post_id: The social ID of the post.
+            comment_id: If provided, retrieves replies to this comment ID instead of top-level comments.
+            cursor: Pagination cursor.
+            limit: Number of comments to return. (OpenAPI spec shows type string, passed as string if provided).
+
+        Returns:
+            A dictionary containing a list of comment objects and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+
+        Tags:
+            linkedin, post, comment, list, content, api, important
+        """
+        url = f"{self.base_url}/api/v1/posts/{post_id}/comments"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = str(limit)
+        if comment_id:
+            params["comment_id"] = comment_id
+        response = self._get(url, params=params)
+        return response.json()
+
+    async def linkedin_search_people(
         self,
-        category: Literal["people", "companies", "posts", "jobs"],
         cursor: str | None = None,
         limit: int | None = None,
         keywords: str | None = None,
-
-
-
+        location: str | None = None,
+        industry: str | None = None,
+        company: str | None = None,
     ) -> dict[str, Any]:
         """
-        Performs a
-
+        Performs a paginated search for people on LinkedIn, distinct from searches for companies or jobs. It filters results using keywords, location, industry, and company, internally converting filter names like 'United States' into their required API IDs before making the request.
+
         Args:
-            category: Type of search to perform. Valid values are "people", "companies", "posts", or "jobs".
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
             keywords: Keywords to search for.
-
-
-
-
+            location: The geographical location to filter people by (e.g., "United States").
+            industry: The industry to filter people by.(e.g., "Software Development".)
+            company: The company to filter people by.(e.g., "Google".)
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
-
-
-
-
-
-
-
-
-            keywords=keywords
-
-
-
-
-
-
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+        payload: dict[str, Any] = {"api": "classic", "category": "people"}
+        if keywords:
+            payload["keywords"] = keywords
+        if location:
+            location_id = self._get_search_parameter_id("LOCATION", location)
+            payload["location"] = [location_id]
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+        if company:
+            company_id = self._get_search_parameter_id("COMPANY", company)
+            payload["company"] = [company_id]
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
+
+    async def linkedin_search_companies(
         self,
-        identifier: str,
         cursor: str | None = None,
         limit: int | None = None,
+        keywords: str | None = None,
+        location: str | None = None,
+        industry: str | None = None,
     ) -> dict[str, Any]:
         """
-
-
+        Executes a paginated LinkedIn search for companies, filtering by optional keywords, location, and industry. Unlike `linkedin_search_people` or `linkedin_search_jobs`, this function specifically sets the API search category to 'companies' to ensure that only company profiles are returned in the search results.
+
         Args:
-            identifier: The entity's provider internal ID (LinkedIn ID).starts with ACo for users, while for companies it's a series of numbers. You can get it in the results of linkedin_search.
             cursor: Pagination cursor for the next page of entries.
-            limit: Number of items to return (
-
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+            location: The geographical location to filter companies by (e.g., "United States").
+            industry: The industry to filter companies by.(e.g., "Software Development".)
+
         Returns:
-            A dictionary containing
-
+            A dictionary containing search results and pagination details.
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
-        Tags:
-            linkedin, post, list, user_posts, company_posts, content, api, important
         """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+        payload: dict[str, Any] = {"api": "classic", "category": "companies"}
+        if keywords:
+            payload["keywords"] = keywords
+        if location:
+            location_id = self._get_search_parameter_id("LOCATION", location)
+            payload["location"] = [location_id]
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
 
-
-            identifier=identifier,
-            account_id=self.account_id,
-            cursor=cursor,
-            limit=limit,
-        )
-
-    def linkedin_retrieve_profile(
+    async def linkedin_search_posts(
         self,
-
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        date_posted: Literal["past_day", "past_week", "past_month"] | None = None,
+        sort_by: Literal["relevance", "date"] = "relevance",
     ) -> dict[str, Any]:
         """
-
-
+        Performs a keyword-based search for LinkedIn posts, allowing results to be filtered by date and sorted by relevance. This function specifically queries the 'posts' category, distinguishing it from other search methods in the class that target people, companies, or jobs, and returns relevant content.
+
         Args:
-
-
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+            date_posted: Filter by when the post was posted.
+            sort_by: How to sort the results.
+
         Returns:
-            A dictionary containing
-
+            A dictionary containing search results and pagination details.
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
-        Tags:
-            linkedin, user, profile, retrieve, get, api, important
         """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+        payload: dict[str, Any] = {"api": "classic", "category": "posts"}
+        if keywords:
+            payload["keywords"] = keywords
+        if date_posted:
+            payload["date_posted"] = date_posted
+        if sort_by:
+            payload["sort_by"] = sort_by
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
 
-
-            identifier=identifier,
-            account_id=self.account_id,
-        )
-
-    def linkedin_list_post_comments(
+    async def linkedin_search_jobs(
         self,
-        post_id: str,
-        comment_id: str | None = None,
         cursor: str | None = None,
         limit: int | None = None,
+        keywords: str | None = None,
+        region: str | None = None,
+        sort_by: Literal["relevance", "date"] = "relevance",
+        minimum_salary_value: int = 40,
+        industry: str | None = None,
     ) -> dict[str, Any]:
         """
-
-
+        Executes a LinkedIn search specifically for job listings using keywords and filters like region, industry, and minimum salary. Unlike other search functions targeting people or companies, this is specialized for job listings and converts friendly filter names (e.g., "United States") into their required API IDs.
+
         Args:
-
-
-
-
-
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+            region: The geographical region to filter jobs by (e.g., "United States").
+            sort_by: How to sort the results.(e.g., "relevance" or "date".)
+            minimum_salary_value: The minimum salary to filter for.
+            industry: The industry to filter jobs by. (e.g., "Software Development".)
+
         Returns:
-            A dictionary containing
-
+            A dictionary containing search results and pagination details.
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
-        Tags:
-            linkedin, post, comment, list, content, api, important
+            ValueError: If the specified location is not found.
         """
-
-
-
-
-
-
-
-
+        url = f"{self.base_url}/api/v1/linkedin/search"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+        payload: dict[str, Any] = {
+            "api": "classic",
+            "category": "jobs",
+            "minimum_salary": {"currency": "USD", "value": minimum_salary_value},
+        }
+        if keywords:
+            payload["keywords"] = keywords
+        if sort_by:
+            payload["sort_by"] = sort_by
+        if region:
+            location_id = self._get_search_parameter_id("LOCATION", region)
+            payload["region"] = location_id
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
 
     def list_tools(self):
         """
@@ -168,8 +352,11 @@ class ScraperApp(APIApplication):
         A list of functions that can be used as tools.
         """
         return [
-            self.linkedin_search,
            self.linkedin_list_profile_posts,
            self.linkedin_retrieve_profile,
            self.linkedin_list_post_comments,
+            self.linkedin_search_people,
+            self.linkedin_search_companies,
+            self.linkedin_search_posts,
+            self.linkedin_search_jobs,
         ]
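For reference, the four new search methods added in this release all POST to the same `/api/v1/linkedin/search` endpoint and differ only in the payload they assemble. The sketch below approximates what a people search with a location filter sends; the account ID and the location ID that `_get_search_parameter_id` would resolve are placeholders.

```python
# Approximate request assembled by linkedin_search_people(keywords="data engineer",
# location="United States"); every concrete value below is a placeholder.
url = "https://<UNIPILE_DSN>/api/v1/linkedin/search"
query_params = {"account_id": "<unipile-account-id>", "limit": 10}
payload = {
    "api": "classic",            # Unipile "classic" LinkedIn search
    "category": "people",        # companies/posts/jobs reuse the same endpoint
    "keywords": "data engineer",
    "location": ["<LOCATION id resolved from 'United States'>"],  # IDs, not display names
}
```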