universal-mcp-applications 0.1.33__py3-none-any.whl → 0.1.39rc8__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only.
Potentially problematic release: this version of universal-mcp-applications has been flagged as potentially problematic.
- universal_mcp/applications/ahrefs/app.py +92 -238
- universal_mcp/applications/airtable/app.py +23 -122
- universal_mcp/applications/apollo/app.py +122 -475
- universal_mcp/applications/asana/app.py +605 -1755
- universal_mcp/applications/aws_s3/app.py +36 -103
- universal_mcp/applications/bill/app.py +644 -2055
- universal_mcp/applications/box/app.py +1246 -4159
- universal_mcp/applications/braze/app.py +410 -1476
- universal_mcp/applications/browser_use/README.md +15 -1
- universal_mcp/applications/browser_use/__init__.py +1 -0
- universal_mcp/applications/browser_use/app.py +86 -24
- universal_mcp/applications/cal_com_v2/app.py +207 -625
- universal_mcp/applications/calendly/app.py +103 -242
- universal_mcp/applications/canva/app.py +75 -140
- universal_mcp/applications/clickup/app.py +331 -798
- universal_mcp/applications/coda/app.py +240 -520
- universal_mcp/applications/confluence/app.py +497 -1285
- universal_mcp/applications/contentful/app.py +36 -151
- universal_mcp/applications/crustdata/app.py +42 -121
- universal_mcp/applications/dialpad/app.py +451 -924
- universal_mcp/applications/digitalocean/app.py +2071 -6082
- universal_mcp/applications/domain_checker/app.py +3 -54
- universal_mcp/applications/e2b/app.py +14 -64
- universal_mcp/applications/elevenlabs/app.py +9 -47
- universal_mcp/applications/exa/README.md +8 -4
- universal_mcp/applications/exa/app.py +408 -186
- universal_mcp/applications/falai/app.py +24 -101
- universal_mcp/applications/figma/app.py +91 -175
- universal_mcp/applications/file_system/app.py +2 -13
- universal_mcp/applications/firecrawl/app.py +186 -163
- universal_mcp/applications/fireflies/app.py +59 -281
- universal_mcp/applications/fpl/app.py +92 -529
- universal_mcp/applications/fpl/utils/fixtures.py +15 -49
- universal_mcp/applications/fpl/utils/helper.py +25 -89
- universal_mcp/applications/fpl/utils/league_utils.py +20 -64
- universal_mcp/applications/ghost_content/app.py +66 -175
- universal_mcp/applications/github/app.py +28 -65
- universal_mcp/applications/gong/app.py +140 -300
- universal_mcp/applications/google_calendar/app.py +26 -78
- universal_mcp/applications/google_docs/app.py +98 -202
- universal_mcp/applications/google_drive/app.py +194 -793
- universal_mcp/applications/google_gemini/app.py +27 -62
- universal_mcp/applications/google_mail/README.md +1 -0
- universal_mcp/applications/google_mail/app.py +93 -214
- universal_mcp/applications/google_searchconsole/app.py +25 -58
- universal_mcp/applications/google_sheet/app.py +171 -624
- universal_mcp/applications/google_sheet/helper.py +26 -53
- universal_mcp/applications/hashnode/app.py +57 -269
- universal_mcp/applications/heygen/app.py +77 -155
- universal_mcp/applications/http_tools/app.py +10 -32
- universal_mcp/applications/hubspot/README.md +1 -1
- universal_mcp/applications/hubspot/app.py +7508 -99
- universal_mcp/applications/jira/app.py +2419 -8334
- universal_mcp/applications/klaviyo/app.py +737 -1619
- universal_mcp/applications/linkedin/README.md +5 -0
- universal_mcp/applications/linkedin/app.py +332 -227
- universal_mcp/applications/mailchimp/app.py +696 -1851
- universal_mcp/applications/markitdown/app.py +8 -20
- universal_mcp/applications/miro/app.py +333 -815
- universal_mcp/applications/ms_teams/app.py +85 -207
- universal_mcp/applications/neon/app.py +144 -250
- universal_mcp/applications/notion/app.py +36 -51
- universal_mcp/applications/onedrive/app.py +26 -48
- universal_mcp/applications/openai/app.py +42 -165
- universal_mcp/applications/outlook/README.md +22 -9
- universal_mcp/applications/outlook/app.py +403 -141
- universal_mcp/applications/perplexity/README.md +2 -1
- universal_mcp/applications/perplexity/app.py +162 -20
- universal_mcp/applications/pipedrive/app.py +1021 -3331
- universal_mcp/applications/posthog/app.py +272 -541
- universal_mcp/applications/reddit/app.py +61 -160
- universal_mcp/applications/resend/app.py +41 -107
- universal_mcp/applications/retell/app.py +23 -50
- universal_mcp/applications/rocketlane/app.py +250 -963
- universal_mcp/applications/scraper/app.py +67 -125
- universal_mcp/applications/semanticscholar/app.py +36 -78
- universal_mcp/applications/semrush/app.py +43 -77
- universal_mcp/applications/sendgrid/app.py +826 -1576
- universal_mcp/applications/sentry/app.py +444 -1079
- universal_mcp/applications/serpapi/app.py +40 -143
- universal_mcp/applications/sharepoint/app.py +27 -49
- universal_mcp/applications/shopify/app.py +1743 -4479
- universal_mcp/applications/shortcut/app.py +272 -534
- universal_mcp/applications/slack/app.py +41 -123
- universal_mcp/applications/spotify/app.py +206 -405
- universal_mcp/applications/supabase/app.py +174 -283
- universal_mcp/applications/tavily/app.py +2 -2
- universal_mcp/applications/trello/app.py +853 -2816
- universal_mcp/applications/twilio/app.py +14 -50
- universal_mcp/applications/twitter/api_segments/compliance_api.py +4 -14
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +6 -18
- universal_mcp/applications/twitter/api_segments/likes_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/lists_api.py +5 -15
- universal_mcp/applications/twitter/api_segments/trends_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/tweets_api.py +9 -31
- universal_mcp/applications/twitter/api_segments/usage_api.py +1 -5
- universal_mcp/applications/twitter/api_segments/users_api.py +14 -42
- universal_mcp/applications/whatsapp/app.py +35 -186
- universal_mcp/applications/whatsapp/audio.py +2 -6
- universal_mcp/applications/whatsapp/whatsapp.py +17 -51
- universal_mcp/applications/whatsapp_business/app.py +86 -299
- universal_mcp/applications/wrike/app.py +80 -153
- universal_mcp/applications/yahoo_finance/app.py +19 -65
- universal_mcp/applications/youtube/app.py +120 -306
- universal_mcp/applications/zenquotes/app.py +3 -3
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/METADATA +4 -2
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/RECORD +109 -113
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/WHEEL +1 -1
- universal_mcp/applications/hubspot/api_segments/__init__.py +0 -0
- universal_mcp/applications/hubspot/api_segments/api_segment_base.py +0 -54
- universal_mcp/applications/hubspot/api_segments/crm_api.py +0 -7337
- universal_mcp/applications/hubspot/api_segments/marketing_api.py +0 -1467
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/licenses/LICENSE +0 -0
universal_mcp/applications/scraper/app.py

@@ -2,13 +2,12 @@ import os
 from dotenv import load_dotenv
 
 load_dotenv()
-
 from typing import Any, Literal
-
 from loguru import logger
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
 
+
 class ScraperApp(APIApplication):
     """
     Application for interacting with LinkedIn API.
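Because the module calls load_dotenv() at import time, the UNIPILE_DSN and UNIPILE_API_KEY variables read later in this file can be supplied from a local .env file. A minimal sketch with placeholder values (hypothetical, not real credentials or endpoints):

    # Contents of a hypothetical .env file (placeholders only):
    #   UNIPILE_DSN=api1.unipile.com:13211
    #   UNIPILE_API_KEY=replace-with-your-key
    import os

    from dotenv import load_dotenv

    load_dotenv()  # populates os.environ from .env in the working directory
    print(os.getenv("UNIPILE_DSN"))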
@@ -29,12 +28,8 @@ class ScraperApp(APIApplication):
         if not self._base_url:
             unipile_dsn = os.getenv("UNIPILE_DSN")
             if not unipile_dsn:
-                logger.error(
-                    "UnipileApp: UNIPILE_DSN environment variable is not set."
-                )
-                raise ValueError(
-                    "UnipileApp: UNIPILE_DSN environment variable is required."
-                )
+                logger.error("UnipileApp: UNIPILE_DSN environment variable is not set.")
+                raise ValueError("UnipileApp: UNIPILE_DSN environment variable is required.")
             self._base_url = f"https://{unipile_dsn}"
         return self._base_url
 
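This hunk only reflows the error handling; the behavior is unchanged: the base URL is the DSN host prefixed with https://. A standalone sketch of that resolution, using an illustrative DSN value:

    import os

    def resolve_base_url() -> str:
        # Mirrors the property above: fail fast when the DSN is missing.
        unipile_dsn = os.getenv("UNIPILE_DSN")
        if not unipile_dsn:
            raise ValueError("UnipileApp: UNIPILE_DSN environment variable is required.")
        return f"https://{unipile_dsn}"

    os.environ.setdefault("UNIPILE_DSN", "api1.unipile.com:13211")  # illustrative value
    print(resolve_base_url())  # -> https://api1.unipile.com:13211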
@@ -49,29 +44,23 @@ class ScraperApp(APIApplication):
         Overrides the base class method to use X-Api-Key.
         """
         if not self.integration:
-            logger.warning(
-                "UnipileApp: No integration configured, returning empty headers."
-            )
+            logger.warning("UnipileApp: No integration configured, returning empty headers.")
             return {}
-
         api_key = os.getenv("UNIPILE_API_KEY")
         if not api_key:
-            logger.error(
-                "UnipileApp: API key not found in integration credentials for Unipile."
-            )
-            return {  # Or return minimal headers if some calls might not need auth (unlikely for Unipile)
-                "Content-Type": "application/json",
-                "Cache-Control": "no-cache",
-            }
-
+            logger.error("UnipileApp: API key not found in integration credentials for Unipile.")
+            return {"Content-Type": "application/json", "Cache-Control": "no-cache"}
         logger.debug("UnipileApp: Using X-Api-Key for authentication.")
-        return {
-            "x-api-key": api_key,
-            "Content-Type": "application/json",
-            "Cache-Control": "no-cache",  # Often good practice for APIs
-        }
+        return {"x-api-key": api_key, "Content-Type": "application/json", "Cache-Control": "no-cache"}
 
-    def _get_search_parameter_id(self, param_type: str, keywords: str) -> str:
+    async def _aget_headers(self) -> dict[str, str]:
+        """
+        Get the headers for Unipile API requests asynchronously.
+        Overrides the base class method to use X-Api-Key.
+        """
+        return self._get_headers()
+
+    async def _aget_search_parameter_id(self, param_type: str, keywords: str) -> str:
         """
         Retrieves the ID for a given LinkedIn search parameter by its name.
 
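The new _aget_headers coroutine performs no I/O of its own; it simply reuses the synchronous header builder, which is pure computation and therefore safe to call from async code. A minimal sketch of this delegation pattern, assuming only that the key lives in UNIPILE_API_KEY:

    import asyncio
    import os

    class HeaderedClient:
        def _get_headers(self) -> dict[str, str]:
            api_key = os.getenv("UNIPILE_API_KEY")
            if not api_key:
                return {"Content-Type": "application/json", "Cache-Control": "no-cache"}
            return {"x-api-key": api_key, "Content-Type": "application/json", "Cache-Control": "no-cache"}

        async def _aget_headers(self) -> dict[str, str]:
            # No awaitable work here, so the async variant can delegate directly.
            return self._get_headers()

    print(asyncio.run(HeaderedClient()._aget_headers()))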
@@ -87,45 +76,32 @@ class ScraperApp(APIApplication):
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search/parameters"
-        params = {
-            "account_id": self.account_id,
-            "keywords": keywords,
-            "type": param_type,
-        }
-
-        response = self._get(url, params=params)
+        params = {"account_id": self.account_id, "keywords": keywords, "type": param_type}
+        response = await self._aget(url, params=params)
         results = self._handle_response(response)
-
         items = results.get("items", [])
         if items:
-            # Return the ID of the first result, assuming it's the most relevant
             return items[0]["id"]
-
         raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')
 
-
-    def linkedin_list_profile_posts(
-        self,
-        identifier: str,  # User or Company provider internal ID
-        cursor: str | None = None,
-        limit: int | None = None,  # 1-100 (spec says max 250)
-        is_company: bool | None = None,
+    async def linkedin_list_profile_posts(
+        self, identifier: str, cursor: str | None = None, limit: int | None = None, is_company: bool | None = None
     ) -> dict[str, Any]:
         """
         Fetches a paginated list of posts from a specific user or company profile using its provider ID. The `is_company` flag must specify the entity type. Unlike `linkedin_search_posts`, this function directly retrieves content from a known profile's feed instead of performing a global keyword search.
-
+
         Args:
             identifier: The entity's provider internal ID (LinkedIn ID).
             cursor: Pagination cursor.
             limit: Number of items to return (1-100, as per Unipile example, though spec allows up to 250).
             is_company: Boolean indicating if the identifier is for a company.
-
+
         Returns:
             A dictionary containing a list of post objects and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, post, list, user_posts, company_posts, content, api, important
         """
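The parameter-resolution helper keeps its contract after the async migration: return the first matching item's id, or raise when nothing matched. A toy illustration of that extraction logic over a fabricated response payload:

    from typing import Any

    def first_matching_id(results: dict[str, Any], param_type: str, keywords: str) -> str:
        # Same selection rule as _aget_search_parameter_id above.
        items = results.get("items", [])
        if items:
            return items[0]["id"]
        raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')

    fake_response = {"items": [{"id": "103644278", "title": "United States"}]}  # fabricated data
    print(first_matching_id(fake_response, "LOCATION", "United States"))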
@@ -137,53 +113,48 @@ class ScraperApp(APIApplication):
             params["limit"] = limit
         if is_company is not None:
             params["is_company"] = is_company
-
-        response = self._get(url, params=params)
+        response = await self._aget(url, params=params)
         return response.json()
 
-    def linkedin_retrieve_profile(self, identifier: str) -> dict[str, Any]:
+    async def linkedin_retrieve_profile(self, identifier: str) -> dict[str, Any]:
         """
         Fetches a specific LinkedIn user's profile using their public or internal ID. Unlike `linkedin_search_people`, which discovers multiple users via keywords, this function targets and retrieves detailed data for a single, known individual based on a direct identifier.
-
+
         Args:
             identifier: Can be the provider's internal id OR the provider's public id of the requested user.For example, for https://www.linkedin.com/in/manojbajaj95/, the identifier is "manojbajaj95".
-
+
         Returns:
             A dictionary containing the user's profile details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, user, profile, retrieve, get, api, important
         """
         url = f"{self.base_url}/api/v1/users/{identifier}"
         params: dict[str, Any] = {"account_id": self.account_id}
-        response = self._get(url, params=params)
+        response = await self._aget(url, params=params)
         return self._handle_response(response)
 
-    def linkedin_list_post_comments(
-        self,
-        post_id: str,
-        comment_id: str | None = None,
-        cursor: str | None = None,
-        limit: int | None = None,
+    async def linkedin_list_post_comments(
+        self, post_id: str, comment_id: str | None = None, cursor: str | None = None, limit: int | None = None
     ) -> dict[str, Any]:
         """
         Fetches a paginated list of comments for a specified LinkedIn post. It can retrieve either top-level comments or threaded replies if an optional `comment_id` is provided. This is a read-only operation, distinct from functions that search for posts or list user-specific content.
-
+
         Args:
             post_id: The social ID of the post.
             comment_id: If provided, retrieves replies to this comment ID instead of top-level comments.
             cursor: Pagination cursor.
             limit: Number of comments to return. (OpenAPI spec shows type string, passed as string if provided).
-
+
         Returns:
             A dictionary containing a list of comment objects and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, post, comment, list, content, api, important
         """
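For callers, the practical effect of these hunks is that every tool is now a coroutine and must be awaited. A sketch of an updated call site, assuming an already-configured ScraperApp instance (construction and credential setup are not shown in this diff):

    import asyncio

    from universal_mcp.applications.scraper.app import ScraperApp

    async def show_profile(app: ScraperApp) -> None:
        # Previously a plain call; after this release the method must be awaited.
        profile = await app.linkedin_retrieve_profile("manojbajaj95")
        print(profile)

    # asyncio.run(show_profile(configured_app))  # configured_app: a ScraperApp set up elsewhere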
@@ -195,11 +166,10 @@ class ScraperApp(APIApplication):
             params["limit"] = str(limit)
         if comment_id:
             params["comment_id"] = comment_id
-
-        response = self._get(url, params=params)
+        response = await self._aget(url, params=params)
         return response.json()
 
-    def linkedin_search_people(
+    async def linkedin_search_people(
         self,
         cursor: str | None = None,
         limit: int | None = None,
@@ -210,7 +180,7 @@ class ScraperApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Performs a paginated search for people on LinkedIn, distinct from searches for companies or jobs. It filters results using keywords, location, industry, and company, internally converting filter names like 'United States' into their required API IDs before making the request.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
@@ -218,42 +188,35 @@ class ScraperApp(APIApplication):
             location: The geographical location to filter people by (e.g., "United States").
             industry: The industry to filter people by.(e.g., "Software Development".)
             company: The company to filter people by.(e.g., "Google".)
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
         params: dict[str, Any] = {"account_id": self.account_id}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {"api": "classic", "category": "people"}
-
         if keywords:
             payload["keywords"] = keywords
-
         if location:
-            location_id = self._get_search_parameter_id("LOCATION", location)
+            location_id = await self._aget_search_parameter_id("LOCATION", location)
             payload["location"] = [location_id]
-
         if industry:
-            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            industry_id = await self._aget_search_parameter_id("INDUSTRY", industry)
             payload["industry"] = [industry_id]
-
         if company:
-            company_id = self._get_search_parameter_id("COMPANY", company)
+            company_id = await self._aget_search_parameter_id("COMPANY", company)
             payload["company"] = [company_id]
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-    def linkedin_search_companies(
+    async def linkedin_search_companies(
         self,
         cursor: str | None = None,
         limit: int | None = None,
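A usage sketch for the async people search: callers pass friendly filter names, and the app resolves them to IDs internally via _aget_search_parameter_id. Again assuming a configured ScraperApp instance; meant to run inside an event loop:

    from typing import Any

    async def find_people(app: Any) -> None:
        # app: a configured ScraperApp; filters are plain names, not IDs.
        results = await app.linkedin_search_people(
            keywords="machine learning", location="United States", company="Google", limit=10
        )
        for person in results.get("items", []):
            print(person)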
@@ -263,45 +226,39 @@ class ScraperApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Executes a paginated LinkedIn search for companies, filtering by optional keywords, location, and industry. Unlike `linkedin_search_people` or `linkedin_search_jobs`, this function specifically sets the API search category to 'companies' to ensure that only company profiles are returned in the search results.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
             keywords: Keywords to search for.
             location: The geographical location to filter companies by (e.g., "United States").
             industry: The industry to filter companies by.(e.g., "Software Development".)
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
         params: dict[str, Any] = {"account_id": self.account_id}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {"api": "classic", "category": "companies"}
-
         if keywords:
             payload["keywords"] = keywords
-
         if location:
-            location_id = self._get_search_parameter_id("LOCATION", location)
+            location_id = await self._aget_search_parameter_id("LOCATION", location)
             payload["location"] = [location_id]
-
         if industry:
-            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            industry_id = await self._aget_search_parameter_id("INDUSTRY", industry)
             payload["industry"] = [industry_id]
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-    def linkedin_search_posts(
+    async def linkedin_search_posts(
         self,
         cursor: str | None = None,
         limit: int | None = None,
@@ -311,104 +268,89 @@ class ScraperApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Performs a keyword-based search for LinkedIn posts, allowing results to be filtered by date and sorted by relevance. This function specifically queries the 'posts' category, distinguishing it from other search methods in the class that target people, companies, or jobs, and returns relevant content.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
             keywords: Keywords to search for.
             date_posted: Filter by when the post was posted.
             sort_by: How to sort the results.
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
         params: dict[str, Any] = {"account_id": self.account_id}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {"api": "classic", "category": "posts"}
-
         if keywords:
             payload["keywords"] = keywords
         if date_posted:
             payload["date_posted"] = date_posted
         if sort_by:
             payload["sort_by"] = sort_by
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-    def linkedin_search_jobs(
+    async def linkedin_search_jobs(
         self,
         cursor: str | None = None,
         limit: int | None = None,
         keywords: str | None = None,
         region: str | None = None,
         sort_by: Literal["relevance", "date"] = "relevance",
-        minimum_salary_value:
+        minimum_salary_value: Literal[40, 60, 80, 100, 120, 140, 160, 180, 200] = 40,
         industry: str | None = None,
     ) -> dict[str, Any]:
         """
         Executes a LinkedIn search specifically for job listings using keywords and filters like region, industry, and minimum salary. Unlike other search functions targeting people or companies, this is specialized for job listings and converts friendly filter names (e.g., "United States") into their required API IDs.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
            keywords: Keywords to search for.
             region: The geographical region to filter jobs by (e.g., "United States").
             sort_by: How to sort the results.(e.g., "relevance" or "date".)
-            minimum_salary_value: The minimum salary to filter for.
+            minimum_salary_value: The minimum salary to filter for. Allowed values are 40, 60, 80, 100, 120, 140, 160, 180, 200.
             industry: The industry to filter jobs by. (e.g., "Software Development".)
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
             ValueError: If the specified location is not found.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
         params: dict[str, Any] = {"account_id": self.account_id}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {
             "api": "classic",
             "category": "jobs",
-            "minimum_salary": {
-                "currency": "USD",
-                "value": minimum_salary_value,
-            },
+            "minimum_salary": {"currency": "USD", "value": minimum_salary_value},
         }
-
         if keywords:
             payload["keywords"] = keywords
         if sort_by:
             payload["sort_by"] = sort_by
-
-        # If location is provided, get its ID and add it to the payload
         if region:
-            location_id = self._get_search_parameter_id("LOCATION", region)
+            location_id = await self._aget_search_parameter_id("LOCATION", region)
            payload["region"] = location_id
-
         if industry:
-            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            industry_id = await self._aget_search_parameter_id("INDUSTRY", industry)
             payload["industry"] = [industry_id]
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-
     def list_tools(self):
         """
         Returns a list of available tools/functions in this application.