universal-mcp-applications 0.1.33__py3-none-any.whl → 0.1.39rc16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-applications might be problematic.
- universal_mcp/applications/BEST_PRACTICES.md +1 -1
- universal_mcp/applications/ahrefs/app.py +92 -238
- universal_mcp/applications/airtable/app.py +36 -135
- universal_mcp/applications/apollo/app.py +124 -477
- universal_mcp/applications/asana/app.py +605 -1755
- universal_mcp/applications/aws_s3/app.py +63 -119
- universal_mcp/applications/bill/app.py +644 -2055
- universal_mcp/applications/box/app.py +1246 -4159
- universal_mcp/applications/braze/app.py +410 -1476
- universal_mcp/applications/browser_use/README.md +15 -1
- universal_mcp/applications/browser_use/__init__.py +1 -0
- universal_mcp/applications/browser_use/app.py +91 -26
- universal_mcp/applications/cal_com_v2/app.py +207 -625
- universal_mcp/applications/calendly/app.py +103 -242
- universal_mcp/applications/canva/app.py +75 -140
- universal_mcp/applications/clickup/app.py +331 -798
- universal_mcp/applications/coda/app.py +240 -520
- universal_mcp/applications/confluence/app.py +497 -1285
- universal_mcp/applications/contentful/app.py +40 -155
- universal_mcp/applications/crustdata/app.py +44 -123
- universal_mcp/applications/dialpad/app.py +451 -924
- universal_mcp/applications/digitalocean/app.py +2071 -6082
- universal_mcp/applications/domain_checker/app.py +3 -54
- universal_mcp/applications/e2b/app.py +17 -68
- universal_mcp/applications/elevenlabs/README.md +27 -3
- universal_mcp/applications/elevenlabs/app.py +741 -74
- universal_mcp/applications/exa/README.md +8 -4
- universal_mcp/applications/exa/app.py +415 -186
- universal_mcp/applications/falai/README.md +5 -7
- universal_mcp/applications/falai/app.py +156 -232
- universal_mcp/applications/figma/app.py +91 -175
- universal_mcp/applications/file_system/app.py +2 -13
- universal_mcp/applications/firecrawl/app.py +198 -176
- universal_mcp/applications/fireflies/app.py +59 -281
- universal_mcp/applications/fpl/app.py +92 -529
- universal_mcp/applications/fpl/utils/fixtures.py +15 -49
- universal_mcp/applications/fpl/utils/helper.py +25 -89
- universal_mcp/applications/fpl/utils/league_utils.py +20 -64
- universal_mcp/applications/ghost_content/app.py +70 -179
- universal_mcp/applications/github/app.py +30 -67
- universal_mcp/applications/gong/app.py +142 -302
- universal_mcp/applications/google_calendar/app.py +26 -78
- universal_mcp/applications/google_docs/README.md +15 -14
- universal_mcp/applications/google_docs/app.py +103 -206
- universal_mcp/applications/google_drive/app.py +194 -793
- universal_mcp/applications/google_gemini/app.py +68 -59
- universal_mcp/applications/google_mail/README.md +1 -0
- universal_mcp/applications/google_mail/app.py +93 -214
- universal_mcp/applications/google_searchconsole/app.py +25 -58
- universal_mcp/applications/google_sheet/README.md +2 -1
- universal_mcp/applications/google_sheet/app.py +226 -624
- universal_mcp/applications/google_sheet/helper.py +26 -53
- universal_mcp/applications/hashnode/app.py +57 -269
- universal_mcp/applications/heygen/README.md +10 -32
- universal_mcp/applications/heygen/app.py +339 -811
- universal_mcp/applications/http_tools/app.py +10 -32
- universal_mcp/applications/hubspot/README.md +1 -1
- universal_mcp/applications/hubspot/app.py +7508 -99
- universal_mcp/applications/jira/app.py +2419 -8334
- universal_mcp/applications/klaviyo/app.py +739 -1621
- universal_mcp/applications/linkedin/README.md +18 -1
- universal_mcp/applications/linkedin/app.py +729 -251
- universal_mcp/applications/mailchimp/app.py +696 -1851
- universal_mcp/applications/markitdown/app.py +8 -20
- universal_mcp/applications/miro/app.py +333 -815
- universal_mcp/applications/ms_teams/app.py +420 -1407
- universal_mcp/applications/neon/app.py +144 -250
- universal_mcp/applications/notion/app.py +38 -53
- universal_mcp/applications/onedrive/app.py +26 -48
- universal_mcp/applications/openai/app.py +43 -166
- universal_mcp/applications/outlook/README.md +22 -9
- universal_mcp/applications/outlook/app.py +403 -141
- universal_mcp/applications/perplexity/README.md +2 -1
- universal_mcp/applications/perplexity/app.py +161 -20
- universal_mcp/applications/pipedrive/app.py +1021 -3331
- universal_mcp/applications/posthog/app.py +272 -541
- universal_mcp/applications/reddit/app.py +65 -164
- universal_mcp/applications/resend/app.py +72 -139
- universal_mcp/applications/retell/app.py +23 -50
- universal_mcp/applications/rocketlane/app.py +252 -965
- universal_mcp/applications/scraper/app.py +114 -142
- universal_mcp/applications/semanticscholar/app.py +36 -78
- universal_mcp/applications/semrush/app.py +44 -78
- universal_mcp/applications/sendgrid/app.py +826 -1576
- universal_mcp/applications/sentry/app.py +444 -1079
- universal_mcp/applications/serpapi/app.py +44 -146
- universal_mcp/applications/sharepoint/app.py +27 -49
- universal_mcp/applications/shopify/app.py +1748 -4486
- universal_mcp/applications/shortcut/app.py +275 -536
- universal_mcp/applications/slack/app.py +43 -125
- universal_mcp/applications/spotify/app.py +206 -405
- universal_mcp/applications/supabase/app.py +174 -283
- universal_mcp/applications/tavily/app.py +2 -2
- universal_mcp/applications/trello/app.py +853 -2816
- universal_mcp/applications/twilio/app.py +27 -62
- universal_mcp/applications/twitter/api_segments/compliance_api.py +4 -14
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +6 -18
- universal_mcp/applications/twitter/api_segments/likes_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/lists_api.py +5 -15
- universal_mcp/applications/twitter/api_segments/trends_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/tweets_api.py +9 -31
- universal_mcp/applications/twitter/api_segments/usage_api.py +1 -5
- universal_mcp/applications/twitter/api_segments/users_api.py +14 -42
- universal_mcp/applications/whatsapp/app.py +35 -186
- universal_mcp/applications/whatsapp/audio.py +2 -6
- universal_mcp/applications/whatsapp/whatsapp.py +17 -51
- universal_mcp/applications/whatsapp_business/app.py +86 -299
- universal_mcp/applications/wrike/app.py +80 -153
- universal_mcp/applications/yahoo_finance/app.py +19 -65
- universal_mcp/applications/youtube/app.py +120 -306
- universal_mcp/applications/zenquotes/app.py +3 -3
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/METADATA +4 -2
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/RECORD +115 -119
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/WHEEL +1 -1
- universal_mcp/applications/hubspot/api_segments/__init__.py +0 -0
- universal_mcp/applications/hubspot/api_segments/api_segment_base.py +0 -54
- universal_mcp/applications/hubspot/api_segments/crm_api.py +0 -7337
- universal_mcp/applications/hubspot/api_segments/marketing_api.py +0 -1467
- {universal_mcp_applications-0.1.33.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/licenses/LICENSE +0 -0
--- a/universal_mcp/applications/scraper/app.py
+++ b/universal_mcp/applications/scraper/app.py
@@ -2,13 +2,12 @@ import os
 from dotenv import load_dotenv
 
 load_dotenv()
-
 from typing import Any, Literal
-
 from loguru import logger
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
 
+
 class ScraperApp(APIApplication):
     """
     Application for interacting with LinkedIn API.
@@ -17,24 +16,25 @@ class ScraperApp(APIApplication):
 
     def __init__(self, integration: Integration, **kwargs: Any) -> None:
         super().__init__(name="scraper", integration=integration, **kwargs)
+        self._account_id = None
+
+    async def _get_account_id(self) -> str | None:
+        if self._account_id:
+            return self._account_id
         if self.integration:
-            credentials = self.integration.get_credentials()
-            self.account_id = credentials.get("account_id")
+            credentials = await self.integration.get_credentials_async()
+            self._account_id = credentials.get("account_id")
         else:
             logger.warning("Integration not found")
-
+        return self._account_id
 
     @property
     def base_url(self) -> str:
         if not self._base_url:
             unipile_dsn = os.getenv("UNIPILE_DSN")
             if not unipile_dsn:
-                logger.error(
-                    "UnipileApp: UNIPILE_DSN environment variable is not set."
-                )
-                raise ValueError(
-                    "UnipileApp: UNIPILE_DSN environment variable is required."
-                )
+                logger.error("UnipileApp: UNIPILE_DSN environment variable is not set.")
+                raise ValueError("UnipileApp: UNIPILE_DSN environment variable is required.")
             self._base_url = f"https://{unipile_dsn}"
         return self._base_url
 
@@ -48,30 +48,21 @@ class ScraperApp(APIApplication):
         Get the headers for Unipile API requests.
         Overrides the base class method to use X-Api-Key.
         """
-        if not self.integration:
-            logger.warning(
-                "UnipileApp: No integration configured, returning empty headers."
-            )
-            return {}
-
         api_key = os.getenv("UNIPILE_API_KEY")
         if not api_key:
-            logger.error(
-                "UnipileApp: API key not found in integration credentials for Unipile."
-            )
-            return {  # Or return minimal headers if some calls might not need auth (unlikely for Unipile)
-                "Content-Type": "application/json",
-                "Cache-Control": "no-cache",
-            }
-
+            logger.error("UnipileApp: API key not found in integration credentials for Unipile.")
+            return {"Content-Type": "application/json", "Cache-Control": "no-cache"}
         logger.debug("UnipileApp: Using X-Api-Key for authentication.")
-        return {
-            "x-api-key": api_key,
-            "Content-Type": "application/json",
-            "Cache-Control": "no-cache",
-        }
+        return {"x-api-key": api_key, "Content-Type": "application/json", "Cache-Control": "no-cache"}
+
+    async def _aget_headers(self) -> dict[str, str]:
+        """
+        Get the headers for Unipile API requests asynchronously.
+        Overrides the base class method to use X-Api-Key.
+        """
+        return self._get_headers()
 
-    def _get_search_parameter_id(self, param_type: str, keywords: str) -> str:
+    async def _aget_search_parameter_id(self, param_type: str, keywords: str) -> str:
         """
         Retrieves the ID for a given LinkedIn search parameter by its name.
 
@@ -87,119 +78,127 @@ class ScraperApp(APIApplication):
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search/parameters"
-        params = {
-            "account_id": self.account_id,
-            "keywords": keywords,
-            "type": param_type,
-        }
-
-        response = self._get(url, params=params)
+        params = {"account_id": await self._get_account_id(), "keywords": keywords, "type": param_type}
+        response = await self._aget(url, params=params)
         results = self._handle_response(response)
-
         items = results.get("items", [])
         if items:
-            # Return the ID of the first result, assuming it's the most relevant
             return items[0]["id"]
-
         raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')
 
-
-    def linkedin_list_profile_posts(
-        self,
-        identifier: str,  # User or Company provider internal ID
-        cursor: str | None = None,
-        limit: int | None = None,  # 1-100 (spec says max 250)
-        is_company: bool | None = None,
+    async def linkedin_list_profile_posts(
+        self, provider_id: str, cursor: str | None = None, limit: int | None = None, is_company: bool | None = None
     ) -> dict[str, Any]:
         """
         Fetches a paginated list of posts from a specific user or company profile using its provider ID. The `is_company` flag must specify the entity type. Unlike `linkedin_search_posts`, this function directly retrieves content from a known profile's feed instead of performing a global keyword search.
-
+
         Args:
-            identifier: The entity's provider internal ID (LinkedIn ID).
+            provider_id: The entity's provider internal ID (LinkedIn ID).
             cursor: Pagination cursor.
             limit: Number of items to return (1-100, as per Unipile example, though spec allows up to 250).
-            is_company: Boolean indicating if the identifier is for a company.
-
+            is_company: Boolean indicating if the provider_id is for a company.
+
         Returns:
             A dictionary containing a list of post objects and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, post, list, user_posts, company_posts, content, api, important
         """
-        url = f"{self.base_url}/api/v1/users/{identifier}/posts"
-        params: dict[str, Any] = {"account_id": self.account_id}
+        url = f"{self.base_url}/api/v1/users/{provider_id}/posts"
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
         if cursor:
             params["cursor"] = cursor
         if limit:
             params["limit"] = limit
         if is_company is not None:
             params["is_company"] = is_company
-
-        response = self._get(url, params=params)
+        response = await self._aget(url, params=params)
         return response.json()
 
-    def linkedin_retrieve_profile(self, identifier: str) -> dict[str, Any]:
+    async def linkedin_list_profile_comments(self, provider_id: str, limit: int | None = None, cursor: str | None = None) -> dict[str, Any]:
+        """
+        Retrieves a list of comments made by a specific user using their provider ID.
+
+        Args:
+            provider_id: The entity's provider internal ID (LinkedIn ID).
+            limit: Number of items to return (1-100).
+            cursor: Pagination cursor.
+
+        Returns:
+            A dictionary containing the list of comments.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+
+        Tags:
+            linkedin, user, comments, list, content, api
+        """
+        url = f"{self.base_url}/api/v1/users/{provider_id}/comments"
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
+        if cursor:
+            params["cursor"] = cursor
+        if limit:
+            params["limit"] = limit
+        response = await self._aget(url, params=params)
+        return self._handle_response(response)
+
+    async def linkedin_retrieve_profile(self, provider_id: str) -> dict[str, Any]:
         """
         Fetches a specific LinkedIn user's profile using their public or internal ID. Unlike `linkedin_search_people`, which discovers multiple users via keywords, this function targets and retrieves detailed data for a single, known individual based on a direct identifier.
-
+
         Args:
-            identifier: Can be the provider's internal id OR the provider's public id of the requested user.
-
+            provider_id: Can be the provider's internal id OR the provider's public id of the requested user.For example, for https://www.linkedin.com/in/manojbajaj95/, the identifier is "manojbajaj95".
+
         Returns:
             A dictionary containing the user's profile details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, user, profile, retrieve, get, api, important
         """
-        url = f"{self.base_url}/api/v1/users/{identifier}"
-        params: dict[str, Any] = {"account_id": self.account_id}
-        response = self._get(url, params=params)
+        url = f"{self.base_url}/api/v1/users/{provider_id}"
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
+        response = await self._aget(url, params=params)
         return self._handle_response(response)
 
-    def linkedin_list_post_comments(
-        self,
-        post_id: str,
-        comment_id: str | None = None,
-        cursor: str | None = None,
-        limit: int | None = None,
+    async def linkedin_list_post_comments(
+        self, post_id: str, comment_id: str | None = None, cursor: str | None = None, limit: int | None = None
     ) -> dict[str, Any]:
         """
         Fetches a paginated list of comments for a specified LinkedIn post. It can retrieve either top-level comments or threaded replies if an optional `comment_id` is provided. This is a read-only operation, distinct from functions that search for posts or list user-specific content.
-
+
         Args:
             post_id: The social ID of the post.
             comment_id: If provided, retrieves replies to this comment ID instead of top-level comments.
             cursor: Pagination cursor.
             limit: Number of comments to return. (OpenAPI spec shows type string, passed as string if provided).
-
+
         Returns:
             A dictionary containing a list of comment objects and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, post, comment, list, content, api, important
         """
         url = f"{self.base_url}/api/v1/posts/{post_id}/comments"
-        params: dict[str, Any] = {"account_id": self.account_id}
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
        if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = str(limit)
         if comment_id:
             params["comment_id"] = comment_id
-
-        response = self._get(url, params=params)
+        response = await self._aget(url, params=params)
         return response.json()
 
-    def linkedin_search_people(
+    async def linkedin_search_people(
         self,
         cursor: str | None = None,
         limit: int | None = None,
@@ -210,7 +209,7 @@ class ScraperApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Performs a paginated search for people on LinkedIn, distinct from searches for companies or jobs. It filters results using keywords, location, industry, and company, internally converting filter names like 'United States' into their required API IDs before making the request.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
@@ -218,42 +217,35 @@ class ScraperApp(APIApplication):
             location: The geographical location to filter people by (e.g., "United States").
             industry: The industry to filter people by.(e.g., "Software Development".)
             company: The company to filter people by.(e.g., "Google".)
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
-        params: dict[str, Any] = {"account_id": self.account_id}
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {"api": "classic", "category": "people"}
-
         if keywords:
             payload["keywords"] = keywords
-
         if location:
-            location_id = self._get_search_parameter_id("LOCATION", location)
+            location_id = await self._aget_search_parameter_id("LOCATION", location)
             payload["location"] = [location_id]
-
         if industry:
-            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            industry_id = await self._aget_search_parameter_id("INDUSTRY", industry)
             payload["industry"] = [industry_id]
-
         if company:
-            company_id = self._get_search_parameter_id("COMPANY", company)
+            company_id = await self._aget_search_parameter_id("COMPANY", company)
             payload["company"] = [company_id]
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-    def linkedin_search_companies(
+    async def linkedin_search_companies(
         self,
         cursor: str | None = None,
         limit: int | None = None,
@@ -263,45 +255,39 @@ class ScraperApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Executes a paginated LinkedIn search for companies, filtering by optional keywords, location, and industry. Unlike `linkedin_search_people` or `linkedin_search_jobs`, this function specifically sets the API search category to 'companies' to ensure that only company profiles are returned in the search results.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
             keywords: Keywords to search for.
             location: The geographical location to filter companies by (e.g., "United States").
             industry: The industry to filter companies by.(e.g., "Software Development".)
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
-        params: dict[str, Any] = {"account_id": self.account_id}
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {"api": "classic", "category": "companies"}
-
         if keywords:
             payload["keywords"] = keywords
-
         if location:
-            location_id = self._get_search_parameter_id("LOCATION", location)
+            location_id = await self._aget_search_parameter_id("LOCATION", location)
             payload["location"] = [location_id]
-
         if industry:
-            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            industry_id = await self._aget_search_parameter_id("INDUSTRY", industry)
             payload["industry"] = [industry_id]
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-    def linkedin_search_posts(
+    async def linkedin_search_posts(
         self,
         cursor: str | None = None,
         limit: int | None = None,
@@ -311,104 +297,89 @@ class ScraperApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Performs a keyword-based search for LinkedIn posts, allowing results to be filtered by date and sorted by relevance. This function specifically queries the 'posts' category, distinguishing it from other search methods in the class that target people, companies, or jobs, and returns relevant content.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
             keywords: Keywords to search for.
             date_posted: Filter by when the post was posted.
             sort_by: How to sort the results.
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
-        params: dict[str, Any] = {"account_id": self.account_id}
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {"api": "classic", "category": "posts"}
-
         if keywords:
             payload["keywords"] = keywords
         if date_posted:
             payload["date_posted"] = date_posted
         if sort_by:
             payload["sort_by"] = sort_by
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-    def linkedin_search_jobs(
+    async def linkedin_search_jobs(
         self,
         cursor: str | None = None,
         limit: int | None = None,
         keywords: str | None = None,
         region: str | None = None,
         sort_by: Literal["relevance", "date"] = "relevance",
-        minimum_salary_value: int = 40,
+        minimum_salary_value: Literal[40, 60, 80, 100, 120, 140, 160, 180, 200] = 40,
         industry: str | None = None,
     ) -> dict[str, Any]:
         """
         Executes a LinkedIn search specifically for job listings using keywords and filters like region, industry, and minimum salary. Unlike other search functions targeting people or companies, this is specialized for job listings and converts friendly filter names (e.g., "United States") into their required API IDs.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
             keywords: Keywords to search for.
             region: The geographical region to filter jobs by (e.g., "United States").
             sort_by: How to sort the results.(e.g., "relevance" or "date".)
-            minimum_salary_value: The minimum salary to filter for.
+            minimum_salary_value: The minimum salary to filter for. Allowed values are 40, 60, 80, 100, 120, 140, 160, 180, 200.
             industry: The industry to filter jobs by. (e.g., "Software Development".)
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
             ValueError: If the specified location is not found.
         """
         url = f"{self.base_url}/api/v1/linkedin/search"
-
-        params: dict[str, Any] = {"account_id": self.account_id}
+        params: dict[str, Any] = {"account_id": await self._get_account_id()}
         if cursor:
             params["cursor"] = cursor
         if limit is not None:
             params["limit"] = limit
-
         payload: dict[str, Any] = {
             "api": "classic",
             "category": "jobs",
-            "minimum_salary": {
-                "currency": "USD",
-                "value": minimum_salary_value,
-            },
+            "minimum_salary": {"currency": "USD", "value": minimum_salary_value},
         }
-
         if keywords:
             payload["keywords"] = keywords
         if sort_by:
             payload["sort_by"] = sort_by
-
-        # If location is provided, get its ID and add it to the payload
         if region:
-            location_id = self._get_search_parameter_id("LOCATION", region)
+            location_id = await self._aget_search_parameter_id("LOCATION", region)
             payload["region"] = location_id
-
         if industry:
-            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            industry_id = await self._aget_search_parameter_id("INDUSTRY", industry)
             payload["industry"] = [industry_id]
-
-        response = self._post(url, params=params, data=payload)
+        response = await self._apost(url, params=params, data=payload)
         return self._handle_response(response)
 
-
     def list_tools(self):
         """
         Returns a list of available tools/functions in this application.
@@ -418,6 +389,7 @@ class ScraperApp(APIApplication):
         """
         return [
             self.linkedin_list_profile_posts,
+            self.linkedin_list_profile_comments,
            self.linkedin_retrieve_profile,
             self.linkedin_list_post_comments,
             self.linkedin_search_people,