universal-mcp-applications 0.1.32__py3-none-any.whl → 0.1.36rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. universal_mcp/applications/ahrefs/app.py +52 -198
  2. universal_mcp/applications/airtable/app.py +23 -122
  3. universal_mcp/applications/apollo/app.py +111 -464
  4. universal_mcp/applications/asana/app.py +417 -1567
  5. universal_mcp/applications/aws_s3/app.py +36 -103
  6. universal_mcp/applications/bill/app.py +546 -1957
  7. universal_mcp/applications/box/app.py +1068 -3981
  8. universal_mcp/applications/braze/app.py +364 -1430
  9. universal_mcp/applications/browser_use/app.py +2 -8
  10. universal_mcp/applications/cal_com_v2/app.py +207 -625
  11. universal_mcp/applications/calendly/app.py +61 -200
  12. universal_mcp/applications/canva/app.py +45 -110
  13. universal_mcp/applications/clickup/app.py +207 -674
  14. universal_mcp/applications/coda/app.py +146 -426
  15. universal_mcp/applications/confluence/app.py +310 -1098
  16. universal_mcp/applications/contentful/app.py +36 -151
  17. universal_mcp/applications/crustdata/app.py +28 -107
  18. universal_mcp/applications/dialpad/app.py +283 -756
  19. universal_mcp/applications/digitalocean/app.py +1766 -5777
  20. universal_mcp/applications/domain_checker/app.py +3 -54
  21. universal_mcp/applications/e2b/app.py +14 -64
  22. universal_mcp/applications/elevenlabs/app.py +9 -47
  23. universal_mcp/applications/exa/app.py +6 -17
  24. universal_mcp/applications/falai/app.py +24 -101
  25. universal_mcp/applications/figma/app.py +53 -137
  26. universal_mcp/applications/file_system/app.py +2 -13
  27. universal_mcp/applications/firecrawl/app.py +51 -152
  28. universal_mcp/applications/fireflies/app.py +59 -281
  29. universal_mcp/applications/fpl/app.py +91 -528
  30. universal_mcp/applications/fpl/utils/fixtures.py +15 -49
  31. universal_mcp/applications/fpl/utils/helper.py +25 -89
  32. universal_mcp/applications/fpl/utils/league_utils.py +20 -64
  33. universal_mcp/applications/ghost_content/app.py +52 -161
  34. universal_mcp/applications/github/app.py +19 -56
  35. universal_mcp/applications/gong/app.py +88 -248
  36. universal_mcp/applications/google_calendar/app.py +16 -68
  37. universal_mcp/applications/google_docs/app.py +85 -189
  38. universal_mcp/applications/google_drive/app.py +141 -463
  39. universal_mcp/applications/google_gemini/app.py +12 -64
  40. universal_mcp/applications/google_mail/app.py +28 -157
  41. universal_mcp/applications/google_searchconsole/app.py +15 -48
  42. universal_mcp/applications/google_sheet/app.py +100 -581
  43. universal_mcp/applications/google_sheet/helper.py +10 -37
  44. universal_mcp/applications/hashnode/app.py +57 -269
  45. universal_mcp/applications/heygen/app.py +44 -122
  46. universal_mcp/applications/http_tools/app.py +10 -32
  47. universal_mcp/applications/hubspot/api_segments/crm_api.py +460 -1573
  48. universal_mcp/applications/hubspot/api_segments/marketing_api.py +74 -262
  49. universal_mcp/applications/hubspot/app.py +23 -87
  50. universal_mcp/applications/jira/app.py +2071 -7986
  51. universal_mcp/applications/klaviyo/app.py +494 -1376
  52. universal_mcp/applications/linkedin/README.md +9 -2
  53. universal_mcp/applications/linkedin/app.py +240 -181
  54. universal_mcp/applications/mailchimp/app.py +450 -1605
  55. universal_mcp/applications/markitdown/app.py +8 -20
  56. universal_mcp/applications/miro/app.py +217 -699
  57. universal_mcp/applications/ms_teams/app.py +64 -186
  58. universal_mcp/applications/neon/app.py +86 -192
  59. universal_mcp/applications/notion/app.py +21 -36
  60. universal_mcp/applications/onedrive/app.py +16 -38
  61. universal_mcp/applications/openai/app.py +42 -165
  62. universal_mcp/applications/outlook/app.py +24 -84
  63. universal_mcp/applications/perplexity/app.py +4 -19
  64. universal_mcp/applications/pipedrive/app.py +832 -3142
  65. universal_mcp/applications/posthog/app.py +163 -432
  66. universal_mcp/applications/reddit/app.py +40 -139
  67. universal_mcp/applications/resend/app.py +41 -107
  68. universal_mcp/applications/retell/app.py +14 -41
  69. universal_mcp/applications/rocketlane/app.py +221 -934
  70. universal_mcp/applications/scraper/README.md +7 -4
  71. universal_mcp/applications/scraper/app.py +50 -109
  72. universal_mcp/applications/semanticscholar/app.py +22 -64
  73. universal_mcp/applications/semrush/app.py +43 -77
  74. universal_mcp/applications/sendgrid/app.py +512 -1262
  75. universal_mcp/applications/sentry/app.py +271 -906
  76. universal_mcp/applications/serpapi/app.py +40 -143
  77. universal_mcp/applications/sharepoint/app.py +17 -39
  78. universal_mcp/applications/shopify/app.py +1551 -4287
  79. universal_mcp/applications/shortcut/app.py +155 -417
  80. universal_mcp/applications/slack/app.py +33 -115
  81. universal_mcp/applications/spotify/app.py +126 -325
  82. universal_mcp/applications/supabase/app.py +104 -213
  83. universal_mcp/applications/tavily/app.py +1 -1
  84. universal_mcp/applications/trello/app.py +693 -2656
  85. universal_mcp/applications/twilio/app.py +14 -50
  86. universal_mcp/applications/twitter/api_segments/compliance_api.py +4 -14
  87. universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +6 -18
  88. universal_mcp/applications/twitter/api_segments/likes_api.py +1 -3
  89. universal_mcp/applications/twitter/api_segments/lists_api.py +5 -15
  90. universal_mcp/applications/twitter/api_segments/trends_api.py +1 -3
  91. universal_mcp/applications/twitter/api_segments/tweets_api.py +9 -31
  92. universal_mcp/applications/twitter/api_segments/usage_api.py +1 -5
  93. universal_mcp/applications/twitter/api_segments/users_api.py +14 -42
  94. universal_mcp/applications/whatsapp/app.py +35 -186
  95. universal_mcp/applications/whatsapp/audio.py +2 -6
  96. universal_mcp/applications/whatsapp/whatsapp.py +17 -51
  97. universal_mcp/applications/whatsapp_business/app.py +70 -283
  98. universal_mcp/applications/wrike/app.py +45 -118
  99. universal_mcp/applications/yahoo_finance/app.py +19 -65
  100. universal_mcp/applications/youtube/app.py +75 -261
  101. universal_mcp/applications/zenquotes/app.py +2 -2
  102. {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/METADATA +2 -2
  103. {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/RECORD +105 -105
  104. {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/WHEEL +0 -0
  105. {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/licenses/LICENSE +0 -0
universal_mcp/applications/scraper/README.md
@@ -9,7 +9,10 @@ This is automatically generated from OpenAPI schema for the ScraperApp API.
 
  | Tool | Description |
  |------|-------------|
- | `linkedin_post_search` | Performs a general LinkedIn search for posts using keywords and filters like date and content type. It supports pagination and can utilize either the 'classic' or 'sales_navigator' API, searching broadly across the platform rather than fetching posts from a specific user's profile. |
- | `linkedin_list_profile_posts` | Fetches a paginated list of all LinkedIn posts from a specific user or company profile using their unique identifier. This function retrieves content directly from a profile, unlike `linkedin_post_search` which finds posts across LinkedIn based on keywords and other filters. |
- | `linkedin_retrieve_profile` | Retrieves a specific LinkedIn user's profile by their unique identifier, which can be an internal provider ID or a public username. This function simplifies data access by delegating the actual profile retrieval request to the integrated Unipile application, distinct from functions that list posts or comments. |
- | `linkedin_list_post_comments` | Fetches comments for a specified LinkedIn post. If a `comment_id` is provided, it retrieves replies to that comment instead of top-level comments. This function supports pagination and specifically targets comments, unlike others in the class that search for or list entire posts. |
+ | `linkedin_list_profile_posts` | Fetches a paginated list of posts from a specific user or company profile using its provider ID. The `is_company` flag must specify the entity type. Unlike `linkedin_search_posts`, this function directly retrieves content from a known profile's feed instead of performing a global keyword search. |
+ | `linkedin_retrieve_profile` | Fetches a specific LinkedIn user's profile using their public or internal ID. Unlike `linkedin_search_people`, which discovers multiple users via keywords, this function targets and retrieves detailed data for a single, known individual based on a direct identifier. |
+ | `linkedin_list_post_comments` | Fetches a paginated list of comments for a specified LinkedIn post. It can retrieve either top-level comments or threaded replies if an optional `comment_id` is provided. This is a read-only operation, distinct from functions that search for posts or list user-specific content. |
+ | `linkedin_search_people` | Performs a paginated search for people on LinkedIn, distinct from searches for companies or jobs. It filters results using keywords, location, industry, and company, internally converting filter names like 'United States' into their required API IDs before making the request. |
+ | `linkedin_search_companies` | Executes a paginated LinkedIn search for companies, filtering by optional keywords, location, and industry. Unlike `linkedin_search_people` or `linkedin_search_jobs`, this function specifically sets the API search category to 'companies' to ensure that only company profiles are returned in the search results. |
+ | `linkedin_search_posts` | Performs a keyword-based search for LinkedIn posts, allowing results to be filtered by date and sorted by relevance. This function specifically queries the 'posts' category, distinguishing it from other search methods in the class that target people, companies, or jobs, and returns relevant content. |
+ | `linkedin_search_jobs` | Executes a LinkedIn search specifically for job listings using keywords and filters like region, industry, and minimum salary. Unlike other search functions targeting people or companies, this is specialized for job listings and converts friendly filter names (e.g., "United States") into their required API IDs. |
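
The new tool set above is async end to end. A minimal usage sketch, assuming credentials are supplied via the `UNIPILE_DSN` and `UNIPILE_API_KEY` environment variables the app reads; the constructor arguments are illustrative, since the diff does not show `ScraperApp.__init__`:

```python
# Hypothetical usage sketch; the import path follows the wheel layout, but the
# constructor arguments are assumptions. Integration setup is elided; without
# one the app logs a warning and sends no auth headers.
import asyncio

from universal_mcp.applications.scraper.app import ScraperApp


async def main() -> None:
    app = ScraperApp(integration=None)  # assumes UNIPILE_* env vars are set
    # 0.1.36rc2 splits search into per-category tools; all are now async.
    posts = await app.linkedin_search_posts(keywords="model context protocol", limit=10)
    people = await app.linkedin_search_people(keywords="python", location="United States")
    print(posts, people)


asyncio.run(main())
```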
universal_mcp/applications/scraper/app.py
@@ -2,13 +2,12 @@ import os
  from dotenv import load_dotenv
 
  load_dotenv()
-
  from typing import Any, Literal
-
  from loguru import logger
  from universal_mcp.applications.application import APIApplication
  from universal_mcp.integrations import Integration
 
+
  class ScraperApp(APIApplication):
  """
  Application for interacting with LinkedIn API.
@@ -29,12 +28,8 @@ class ScraperApp(APIApplication):
  if not self._base_url:
  unipile_dsn = os.getenv("UNIPILE_DSN")
  if not unipile_dsn:
- logger.error(
- "UnipileApp: UNIPILE_DSN environment variable is not set."
- )
- raise ValueError(
- "UnipileApp: UNIPILE_DSN environment variable is required."
- )
+ logger.error("UnipileApp: UNIPILE_DSN environment variable is not set.")
+ raise ValueError("UnipileApp: UNIPILE_DSN environment variable is required.")
  self._base_url = f"https://{unipile_dsn}"
  return self._base_url
 
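
The `base_url` change above is a pure reflow; behavior is identical. For context, a sketch of the configuration the property expects (placeholder values, not real credentials):

```python
# Placeholder values. The property builds f"https://{UNIPILE_DSN}", so the DSN
# is a bare host[:port] with no scheme; the API key feeds the X-Api-Key header
# assembled in _get_headers below.
import os

os.environ["UNIPILE_DSN"] = "api.example-unipile-dsn.com:13211"
os.environ["UNIPILE_API_KEY"] = "<your-unipile-api-key>"
```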
@@ -49,27 +44,14 @@ class ScraperApp(APIApplication):
  Overrides the base class method to use X-Api-Key.
  """
  if not self.integration:
- logger.warning(
- "UnipileApp: No integration configured, returning empty headers."
- )
+ logger.warning("UnipileApp: No integration configured, returning empty headers.")
  return {}
-
  api_key = os.getenv("UNIPILE_API_KEY")
  if not api_key:
- logger.error(
- "UnipileApp: API key not found in integration credentials for Unipile."
- )
- return { # Or return minimal headers if some calls might not need auth (unlikely for Unipile)
- "Content-Type": "application/json",
- "Cache-Control": "no-cache",
- }
-
+ logger.error("UnipileApp: API key not found in integration credentials for Unipile.")
+ return {"Content-Type": "application/json", "Cache-Control": "no-cache"}
  logger.debug("UnipileApp: Using X-Api-Key for authentication.")
- return {
- "x-api-key": api_key,
- "Content-Type": "application/json",
- "Cache-Control": "no-cache", # Often good practice for APIs
- }
+ return {"x-api-key": api_key, "Content-Type": "application/json", "Cache-Control": "no-cache"}
 
  def _get_search_parameter_id(self, param_type: str, keywords: str) -> str:
  """
@@ -87,45 +69,32 @@ class ScraperApp(APIApplication):
  httpx.HTTPError: If the API request fails.
  """
  url = f"{self.base_url}/api/v1/linkedin/search/parameters"
- params = {
- "account_id": self.account_id,
- "keywords": keywords,
- "type": param_type,
- }
-
+ params = {"account_id": self.account_id, "keywords": keywords, "type": param_type}
  response = self._get(url, params=params)
  results = self._handle_response(response)
-
  items = results.get("items", [])
  if items:
- # Return the ID of the first result, assuming it's the most relevant
  return items[0]["id"]
-
  raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')
 
-
- def linkedin_list_profile_posts(
- self,
- identifier: str, # User or Company provider internal ID
- cursor: str | None = None,
- limit: int | None = None, # 1-100 (spec says max 250)
- is_company: bool | None = None,
+ async def linkedin_list_profile_posts(
+ self, identifier: str, cursor: str | None = None, limit: int | None = None, is_company: bool | None = None
  ) -> dict[str, Any]:
  """
  Fetches a paginated list of posts from a specific user or company profile using its provider ID. The `is_company` flag must specify the entity type. Unlike `linkedin_search_posts`, this function directly retrieves content from a known profile's feed instead of performing a global keyword search.
-
+
  Args:
  identifier: The entity's provider internal ID (LinkedIn ID).
  cursor: Pagination cursor.
  limit: Number of items to return (1-100, as per Unipile example, though spec allows up to 250).
  is_company: Boolean indicating if the identifier is for a company.
-
+
  Returns:
  A dictionary containing a list of post objects and pagination details.
-
+
  Raises:
  httpx.HTTPError: If the API request fails.
-
+
  Tags:
  linkedin, post, list, user_posts, company_posts, content, api, important
  """
@@ -137,23 +106,22 @@ class ScraperApp(APIApplication):
  params["limit"] = limit
  if is_company is not None:
  params["is_company"] = is_company
-
  response = self._get(url, params=params)
  return response.json()
 
- def linkedin_retrieve_profile(self, identifier: str) -> dict[str, Any]:
+ async def linkedin_retrieve_profile(self, identifier: str) -> dict[str, Any]:
  """
  Fetches a specific LinkedIn user's profile using their public or internal ID. Unlike `linkedin_search_people`, which discovers multiple users via keywords, this function targets and retrieves detailed data for a single, known individual based on a direct identifier.
-
+
  Args:
  identifier: Can be the provider's internal id OR the provider's public id of the requested user.For example, for https://www.linkedin.com/in/manojbajaj95/, the identifier is "manojbajaj95".
-
+
  Returns:
  A dictionary containing the user's profile details.
-
+
  Raises:
  httpx.HTTPError: If the API request fails.
-
+
  Tags:
  linkedin, user, profile, retrieve, get, api, important
  """
@@ -162,28 +130,24 @@ class ScraperApp(APIApplication):
  response = self._get(url, params=params)
  return self._handle_response(response)
 
- def linkedin_list_post_comments(
- self,
- post_id: str,
- comment_id: str | None = None,
- cursor: str | None = None,
- limit: int | None = None,
+ async def linkedin_list_post_comments(
+ self, post_id: str, comment_id: str | None = None, cursor: str | None = None, limit: int | None = None
  ) -> dict[str, Any]:
  """
  Fetches a paginated list of comments for a specified LinkedIn post. It can retrieve either top-level comments or threaded replies if an optional `comment_id` is provided. This is a read-only operation, distinct from functions that search for posts or list user-specific content.
-
+
  Args:
  post_id: The social ID of the post.
  comment_id: If provided, retrieves replies to this comment ID instead of top-level comments.
  cursor: Pagination cursor.
  limit: Number of comments to return. (OpenAPI spec shows type string, passed as string if provided).
-
+
  Returns:
  A dictionary containing a list of comment objects and pagination details.
-
+
  Raises:
  httpx.HTTPError: If the API request fails.
-
+
  Tags:
  linkedin, post, comment, list, content, api, important
  """
@@ -195,11 +159,10 @@ class ScraperApp(APIApplication):
  params["limit"] = str(limit)
  if comment_id:
  params["comment_id"] = comment_id
-
  response = self._get(url, params=params)
  return response.json()
 
- def linkedin_search_people(
+ async def linkedin_search_people(
  self,
  cursor: str | None = None,
  limit: int | None = None,
@@ -210,47 +173,43 @@ class ScraperApp(APIApplication):
  ) -> dict[str, Any]:
  """
  Performs a paginated search for people on LinkedIn, distinct from searches for companies or jobs. It filters results using keywords, location, industry, and company, internally converting filter names like 'United States' into their required API IDs before making the request.
-
+
  Args:
  cursor: Pagination cursor for the next page of entries.
  limit: Number of items to return (up to 50 for Classic search).
  keywords: Keywords to search for.
-
+ location: The geographical location to filter people by (e.g., "United States").
+ industry: The industry to filter people by.(e.g., "Software Development".)
+ company: The company to filter people by.(e.g., "Google".)
+
  Returns:
  A dictionary containing search results and pagination details.
-
+
  Raises:
  httpx.HTTPError: If the API request fails.
  """
  url = f"{self.base_url}/api/v1/linkedin/search"
-
  params: dict[str, Any] = {"account_id": self.account_id}
  if cursor:
  params["cursor"] = cursor
  if limit is not None:
  params["limit"] = limit
-
  payload: dict[str, Any] = {"api": "classic", "category": "people"}
-
  if keywords:
  payload["keywords"] = keywords
-
  if location:
  location_id = self._get_search_parameter_id("LOCATION", location)
  payload["location"] = [location_id]
-
  if industry:
  industry_id = self._get_search_parameter_id("INDUSTRY", industry)
  payload["industry"] = [industry_id]
-
  if company:
  company_id = self._get_search_parameter_id("COMPANY", company)
  payload["company"] = [company_id]
-
  response = self._post(url, params=params, data=payload)
  return self._handle_response(response)
 
- def linkedin_search_companies(
+ async def linkedin_search_companies(
  self,
  cursor: str | None = None,
  limit: int | None = None,
@@ -260,43 +219,39 @@ class ScraperApp(APIApplication):
  ) -> dict[str, Any]:
  """
  Executes a paginated LinkedIn search for companies, filtering by optional keywords, location, and industry. Unlike `linkedin_search_people` or `linkedin_search_jobs`, this function specifically sets the API search category to 'companies' to ensure that only company profiles are returned in the search results.
-
+
  Args:
  cursor: Pagination cursor for the next page of entries.
  limit: Number of items to return (up to 50 for Classic search).
  keywords: Keywords to search for.
-
+ location: The geographical location to filter companies by (e.g., "United States").
+ industry: The industry to filter companies by.(e.g., "Software Development".)
+
  Returns:
  A dictionary containing search results and pagination details.
-
+
  Raises:
  httpx.HTTPError: If the API request fails.
  """
  url = f"{self.base_url}/api/v1/linkedin/search"
-
  params: dict[str, Any] = {"account_id": self.account_id}
  if cursor:
  params["cursor"] = cursor
  if limit is not None:
  params["limit"] = limit
-
  payload: dict[str, Any] = {"api": "classic", "category": "companies"}
-
  if keywords:
  payload["keywords"] = keywords
-
  if location:
  location_id = self._get_search_parameter_id("LOCATION", location)
  payload["location"] = [location_id]
-
  if industry:
  industry_id = self._get_search_parameter_id("INDUSTRY", industry)
  payload["industry"] = [industry_id]
-
  response = self._post(url, params=params, data=payload)
  return self._handle_response(response)
 
- def linkedin_search_posts(
+ async def linkedin_search_posts(
  self,
  cursor: str | None = None,
  limit: int | None = None,
@@ -306,41 +261,37 @@ class ScraperApp(APIApplication):
  ) -> dict[str, Any]:
  """
  Performs a keyword-based search for LinkedIn posts, allowing results to be filtered by date and sorted by relevance. This function specifically queries the 'posts' category, distinguishing it from other search methods in the class that target people, companies, or jobs, and returns relevant content.
-
+
  Args:
  cursor: Pagination cursor for the next page of entries.
  limit: Number of items to return (up to 50 for Classic search).
  keywords: Keywords to search for.
  date_posted: Filter by when the post was posted.
  sort_by: How to sort the results.
-
+
  Returns:
  A dictionary containing search results and pagination details.
-
+
  Raises:
  httpx.HTTPError: If the API request fails.
  """
  url = f"{self.base_url}/api/v1/linkedin/search"
-
  params: dict[str, Any] = {"account_id": self.account_id}
  if cursor:
  params["cursor"] = cursor
  if limit is not None:
  params["limit"] = limit
-
  payload: dict[str, Any] = {"api": "classic", "category": "posts"}
-
  if keywords:
  payload["keywords"] = keywords
  if date_posted:
  payload["date_posted"] = date_posted
  if sort_by:
  payload["sort_by"] = sort_by
-
  response = self._post(url, params=params, data=payload)
  return self._handle_response(response)
 
- def linkedin_search_jobs(
+ async def linkedin_search_jobs(
  self,
  cursor: str | None = None,
  limit: int | None = None,
@@ -352,57 +303,47 @@ class ScraperApp(APIApplication):
  ) -> dict[str, Any]:
  """
  Executes a LinkedIn search specifically for job listings using keywords and filters like region, industry, and minimum salary. Unlike other search functions targeting people or companies, this is specialized for job listings and converts friendly filter names (e.g., "United States") into their required API IDs.
-
+
  Args:
  cursor: Pagination cursor for the next page of entries.
  limit: Number of items to return (up to 50 for Classic search).
  keywords: Keywords to search for.
- location: The geographical location to filter jobs by (e.g., "United States").
- sort_by: How to sort the results.
+ region: The geographical region to filter jobs by (e.g., "United States").
+ sort_by: How to sort the results.(e.g., "relevance" or "date".)
  minimum_salary_value: The minimum salary to filter for.
-
+ industry: The industry to filter jobs by. (e.g., "Software Development".)
+
  Returns:
  A dictionary containing search results and pagination details.
-
+
  Raises:
  httpx.HTTPError: If the API request fails.
  ValueError: If the specified location is not found.
  """
  url = f"{self.base_url}/api/v1/linkedin/search"
-
  params: dict[str, Any] = {"account_id": self.account_id}
  if cursor:
  params["cursor"] = cursor
  if limit is not None:
  params["limit"] = limit
-
  payload: dict[str, Any] = {
  "api": "classic",
  "category": "jobs",
- "minimum_salary": {
- "currency": "USD",
- "value": minimum_salary_value,
- },
+ "minimum_salary": {"currency": "USD", "value": minimum_salary_value},
  }
-
  if keywords:
  payload["keywords"] = keywords
  if sort_by:
  payload["sort_by"] = sort_by
-
- # If location is provided, get its ID and add it to the payload
  if region:
  location_id = self._get_search_parameter_id("LOCATION", region)
  payload["region"] = location_id
-
  if industry:
  industry_id = self._get_search_parameter_id("INDUSTRY", industry)
  payload["industry"] = [industry_id]
-
  response = self._post(url, params=params, data=payload)
  return self._handle_response(response)
 
-
  def list_tools(self):
  """
  Returns a list of available tools/functions in this application.
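
That closes the `scraper/app.py` hunks: every tool method becomes `async`, formatting is collapsed, and the search tools resolve friendly filter names to provider IDs before posting the search payload. A distilled sketch of that lookup, mirroring `_get_search_parameter_id` above; the standalone `httpx.Client` wiring is an assumption, since in the app the request goes through `self._get`:

```python
from typing import Any

import httpx


def resolve_parameter_id(client: httpx.Client, base_url: str, account_id: str,
                         param_type: str, keywords: str) -> str:
    """Map a friendly name (e.g. 'United States') to the provider's internal ID."""
    response = client.get(
        f"{base_url}/api/v1/linkedin/search/parameters",
        params={"account_id": account_id, "keywords": keywords, "type": param_type},
    )
    response.raise_for_status()
    items: list[dict[str, Any]] = response.json().get("items", [])
    if not items:
        raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')
    return items[0]["id"]  # first match is assumed to be the most relevant
```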
universal_mcp/applications/semanticscholar/app.py
@@ -1,5 +1,4 @@
  from typing import Any
-
  from universal_mcp.applications.application import APIApplication
  from universal_mcp.integrations import Integration
 
@@ -9,7 +8,7 @@ class SemanticscholarApp(APIApplication):
  super().__init__(name="semanticscholar", integration=integration, **kwargs)
  self.base_url = "/graph/v1"
 
- def post_graph_get_authors(self, fields=None, ids=None) -> dict[str, Any]:
+ async def post_graph_get_authors(self, fields=None, ids=None) -> dict[str, Any]:
  """
  Creates a batch of authors using the provided JSON data in the request body, optionally specifying fields to include in the response via a query parameter.
 
@@ -23,9 +22,7 @@ class SemanticscholarApp(APIApplication):
  Tags:
  Author Data
  """
- request_body = {
- "ids": ids,
- }
+ request_body = {"ids": ids}
  request_body = {k: v for k, v in request_body.items() if v is not None}
  url = f"{self.base_url}/author/batch"
  query_params = {k: v for k, v in [("fields", fields)] if v is not None}
@@ -33,9 +30,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_author_search(
- self, query, offset=None, limit=None, fields=None
- ) -> dict[str, Any]:
+ async def get_graph_get_author_search(self, query, offset=None, limit=None, fields=None) -> dict[str, Any]:
  """
  Searches for authors based on a query string with optional pagination and field selection parameters.
 
@@ -52,21 +47,12 @@ class SemanticscholarApp(APIApplication):
  Author Data
  """
  url = f"{self.base_url}/author/search"
- query_params = {
- k: v
- for k, v in [
- ("offset", offset),
- ("limit", limit),
- ("fields", fields),
- ("query", query),
- ]
- if v is not None
- }
+ query_params = {k: v for k, v in [("offset", offset), ("limit", limit), ("fields", fields), ("query", query)] if v is not None}
  response = self._get(url, params=query_params)
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_author(self, author_id, fields=None) -> dict[str, Any]:
+ async def get_graph_get_author(self, author_id, fields=None) -> dict[str, Any]:
  """
  Retrieves the profile information for a specific author identified by the `author_id` and returns it with optional fields specified in the `fields` query parameter.
 
@@ -88,9 +74,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_author_papers(
- self, author_id, offset=None, limit=None, fields=None
- ) -> dict[str, Any]:
+ async def get_graph_get_author_papers(self, author_id, offset=None, limit=None, fields=None) -> dict[str, Any]:
  """
  Retrieves a paginated list of papers authored by the specified author, with optional field selection.
 
@@ -109,16 +93,12 @@ class SemanticscholarApp(APIApplication):
  if author_id is None:
  raise ValueError("Missing required parameter 'author_id'")
  url = f"{self.base_url}/author/{author_id}/papers"
- query_params = {
- k: v
- for k, v in [("offset", offset), ("limit", limit), ("fields", fields)]
- if v is not None
- }
+ query_params = {k: v for k, v in [("offset", offset), ("limit", limit), ("fields", fields)] if v is not None}
  response = self._get(url, params=query_params)
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_paper_autocomplete(self, query) -> dict[str, Any]:
+ async def get_graph_get_paper_autocomplete(self, query) -> dict[str, Any]:
  """
  Provides an autocomplete suggestion list based on a required query string parameter.
 
@@ -137,7 +117,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def post_graph_get_papers(self, fields=None, ids=None) -> dict[str, Any]:
+ async def post_graph_get_papers(self, fields=None, ids=None) -> dict[str, Any]:
  """
  Creates a batch of papers using JSON data in the request body and optionally specifies fields to include in the response.
 
@@ -151,9 +131,7 @@ class SemanticscholarApp(APIApplication):
  Tags:
  Paper Data
  """
- request_body = {
- "ids": ids,
- }
+ request_body = {"ids": ids}
  request_body = {k: v for k, v in request_body.items() if v is not None}
  url = f"{self.base_url}/paper/batch"
  query_params = {k: v for k, v in [("fields", fields)] if v is not None}
@@ -161,7 +139,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def get_graph_paper_relevance_search(
+ async def get_graph_paper_relevance_search(
  self,
  query,
  fields=None,
@@ -219,7 +197,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def get_graph_paper_bulk_search(
+ async def get_graph_paper_bulk_search(
  self,
  query,
  token=None,
@@ -277,7 +255,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def get_graph_paper_title_search(
+ async def get_graph_paper_title_search(
  self,
  query,
  fields=None,
@@ -329,7 +307,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_paper(self, paper_id, fields=None) -> dict[str, Any]:
+ async def get_graph_get_paper(self, paper_id, fields=None) -> dict[str, Any]:
  """
  Retrieves details of a paper by its ID, optionally specifying fields to include in the response.
 
@@ -351,9 +329,7 @@ class SemanticscholarApp(APIApplication):
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_paper_authors(
- self, paper_id, offset=None, limit=None, fields=None
- ) -> dict[str, Any]:
+ async def get_graph_get_paper_authors(self, paper_id, offset=None, limit=None, fields=None) -> dict[str, Any]:
  """
  Retrieves a list of authors for a specific paper identified by the `paper_id`, allowing optional parameters for offset, limit, and fields to customize the response.
 
@@ -372,18 +348,12 @@ class SemanticscholarApp(APIApplication):
  if paper_id is None:
  raise ValueError("Missing required parameter 'paper_id'")
  url = f"{self.base_url}/paper/{paper_id}/authors"
- query_params = {
- k: v
- for k, v in [("offset", offset), ("limit", limit), ("fields", fields)]
- if v is not None
- }
+ query_params = {k: v for k, v in [("offset", offset), ("limit", limit), ("fields", fields)] if v is not None}
  response = self._get(url, params=query_params)
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_paper_citations(
- self, paper_id, offset=None, limit=None, fields=None
- ) -> dict[str, Any]:
+ async def get_graph_get_paper_citations(self, paper_id, offset=None, limit=None, fields=None) -> dict[str, Any]:
  """
  Retrieves a list of citations for a specific paper, identified by its paper ID, with optional parameters for offset, limit, and fields.
 
@@ -402,18 +372,12 @@ class SemanticscholarApp(APIApplication):
  if paper_id is None:
  raise ValueError("Missing required parameter 'paper_id'")
  url = f"{self.base_url}/paper/{paper_id}/citations"
- query_params = {
- k: v
- for k, v in [("offset", offset), ("limit", limit), ("fields", fields)]
- if v is not None
- }
+ query_params = {k: v for k, v in [("offset", offset), ("limit", limit), ("fields", fields)] if v is not None}
  response = self._get(url, params=query_params)
  response.raise_for_status()
  return response.json()
 
- def get_graph_get_paper_references(
- self, paper_id, offset=None, limit=None, fields=None
- ) -> dict[str, Any]:
+ async def get_graph_get_paper_references(self, paper_id, offset=None, limit=None, fields=None) -> dict[str, Any]:
  """
  Retrieves references for a specific paper by its ID using the "GET" method and allows optional filtering by offset, limit, and fields for customizable output.
 
@@ -432,16 +396,12 @@ class SemanticscholarApp(APIApplication):
  if paper_id is None:
  raise ValueError("Missing required parameter 'paper_id'")
  url = f"{self.base_url}/paper/{paper_id}/references"
- query_params = {
- k: v
- for k, v in [("offset", offset), ("limit", limit), ("fields", fields)]
- if v is not None
- }
+ query_params = {k: v for k, v in [("offset", offset), ("limit", limit), ("fields", fields)] if v is not None}
  response = self._get(url, params=query_params)
  response.raise_for_status()
  return response.json()
 
- def get_snippet_search(self, query, limit=None) -> dict[str, Any]:
+ async def get_snippet_search(self, query, limit=None) -> dict[str, Any]:
  """
  Retrieves a list of search results based on a specified query string, optionally limited by a user-defined number of results, using the "GET" method at the "/snippet/search" endpoint.
 
@@ -456,9 +416,7 @@ class SemanticscholarApp(APIApplication):
  Snippet Text
  """
  url = f"{self.base_url}/snippet/search"
- query_params = {
- k: v for k, v in [("query", query), ("limit", limit)] if v is not None
- }
+ query_params = {k: v for k, v in [("query", query), ("limit", limit)] if v is not None}
  response = self._get(url, params=query_params)
  response.raise_for_status()
  return response.json()
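
The `semanticscholar/app.py` hunks are mechanical: each endpoint wrapper becomes `async`, and the multi-line dict comprehensions that drop unset query parameters are collapsed onto one line. The idiom itself is unchanged; a self-contained illustration:

```python
# The optional-parameter filter the refactor compresses: only arguments the
# caller actually supplied end up in the query string.
def build_query_params(offset=None, limit=None, fields=None) -> dict:
    pairs = [("offset", offset), ("limit", limit), ("fields", fields)]
    return {k: v for k, v in pairs if v is not None}


assert build_query_params(limit=10) == {"limit": 10}
assert build_query_params() == {}
```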