universal-mcp-applications 0.1.21-py3-none-any.whl → 0.1.22-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of universal-mcp-applications might be problematic.
- universal_mcp/applications/BEST_PRACTICES.md +166 -0
- universal_mcp/applications/airtable/app.py +0 -1
- universal_mcp/applications/apollo/app.py +0 -1
- universal_mcp/applications/aws_s3/app.py +40 -39
- universal_mcp/applications/browser_use/README.md +1 -0
- universal_mcp/applications/browser_use/__init__.py +0 -0
- universal_mcp/applications/browser_use/app.py +76 -0
- universal_mcp/applications/calendly/app.py +125 -125
- universal_mcp/applications/canva/app.py +95 -99
- universal_mcp/applications/confluence/app.py +0 -1
- universal_mcp/applications/contentful/app.py +4 -5
- universal_mcp/applications/domain_checker/app.py +11 -15
- universal_mcp/applications/e2b/app.py +4 -4
- universal_mcp/applications/elevenlabs/app.py +18 -15
- universal_mcp/applications/exa/app.py +17 -17
- universal_mcp/applications/falai/app.py +28 -29
- universal_mcp/applications/file_system/app.py +9 -9
- universal_mcp/applications/firecrawl/app.py +36 -36
- universal_mcp/applications/fireflies/app.py +55 -56
- universal_mcp/applications/fpl/app.py +49 -50
- universal_mcp/applications/ghost_content/app.py +0 -1
- universal_mcp/applications/github/app.py +41 -43
- universal_mcp/applications/google_calendar/app.py +40 -39
- universal_mcp/applications/google_docs/app.py +56 -56
- universal_mcp/applications/google_drive/app.py +212 -215
- universal_mcp/applications/google_gemini/app.py +1 -5
- universal_mcp/applications/google_mail/app.py +91 -90
- universal_mcp/applications/google_searchconsole/app.py +29 -29
- universal_mcp/applications/google_sheet/app.py +115 -115
- universal_mcp/applications/hashnode/README.md +6 -3
- universal_mcp/applications/hashnode/app.py +174 -25
- universal_mcp/applications/http_tools/app.py +10 -11
- universal_mcp/applications/hubspot/__init__.py +1 -1
- universal_mcp/applications/hubspot/api_segments/api_segment_base.py +36 -7
- universal_mcp/applications/hubspot/api_segments/crm_api.py +368 -368
- universal_mcp/applications/hubspot/api_segments/marketing_api.py +115 -115
- universal_mcp/applications/hubspot/app.py +131 -72
- universal_mcp/applications/jira/app.py +0 -1
- universal_mcp/applications/linkedin/app.py +20 -20
- universal_mcp/applications/markitdown/app.py +10 -5
- universal_mcp/applications/ms_teams/app.py +123 -123
- universal_mcp/applications/openai/app.py +40 -39
- universal_mcp/applications/outlook/app.py +32 -32
- universal_mcp/applications/perplexity/app.py +4 -4
- universal_mcp/applications/reddit/app.py +69 -70
- universal_mcp/applications/resend/app.py +116 -117
- universal_mcp/applications/rocketlane/app.py +0 -1
- universal_mcp/applications/scraper/__init__.py +1 -1
- universal_mcp/applications/scraper/app.py +80 -81
- universal_mcp/applications/serpapi/app.py +14 -14
- universal_mcp/applications/sharepoint/app.py +19 -20
- universal_mcp/applications/shopify/app.py +0 -1
- universal_mcp/applications/slack/app.py +48 -48
- universal_mcp/applications/tavily/app.py +4 -4
- universal_mcp/applications/twitter/api_segments/compliance_api.py +13 -15
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +20 -20
- universal_mcp/applications/twitter/api_segments/dm_events_api.py +12 -12
- universal_mcp/applications/twitter/api_segments/likes_api.py +12 -12
- universal_mcp/applications/twitter/api_segments/lists_api.py +37 -39
- universal_mcp/applications/twitter/api_segments/spaces_api.py +24 -24
- universal_mcp/applications/twitter/api_segments/trends_api.py +4 -4
- universal_mcp/applications/twitter/api_segments/tweets_api.py +105 -105
- universal_mcp/applications/twitter/api_segments/usage_api.py +4 -4
- universal_mcp/applications/twitter/api_segments/users_api.py +136 -136
- universal_mcp/applications/twitter/app.py +6 -2
- universal_mcp/applications/unipile/app.py +90 -97
- universal_mcp/applications/whatsapp/app.py +53 -54
- universal_mcp/applications/whatsapp/audio.py +39 -35
- universal_mcp/applications/whatsapp/whatsapp.py +176 -154
- universal_mcp/applications/whatsapp_business/app.py +92 -92
- universal_mcp/applications/yahoo_finance/app.py +105 -63
- universal_mcp/applications/youtube/app.py +193 -196
- universal_mcp/applications/zenquotes/__init__.py +2 -0
- universal_mcp/applications/zenquotes/app.py +3 -3
- {universal_mcp_applications-0.1.21.dist-info → universal_mcp_applications-0.1.22.dist-info}/METADATA +2 -1
- {universal_mcp_applications-0.1.21.dist-info → universal_mcp_applications-0.1.22.dist-info}/RECORD +78 -74
- {universal_mcp_applications-0.1.21.dist-info → universal_mcp_applications-0.1.22.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.21.dist-info → universal_mcp_applications-0.1.22.dist-info}/licenses/LICENSE +0 -0
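
Most of the churn in 0.1.22 is mechanical: parameter annotations move to PEP 604 unions (`str | None = None`), import blocks are reordered, and many docstring lines change only in trailing whitespace (rendered in the diffs below as paired `-`/`+` blank lines). The reconstructed diffs that follow cover three representative files: `scraper/app.py`, `serpapi/app.py`, and `sharepoint/app.py`. As a minimal sketch of the annotation pattern (the removed 0.1.21 signature lines are truncated in this diff, so the `Optional[...]` form shown for the old style is an assumption, inferred from the dropped `from typing import Any, Optional` import):

```python
from typing import Any, Optional


def old_style(cursor: Optional[str] = None, limit: Optional[int] = None) -> dict[str, Any]:
    """0.1.21-era signature (assumed form; the diff truncates the removed lines)."""
    return {"cursor": cursor, "limit": limit}


def new_style(cursor: str | None = None, limit: int | None = None) -> dict[str, Any]:
    """0.1.22 signature: PEP 604 union syntax, no typing.Optional needed."""
    return {"cursor": cursor, "limit": limit}
```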
--- a/universal_mcp/applications/scraper/app.py
+++ b/universal_mcp/applications/scraper/app.py
@@ -1,15 +1,14 @@
-import os
 from dotenv import load_dotenv
 
 load_dotenv()
 
 from typing import Any
 
+from loguru import logger
 from universal_mcp.applications.application import APIApplication
-from universal_mcp.applications.unipile import UnipileApp
-from typing import Any, Optional
 from universal_mcp.integrations import Integration
-
+
+from universal_mcp.applications.unipile import UnipileApp
 
 
 class ScraperApp(APIApplication):
@@ -33,7 +32,7 @@ class ScraperApp(APIApplication):
             self.account_id = credentials.get("account_id")
             self._unipile_app = UnipileApp(integration=self.integration)
         else:
-            logger.warning(
+            logger.warning("Integration not found")
             self.account_id = None
             self._unipile_app = None
 
@@ -41,12 +40,12 @@ class ScraperApp(APIApplication):
         self,
         category: str = "posts",
         api: str = "classic",
-        cursor:
-        limit:
-        keywords:
-        sort_by:
-        date_posted:
-        content_type:
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        sort_by: str | None = None,
+        date_posted: str | None = None,
+        content_type: str | None = None,
     ) -> dict[str, Any]:
         """
         Performs a general LinkedIn search for posts using keywords and filters like date and content type. It supports pagination and can utilize either the 'classic' or 'sales_navigator' API, searching broadly across the platform rather than fetching posts from a specific user's profile.
@@ -86,8 +85,8 @@ class ScraperApp(APIApplication):
     def linkedin_list_profile_posts(
         self,
         identifier: str,
-        cursor:
-        limit:
+        cursor: str | None = None,
+        limit: int | None = None,
     ) -> dict[str, Any]:
         """
         Fetches a paginated list of all LinkedIn posts from a specific user or company profile using their unique identifier. This function retrieves content directly from a profile, unlike `linkedin_post_search` which finds posts across LinkedIn based on keywords and other filters.
@@ -143,9 +142,9 @@ class ScraperApp(APIApplication):
     def linkedin_list_post_comments(
         self,
         post_id: str,
-        comment_id:
-        cursor:
-        limit:
+        comment_id: str | None = None,
+        cursor: str | None = None,
+        limit: int | None = None,
     ) -> dict[str, Any]:
         """
         Fetches comments for a specified LinkedIn post. If a `comment_id` is provided, it retrieves replies to that comment instead of top-level comments. This function supports pagination and specifically targets comments, unlike others in the class that search for or list entire posts.
@@ -176,54 +175,54 @@ class ScraperApp(APIApplication):
 
     def linkedin_people_search(
         self,
-        cursor:
-        limit:
-        keywords:
-        last_viewed_at:
-        saved_search_id:
-        recent_search_id:
-        location:
-        location_by_postal_code:
-        industry:
-        first_name:
-        last_name:
-        tenure:
-        groups:
-        school:
-        profile_language:
-        company:
-        company_headcount:
-        company_type:
-        company_location:
-        tenure_at_company:
-        past_company:
-        function:
-        role:
-        tenure_at_role:
-        seniority:
-        past_role:
-        following_your_company:
-        viewed_your_profile_recently:
-        network_distance:
-        connections_of:
-        past_colleague:
-        shared_experiences:
-        changed_jobs:
-        posted_on_linkedin:
-        mentionned_in_news:
-        persona:
-        account_lists:
-        lead_lists:
-        viewed_profile_recently:
-        messaged_recently:
-        include_saved_leads:
-        include_saved_accounts:
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        last_viewed_at: int | None = None,
+        saved_search_id: str | None = None,
+        recent_search_id: str | None = None,
+        location: dict[str, Any] | None = None,
+        location_by_postal_code: dict[str, Any] | None = None,
+        industry: dict[str, Any] | None = None,
+        first_name: str | None = None,
+        last_name: str | None = None,
+        tenure: list[dict[str, Any]] | None = None,
+        groups: list[str] | None = None,
+        school: dict[str, Any] | None = None,
+        profile_language: list[str] | None = None,
+        company: dict[str, Any] | None = None,
+        company_headcount: list[dict[str, Any]] | None = None,
+        company_type: list[str] | None = None,
+        company_location: dict[str, Any] | None = None,
+        tenure_at_company: list[dict[str, Any]] | None = None,
+        past_company: dict[str, Any] | None = None,
+        function: dict[str, Any] | None = None,
+        role: dict[str, Any] | None = None,
+        tenure_at_role: list[dict[str, Any]] | None = None,
+        seniority: dict[str, Any] | None = None,
+        past_role: dict[str, Any] | None = None,
+        following_your_company: bool | None = None,
+        viewed_your_profile_recently: bool | None = None,
+        network_distance: list[str] | None = None,
+        connections_of: list[str] | None = None,
+        past_colleague: bool | None = None,
+        shared_experiences: bool | None = None,
+        changed_jobs: bool | None = None,
+        posted_on_linkedin: bool | None = None,
+        mentionned_in_news: bool | None = None,
+        persona: list[str] | None = None,
+        account_lists: dict[str, Any] | None = None,
+        lead_lists: dict[str, Any] | None = None,
+        viewed_profile_recently: bool | None = None,
+        messaged_recently: bool | None = None,
+        include_saved_leads: bool | None = None,
+        include_saved_accounts: bool | None = None,
     ) -> dict[str, Any]:
         """
         Performs a comprehensive LinkedIn Sales Navigator people search with advanced targeting options.
         This function provides access to LinkedIn's Sales Navigator search capabilities for finding people
         with precise filters including experience, company details, education, and relationship criteria.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return.
@@ -267,13 +266,13 @@ class ScraperApp(APIApplication):
             messaged_recently: LinkedIn native filter: PEOPLE YOU INTERACTED WITH / MESSAGED. Example: True
             include_saved_leads: LinkedIn native filter: SAVED LEADS AND ACCOUNTS / ALL MY SAVED LEADS. Example: True
             include_saved_accounts: LinkedIn native filter: SAVED LEADS AND ACCOUNTS / ALL MY SAVED ACCOUNTS. Example: True
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, sales_navigator, people, search, advanced, scraper, api, important
         """
@@ -325,27 +324,27 @@ class ScraperApp(APIApplication):
 
     def linkedin_company_search(
         self,
-        cursor:
-        limit:
-        keywords:
-        last_viewed_at:
-        saved_search_id:
-        recent_search_id:
-        location:
-        location_by_postal_code:
-        industry:
-        company_headcount:
-        company_type:
-        company_location:
-        following_your_company:
-        account_lists:
-        include_saved_accounts:
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        last_viewed_at: int | None = None,
+        saved_search_id: str | None = None,
+        recent_search_id: str | None = None,
+        location: dict[str, Any] | None = None,
+        location_by_postal_code: dict[str, Any] | None = None,
+        industry: dict[str, Any] | None = None,
+        company_headcount: list[dict[str, Any]] | None = None,
+        company_type: list[str] | None = None,
+        company_location: dict[str, Any] | None = None,
+        following_your_company: bool | None = None,
+        account_lists: dict[str, Any] | None = None,
+        include_saved_accounts: bool | None = None,
     ) -> dict[str, Any]:
         """
         Performs a comprehensive LinkedIn Sales Navigator company search with advanced targeting options.
         This function provides access to LinkedIn's Sales Navigator search capabilities for finding companies
         with precise filters including size, location, industry, and relationship criteria.
-
+
         Args:
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return.
@@ -362,13 +361,13 @@ class ScraperApp(APIApplication):
             following_your_company: LinkedIn native filter: FOLLOWING YOUR COMPANY. Example: True
             account_lists: LinkedIn native filter: ACCOUNT LISTS. Example: {"include": ["account_list_id_1"]}
             include_saved_accounts: LinkedIn native filter: SAVED LEADS AND ACCOUNTS / ALL MY SAVED ACCOUNTS. Example: True
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, sales_navigator, companies, search, advanced, scraper, api, important
         """
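
For orientation, here is a hypothetical call against the new `ScraperApp` signatures. The import path follows the package layout in the file list above; the `Integration` wiring, the constructor keyword, and the example values are assumptions, not part of the diff:

```python
from typing import Any

from universal_mcp.applications.scraper import ScraperApp
from universal_mcp.integrations import Integration


def find_posts(integration: Integration) -> dict[str, Any]:
    # The integration is assumed to carry Unipile credentials,
    # including the "account_id" read in __init__ above.
    app = ScraperApp(integration=integration)
    return app.linkedin_post_search(
        category="posts",
        api="classic",             # or "sales_navigator"
        keywords="universal mcp",  # str | None
        limit=10,                  # int | None
        sort_by=None,              # remaining filters default to None
    )
```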
--- a/universal_mcp/applications/serpapi/app.py
+++ b/universal_mcp/applications/serpapi/app.py
@@ -2,12 +2,12 @@ from typing import Any  # For type hinting
 
 import httpx
 from loguru import logger
-
-from serpapi import SerpApiClient as SerpApiSearch  # Added SerpApiError
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.exceptions import NotAuthorizedError  # For auth errors
 from universal_mcp.integrations import Integration  # For integration type hint
 
+from serpapi import SerpApiClient as SerpApiSearch  # Added SerpApiError
+
 
 class SerpapiApp(APIApplication):
     def __init__(self, integration: Integration | None = None, **kwargs: Any) -> None:
@@ -78,17 +78,17 @@ class SerpapiApp(APIApplication):
     async def web_search(self, params: dict[str, Any] | None = None) -> str:
         """
         Performs a general web search via SerpApi, defaulting to the 'google_light' engine. It accepts custom parameters, retrieves organic results, and formats them into a string with titles, links, and snippets. It also handles API authentication and raises `NotAuthorizedError` for credential-related issues.
-
+
         Args:
             params: Dictionary of engine-specific parameters (e.g., {'q': 'Coffee', 'engine': 'google_light', 'location': 'Austin, TX'}). Defaults to None.
-
+
         Returns:
             A formatted string containing search results with titles, links, and snippets, or an error message if the search fails.
-
+
         Raises:
             NotAuthorizedError: If the API key cannot be retrieved or is invalid/rejected by SerpApi.
             Exception: For other unexpected errors during the search process. (Specific HTTP errors or SerpApiErrors are caught and returned as strings or raise NotAuthorizedError).
-
+
         Tags:
             search, async, web-scraping, api, serpapi, important
         """
@@ -194,19 +194,19 @@ class SerpapiApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Executes a Google Maps search via SerpApi using a query, coordinates, or place ID. It enhances the results by adding a `google_maps_url` to each location, distinguishing it from `get_google_maps_reviews` which retrieves reviews for a known place.
-
+
         Args:
             q (string, optional): The search query for Google Maps (e.g., "Coffee", "Restaurants", "Gas stations").
             ll (string, optional): Latitude and longitude with zoom level in format "@lat,lng,zoom" (e.g., "@40.7455096,-74.0083012,14z"). The zoom attribute ranges from 3z (map completely zoomed out) to 21z (map completely zoomed in). Results are not guaranteed to be within the requested geographic location.
             place_id (string, optional): The unique reference to a place in Google Maps. Place IDs are available for most locations, including businesses, landmarks, parks, and intersections. You can find the place_id using our Google Maps API. place_id can be used without any other optional parameter. place_id and data_cid can't be used together.
-
+
         Returns:
             dict[str, Any]: Formatted Google Maps search results with place names, addresses, ratings, and other details.
-
+
         Raises:
             ValueError: Raised when required parameters are missing.
             HTTPStatusError: Raised when the API request fails with detailed error information including status code and response body.
-
+
         Tags:
             google-maps, search, location, places, important
         """
@@ -249,18 +249,18 @@ class SerpapiApp(APIApplication):
     ) -> dict[str, Any]:
         """
         Fetches Google Maps reviews for a specific location via SerpApi using its unique `data_id`. This function uses the `google_maps_reviews` engine, unlike `google_maps_search` which finds locations. Results can be returned in a specified language, defaulting to English.
-
+
         Args:
             data_id (string): The data ID of the place to get reviews for (e.g., "0x89c259af336b3341:0xa4969e07ce3108de").
             hl (string, optional): Language parameter for the search results. Defaults to "en".
-
+
         Returns:
             dict[str, Any]: Google Maps reviews data with ratings, comments, and other review details.
-
+
         Raises:
             ValueError: Raised when required parameters are missing.
             HTTPStatusError: Raised when the API request fails with detailed error information including status code and response body.
-
+
         Tags:
             google-maps, reviews, ratings, places, important
         """
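
Since `web_search` is a coroutine, callers need an event loop. A minimal sketch, assuming a configured SerpApi integration (the `run_search` helper and the query values are illustrative, not from the package):

```python
import asyncio

from universal_mcp.applications.serpapi import SerpapiApp
from universal_mcp.integrations import Integration


async def run_search(integration: Integration) -> str:
    app = SerpapiApp(integration=integration)
    # params are passed straight through to SerpApi; the engine
    # defaults to 'google_light' when not specified.
    return await app.web_search(
        {"q": "Coffee", "engine": "google_light", "location": "Austin, TX"}
    )


# asyncio.run(run_search(integration))  # supply a real Integration here
```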
--- a/universal_mcp/applications/sharepoint/app.py
+++ b/universal_mcp/applications/sharepoint/app.py
@@ -6,7 +6,6 @@ from typing import Any
 
 from loguru import logger
 from office365.graph_client import GraphClient
-
 from universal_mcp.applications.application import BaseApplication
 from universal_mcp.integrations import Integration
 
@@ -37,7 +36,7 @@ class SharepointApp(BaseApplication):
     def client(self):
         """
         A lazy-loaded property that gets or creates an authenticated GraphClient instance. On first access, it uses integration credentials to initialize the client, fetches initial user and site data, and caches the instance for subsequent use, ensuring efficient connection management for all SharePoint operations.
-
+
         Returns:
             GraphClient: The authenticated GraphClient instance.
         """
@@ -75,13 +74,13 @@ class SharepointApp(BaseApplication):
     def list_folders(self, folder_path: str | None = None) -> list[dict[str, Any]]:
         """
         Retrieves a list of immediate subfolder names within a specified SharePoint directory. If no path is provided, it defaults to the root drive. This function is distinct from `list_files`, as it exclusively lists directories, not files.
-
+
         Args:
             folder_path (Optional[str], optional): The path to the parent folder. If None, lists folders in the root.
-
+
         Returns:
             List[Dict[str, Any]]: A list of folder names in the specified directory.
-
+
         Tags:
             important
         """
@@ -98,14 +97,14 @@ class SharepointApp(BaseApplication):
     ) -> dict[str, Any]:
         """
         Creates a new folder with a given name inside a specified parent directory (or the root). It then returns an updated list of all folder names within that same directory, effectively confirming that the creation operation was successful.
-
+
         Args:
             folder_name (str): The name of the folder to create.
             folder_path (str | None, optional): The path to the parent folder. If None, creates in the root.
-
+
         Returns:
             Dict[str, Any]: The updated list of folders in the target directory.
-
+
         Tags:
             important
         """
@@ -119,13 +118,13 @@ class SharepointApp(BaseApplication):
     def list_files(self, folder_path: str) -> list[dict[str, Any]]:
         """
         Retrieves metadata for all files in a specified folder. For each file, it returns key details like name, URL, size, and timestamps. This function exclusively lists file properties, distinguishing it from `list_folders` (which lists directories) and `get_document_content` (which retrieves file content).
-
+
         Args:
             folder_path (str): The path to the folder whose documents are to be listed.
-
+
         Returns:
             List[Dict[str, Any]]: A list of dictionaries containing document metadata.
-
+
         Tags:
             important
         """
@@ -148,15 +147,15 @@ class SharepointApp(BaseApplication):
     ) -> dict[str, Any]:
         """
         Uploads string content to create a new file in a specified SharePoint folder. To confirm the operation, it returns an updated list of all files and their metadata from that directory, including the newly created file.
-
+
         Args:
             file_path (str): The path to the folder where the document will be created.
             file_name (str): The name of the document to create.
             content (str): The content to write into the document.
-
+
         Returns:
             Dict[str, Any]: The updated list of documents in the folder.
-
+
         Tags: important
         """
         file = self.client.me.drive.root.get_by_path(file_path)
@@ -168,13 +167,13 @@ class SharepointApp(BaseApplication):
     def get_document_content(self, file_path: str) -> dict[str, Any]:
         """
         Retrieves a file's content from a specified SharePoint path. It returns a dictionary containing the file's name and size, decoding text files as a string and Base64-encoding binary files. Unlike `list_files`, which only fetches metadata, this function provides the actual file content.
-
+
         Args:
             file_path (str): The path to the document.
-
+
         Returns:
             Dict[str, Any]: A dictionary containing the document's name, content type, content (as text or base64), and size.
-
+
         Tags: important
         """
         file = self.client.me.drive.root.get_by_path(file_path).get().execute_query()
@@ -201,13 +200,13 @@ class SharepointApp(BaseApplication):
     def delete_document(self, file_path: str):
         """
         Permanently deletes a specified file from a SharePoint drive using its full path. This is the sole destructive file operation, contrasting with functions that read or create files. It returns `True` on successful deletion and raises an exception on failure, such as if the file is not found.
-
+
         Args:
             file_path (str): The path to the file to delete.
-
+
         Returns:
             bool: True if the file was deleted successfully.
-
+
         Tags:
             important
         """
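
Putting the SharePoint read-side methods together, a sketch assuming an authenticated Microsoft Graph integration; the folder and file paths here are hypothetical, and only method names visible in the diff are used:

```python
from universal_mcp.applications.sharepoint import SharepointApp
from universal_mcp.integrations import Integration


def inspect_drive(integration: Integration) -> None:
    # GraphClient is built lazily on first use of the `client` property.
    app = SharepointApp(integration=integration)
    print(app.list_folders())                    # subfolder names at the drive root
    print(app.list_files("Documents"))           # metadata only; "Documents" is a hypothetical path
    doc = app.get_document_content("Documents/notes.txt")  # hypothetical file
    print(doc)                                   # name, content type, content (text or base64), size
```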