universal-mcp-applications 0.1.30rc2__py3-none-any.whl → 0.1.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/google_docs/app.py +6 -2
- universal_mcp/applications/google_sheet/app.py +6 -2
- universal_mcp/applications/linkedin/app.py +218 -97
- universal_mcp/applications/scraper/app.py +242 -69
- universal_mcp/applications/slack/app.py +31 -0
- {universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/METADATA +1 -1
- {universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/RECORD +9 -9
- {universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/licenses/LICENSE +0 -0
universal_mcp/applications/google_docs/app.py

@@ -11,7 +11,7 @@ class GoogleDocsApp(APIApplication):

     def create_document(self, title: str) -> dict[str, Any]:
         """
-        Creates a blank Google Document with a specified title by sending a POST request to the Google Docs API. The function returns a dictionary containing the new document's metadata, including the unique document ID required by other functions for subsequent modifications or retrieval.
+        Creates a blank Google Document with a specified title by sending a POST request to the Google Docs API. The function returns a dictionary containing the new document's metadata, including the unique document ID required by other functions for subsequent modifications or retrieval. Note that you need to call other google_docs functions (e.g. `google_docs__insert_text`) to actually add content after creating the document.

         Args:
             title: The title for the new Google Document to be created.
@@ -30,7 +30,11 @@ class GoogleDocsApp(APIApplication):
         document_data = {"title": title}
         response = self._post(url, data=document_data)
         response.raise_for_status()
-        return response.json()
+        payload = response.json()
+        payload["Note"] = (
+            "You must load and call other google docs content functions (like google_docs__insert_text)"
+        )
+        return payload

     def get_document(self, document_id: str) -> dict[str, Any]:
         """
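For orientation, the practical effect of this hunk is that `create_document` now returns the Google Docs API metadata plus an extra `"Note"` key. A minimal sketch of what a caller sees, assuming a configured `GoogleDocsApp` instance named `docs` and the `documentId` field returned by the Google Docs documents.create endpoint:

```python
# Hedged illustration of the new return shape; only the "Note" key is added by this release.
doc = docs.create_document(title="Quarterly report")

doc_id = doc["documentId"]  # ID needed by follow-up content calls such as an insert-text tool
print(doc["Note"])
# -> "You must load and call other google docs content functions (like google_docs__insert_text)"
```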
universal_mcp/applications/google_sheet/app.py

@@ -21,7 +21,7 @@ class GoogleSheetApp(APIApplication):

     def create_spreadsheet(self, title: str) -> dict[str, Any]:
         """
-        Creates a new, blank Google Spreadsheet file with a specified title. This function generates a completely new document, unlike `add_sheet` which adds a worksheet (tab) to an existing spreadsheet. It returns the API response containing the new spreadsheet's metadata.
+        Creates a new, blank Google Spreadsheet file with a specified title. This function generates a completely new document, unlike `add_sheet` which adds a worksheet (tab) to an existing spreadsheet. It returns the API response containing the new spreadsheet's metadata. Note that you need to call other google_sheet functions (e.g. `google_sheet__write_values_to_sheet`) to actually add content after creating the spreadsheet.

         Args:
             title: String representing the desired title for the new spreadsheet
@@ -39,7 +39,11 @@ class GoogleSheetApp(APIApplication):
         url = self.base_url
         spreadsheet_data = {"properties": {"title": title}}
         response = self._post(url, data=spreadsheet_data)
-        return self._handle_response(response)
+        payload = self._handle_response(response)
+        payload["Note"] = (
+            "You must load and call other google_sheet content functions (like `google_sheet__write_values_to_sheet`)"
+        )
+        return payload

     def get_spreadsheet_metadata(self, spreadsheetId: str) -> dict[str, Any]:
         """
universal_mcp/applications/linkedin/app.py

@@ -78,6 +78,38 @@ class LinkedinApp(APIApplication):
             "Cache-Control": "no-cache", # Often good practice for APIs
         }

+    def _get_search_parameter_id(self, param_type: str, keywords: str) -> str:
+        """
+        Retrieves the ID for a given LinkedIn search parameter by its name.
+
+        Args:
+            param_type: The type of parameter to search for (e.g., "LOCATION", "COMPANY").
+            keywords: The name of the parameter to find (e.g., "United States").
+
+        Returns:
+            The corresponding ID for the search parameter.
+
+        Raises:
+            ValueError: If no exact match for the keywords is found.
+            httpx.HTTPError: If the API request fails.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search/parameters"
+        params = {
+            "account_id": self.account_id,
+            "keywords": keywords,
+            "type": param_type,
+        }
+
+        response = self._get(url, params=params)
+        results = self._handle_response(response)
+
+        items = results.get("items", [])
+        if items:
+            # Return the ID of the first result, assuming it's the most relevant
+            return items[0]["id"]
+
+        raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')
+
     def list_all_chats(
         self,
         unread: bool | None = None,
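The helper above is what lets the new search methods accept human-readable filter names. A rough sketch of the exchange it performs, with the response shape inferred only from the code in this hunk and a made-up ID value:

```python
# Assuming a configured LinkedinApp instance named `app`.
location_id = app._get_search_parameter_id("LOCATION", "United States")

# Under the hood this issues roughly:
#   GET {base_url}/api/v1/linkedin/search/parameters
#       ?account_id=<account>&keywords=United+States&type=LOCATION
# and, per the code above, returns items[0]["id"] from a response like:
#   {"items": [{"id": "103644278", ...}]}   # "103644278" is a placeholder value
# If "items" is empty, a ValueError is raised instead.
```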
@@ -271,54 +303,6 @@ class LinkedinApp(APIApplication):
         response = self._get(url, params=params)
         return response.json()

-    def list_all_accounts(
-        self,
-        cursor: str | None = None,
-        limit: int | None = None, # 1-259 according to spec
-    ) -> dict[str, Any]:
-        """
-        Retrieves a paginated list of all social media accounts linked to the Unipile service. This is crucial for obtaining the `account_id` required by other methods to specify which user account should perform an action, like sending a message or retrieving user-specific posts.
-
-        Args:
-            cursor: Pagination cursor.
-            limit: Number of items to return (1-259).
-
-        Returns:
-            A dictionary containing a list of account objects and a pagination cursor.
-
-        Raises:
-            httpx.HTTPError: If the API request fails.
-
-        Tags:
-            linkedin, account, list, unipile, api, important
-        """
-        url = f"{self.base_url}/api/v1/accounts"
-        params: dict[str, Any] = {}
-        if cursor:
-            params["cursor"] = cursor
-        if limit:
-            params["limit"] = limit
-
-        response = self._get(url, params=params)
-        return response.json()
-
-    # def retrieve_linked_account(self) -> dict[str, Any]:
-    #     """
-    #     Retrieves details for the account linked to Unipile. It fetches metadata about the connection itself (e.g., a linked LinkedIn account), differentiating it from `retrieve_user_profile` which fetches a user's profile from the external platform.
-
-    #     Returns:
-    #         A dictionary containing the account object details.
-
-    #     Raises:
-    #         httpx.HTTPError: If the API request fails.
-
-    #     Tags:
-    #         linkedin, account, retrieve, get, unipile, api, important
-    #     """
-    #     url = f"{self.base_url}/api/v1/accounts/{self.account_id}"
-    #     response = self._get(url)
-    #     return response.json()
-
     def list_profile_posts(
         self,
         identifier: str, # User or Company provider internal ID
@@ -602,43 +586,50 @@ class LinkedinApp(APIApplication):
             "message": "Reaction action processed.",
         }

-    def
+    def retrieve_user_profile(self, identifier: str) -> dict[str, Any]:
+        """
+        Retrieves a specific LinkedIn user's profile using their public or internal ID. Unlike `retrieve_own_profile`, which fetches the authenticated user's details, this function targets and returns data for any specified third-party user profile on the platform.
+
+        Args:
+            identifier: Can be the provider's internal id OR the provider's public id of the requested user.For example, for https://www.linkedin.com/in/manojbajaj95/, the identifier is "manojbajaj95".
+
+        Returns:
+            A dictionary containing the user's profile details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+
+        Tags:
+            linkedin, user, profile, retrieve, get, api, important
+        """
+        url = f"{self.base_url}/api/v1/users/{identifier}"
+        params: dict[str, Any] = {"account_id": self.account_id}
+        response = self._get(url, params=params)
+        return self._handle_response(response)
+
+    def search_people(
         self,
-        category: Literal["people", "companies", "posts", "jobs"],
         cursor: str | None = None,
         limit: int | None = None,
         keywords: str | None = None,
-
-
-
+        location: str | None = None,
+        industry: str | None = None,
+        company: str | None = None,
     ) -> dict[str, Any]:
         """
-
-
-        For people, companies, and jobs, it uses the classic API.
-
+        Searches for LinkedIn user profiles using keywords, with optional filters for location, industry, and company. This function specifically targets the 'people' category, distinguishing it from other search methods like `search_companies` or `search_jobs` that query different entity types through the same API endpoint.
+
         Args:
-            category: Type of search to perform. Valid values are "people", "companies", "posts", or "jobs".
             cursor: Pagination cursor for the next page of entries.
             limit: Number of items to return (up to 50 for Classic search).
             keywords: Keywords to search for.
-
-            sort_by: How to sort the results (for posts and jobs). Valid values are "relevance" or "date".
-            minimum_salary_value: The minimum salary to filter for (jobs only).
-
+
         Returns:
             A dictionary containing search results and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-            ValueError: If the category is empty.
-
-        Tags:
-            linkedin, search, people, companies, posts, jobs, api, important
         """
-        if not category:
-            raise ValueError("Category cannot be empty.")
-
         url = f"{self.base_url}/api/v1/linkedin/search"

         params: dict[str, Any] = {"account_id": self.account_id}
@@ -647,47 +638,176 @@ class LinkedinApp(APIApplication):
         if limit is not None:
             params["limit"] = limit

-        payload: dict[str, Any] = {"api": "classic", "category":
+        payload: dict[str, Any] = {"api": "classic", "category": "people"}

         if keywords:
             payload["keywords"] = keywords
+
+        if location:
+            location_id = self._get_search_parameter_id("LOCATION", location)
+            payload["location"] = [location_id]

-        if
-
-
-
-
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+
+        if company:
+            company_id = self._get_search_parameter_id("COMPANY", company)
+            payload["company"] = [company_id]
+
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)

-
-
-
-
-
-
-
+    def search_companies(
+        self,
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        location: str | None = None,
+        industry: str | None = None,
+    ) -> dict[str, Any]:
+        """
+        Performs a paginated search for companies on LinkedIn using keywords, with optional location and industry filters. Its specific 'companies' search category distinguishes it from other methods like `search_people` or `search_posts`, ensuring that only company profiles are returned.
+
+        Args:
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+
+        Returns:
+            A dictionary containing search results and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+
+        payload: dict[str, Any] = {"api": "classic", "category": "companies"}
+
+        if keywords:
+            payload["keywords"] = keywords
+
+        if location:
+            location_id = self._get_search_parameter_id("LOCATION", location)
+            payload["location"] = [location_id]
+
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]

         response = self._post(url, params=params, data=payload)
         return self._handle_response(response)

-    def
+    def search_posts(
+        self,
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        date_posted: Literal["past_day", "past_week", "past_month"] | None = None,
+        sort_by: Literal["relevance", "date"] = "relevance",
+    ) -> dict[str, Any]:
         """
-
-
+        Performs a keyword-based search for LinkedIn posts, allowing filters for date and sorting by relevance. This function executes a general, platform-wide content search, distinguishing it from other search functions that target people, companies, or jobs, and from `list_profile_posts` which retrieves from a specific profile.
+
         Args:
-
-
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+            date_posted: Filter by when the post was posted.
+            sort_by: How to sort the results.
+
         Returns:
-            A dictionary containing
-
+            A dictionary containing search results and pagination details.
+
         Raises:
             httpx.HTTPError: If the API request fails.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"

-
-
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+
+        payload: dict[str, Any] = {"api": "classic", "category": "posts"}
+
+        if keywords:
+            payload["keywords"] = keywords
+        if date_posted:
+            payload["date_posted"] = date_posted
+        if sort_by:
+            payload["sort_by"] = sort_by
+
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
+
+    def search_jobs(
+        self,
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        region: str | None = None,
+        sort_by: Literal["relevance", "date"] = "relevance",
+        minimum_salary_value: int = 40,
+        industry: str | None = None,
+    ) -> dict[str, Any]:
         """
-
+        Performs a LinkedIn search for jobs, filtering results by keywords, region, industry, and minimum salary. Unlike other search functions (`search_people`, `search_companies`), this method is specifically configured to query the 'jobs' category, providing a paginated list of relevant employment opportunities.
+
+        Args:
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+            location: The geographical location to filter jobs by (e.g., "United States").
+            sort_by: How to sort the results.
+            minimum_salary_value: The minimum salary to filter for.
+
+        Returns:
+            A dictionary containing search results and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+            ValueError: If the specified location is not found.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"

         params: dict[str, Any] = {"account_id": self.account_id}
-
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+
+        payload: dict[str, Any] = {
+            "api": "classic",
+            "category": "jobs",
+            "minimum_salary": {
+                "currency": "USD",
+                "value": minimum_salary_value,
+            },
+        }
+
+        if keywords:
+            payload["keywords"] = keywords
+        if sort_by:
+            payload["sort_by"] = sort_by
+
+        # If location is provided, get its ID and add it to the payload
+        if region:
+            location_id = self._get_search_parameter_id("LOCATION", region)
+            payload["region"] = location_id
+
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+
+        response = self._post(url, params=params, data=payload)
         return self._handle_response(response)

     def list_tools(self) -> list[Callable]:
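Taken together, this hunk replaces the old single search entry point with category-specific methods. A hedged usage sketch, assuming a configured `LinkedinApp` instance named `linkedin`; all argument values below are invented:

```python
# Each call resolves friendly filter names to IDs via _get_search_parameter_id,
# then POSTs to /api/v1/linkedin/search with the matching "category".
people = linkedin.search_people(keywords="data engineer", location="United States", limit=10)
companies = linkedin.search_companies(keywords="fintech", industry="Financial Services")
posts = linkedin.search_posts(keywords="model context protocol", date_posted="past_week")
jobs = linkedin.search_jobs(keywords="machine learning", region="Germany",
                            minimum_salary_value=60)

print(people)  # response shape depends on the Unipile search endpoint
```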
@@ -697,8 +817,6 @@ class LinkedinApp(APIApplication):
             self.send_chat_message,
             self.retrieve_chat,
             self.list_all_messages,
-            self.list_all_accounts,
-            # self.retrieve_linked_account,
             self.list_profile_posts,
             self.retrieve_own_profile,
             self.retrieve_user_profile,
@@ -708,5 +826,8 @@ class LinkedinApp(APIApplication):
             self.list_content_reactions,
             self.create_post_comment,
             self.create_reaction,
-            self.
+            self.search_companies,
+            self.search_jobs,
+            self.search_people,
+            self.search_posts,
         ]
universal_mcp/applications/scraper/app.py

@@ -71,72 +71,38 @@ class ScraperApp(APIApplication):
             "Cache-Control": "no-cache", # Often good practice for APIs
         }

-    def
-        self,
-        category: Literal["people", "companies", "posts", "jobs"],
-        cursor: str | None = None,
-        limit: int | None = None,
-        keywords: str | None = None,
-        date_posted: Literal["past_day", "past_week", "past_month"] | None = None,
-        sort_by: Literal["relevance", "date"] = "relevance",
-        minimum_salary_value: int = 40,
-    ) -> dict[str, Any]:
+    def _get_search_parameter_id(self, param_type: str, keywords: str) -> str:
         """
-
-        Supports pagination and targets either the classic or Sales Navigator API for posts.
-        For people, companies, and jobs, it uses the classic API.
+        Retrieves the ID for a given LinkedIn search parameter by its name.

         Args:
-
-
-            limit: Number of items to return (up to 50 for Classic search).
-            keywords: Keywords to search for.
-            date_posted: Filter by when the post was posted (posts only). Valid values are "past_day", "past_week", or "past_month".
-            sort_by: How to sort the results (for posts and jobs). Valid values are "relevance" or "date".
-            minimum_salary_value: The minimum salary to filter for (jobs only).
+            param_type: The type of parameter to search for (e.g., "LOCATION", "COMPANY").
+            keywords: The name of the parameter to find (e.g., "United States").

         Returns:
-
+            The corresponding ID for the search parameter.

         Raises:
+            ValueError: If no exact match for the keywords is found.
             httpx.HTTPError: If the API request fails.
-            ValueError: If the category is empty.
-
-        Tags:
-            linkedin, search, people, companies, posts, jobs, api, important
         """
-
-
-
-
-
-
-        if cursor:
-            params["cursor"] = cursor
-        if limit is not None:
-            params["limit"] = limit
+        url = f"{self.base_url}/api/v1/linkedin/search/parameters"
+        params = {
+            "account_id": self.account_id,
+            "keywords": keywords,
+            "type": param_type,
+        }

-
+        response = self._get(url, params=params)
+        results = self._handle_response(response)

-
-
+        items = results.get("items", [])
+        if items:
+            # Return the ID of the first result, assuming it's the most relevant
+            return items[0]["id"]

-
-            if date_posted:
-                payload["date_posted"] = date_posted
-            if sort_by:
-                payload["sort_by"] = sort_by
+        raise ValueError(f'Could not find a matching ID for {param_type}: "{keywords}"')

-        elif category == "jobs":
-            payload["minimum_salary"] = {
-                "currency": "USD",
-                "value": minimum_salary_value,
-            }
-            if sort_by:
-                payload["sort_by"] = sort_by
-
-        response = self._post(url, params=params, data=payload)
-        return self._handle_response(response)

     def linkedin_list_profile_posts(
         self,
@@ -146,20 +112,20 @@ class ScraperApp(APIApplication):
         is_company: bool | None = None,
     ) -> dict[str, Any]:
         """
-
-
+        Fetches a paginated list of posts from a specific user or company profile using its provider ID. The `is_company` flag must specify the entity type. Unlike `linkedin_search_posts`, this function directly retrieves content from a known profile's feed instead of performing a global keyword search.
+
         Args:
             identifier: The entity's provider internal ID (LinkedIn ID).
             cursor: Pagination cursor.
             limit: Number of items to return (1-100, as per Unipile example, though spec allows up to 250).
             is_company: Boolean indicating if the identifier is for a company.
-
+
         Returns:
             A dictionary containing a list of post objects and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, post, list, user_posts, company_posts, content, api, important
         """
@@ -177,17 +143,17 @@ class ScraperApp(APIApplication):

     def linkedin_retrieve_profile(self, identifier: str) -> dict[str, Any]:
         """
-
-
+        Fetches a specific LinkedIn user's profile using their public or internal ID. Unlike `linkedin_search_people`, which discovers multiple users via keywords, this function targets and retrieves detailed data for a single, known individual based on a direct identifier.
+
         Args:
             identifier: Can be the provider's internal id OR the provider's public id of the requested user.For example, for https://www.linkedin.com/in/manojbajaj95/, the identifier is "manojbajaj95".
-
+
         Returns:
             A dictionary containing the user's profile details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, user, profile, retrieve, get, api, important
         """
@@ -204,20 +170,20 @@ class ScraperApp(APIApplication):
         limit: int | None = None,
     ) -> dict[str, Any]:
         """
-        Fetches comments for a
-
+        Fetches a paginated list of comments for a specified LinkedIn post. It can retrieve either top-level comments or threaded replies if an optional `comment_id` is provided. This is a read-only operation, distinct from functions that search for posts or list user-specific content.
+
         Args:
             post_id: The social ID of the post.
             comment_id: If provided, retrieves replies to this comment ID instead of top-level comments.
             cursor: Pagination cursor.
             limit: Number of comments to return. (OpenAPI spec shows type string, passed as string if provided).
-
+
         Returns:
             A dictionary containing a list of comment objects and pagination details.
-
+
         Raises:
             httpx.HTTPError: If the API request fails.
-
+
         Tags:
             linkedin, post, comment, list, content, api, important
         """
@@ -233,6 +199,210 @@ class ScraperApp(APIApplication):
         response = self._get(url, params=params)
         return response.json()

+    def linkedin_search_people(
+        self,
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        location: str | None = None,
+        industry: str | None = None,
+        company: str | None = None,
+    ) -> dict[str, Any]:
+        """
+        Performs a paginated search for people on LinkedIn, distinct from searches for companies or jobs. It filters results using keywords, location, industry, and company, internally converting filter names like 'United States' into their required API IDs before making the request.
+
+        Args:
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+
+        Returns:
+            A dictionary containing search results and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+
+        payload: dict[str, Any] = {"api": "classic", "category": "people"}
+
+        if keywords:
+            payload["keywords"] = keywords
+
+        if location:
+            location_id = self._get_search_parameter_id("LOCATION", location)
+            payload["location"] = [location_id]
+
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+
+        if company:
+            company_id = self._get_search_parameter_id("COMPANY", company)
+            payload["company"] = [company_id]
+
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
+
+    def linkedin_search_companies(
+        self,
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        location: str | None = None,
+        industry: str | None = None,
+    ) -> dict[str, Any]:
+        """
+        Executes a paginated LinkedIn search for companies, filtering by optional keywords, location, and industry. Unlike `linkedin_search_people` or `linkedin_search_jobs`, this function specifically sets the API search category to 'companies' to ensure that only company profiles are returned in the search results.
+
+        Args:
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+
+        Returns:
+            A dictionary containing search results and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+
+        payload: dict[str, Any] = {"api": "classic", "category": "companies"}
+
+        if keywords:
+            payload["keywords"] = keywords
+
+        if location:
+            location_id = self._get_search_parameter_id("LOCATION", location)
+            payload["location"] = [location_id]
+
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
+
+    def linkedin_search_posts(
+        self,
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        date_posted: Literal["past_day", "past_week", "past_month"] | None = None,
+        sort_by: Literal["relevance", "date"] = "relevance",
+    ) -> dict[str, Any]:
+        """
+        Performs a keyword-based search for LinkedIn posts, allowing results to be filtered by date and sorted by relevance. This function specifically queries the 'posts' category, distinguishing it from other search methods in the class that target people, companies, or jobs, and returns relevant content.
+
+        Args:
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+            date_posted: Filter by when the post was posted.
+            sort_by: How to sort the results.
+
+        Returns:
+            A dictionary containing search results and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+
+        payload: dict[str, Any] = {"api": "classic", "category": "posts"}
+
+        if keywords:
+            payload["keywords"] = keywords
+        if date_posted:
+            payload["date_posted"] = date_posted
+        if sort_by:
+            payload["sort_by"] = sort_by
+
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
+
+    def linkedin_search_jobs(
+        self,
+        cursor: str | None = None,
+        limit: int | None = None,
+        keywords: str | None = None,
+        region: str | None = None,
+        sort_by: Literal["relevance", "date"] = "relevance",
+        minimum_salary_value: int = 40,
+        industry: str | None = None,
+    ) -> dict[str, Any]:
+        """
+        Executes a LinkedIn search specifically for job listings using keywords and filters like region, industry, and minimum salary. Unlike other search functions targeting people or companies, this is specialized for job listings and converts friendly filter names (e.g., "United States") into their required API IDs.
+
+        Args:
+            cursor: Pagination cursor for the next page of entries.
+            limit: Number of items to return (up to 50 for Classic search).
+            keywords: Keywords to search for.
+            location: The geographical location to filter jobs by (e.g., "United States").
+            sort_by: How to sort the results.
+            minimum_salary_value: The minimum salary to filter for.
+
+        Returns:
+            A dictionary containing search results and pagination details.
+
+        Raises:
+            httpx.HTTPError: If the API request fails.
+            ValueError: If the specified location is not found.
+        """
+        url = f"{self.base_url}/api/v1/linkedin/search"
+
+        params: dict[str, Any] = {"account_id": self.account_id}
+        if cursor:
+            params["cursor"] = cursor
+        if limit is not None:
+            params["limit"] = limit
+
+        payload: dict[str, Any] = {
+            "api": "classic",
+            "category": "jobs",
+            "minimum_salary": {
+                "currency": "USD",
+                "value": minimum_salary_value,
+            },
+        }
+
+        if keywords:
+            payload["keywords"] = keywords
+        if sort_by:
+            payload["sort_by"] = sort_by
+
+        # If location is provided, get its ID and add it to the payload
+        if region:
+            location_id = self._get_search_parameter_id("LOCATION", region)
+            payload["region"] = location_id
+
+        if industry:
+            industry_id = self._get_search_parameter_id("INDUSTRY", industry)
+            payload["industry"] = [industry_id]
+
+        response = self._post(url, params=params, data=payload)
+        return self._handle_response(response)
+
+
     def list_tools(self):
         """
         Returns a list of available tools/functions in this application.
@@ -241,8 +411,11 @@ class ScraperApp(APIApplication):
         A list of functions that can be used as tools.
         """
         return [
-            self.linkedin_search,
            self.linkedin_list_profile_posts,
            self.linkedin_retrieve_profile,
            self.linkedin_list_post_comments,
+            self.linkedin_search_people,
+            self.linkedin_search_companies,
+            self.linkedin_search_posts,
+            self.linkedin_search_jobs,
        ]
universal_mcp/applications/slack/app.py

@@ -9,6 +9,37 @@ class SlackApp(APIApplication):
         super().__init__(name="slack", integration=integration, **kwargs)
         self.base_url = "https://slack.com/api"

+    def _get_headers(self) -> dict[str, str]:
+        """
+        Get headers for Slack API requests.
+        Prioritizes user-scoped access token from raw.authed_user.access_token
+        over the bot token at the root level.
+        """
+        if not self.integration:
+            raise ValueError("Integration not configured for SlackApp")
+
+        credentials = self.integration.get_credentials()
+        if not credentials:
+            raise ValueError("No credentials found for Slack integration")
+
+        access_token = None
+        raw = credentials.get('raw', {})
+        if isinstance(raw, dict) and 'authed_user' in raw:
+            authed_user = raw.get('authed_user', {})
+            if isinstance(authed_user, dict):
+                access_token = authed_user.get('access_token')
+
+        if not access_token:
+            access_token = credentials.get('access_token')
+
+        if not access_token:
+            raise ValueError("Access token not found in Slack credentials")
+
+        return {
+            "Authorization": f"Bearer {access_token}",
+            "Content-Type": "application/json",
+        }
+
     def chat_delete(
         self,
         as_user: bool | None = None,
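For context, the new `_get_headers` prefers a user-scoped token nested under `raw.authed_user` and falls back to the root-level token. A small sketch of credential payloads it would accept; the token values are fake and only the keys mirror the code above:

```python
# Two credential dicts the new header logic handles.
user_scoped = {
    "access_token": "xoxb-bot-token",                               # root-level bot token
    "raw": {"authed_user": {"access_token": "xoxp-user-token"}},   # preferred when present
}
bot_only = {"access_token": "xoxb-bot-token"}                       # falls back to the root token

# user_scoped -> {"Authorization": "Bearer xoxp-user-token", "Content-Type": "application/json"}
# bot_only    -> {"Authorization": "Bearer xoxb-bot-token", "Content-Type": "application/json"}
```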
{universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-applications
-Version: 0.1.30rc2
+Version: 0.1.32
 Summary: A Universal MCP Application: universal_mcp_applications
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
{universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/RECORD

@@ -105,7 +105,7 @@ universal_mcp/applications/google_calendar/__init__.py,sha256=qxVxf_Q5lOdxXRHzmE
 universal_mcp/applications/google_calendar/app.py,sha256=FZptXBLsRo4Rp2kRrVJO_dM3Wr8G0XyMXLHWfPya80Q,25884
 universal_mcp/applications/google_docs/README.md,sha256=KDy_X4SRELegE5sEdixAP0YeXZOXdADTX2D-tAUlCJM,4512
 universal_mcp/applications/google_docs/__init__.py,sha256=U0pWagxnj0VD-AcKNd8eS0orzaMmlUOgvW9vkYBNH40,31
-universal_mcp/applications/google_docs/app.py,sha256=
+universal_mcp/applications/google_docs/app.py,sha256=RtAgXXlUHQlKi9MWOGNfPIhWwFMf-o8_R00B78UOYY8,40048
 universal_mcp/applications/google_drive/README.md,sha256=Kmg7LLaDW-7bnsgdVimwxc5SdUf2uA9Fv8zIMXVa-Uc,15393
 universal_mcp/applications/google_drive/__init__.py,sha256=DTyed4ADcCmALSyPT8whjXoosPXl3m-i8JrilPJ3ijU,32
 universal_mcp/applications/google_drive/app.py,sha256=J81m8OBjE0552GGWsIfgM4idFjjZfPEOsjk0ZVeJzgM,257259
@@ -120,7 +120,7 @@ universal_mcp/applications/google_searchconsole/__init__.py,sha256=PHuwQFk0_a-jb
 universal_mcp/applications/google_searchconsole/app.py,sha256=Cb0UiWi6-acWprr5arn_3efW9QHQ4NEiPec4OPeisXk,14040
 universal_mcp/applications/google_sheet/README.md,sha256=ZoKzozYSGDl1AxcVgWkJk2i5uLwB17UaUsVYqo9epqs,7772
 universal_mcp/applications/google_sheet/__init__.py,sha256=sl1VQKQMlYuzZGHUIyVsFvnar6APaIFb4Y_nl7TA0us,32
-universal_mcp/applications/google_sheet/app.py,sha256=
+universal_mcp/applications/google_sheet/app.py,sha256=BWCsmVxjTqHPLukA6KODVqQgJ3zghnCjIOVBcaiNYtw,87753
 universal_mcp/applications/google_sheet/helper.py,sha256=rC_2I4oKfd-rpj3UIWXGH4pZlMX1vI9kixryxBFymSY,11996
 universal_mcp/applications/hashnode/README.md,sha256=l0BsLTCS3AeUphVRF2xLWEE0DFPtNBrSNUeaHPcmsx0,1028
 universal_mcp/applications/hashnode/__init__.py,sha256=ty459WmLiNoGM4XZAKWNASp-0MCMBV15J3LstDbZWPw,29
@@ -146,7 +146,7 @@ universal_mcp/applications/klaviyo/__init__.py,sha256=YS2GhW7my_I1tfyLlxlkeTFmlz
 universal_mcp/applications/klaviyo/app.py,sha256=xHQxEZFVIWPCBmL6YoYxuVREibwPRH21izw0psmOzFc,423692
 universal_mcp/applications/linkedin/README.md,sha256=gwbNgrPKUsGzQEsta3kp8SK5RJJ5mPg23WGG548Y6no,4476
 universal_mcp/applications/linkedin/__init__.py,sha256=Yj-713vb4ZYykIlXlwOkKkIXIOB3opCW8wvp_CCqlKk,29
-universal_mcp/applications/linkedin/app.py,sha256=
+universal_mcp/applications/linkedin/app.py,sha256=8pstuCQ88n2QehDAd_FfBaYxlDLF4jbnaGVAr09a8Dc,31988
 universal_mcp/applications/mailchimp/README.md,sha256=xOR32HA8h-WMS9ntcBxyllM3UOBYiyvZ6tJBHlAuU7k,33802
 universal_mcp/applications/mailchimp/__init__.py,sha256=wmXVl-NJyTNkFT5db29OZmeiLWAGu9jXwdZC5o2jZBw,30
 universal_mcp/applications/mailchimp/app.py,sha256=_a6iByjDK1SuM3UoT5lTokptdEryUzrS8JsYNLCTwi4,466723
@@ -197,7 +197,7 @@ universal_mcp/applications/rocketlane/__init__.py,sha256=jl3PjnTvPdjnbFXJgLywSlE
 universal_mcp/applications/rocketlane/app.py,sha256=Ae2hQFI5PylCLtNPJkTqWMLGsLx5fDd4wRFDhxTzTXQ,240689
 universal_mcp/applications/scraper/README.md,sha256=JUNLshHABs4T1f24nvQeee62YIElSkxpU-zs2kuS0Gw,1497
 universal_mcp/applications/scraper/__init__.py,sha256=W5Buzq8QbetUQm5m9xXCHeWcvVObU2vZ4xbvYtZImJo,28
-universal_mcp/applications/scraper/app.py,sha256=
+universal_mcp/applications/scraper/app.py,sha256=YKWdxvzWSM9J-zbor0Ty66BfzLQEbdpHybmbgC8Hwhc,16435
 universal_mcp/applications/semanticscholar/README.md,sha256=JpLY_698pvstgoNfQ5Go8C8ehQ-o68uFDX5kr86upK0,2834
 universal_mcp/applications/semanticscholar/__init__.py,sha256=eR36chrc0pbBsSE1GadvmQH0OmtKnSC91xbE7HcDPf0,36
 universal_mcp/applications/semanticscholar/app.py,sha256=OHTFkR-IwRU5Rvb1bEu7XmRHikht3hEgZxszLQu6kFI,22234
@@ -224,7 +224,7 @@ universal_mcp/applications/shortcut/__init__.py,sha256=05TfFUEWxlnh3TmeIdCks1dHb
 universal_mcp/applications/shortcut/app.py,sha256=X-MBMc2mgQgJKKPVbuB9CC5lMpaEm30PSg-rNGwhI2E,172078
 universal_mcp/applications/slack/README.md,sha256=FovYD_aU_NQ6Bj3BPbCk03urDv3UsPu_-7lyeJU4R8I,3634
 universal_mcp/applications/slack/__init__.py,sha256=ch8yJiFaTzKNt0zG6G4n_BpscOFEt1DUrPtXL1K9oIE,26
-universal_mcp/applications/slack/app.py,sha256=
+universal_mcp/applications/slack/app.py,sha256=LzAWVwbyFzvozfD6HeGTw8M7q3uoLNmxGhqM-Va-SlY,28494
 universal_mcp/applications/spotify/README.md,sha256=pbQbCxPE7vFMQqFzy5kjYLLyJ78KcwZluUHitNC1jAs,9362
 universal_mcp/applications/spotify/__init__.py,sha256=CFdDEEPuXWvfXpwPQxeQ9KN0bTLcnCF6FsA93he8XqA,28
 universal_mcp/applications/spotify/app.py,sha256=BsfOYP87rmJ0Jjjlg9p4z0lKQRPAwsvzWnwMPirJ-Zg,105456
@@ -276,7 +276,7 @@ universal_mcp/applications/youtube/app.py,sha256=eqgqe0b53W9Mj0FZGW3ZqY3xkGF4NbO
 universal_mcp/applications/zenquotes/README.md,sha256=FJyoTGRCaZjF_bsCBqg1CrYcvIfuUG_Qk616G1wjhF8,512
 universal_mcp/applications/zenquotes/__init__.py,sha256=C5nEHZ3Xy6nYUarq0BqQbbJnHs0UtSlqhk0DqmvWiHk,58
 universal_mcp/applications/zenquotes/app.py,sha256=7xIEnSZWAGYu5583Be2ZjSCtLUAfMWRzucSpp7hw_h4,1299
-universal_mcp_applications-0.1.
-universal_mcp_applications-0.1.
-universal_mcp_applications-0.1.
-universal_mcp_applications-0.1.
+universal_mcp_applications-0.1.32.dist-info/METADATA,sha256=5y-qE3OI4j5463JmNBk5ClM1sYIzwvBbVVG9YVvWWio,2956
+universal_mcp_applications-0.1.32.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_applications-0.1.32.dist-info/licenses/LICENSE,sha256=NweDZVPslBAZFzlgByF158b85GR0f5_tLQgq1NS48To,1063
+universal_mcp_applications-0.1.32.dist-info/RECORD,,
{universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/WHEEL
RENAMED, file without changes.

{universal_mcp_applications-0.1.30rc2.dist-info → universal_mcp_applications-0.1.32.dist-info}/licenses/LICENSE
RENAMED, file without changes.