universal-mcp-applications 0.1.22__py3-none-any.whl → 0.1.39rc8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-applications has been flagged as potentially problematic by the registry.
- universal_mcp/applications/ahrefs/app.py +92 -238
- universal_mcp/applications/airtable/app.py +23 -122
- universal_mcp/applications/apollo/app.py +122 -475
- universal_mcp/applications/asana/app.py +605 -1755
- universal_mcp/applications/aws_s3/app.py +36 -103
- universal_mcp/applications/bill/app.py +644 -2055
- universal_mcp/applications/box/app.py +1246 -4159
- universal_mcp/applications/braze/app.py +410 -1476
- universal_mcp/applications/browser_use/README.md +15 -1
- universal_mcp/applications/browser_use/__init__.py +1 -0
- universal_mcp/applications/browser_use/app.py +94 -37
- universal_mcp/applications/cal_com_v2/app.py +207 -625
- universal_mcp/applications/calendly/app.py +103 -242
- universal_mcp/applications/canva/app.py +75 -140
- universal_mcp/applications/clickup/app.py +331 -798
- universal_mcp/applications/coda/app.py +240 -520
- universal_mcp/applications/confluence/app.py +497 -1285
- universal_mcp/applications/contentful/app.py +36 -151
- universal_mcp/applications/crustdata/app.py +42 -121
- universal_mcp/applications/dialpad/app.py +451 -924
- universal_mcp/applications/digitalocean/app.py +2071 -6082
- universal_mcp/applications/domain_checker/app.py +3 -54
- universal_mcp/applications/e2b/app.py +14 -64
- universal_mcp/applications/elevenlabs/app.py +9 -47
- universal_mcp/applications/exa/README.md +8 -4
- universal_mcp/applications/exa/app.py +408 -186
- universal_mcp/applications/falai/app.py +24 -101
- universal_mcp/applications/figma/app.py +91 -175
- universal_mcp/applications/file_system/app.py +2 -13
- universal_mcp/applications/firecrawl/app.py +186 -163
- universal_mcp/applications/fireflies/app.py +59 -281
- universal_mcp/applications/fpl/app.py +92 -529
- universal_mcp/applications/fpl/utils/fixtures.py +15 -49
- universal_mcp/applications/fpl/utils/helper.py +25 -89
- universal_mcp/applications/fpl/utils/league_utils.py +20 -64
- universal_mcp/applications/ghost_content/app.py +66 -175
- universal_mcp/applications/github/app.py +28 -65
- universal_mcp/applications/gong/app.py +140 -300
- universal_mcp/applications/google_calendar/app.py +26 -78
- universal_mcp/applications/google_docs/app.py +324 -354
- universal_mcp/applications/google_drive/app.py +194 -793
- universal_mcp/applications/google_gemini/app.py +29 -64
- universal_mcp/applications/google_mail/README.md +1 -0
- universal_mcp/applications/google_mail/app.py +93 -214
- universal_mcp/applications/google_searchconsole/app.py +25 -58
- universal_mcp/applications/google_sheet/app.py +174 -623
- universal_mcp/applications/google_sheet/helper.py +26 -53
- universal_mcp/applications/hashnode/app.py +57 -269
- universal_mcp/applications/heygen/app.py +77 -155
- universal_mcp/applications/http_tools/app.py +10 -32
- universal_mcp/applications/hubspot/README.md +1 -1
- universal_mcp/applications/hubspot/app.py +7508 -99
- universal_mcp/applications/jira/app.py +2419 -8334
- universal_mcp/applications/klaviyo/app.py +737 -1619
- universal_mcp/applications/linkedin/README.md +23 -4
- universal_mcp/applications/linkedin/app.py +861 -155
- universal_mcp/applications/mailchimp/app.py +696 -1851
- universal_mcp/applications/markitdown/app.py +8 -20
- universal_mcp/applications/miro/app.py +333 -815
- universal_mcp/applications/ms_teams/app.py +85 -207
- universal_mcp/applications/neon/app.py +144 -250
- universal_mcp/applications/notion/app.py +36 -51
- universal_mcp/applications/onedrive/README.md +24 -0
- universal_mcp/applications/onedrive/__init__.py +1 -0
- universal_mcp/applications/onedrive/app.py +316 -0
- universal_mcp/applications/openai/app.py +42 -165
- universal_mcp/applications/outlook/README.md +22 -9
- universal_mcp/applications/outlook/app.py +606 -262
- universal_mcp/applications/perplexity/README.md +2 -1
- universal_mcp/applications/perplexity/app.py +162 -20
- universal_mcp/applications/pipedrive/app.py +1021 -3331
- universal_mcp/applications/posthog/app.py +272 -541
- universal_mcp/applications/reddit/app.py +88 -204
- universal_mcp/applications/resend/app.py +41 -107
- universal_mcp/applications/retell/app.py +23 -50
- universal_mcp/applications/rocketlane/app.py +250 -963
- universal_mcp/applications/scraper/README.md +7 -4
- universal_mcp/applications/scraper/app.py +245 -283
- universal_mcp/applications/semanticscholar/app.py +36 -78
- universal_mcp/applications/semrush/app.py +43 -77
- universal_mcp/applications/sendgrid/app.py +826 -1576
- universal_mcp/applications/sentry/app.py +444 -1079
- universal_mcp/applications/serpapi/app.py +40 -143
- universal_mcp/applications/sharepoint/README.md +16 -14
- universal_mcp/applications/sharepoint/app.py +245 -154
- universal_mcp/applications/shopify/app.py +1743 -4479
- universal_mcp/applications/shortcut/app.py +272 -534
- universal_mcp/applications/slack/app.py +58 -109
- universal_mcp/applications/spotify/app.py +206 -405
- universal_mcp/applications/supabase/app.py +174 -283
- universal_mcp/applications/tavily/app.py +2 -2
- universal_mcp/applications/trello/app.py +853 -2816
- universal_mcp/applications/twilio/app.py +14 -50
- universal_mcp/applications/twitter/api_segments/compliance_api.py +4 -14
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +6 -18
- universal_mcp/applications/twitter/api_segments/likes_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/lists_api.py +5 -15
- universal_mcp/applications/twitter/api_segments/trends_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/tweets_api.py +9 -31
- universal_mcp/applications/twitter/api_segments/usage_api.py +1 -5
- universal_mcp/applications/twitter/api_segments/users_api.py +14 -42
- universal_mcp/applications/whatsapp/app.py +35 -186
- universal_mcp/applications/whatsapp/audio.py +2 -6
- universal_mcp/applications/whatsapp/whatsapp.py +17 -51
- universal_mcp/applications/whatsapp_business/app.py +86 -299
- universal_mcp/applications/wrike/app.py +80 -153
- universal_mcp/applications/yahoo_finance/app.py +19 -65
- universal_mcp/applications/youtube/app.py +120 -306
- universal_mcp/applications/zenquotes/app.py +4 -4
- {universal_mcp_applications-0.1.22.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/METADATA +4 -2
- {universal_mcp_applications-0.1.22.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/RECORD +113 -117
- {universal_mcp_applications-0.1.22.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/WHEEL +1 -1
- universal_mcp/applications/hubspot/api_segments/__init__.py +0 -0
- universal_mcp/applications/hubspot/api_segments/api_segment_base.py +0 -54
- universal_mcp/applications/hubspot/api_segments/crm_api.py +0 -7337
- universal_mcp/applications/hubspot/api_segments/marketing_api.py +0 -1467
- universal_mcp/applications/unipile/README.md +0 -28
- universal_mcp/applications/unipile/__init__.py +0 -1
- universal_mcp/applications/unipile/app.py +0 -1077
- {universal_mcp_applications-0.1.22.dist-info → universal_mcp_applications-0.1.39rc8.dist-info}/licenses/LICENSE +0 -0

universal_mcp/applications/google_sheet/helper.py +26 -53

@@ -5,7 +5,7 @@ Helper functions for Google Sheets table detection and analysis.
 from typing import Any


-def analyze_sheet_for_tables(
+async def analyze_sheet_for_tables(
     get_values_func,
     spreadsheet_id: str,
     sheet_id: int,
@@ -20,17 +20,17 @@ def analyze_sheet_for_tables(
     try:
         # Get sample data from the sheet (first 100 rows)
         sample_range = f"{sheet_title}!A1:Z100"
-        sample_data = get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)
+        sample_data = await get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)

         values = sample_data.get("values", [])
         if not values or len(values) < min_rows:
             return tables

         # Find potential table regions
-        table_regions = find_table_regions(values, min_rows, min_columns)
+        table_regions = await find_table_regions(values, min_rows, min_columns)

         for i, region in enumerate(table_regions):
-            confidence = calculate_table_confidence(values, region)
+            confidence = await calculate_table_confidence(values, region)

             if confidence >= min_confidence:
                 table_info = {
@@ -45,7 +45,7 @@ def analyze_sheet_for_tables(
                     "rows": region["end_row"] - region["start_row"] + 1,
                     "columns": region["end_column"] - region["start_column"] + 1,
                     "confidence": confidence,
-                    "range": f"{sheet_title}!{get_column_letter(region['start_column'])}{region['start_row'] + 1}:{get_column_letter(region['end_column'])}{region['end_row'] + 1}",
+                    "range": f"{sheet_title}!{await get_column_letter(region['start_column'])}{region['start_row'] + 1}:{await get_column_letter(region['end_column'])}{region['end_row'] + 1}",
                 }
                 tables.append(table_info)

@@ -56,9 +56,7 @@ def analyze_sheet_for_tables(
     return tables


-def analyze_table_schema(
-    get_values_func, spreadsheet_id: str, table_info: dict, sample_size: int = 50
-) -> dict[str, Any]:
+async def analyze_table_schema(get_values_func, spreadsheet_id: str, table_info: dict, sample_size: int = 50) -> dict[str, Any]:
     """
     Analyze table structure and infer column names, types, and constraints.

@@ -74,7 +72,7 @@ def analyze_table_schema(
     try:
         # Get sample data from the table
         sample_range = table_info["range"]
-        sample_data = get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)
+        sample_data = await get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)

         values = sample_data.get("values", [])
         if not values:
@@ -85,7 +83,7 @@ def analyze_table_schema(
         sample_values = values[:actual_sample_size]

         # Analyze column structure
-        columns = analyze_columns(sample_values)
+        columns = await analyze_columns(sample_values)

         return {
             "spreadsheet_id": spreadsheet_id,
@@ -103,7 +101,7 @@ def analyze_table_schema(
         raise ValueError(f"Failed to analyze table schema: {str(e)}")


-def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
+async def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
     """Analyze column structure and infer types."""
     if not sample_values:
         return []
@@ -115,9 +113,7 @@ def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
     columns = []

     for col_idx in range(len(headers)):
-        column_name = (
-            str(headers[col_idx]) if col_idx < len(headers) else f"Column_{col_idx + 1}"
-        )
+        column_name = str(headers[col_idx]) if col_idx < len(headers) else f"Column_{col_idx + 1}"

         # Extract column values
         column_values = []
@@ -126,7 +122,7 @@ def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
                 column_values.append(row[col_idx])

         # Analyze column type
-        column_type, constraints = infer_column_type(column_values)
+        column_type, constraints = await infer_column_type(column_values)

         column_info = {
             "name": column_name,
@@ -134,12 +130,8 @@ def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
             "type": column_type,
             "constraints": constraints,
             "sample_values": column_values[:5],  # First 5 sample values
-            "null_count": sum(
-                1 for val in column_values if not val or str(val).strip() == ""
-            ),
-            "unique_count": len(
-                set(str(val) for val in column_values if val and str(val).strip())
-            ),
+            "null_count": sum(1 for val in column_values if not val or str(val).strip() == ""),
+            "unique_count": len(set(str(val) for val in column_values if val and str(val).strip())),
         }

         columns.append(column_info)
@@ -147,7 +139,7 @@ def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
     return columns


-def infer_column_type(values: list[Any]) -> tuple[str, dict]:
+async def infer_column_type(values: list[Any]) -> tuple[str, dict]:
     """Infer the most likely data type for a column."""
     if not values:
         return "TEXT", {}
@@ -159,11 +151,7 @@ def infer_column_type(values: list[Any]) -> tuple[str, dict]:
         return "TEXT", {}

     # Check for boolean values
-    boolean_count = sum(
-        1
-        for val in non_empty_values
-        if str(val).lower() in ["true", "false", "yes", "no", "1", "0"]
-    )
+    boolean_count = sum(1 for val in non_empty_values if str(val).lower() in ["true", "false", "yes", "no", "1", "0"])
     if boolean_count / len(non_empty_values) >= 0.8:
         return "BOOLEAN", {}

@@ -215,9 +203,7 @@ def infer_column_type(values: list[Any]) -> tuple[str, dict]:
     return "TEXT", {}


-def find_table_regions(
-    values: list[list], min_rows: int, min_columns: int
-) -> list[dict]:
+async def find_table_regions(values: list[list], min_rows: int, min_columns: int) -> list[dict]:
     """Find potential table regions in the data."""
     regions = []

@@ -268,7 +254,7 @@ def find_table_regions(
     return regions


-def calculate_table_confidence(values: list[list], region: dict) -> float:
+async def calculate_table_confidence(values: list[list], region: dict) -> float:
     """Calculate confidence score for a potential table region."""
     if not values:
         return 0.0
@@ -290,9 +276,7 @@ def calculate_table_confidence(values: list[list], region: dict) -> float:

     # Calculate confidence based on data consistency
     total_cells = sum(len(row) for row in region_data)
-    non_empty_cells = sum(
-        sum(1 for cell in row if cell and str(cell).strip()) for row in region_data
-    )
+    non_empty_cells = sum(sum(1 for cell in row if cell and str(cell).strip()) for row in region_data)

     if total_cells == 0:
         return 0.0
@@ -301,8 +285,8 @@ def calculate_table_confidence(values: list[list], region: dict) -> float:
     data_density = non_empty_cells / total_cells

     # Additional factors
-    has_headers = has_header_row(region_data)
-    consistent_columns = has_consistent_columns(region_data)
+    has_headers = await has_header_row(region_data)
+    consistent_columns = await has_consistent_columns(region_data)

     confidence = data_density * 0.6  # 60% weight to data density

@@ -315,7 +299,7 @@ def calculate_table_confidence(values: list[list], region: dict) -> float:
     return min(confidence, 1.0)


-def has_header_row(data: list[list]) -> bool:
+async def has_header_row(data: list[list]) -> bool:
     """Check if the first row looks like a header."""
     if not data or len(data) < 2:
         return False
@@ -328,11 +312,7 @@ def has_header_row(data: list[list]) -> bool:

     # Check if header row has mostly text values
     header_text_count = sum(
-        1
-        for cell in header_row
-        if cell
-        and isinstance(cell, str)
-        and not cell.replace(".", "").replace("-", "").isdigit()
+        1 for cell in header_row if cell and isinstance(cell, str) and not cell.replace(".", "").replace("-", "").isdigit()
     )

     # Check if data rows have different data types than header
@@ -345,7 +325,7 @@ def has_header_row(data: list[list]) -> bool:
     return header_text_count > len(header_row) * 0.5 and data_numeric_count > 0


-def has_consistent_columns(data: list[list]) -> bool:
+async def has_consistent_columns(data: list[list]) -> bool:
     """Check if columns have consistent data types."""
     if not data or len(data) < 2:
         return False
@@ -358,24 +338,17 @@ def has_consistent_columns(data: list[list]) -> bool:
         column_values = [row[col] for row in data if col < len(row) and row[col]]
         if len(column_values) >= 2:
             # Check if column has consistent type
-            numeric_count = sum(
-                1
-                for val in column_values
-                if str(val).replace(".", "").replace("-", "").isdigit()
-            )
+            numeric_count = sum(1 for val in column_values if str(val).replace(".", "").replace("-", "").isdigit())
             text_count = len(column_values) - numeric_count

             # If 80% of values are same type, consider consistent
-            if (
-                numeric_count / len(column_values) >= 0.8
-                or text_count / len(column_values) >= 0.8
-            ):
+            if numeric_count / len(column_values) >= 0.8 or text_count / len(column_values) >= 0.8:
                 consistent_columns += 1

     return consistent_columns / total_columns >= 0.6 if total_columns > 0 else False


-def get_column_letter(column_index: int) -> str:
+async def get_column_letter(column_index: int) -> str:
     """Convert column index to A1 notation letter."""
     result = ""
     while column_index >= 0: