universal-mcp-applications 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/airtable/app.py +1 -0
- universal_mcp/applications/apollo/app.py +1 -0
- universal_mcp/applications/{aws-s3 → aws_s3}/app.py +4 -5
- universal_mcp/applications/bill/app.py +3 -3
- universal_mcp/applications/box/app.py +2 -6
- universal_mcp/applications/braze/app.py +2 -6
- universal_mcp/applications/cal_com_v2/__init__.py +1 -0
- universal_mcp/applications/{cal-com-v2 → cal_com_v2}/app.py +138 -182
- universal_mcp/applications/clickup/app.py +2 -2
- universal_mcp/applications/confluence/app.py +1 -0
- universal_mcp/applications/contentful/app.py +8 -19
- universal_mcp/applications/digitalocean/app.py +9 -27
- universal_mcp/applications/{domain-checker → domain_checker}/app.py +2 -1
- universal_mcp/applications/elevenlabs/app.py +98 -3188
- universal_mcp/applications/falai/app.py +1 -0
- universal_mcp/applications/file_system/__init__.py +1 -0
- universal_mcp/applications/file_system/app.py +96 -0
- universal_mcp/applications/fireflies/app.py +4 -3
- universal_mcp/applications/fpl/app.py +1 -0
- universal_mcp/applications/fpl/utils/fixtures.py +1 -1
- universal_mcp/applications/fpl/utils/helper.py +1 -1
- universal_mcp/applications/fpl/utils/position_utils.py +0 -1
- universal_mcp/applications/{ghost-content → ghost_content}/app.py +2 -1
- universal_mcp/applications/github/app.py +4 -3
- universal_mcp/applications/{google-calendar → google_calendar}/app.py +2 -1
- universal_mcp/applications/{google-docs → google_docs}/app.py +1 -1
- universal_mcp/applications/{google-drive → google_drive}/app.py +2 -1
- universal_mcp/applications/google_gemini/app.py +183 -0
- universal_mcp/applications/{google-mail → google_mail}/app.py +2 -1
- universal_mcp/applications/{google-searchconsole → google_searchconsole}/app.py +1 -1
- universal_mcp/applications/{google-sheet → google_sheet}/app.py +3 -2
- universal_mcp/applications/google_sheet/helper.py +385 -0
- universal_mcp/applications/hashnode/app.py +2 -1
- universal_mcp/applications/{http-tools → http_tools}/app.py +2 -1
- universal_mcp/applications/hubspot/app.py +16 -2
- universal_mcp/applications/jira/app.py +7 -18
- universal_mcp/applications/markitdown/app.py +2 -3
- universal_mcp/applications/{ms-teams → ms_teams}/app.py +1 -1
- universal_mcp/applications/openai/app.py +2 -3
- universal_mcp/applications/outlook/app.py +1 -3
- universal_mcp/applications/pipedrive/app.py +2 -6
- universal_mcp/applications/reddit/app.py +1 -0
- universal_mcp/applications/replicate/app.py +3 -3
- universal_mcp/applications/resend/app.py +1 -2
- universal_mcp/applications/rocketlane/app.py +1 -0
- universal_mcp/applications/semrush/app.py +478 -1467
- universal_mcp/applications/sentry/README.md +20 -20
- universal_mcp/applications/sentry/app.py +40 -40
- universal_mcp/applications/serpapi/app.py +2 -2
- universal_mcp/applications/sharepoint/app.py +2 -1
- universal_mcp/applications/shopify/app.py +1 -0
- universal_mcp/applications/slack/app.py +3 -3
- universal_mcp/applications/trello/app.py +9 -27
- universal_mcp/applications/twilio/__init__.py +1 -0
- universal_mcp/applications/{twillo → twilio}/app.py +2 -2
- universal_mcp/applications/twitter/README.md +1 -1
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +2 -2
- universal_mcp/applications/twitter/api_segments/lists_api.py +1 -1
- universal_mcp/applications/unipile/app.py +5 -1
- universal_mcp/applications/whatsapp/app.py +18 -17
- universal_mcp/applications/whatsapp/audio.py +110 -0
- universal_mcp/applications/whatsapp/whatsapp.py +398 -0
- universal_mcp/applications/{whatsapp-business → whatsapp_business}/app.py +1 -1
- universal_mcp/applications/youtube/app.py +195 -191
- universal_mcp/applications/zenquotes/app.py +1 -1
- {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.3.dist-info}/METADATA +4 -2
- {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.3.dist-info}/RECORD +97 -95
- universal_mcp/applications/cal-com-v2/__init__.py +0 -1
- universal_mcp/applications/google-ads/__init__.py +0 -1
- universal_mcp/applications/google-ads/app.py +0 -23
- universal_mcp/applications/google-gemini/app.py +0 -663
- universal_mcp/applications/twillo/README.md +0 -0
- universal_mcp/applications/twillo/__init__.py +0 -1
- /universal_mcp/applications/{aws-s3 → aws_s3}/README.md +0 -0
- /universal_mcp/applications/{aws-s3 → aws_s3}/__init__.py +0 -0
- /universal_mcp/applications/{cal-com-v2 → cal_com_v2}/README.md +0 -0
- /universal_mcp/applications/{domain-checker → domain_checker}/README.md +0 -0
- /universal_mcp/applications/{domain-checker → domain_checker}/__init__.py +0 -0
- /universal_mcp/applications/{ghost-content → ghost_content}/README.md +0 -0
- /universal_mcp/applications/{ghost-content → ghost_content}/__init__.py +0 -0
- /universal_mcp/applications/{google-calendar → google_calendar}/README.md +0 -0
- /universal_mcp/applications/{google-calendar → google_calendar}/__init__.py +0 -0
- /universal_mcp/applications/{google-docs → google_docs}/README.md +0 -0
- /universal_mcp/applications/{google-docs → google_docs}/__init__.py +0 -0
- /universal_mcp/applications/{google-drive → google_drive}/README.md +0 -0
- /universal_mcp/applications/{google-drive → google_drive}/__init__.py +0 -0
- /universal_mcp/applications/{google-gemini → google_gemini}/README.md +0 -0
- /universal_mcp/applications/{google-gemini → google_gemini}/__init__.py +0 -0
- /universal_mcp/applications/{google-mail → google_mail}/README.md +0 -0
- /universal_mcp/applications/{google-mail → google_mail}/__init__.py +0 -0
- /universal_mcp/applications/{google-searchconsole → google_searchconsole}/README.md +0 -0
- /universal_mcp/applications/{google-searchconsole → google_searchconsole}/__init__.py +0 -0
- /universal_mcp/applications/{google-sheet → google_sheet}/README.md +0 -0
- /universal_mcp/applications/{google-sheet → google_sheet}/__init__.py +0 -0
- /universal_mcp/applications/{http-tools → http_tools}/README.md +0 -0
- /universal_mcp/applications/{http-tools → http_tools}/__init__.py +0 -0
- /universal_mcp/applications/{ms-teams → ms_teams}/README.md +0 -0
- /universal_mcp/applications/{ms-teams → ms_teams}/__init__.py +0 -0
- /universal_mcp/applications/{google-ads → twilio}/README.md +0 -0
- /universal_mcp/applications/{whatsapp-business → whatsapp_business}/README.md +0 -0
- /universal_mcp/applications/{whatsapp-business → whatsapp_business}/__init__.py +0 -0
- {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.3.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.3.dist-info}/licenses/LICENSE +0 -0
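Most of the path churn in this manifest is one mechanical change: hyphenated application directories (`aws-s3`, `cal-com-v2`, `ms-teams`, ...) become underscored ones, and the misspelled `twillo` package becomes `twilio`. Since a hyphen is not legal in a Python identifier, each renamed directory is now reachable with a plain `import` statement. A minimal sketch of the effect; only the module path is taken from the list above, and it assumes the wheel is installed:

```python
# In 0.1.1 the directory was universal_mcp/applications/aws-s3, which cannot
# appear in a dotted import. After the 0.1.3 rename it can:
from universal_mcp.applications import aws_s3

print(aws_s3.__name__)  # universal_mcp.applications.aws_s3
```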
universal_mcp/applications/google_sheet/helper.py (new file)
@@ -0,0 +1,385 @@
+"""
+Helper functions for Google Sheets table detection and analysis.
+"""
+
+from typing import Any
+
+
+def analyze_sheet_for_tables(
+    get_values_func,
+    spreadsheet_id: str,
+    sheet_id: int,
+    sheet_title: str,
+    min_rows: int,
+    min_columns: int,
+    min_confidence: float,
+) -> list[dict]:
+    """Analyze a sheet to find potential tables."""
+    tables = []
+
+    try:
+        # Get sample data from the sheet (first 100 rows)
+        sample_range = f"{sheet_title}!A1:Z100"
+        sample_data = get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)
+
+        values = sample_data.get("values", [])
+        if not values or len(values) < min_rows:
+            return tables
+
+        # Find potential table regions
+        table_regions = find_table_regions(values, min_rows, min_columns)
+
+        for i, region in enumerate(table_regions):
+            confidence = calculate_table_confidence(values, region)
+
+            if confidence >= min_confidence:
+                table_info = {
+                    "table_id": f"{sheet_title}_table_{i + 1}",
+                    "table_name": f"{sheet_title}_Table_{i + 1}",
+                    "sheet_id": sheet_id,
+                    "sheet_name": sheet_title,
+                    "start_row": region["start_row"],
+                    "end_row": region["end_row"],
+                    "start_column": region["start_column"],
+                    "end_column": region["end_column"],
+                    "rows": region["end_row"] - region["start_row"] + 1,
+                    "columns": region["end_column"] - region["start_column"] + 1,
+                    "confidence": confidence,
+                    "range": f"{sheet_title}!{get_column_letter(region['start_column'])}{region['start_row'] + 1}:{get_column_letter(region['end_column'])}{region['end_row'] + 1}",
+                }
+                tables.append(table_info)
+
+    except Exception:
+        # If analysis fails for a sheet, continue with other sheets
+        pass
+
+    return tables
+
+
+def analyze_table_schema(
+    get_values_func, spreadsheet_id: str, table_info: dict, sample_size: int = 50
+) -> dict[str, Any]:
+    """
+    Analyze table structure and infer column names, types, and constraints.
+
+    Args:
+        get_values_func: Function to get values from spreadsheet
+        spreadsheet_id: The spreadsheet ID
+        table_info: Dictionary containing table information from list_tables
+        sample_size: Number of rows to sample for type inference
+
+    Returns:
+        Dictionary containing the table schema with column analysis
+    """
+    try:
+        # Get sample data from the table
+        sample_range = table_info["range"]
+        sample_data = get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)
+
+        values = sample_data.get("values", [])
+        if not values:
+            raise ValueError("No data found in the specified table")
+
+        # Limit sample size to available data
+        actual_sample_size = min(sample_size, len(values))
+        sample_values = values[:actual_sample_size]
+
+        # Analyze column structure
+        columns = analyze_columns(sample_values)
+
+        return {
+            "spreadsheet_id": spreadsheet_id,
+            "table_name": table_info["table_name"],
+            "sheet_name": table_info["sheet_name"],
+            "table_range": table_info["range"],
+            "total_rows": table_info["rows"],
+            "total_columns": table_info["columns"],
+            "sample_size": actual_sample_size,
+            "columns": columns,
+            "schema_version": "1.0",
+        }
+
+    except Exception as e:
+        raise ValueError(f"Failed to analyze table schema: {str(e)}")
+
+
+def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
+    """Analyze column structure and infer types."""
+    if not sample_values:
+        return []
+
+    # Get headers (first row)
+    headers = sample_values[0] if sample_values else []
+    data_rows = sample_values[1:] if len(sample_values) > 1 else []
+
+    columns = []
+
+    for col_idx in range(len(headers)):
+        column_name = (
+            str(headers[col_idx]) if col_idx < len(headers) else f"Column_{col_idx + 1}"
+        )
+
+        # Extract column values
+        column_values = []
+        for row in data_rows:
+            if col_idx < len(row):
+                column_values.append(row[col_idx])
+
+        # Analyze column type
+        column_type, constraints = infer_column_type(column_values)
+
+        column_info = {
+            "name": column_name,
+            "index": col_idx,
+            "type": column_type,
+            "constraints": constraints,
+            "sample_values": column_values[:5],  # First 5 sample values
+            "null_count": sum(
+                1 for val in column_values if not val or str(val).strip() == ""
+            ),
+            "unique_count": len(
+                set(str(val) for val in column_values if val and str(val).strip())
+            ),
+        }
+
+        columns.append(column_info)
+
+    return columns
+
+
+def infer_column_type(values: list[Any]) -> tuple[str, dict]:
+    """Infer the most likely data type for a column."""
+    if not values:
+        return "TEXT", {}
+
+    # Remove empty values
+    non_empty_values = [val for val in values if val and str(val).strip()]
+
+    if not non_empty_values:
+        return "TEXT", {}
+
+    # Check for boolean values
+    boolean_count = sum(
+        1
+        for val in non_empty_values
+        if str(val).lower() in ["true", "false", "yes", "no", "1", "0"]
+    )
+    if boolean_count / len(non_empty_values) >= 0.8:
+        return "BOOLEAN", {}
+
+    # Check for numeric values
+    numeric_count = 0
+    decimal_count = 0
+    date_count = 0
+
+    for val in non_empty_values:
+        val_str = str(val)
+
+        # Check for dates (basic patterns)
+        if any(
+            pattern in val_str.lower()
+            for pattern in [
+                "/",
+                "-",
+                "jan",
+                "feb",
+                "mar",
+                "apr",
+                "may",
+                "jun",
+                "jul",
+                "aug",
+                "sep",
+                "oct",
+                "nov",
+                "dec",
+            ]
+        ):
+            date_count += 1
+
+        # Check for numbers
+        if val_str.replace(".", "").replace("-", "").replace(",", "").isdigit():
+            numeric_count += 1
+            if "." in val_str:
+                decimal_count += 1
+
+    # Determine type based on analysis
+    if date_count / len(non_empty_values) >= 0.6:
+        return "DATE", {}
+    elif numeric_count / len(non_empty_values) >= 0.8:
+        if decimal_count / numeric_count >= 0.3:
+            return "DECIMAL", {"precision": 2}
+        else:
+            return "INTEGER", {}
+    else:
+        return "TEXT", {}
+
+
+def find_table_regions(
+    values: list[list], min_rows: int, min_columns: int
+) -> list[dict]:
+    """Find potential table regions in the data."""
+    regions = []
+
+    if not values or len(values) < min_rows:
+        return regions
+
+    rows = len(values)
+    cols = max(len(row) for row in values) if values else 0
+
+    if cols < min_columns:
+        return regions
+
+    # Simple heuristic: look for regions with consistent data
+    current_start = -1
+
+    for i in range(rows):
+        # Check if this row has enough data
+        row_data_count = sum(1 for cell in values[i] if cell and str(cell).strip())
+
+        if row_data_count >= min_columns:
+            # Continue current region
+            if current_start == -1:
+                current_start = i
+        else:
+            # End current region if it's valid
+            if current_start != -1 and i - current_start >= min_rows:
+                regions.append(
+                    {
+                        "start_row": current_start,
+                        "end_row": i - 1,
+                        "start_column": 0,
+                        "end_column": cols - 1,
+                    }
+                )
+            current_start = -1
+
+    # Handle region that extends to end
+    if current_start != -1 and rows - current_start >= min_rows:
+        regions.append(
+            {
+                "start_row": current_start,
+                "end_row": rows - 1,
+                "start_column": 0,
+                "end_column": cols - 1,
+            }
+        )
+
+    return regions
+
+
+def calculate_table_confidence(values: list[list], region: dict) -> float:
+    """Calculate confidence score for a potential table region."""
+    if not values:
+        return 0.0
+
+    start_row = region["start_row"]
+    end_row = region["end_row"]
+    start_col = region["start_column"]
+    end_col = region["end_column"]
+
+    # Extract region data
+    region_data = []
+    for i in range(start_row, min(end_row + 1, len(values))):
+        row = values[i]
+        if len(row) > start_col:
+            region_data.append(row[start_col : min(end_col + 1, len(row))])
+
+    if not region_data:
+        return 0.0
+
+    # Calculate confidence based on data consistency
+    total_cells = sum(len(row) for row in region_data)
+    non_empty_cells = sum(
+        sum(1 for cell in row if cell and str(cell).strip()) for row in region_data
+    )
+
+    if total_cells == 0:
+        return 0.0
+
+    # Base confidence on data density
+    data_density = non_empty_cells / total_cells
+
+    # Additional factors
+    has_headers = has_header_row(region_data)
+    consistent_columns = has_consistent_columns(region_data)
+
+    confidence = data_density * 0.6  # 60% weight to data density
+
+    if has_headers:
+        confidence += 0.2  # 20% bonus for headers
+
+    if consistent_columns:
+        confidence += 0.2  # 20% bonus for consistent structure
+
+    return min(confidence, 1.0)
+
+
+def has_header_row(data: list[list]) -> bool:
+    """Check if the first row looks like a header."""
+    if not data or len(data) < 2:
+        return False
+
+    header_row = data[0]
+    data_rows = data[1:]
+
+    if not header_row or not data_rows:
+        return False
+
+    # Check if header row has mostly text values
+    header_text_count = sum(
+        1
+        for cell in header_row
+        if cell
+        and isinstance(cell, str)
+        and not cell.replace(".", "").replace("-", "").isdigit()
+    )
+
+    # Check if data rows have different data types than header
+    data_numeric_count = 0
+    for row in data_rows[:3]:  # Check first 3 data rows
+        for cell in row:
+            if cell and str(cell).replace(".", "").replace("-", "").isdigit():
+                data_numeric_count += 1
+
+    return header_text_count > len(header_row) * 0.5 and data_numeric_count > 0
+
+
+def has_consistent_columns(data: list[list]) -> bool:
+    """Check if columns have consistent data types."""
+    if not data or len(data) < 2:
+        return False
+
+    # Check if most columns have consistent data types
+    consistent_columns = 0
+    total_columns = max(len(row) for row in data)
+
+    for col in range(total_columns):
+        column_values = [row[col] for row in data if col < len(row) and row[col]]
+        if len(column_values) >= 2:
+            # Check if column has consistent type
+            numeric_count = sum(
+                1
+                for val in column_values
+                if str(val).replace(".", "").replace("-", "").isdigit()
+            )
+            text_count = len(column_values) - numeric_count
+
+            # If 80% of values are same type, consider consistent
+            if (
+                numeric_count / len(column_values) >= 0.8
+                or text_count / len(column_values) >= 0.8
+            ):
+                consistent_columns += 1
+
+    return consistent_columns / total_columns >= 0.6 if total_columns > 0 else False
+
+
+def get_column_letter(column_index: int) -> str:
+    """Convert column index to A1 notation letter."""
+    result = ""
+    while column_index >= 0:
+        column_index, remainder = divmod(column_index, 26)
+        result = chr(65 + remainder) + result
+        column_index -= 1
+    return result
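Taken together, these helpers form a small pipeline: `find_table_regions` proposes rectangular regions of densely populated rows, `calculate_table_confidence` scores each region (60% weight on data density, plus 20% apiece for a header-like first row and type-consistent columns), and `analyze_sheet_for_tables` keeps regions above the confidence threshold, labelling them with A1-notation ranges built by `get_column_letter`. A minimal offline sketch; the stub callable and sample data are invented for illustration, while the import path and signatures come from the diff above:

```python
from universal_mcp.applications.google_sheet.helper import analyze_sheet_for_tables


# Stub mimicking the shape of a Sheets values.get response; the helper calls it
# with the keyword arguments spreadsheetId= and range=.
def fake_get_values(spreadsheetId: str, range: str) -> dict:
    return {
        "values": [
            ["Name", "Age", "City"],  # mostly-text header row
            ["Alice", "30", "Paris"],
            ["Bob", "41", "Lyon"],
            ["Carol", "29", "Nice"],
        ]
    }


tables = analyze_sheet_for_tables(
    get_values_func=fake_get_values,
    spreadsheet_id="dummy",
    sheet_id=0,
    sheet_title="Sheet1",
    min_rows=2,
    min_columns=2,
    min_confidence=0.5,
)

# One region covers all four rows: density 1.0 * 0.6, + 0.2 for the header row,
# + 0.2 for consistent columns, clamped to 1.0.
print(tables[0]["range"])       # Sheet1!A1:C4
print(tables[0]["confidence"])  # 1.0
```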
universal_mcp/applications/http_tools/app.py
@@ -1,5 +1,6 @@
 import httpx
 from loguru import logger
+
 from universal_mcp.applications.application import APIApplication
 
 
@@ -15,7 +16,7 @@ class HttpToolsApp(APIApplication):
         Args:
             **kwargs: Additional keyword arguments for the parent APIApplication.
         """
-        super().__init__(name="http-tools", **kwargs)
+        super().__init__(name="http_tools", **kwargs)
 
     def _handle_response(self, response: httpx.Response):
         """
universal_mcp/applications/hubspot/app.py
@@ -3,8 +3,22 @@ from typing import Any
 
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
-
-
+
+
+class CrmApi:
+    def __init__(self, api_client):
+        pass
+
+    def list_tools(self):
+        return []
+
+
+class MarketingApi:
+    def __init__(self, api_client):
+        pass
+
+    def list_tools(self):
+        return []
 
 
 class HubspotApp(APIApplication):
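The two classes added here are inert placeholders: `__init__` accepts an `api_client` and discards it, and `list_tools()` returns an empty list. A hedged guess at the intent, exercising only the code from the hunk above: stubs with this shape let a parent application iterate over its API segments uniformly even when a segment exposes no tools yet.

```python
# Purely illustrative: the stub classes behave exactly as defined above.
crm = CrmApi(api_client=None)
marketing = MarketingApi(api_client=None)

# Aggregating segment tools yields an empty list until the stubs are filled in.
assert [*crm.list_tools(), *marketing.list_tools()] == []
```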
universal_mcp/applications/jira/app.py
@@ -1,6 +1,7 @@
 from typing import Any
 
 import httpx
+
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
 
@@ -7710,9 +7711,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def archive_issues(
-        self, issueIdsOrKeys: list[str] | None = None
-    ) -> dict[str, Any]:
+    def archive_issues(self, issueIdsOrKeys: list[str] | None = None) -> dict[str, Any]:
         """
         Archives Jira issues via the specified issue IDs/keys using the PUT method, handling bulk operations and returning status/error details.
 
@@ -8424,9 +8423,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def delete_issue(
-        self, issueIdOrKey: str, deleteSubtasks: str | None = None
-    ) -> Any:
+    def delete_issue(self, issueIdOrKey: str, deleteSubtasks: str | None = None) -> Any:
         """
         Deletes a Jira issue identified by its ID or key, optionally deleting associated subtasks if the `deleteSubtasks` query parameter is set to `true`.
 
@@ -13865,9 +13862,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def migrate_queries(
-        self, queryStrings: list[str] | None = None
-    ) -> dict[str, Any]:
+    def migrate_queries(self, queryStrings: list[str] | None = None) -> dict[str, Any]:
         """
         Converts JQL queries containing usernames or user keys to equivalent queries with account IDs, handling unknown users appropriately.
 
@@ -14850,9 +14845,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def get_all_permission_schemes(
-        self, expand: str | None = None
-    ) -> dict[str, Any]:
+    def get_all_permission_schemes(self, expand: str | None = None) -> dict[str, Any]:
         """
         Retrieves a list of all permission schemes in Jira Cloud, optionally expanding the response to include additional details such as groups by using the "expand" query parameter.
 
@@ -15389,9 +15382,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def get_plan(
-        self, planId: str, useGroupId: bool | None = None
-    ) -> dict[str, Any]:
+    def get_plan(self, planId: str, useGroupId: bool | None = None) -> dict[str, Any]:
         """
         Retrieves the details of a specific plan identified by its planId using a GET request.
 
@@ -21679,9 +21670,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def get_statuses_by_id(
-        self, id: list[str], expand: str | None = None
-    ) -> list[Any]:
+    def get_statuses_by_id(self, id: list[str], expand: str | None = None) -> list[Any]:
         """
         Retrieves a list of statuses in Jira using the "/rest/api/3/statuses" endpoint, allowing you to fetch details of statuses based on query parameters like expansion and ID, though specific details about what statuses are returned are not provided.
 
universal_mcp/applications/markitdown/app.py
@@ -1,13 +1,12 @@
 import re
 
-from universal_mcp.applications import BaseApplication
-
 from markitdown import MarkItDown
+from universal_mcp.applications.application import BaseApplication
 
 
 class MarkitdownApp(BaseApplication):
     def __init__(self, **kwargs):
-        super().__init__(name="markitdown"
+        super().__init__(name="markitdown")
         self.markitdown = MarkItDown(enable_plugins=True)
 
     async def convert_to_markdown(self, uri: str) -> str:
universal_mcp/applications/ms_teams/app.py
@@ -6,7 +6,7 @@ from universal_mcp.integrations import Integration
 
 class MsTeamsApp(APIApplication):
     def __init__(self, integration: Integration = None, **kwargs) -> None:
-        super().__init__(name="ms-teams", integration=integration, **kwargs)
+        super().__init__(name="ms_teams", integration=integration, **kwargs)
         self.base_url = "https://graph.microsoft.com/v1.0"
 
     def list_chats(
universal_mcp/applications/openai/app.py
@@ -1,9 +1,6 @@
 import base64
 from typing import Any, Literal
 
-from universal_mcp.applications.application import APIApplication
-from universal_mcp.integrations import Integration
-
 from openai import NOT_GIVEN, AsyncOpenAI, OpenAIError
 from openai._types import FileTypes as OpenAiFileTypes
 from openai.types import FilePurpose as OpenAiFilePurpose
@@ -18,6 +15,8 @@ from openai.types.audio_model import AudioModel as OpenAiAudioModel
 from openai.types.chat import ChatCompletionMessageParam
 from openai.types.file_object import FileObject
 from openai.types.image_model import ImageModel as OpenAiImageModel
+from universal_mcp.applications.application import APIApplication
+from universal_mcp.integrations import Integration
 
 
 class OpenaiApp(APIApplication):
universal_mcp/applications/outlook/app.py
@@ -291,9 +291,7 @@ class OutlookApp(APIApplication):
         response = self._get(url, params=query_params)
         return self._handle_response(response)
 
-    def user_delete_message(
-        self, message_id: str, user_id: str | None = None
-    ) -> Any:
+    def user_delete_message(self, message_id: str, user_id: str | None = None) -> Any:
         """
         Deletes a specific message for a given user using the DELETE method and optional If-Match header for conditional requests.
 
universal_mcp/applications/pipedrive/app.py
@@ -1239,9 +1239,7 @@ class PipedriveApp(APIApplication):
         except ValueError:
             return None
 
-    def currencies_get_all_supported(
-        self, term: str | None = None
-    ) -> dict[str, Any]:
+    def currencies_get_all_supported(self, term: str | None = None) -> dict[str, Any]:
         """
         Retrieves a list of currencies based on a search term using the "GET" method at the "/currencies" path.
 
@@ -2140,9 +2138,7 @@ class PipedriveApp(APIApplication):
         except ValueError:
             return None
 
-    def deals_add_follower(
-        self, id: str, user_id: int | None = None
-    ) -> dict[str, Any]:
+    def deals_add_follower(self, id: str, user_id: int | None = None) -> dict[str, Any]:
         """
         Adds followers to a specified deal and returns a success status.
 
universal_mcp/applications/replicate/app.py
@@ -3,14 +3,14 @@ from pathlib import Path
 from typing import Any
 
 from loguru import logger
-from universal_mcp.applications.application import APIApplication
-from universal_mcp.exceptions import NotAuthorizedError, ToolError
-from universal_mcp.integrations import Integration
 
 import replicate
 from replicate.exceptions import ModelError as ReplicateModelError
 from replicate.exceptions import ReplicateError as ReplicateAPIError
 from replicate.prediction import Prediction
+from universal_mcp.applications.application import APIApplication
+from universal_mcp.exceptions import NotAuthorizedError, ToolError
+from universal_mcp.integrations import Integration
 
 
 class ReplicateApp(APIApplication):
universal_mcp/applications/resend/app.py
@@ -1,11 +1,10 @@
 from typing import Any
 
+import resend
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.exceptions import NotAuthorizedError, ToolError
 from universal_mcp.integrations import Integration
 
-import resend
-
 
 class ResendApp(APIApplication):
     def __init__(self, integration: Integration, **kwargs: Any) -> None: