universal-mcp-applications 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of universal-mcp-applications has been flagged as potentially problematic.
- universal_mcp/applications/airtable/app.py +1 -0
- universal_mcp/applications/apollo/app.py +1 -0
- universal_mcp/applications/aws_s3/app.py +3 -4
- universal_mcp/applications/bill/app.py +3 -3
- universal_mcp/applications/box/app.py +2 -6
- universal_mcp/applications/braze/app.py +2 -6
- universal_mcp/applications/cal_com_v2/app.py +22 -64
- universal_mcp/applications/confluence/app.py +1 -0
- universal_mcp/applications/contentful/app.py +8 -19
- universal_mcp/applications/digitalocean/app.py +9 -27
- universal_mcp/applications/{domain-checker → domain_checker}/app.py +2 -1
- universal_mcp/applications/elevenlabs/app.py +98 -3188
- universal_mcp/applications/falai/app.py +1 -0
- universal_mcp/applications/file_system/__init__.py +1 -0
- universal_mcp/applications/file_system/app.py +96 -0
- universal_mcp/applications/fireflies/app.py +4 -3
- universal_mcp/applications/fpl/app.py +1 -0
- universal_mcp/applications/fpl/utils/fixtures.py +1 -1
- universal_mcp/applications/fpl/utils/helper.py +1 -1
- universal_mcp/applications/fpl/utils/position_utils.py +0 -1
- universal_mcp/applications/{ghost-content → ghost_content}/app.py +2 -1
- universal_mcp/applications/github/app.py +3 -1
- universal_mcp/applications/google_calendar/app.py +2 -1
- universal_mcp/applications/google_docs/app.py +1 -1
- universal_mcp/applications/google_drive/app.py +3 -68
- universal_mcp/applications/google_gemini/app.py +138 -618
- universal_mcp/applications/google_mail/app.py +2 -1
- universal_mcp/applications/{google-searchconsole → google_searchconsole}/app.py +1 -1
- universal_mcp/applications/google_sheet/app.py +2 -1
- universal_mcp/applications/google_sheet/helper.py +156 -116
- universal_mcp/applications/hashnode/app.py +1 -0
- universal_mcp/applications/{http-tools → http_tools}/app.py +2 -1
- universal_mcp/applications/hubspot/app.py +4 -1
- universal_mcp/applications/jira/app.py +7 -18
- universal_mcp/applications/markitdown/app.py +2 -3
- universal_mcp/applications/ms_teams/app.py +1 -1
- universal_mcp/applications/openai/app.py +2 -3
- universal_mcp/applications/outlook/app.py +1 -3
- universal_mcp/applications/pipedrive/app.py +2 -6
- universal_mcp/applications/reddit/app.py +1 -0
- universal_mcp/applications/replicate/app.py +3 -3
- universal_mcp/applications/resend/app.py +1 -2
- universal_mcp/applications/rocketlane/app.py +1 -0
- universal_mcp/applications/semrush/app.py +1 -1
- universal_mcp/applications/sentry/README.md +20 -20
- universal_mcp/applications/sentry/app.py +40 -40
- universal_mcp/applications/serpapi/app.py +2 -2
- universal_mcp/applications/sharepoint/app.py +1 -0
- universal_mcp/applications/shopify/app.py +1 -0
- universal_mcp/applications/slack/app.py +3 -3
- universal_mcp/applications/trello/app.py +9 -27
- universal_mcp/applications/twilio/__init__.py +1 -0
- universal_mcp/applications/{twillo → twilio}/app.py +2 -2
- universal_mcp/applications/twitter/README.md +1 -1
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +2 -2
- universal_mcp/applications/twitter/api_segments/lists_api.py +1 -1
- universal_mcp/applications/unipile/app.py +5 -1
- universal_mcp/applications/whatsapp/app.py +18 -17
- universal_mcp/applications/whatsapp/audio.py +110 -0
- universal_mcp/applications/whatsapp/whatsapp.py +398 -0
- universal_mcp/applications/whatsapp_business/app.py +1 -1
- universal_mcp/applications/youtube/app.py +195 -191
- universal_mcp/applications/zenquotes/app.py +1 -1
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/METADATA +4 -2
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/RECORD +76 -75
- universal_mcp/applications/google-ads/__init__.py +0 -1
- universal_mcp/applications/google-ads/app.py +0 -23
- universal_mcp/applications/twillo/README.md +0 -0
- universal_mcp/applications/twillo/__init__.py +0 -1
- /universal_mcp/applications/{domain-checker → domain_checker}/README.md +0 -0
- /universal_mcp/applications/{domain-checker → domain_checker}/__init__.py +0 -0
- /universal_mcp/applications/{ghost-content → ghost_content}/README.md +0 -0
- /universal_mcp/applications/{ghost-content → ghost_content}/__init__.py +0 -0
- /universal_mcp/applications/{google-searchconsole → google_searchconsole}/README.md +0 -0
- /universal_mcp/applications/{google-searchconsole → google_searchconsole}/__init__.py +0 -0
- /universal_mcp/applications/{http-tools → http_tools}/README.md +0 -0
- /universal_mcp/applications/{http-tools → http_tools}/__init__.py +0 -0
- /universal_mcp/applications/{google-ads → twilio}/README.md +0 -0
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/licenses/LICENSE +0 -0
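
Most of the substantive changes in this release are renames: the hyphenated application directories become importable underscore names (domain-checker → domain_checker, ghost-content → ghost_content, google-searchconsole → google_searchconsole, http-tools → http_tools), the misspelled twillo package becomes twilio, and google-ads is removed. The underscore form matters because a hyphen is illegal in a Python identifier, so the old directories could not be reached with a plain import statement. A minimal sketch of the difference (mine, not from the package; it assumes 0.1.4 is installed):

# Before 0.1.4, a plain import of a hyphenated directory is a syntax error:
#
#     import universal_mcp.applications.domain-checker.app   # SyntaxError
#
# and only string-based loading can reach it:
#
#     importlib.import_module("universal_mcp.applications.domain-checker.app")
#
# After the rename, the ordinary import machinery works:
import universal_mcp.applications.domain_checker.app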
universal_mcp/applications/google_mail/app.py

@@ -4,13 +4,14 @@ from email.message import EmailMessage
 from typing import Any
 
 from loguru import logger
+
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
 
 
 class GoogleMailApp(APIApplication):
     def __init__(self, integration: Integration) -> None:
-        super().__init__(name="
+        super().__init__(name="google_mail", integration=integration)
         self.base_api_url = "https://gmail.googleapis.com/gmail/v1/users/me"
         self.base_url = "https://gmail.googleapis.com"
 
universal_mcp/applications/google_searchconsole/app.py

@@ -10,7 +10,7 @@ logger = logging.getLogger(__name__)
 
 class GoogleSearchconsoleApp(APIApplication):
     def __init__(self, integration: Integration = None, **kwargs) -> None:
-        super().__init__(name="
+        super().__init__(name="google_searchconsole", integration=integration, **kwargs)
         self.webmasters_base_url = "https://www.googleapis.com/webmasters/v3"
         self.searchconsole_base_url = "https://searchconsole.googleapis.com/v1"
 
universal_mcp/applications/google_sheet/app.py

@@ -2,6 +2,7 @@ from typing import Any
 
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
+
 from .helper import (
     analyze_sheet_for_tables,
     analyze_table_schema,
@@ -15,7 +16,7 @@ class GoogleSheetApp(APIApplication):
     """
 
     def __init__(self, integration: Integration | None = None) -> None:
-        super().__init__(name="
+        super().__init__(name="google_sheet", integration=integration)
         self.base_url = "https://sheets.googleapis.com/v4/spreadsheets"
 
     def create_spreadsheet(self, title: str) -> dict[str, Any]:
universal_mcp/applications/google_sheet/helper.py

@@ -2,43 +2,40 @@
 Helper functions for Google Sheets table detection and analysis.
 """
 
-from typing import Any, Dict, List, Tuple
+from typing import Any
 
 
 def analyze_sheet_for_tables(
     get_values_func,
-    spreadsheet_id: str,
-    sheet_id: int,
-    sheet_title: str,
-    min_rows: int,
-    min_columns: int,
-    min_confidence: float
-) -> List[Dict]:
+    spreadsheet_id: str,
+    sheet_id: int,
+    sheet_title: str,
+    min_rows: int,
+    min_columns: int,
+    min_confidence: float,
+) -> list[dict]:
     """Analyze a sheet to find potential tables."""
     tables = []
-
+
     try:
         # Get sample data from the sheet (first 100 rows)
         sample_range = f"{sheet_title}!A1:Z100"
-        sample_data = get_values_func(
-            spreadsheetId=spreadsheet_id,
-            range=sample_range
-        )
-
+        sample_data = get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)
+
         values = sample_data.get("values", [])
         if not values or len(values) < min_rows:
             return tables
-
+
         # Find potential table regions
         table_regions = find_table_regions(values, min_rows, min_columns)
-
+
         for i, region in enumerate(table_regions):
             confidence = calculate_table_confidence(values, region)
-
+
             if confidence >= min_confidence:
                 table_info = {
-                    "table_id": f"{sheet_title}_table_{i+1}",
-                    "table_name": f"{sheet_title}_Table_{i+1}",
+                    "table_id": f"{sheet_title}_table_{i + 1}",
+                    "table_name": f"{sheet_title}_Table_{i + 1}",
                     "sheet_id": sheet_id,
                     "sheet_name": sheet_title,
                     "start_row": region["start_row"],
@@ -48,54 +45,48 @@ def analyze_sheet_for_tables(
                     "rows": region["end_row"] - region["start_row"] + 1,
                     "columns": region["end_column"] - region["start_column"] + 1,
                     "confidence": confidence,
-                    "range": f"{sheet_title}!{get_column_letter(region['start_column'])}{region['start_row']+1}:{get_column_letter(region['end_column'])}{region['end_row']+1}"
+                    "range": f"{sheet_title}!{get_column_letter(region['start_column'])}{region['start_row'] + 1}:{get_column_letter(region['end_column'])}{region['end_row'] + 1}",
                 }
                 tables.append(table_info)
-
-    except Exception
+
+    except Exception:
         # If analysis fails for a sheet, continue with other sheets
         pass
-
+
     return tables
 
 
 def analyze_table_schema(
-    get_values_func,
-    spreadsheet_id: str,
-    table_info: Dict,
-    sample_size: int = 50
-) -> Dict[str, Any]:
+    get_values_func, spreadsheet_id: str, table_info: dict, sample_size: int = 50
+) -> dict[str, Any]:
     """
     Analyze table structure and infer column names, types, and constraints.
-
+
     Args:
         get_values_func: Function to get values from spreadsheet
         spreadsheet_id: The spreadsheet ID
         table_info: Dictionary containing table information from list_tables
        sample_size: Number of rows to sample for type inference
-
+
     Returns:
         Dictionary containing the table schema with column analysis
     """
     try:
         # Get sample data from the table
         sample_range = table_info["range"]
-        sample_data = get_values_func(
-            spreadsheetId=spreadsheet_id,
-            range=sample_range
-        )
-
+        sample_data = get_values_func(spreadsheetId=spreadsheet_id, range=sample_range)
+
         values = sample_data.get("values", [])
         if not values:
             raise ValueError("No data found in the specified table")
-
+
         # Limit sample size to available data
         actual_sample_size = min(sample_size, len(values))
         sample_values = values[:actual_sample_size]
-
+
         # Analyze column structure
         columns = analyze_columns(sample_values)
-
+
         return {
             "spreadsheet_id": spreadsheet_id,
             "table_name": table_info["table_name"],
@@ -105,85 +96,113 @@ def analyze_table_schema(
             "total_columns": table_info["columns"],
             "sample_size": actual_sample_size,
             "columns": columns,
-            "schema_version": "1.0"
+            "schema_version": "1.0",
         }
-
+
     except Exception as e:
         raise ValueError(f"Failed to analyze table schema: {str(e)}")
 
 
-def analyze_columns(sample_values: List[List[Any]]) -> List[Dict]:
+def analyze_columns(sample_values: list[list[Any]]) -> list[dict]:
     """Analyze column structure and infer types."""
     if not sample_values:
         return []
-
+
     # Get headers (first row)
     headers = sample_values[0] if sample_values else []
     data_rows = sample_values[1:] if len(sample_values) > 1 else []
-
+
     columns = []
-
+
     for col_idx in range(len(headers)):
-        column_name = str(headers[col_idx]) if col_idx < len(headers) else f"Column_{col_idx+1}"
-
+        column_name = (
+            str(headers[col_idx]) if col_idx < len(headers) else f"Column_{col_idx + 1}"
+        )
+
         # Extract column values
         column_values = []
         for row in data_rows:
             if col_idx < len(row):
                 column_values.append(row[col_idx])
-
+
         # Analyze column type
         column_type, constraints = infer_column_type(column_values)
-
+
         column_info = {
             "name": column_name,
             "index": col_idx,
             "type": column_type,
             "constraints": constraints,
             "sample_values": column_values[:5],  # First 5 sample values
-            "null_count": sum(1 for val in column_values if not val or str(val).strip() == ""),
-            "unique_count": len(set(str(val) for val in column_values if val and str(val).strip())),
+            "null_count": sum(
+                1 for val in column_values if not val or str(val).strip() == ""
+            ),
+            "unique_count": len(
+                set(str(val) for val in column_values if val and str(val).strip())
+            ),
         }
-
+
         columns.append(column_info)
-
+
     return columns
 
 
-def infer_column_type(values: List[Any]) -> Tuple[str, Dict]:
+def infer_column_type(values: list[Any]) -> tuple[str, dict]:
     """Infer the most likely data type for a column."""
     if not values:
         return "TEXT", {}
-
+
     # Remove empty values
     non_empty_values = [val for val in values if val and str(val).strip()]
-
+
     if not non_empty_values:
         return "TEXT", {}
-
+
     # Check for boolean values
-    boolean_count = sum(1 for val in non_empty_values if str(val).lower() in ["true", "false", "yes", "no", "1", "0"])
+    boolean_count = sum(
+        1
+        for val in non_empty_values
+        if str(val).lower() in ["true", "false", "yes", "no", "1", "0"]
+    )
     if boolean_count / len(non_empty_values) >= 0.8:
         return "BOOLEAN", {}
-
+
     # Check for numeric values
     numeric_count = 0
     decimal_count = 0
     date_count = 0
-
+
     for val in non_empty_values:
         val_str = str(val)
-
+
         # Check for dates (basic patterns)
-        if any(pattern in val_str.lower() for pattern in ["/", "-", "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"]):
+        if any(
+            pattern in val_str.lower()
+            for pattern in [
+                "/",
+                "-",
+                "jan",
+                "feb",
+                "mar",
+                "apr",
+                "may",
+                "jun",
+                "jul",
+                "aug",
+                "sep",
+                "oct",
+                "nov",
+                "dec",
+            ]
+        ):
             date_count += 1
-
+
         # Check for numbers
-        if val_str.replace(".", "").replace("-", "").replace(",", "").isdigit():
+        if val_str.replace(".", "").replace("-", "").replace(",", "").isdigit():
             numeric_count += 1
-            if "." in val_str:
+            if "." in val_str:
                 decimal_count += 1
-
+
     # Determine type based on analysis
     if date_count / len(non_empty_values) >= 0.6:
         return "DATE", {}
@@ -196,26 +215,28 @@ def infer_column_type(values: List[Any]) -> Tuple[str, Dict]:
     return "TEXT", {}
 
 
-def find_table_regions(values: List[List], min_rows: int, min_columns: int) -> List[Dict]:
+def find_table_regions(
+    values: list[list], min_rows: int, min_columns: int
+) -> list[dict]:
     """Find potential table regions in the data."""
     regions = []
-
+
     if not values or len(values) < min_rows:
         return regions
-
+
     rows = len(values)
     cols = max(len(row) for row in values) if values else 0
-
+
     if cols < min_columns:
         return regions
-
+
     # Simple heuristic: look for regions with consistent data
     current_start = -1
-
+
     for i in range(rows):
         # Check if this row has enough data
         row_data_count = sum(1 for cell in values[i] if cell and str(cell).strip())
-
+
         if row_data_count >= min_columns:
             # Continue current region
             if current_start == -1:
@@ -223,115 +244,134 @@ def find_table_regions(values: List[List], min_rows: int, min_columns: int) -> List[Dict]:
         else:
             # End current region if it's valid
             if current_start != -1 and i - current_start >= min_rows:
-                regions.append({
-                    "start_row": current_start,
-                    "end_row": i - 1,
-                    "start_column": 0,
-                    "end_column": cols - 1
-                })
+                regions.append(
+                    {
+                        "start_row": current_start,
+                        "end_row": i - 1,
+                        "start_column": 0,
+                        "end_column": cols - 1,
+                    }
+                )
                 current_start = -1
-
+
     # Handle region that extends to end
     if current_start != -1 and rows - current_start >= min_rows:
-        regions.append({
-            "start_row": current_start,
-            "end_row": rows - 1,
-            "start_column": 0,
-            "end_column": cols - 1
-        })
-
+        regions.append(
+            {
+                "start_row": current_start,
+                "end_row": rows - 1,
+                "start_column": 0,
+                "end_column": cols - 1,
+            }
+        )
+
     return regions
 
 
-def calculate_table_confidence(values: List[List], region: Dict) -> float:
+def calculate_table_confidence(values: list[list], region: dict) -> float:
     """Calculate confidence score for a potential table region."""
     if not values:
         return 0.0
-
+
     start_row = region["start_row"]
     end_row = region["end_row"]
     start_col = region["start_column"]
     end_col = region["end_column"]
-
+
     # Extract region data
     region_data = []
     for i in range(start_row, min(end_row + 1, len(values))):
         row = values[i]
         if len(row) > start_col:
-            region_data.append(row[start_col:min(end_col + 1, len(row))])
-
+            region_data.append(row[start_col : min(end_col + 1, len(row))])
+
     if not region_data:
         return 0.0
-
+
     # Calculate confidence based on data consistency
     total_cells = sum(len(row) for row in region_data)
-    non_empty_cells = sum(sum(1 for cell in row if cell and str(cell).strip()) for row in region_data)
-
+    non_empty_cells = sum(
+        sum(1 for cell in row if cell and str(cell).strip()) for row in region_data
+    )
+
     if total_cells == 0:
         return 0.0
-
+
     # Base confidence on data density
     data_density = non_empty_cells / total_cells
-
+
     # Additional factors
     has_headers = has_header_row(region_data)
     consistent_columns = has_consistent_columns(region_data)
-
+
     confidence = data_density * 0.6  # 60% weight to data density
-
+
     if has_headers:
         confidence += 0.2  # 20% bonus for headers
-
+
     if consistent_columns:
         confidence += 0.2  # 20% bonus for consistent structure
-
+
     return min(confidence, 1.0)
 
 
-def has_header_row(data: List[List]) -> bool:
+def has_header_row(data: list[list]) -> bool:
     """Check if the first row looks like a header."""
     if not data or len(data) < 2:
         return False
-
+
     header_row = data[0]
     data_rows = data[1:]
-
+
     if not header_row or not data_rows:
         return False
-
+
     # Check if header row has mostly text values
-    header_text_count = sum(1 for cell in header_row if cell and isinstance(cell, str) and not cell.replace(".", "").replace("-", "").isdigit())
-
+    header_text_count = sum(
+        1
+        for cell in header_row
+        if cell
+        and isinstance(cell, str)
+        and not cell.replace(".", "").replace("-", "").isdigit()
+    )
+
     # Check if data rows have different data types than header
     data_numeric_count = 0
     for row in data_rows[:3]:  # Check first 3 data rows
         for cell in row:
-            if cell and str(cell).replace(".", "").replace("-", "").isdigit():
+            if cell and str(cell).replace(".", "").replace("-", "").isdigit():
                 data_numeric_count += 1
-
+
     return header_text_count > len(header_row) * 0.5 and data_numeric_count > 0
 
 
-def has_consistent_columns(data: List[List]) -> bool:
+def has_consistent_columns(data: list[list]) -> bool:
     """Check if columns have consistent data types."""
     if not data or len(data) < 2:
         return False
-
+
     # Check if most columns have consistent data types
     consistent_columns = 0
     total_columns = max(len(row) for row in data)
-
+
     for col in range(total_columns):
         column_values = [row[col] for row in data if col < len(row) and row[col]]
         if len(column_values) >= 2:
             # Check if column has consistent type
-            numeric_count = sum(1 for val in column_values if str(val).replace(".", "").replace("-", "").isdigit())
+            numeric_count = sum(
+                1
+                for val in column_values
+                if str(val).replace(".", "").replace("-", "").isdigit()
+            )
             text_count = len(column_values) - numeric_count
-
+
             # If 80% of values are same type, consider consistent
-            if numeric_count / len(column_values) >= 0.8 or text_count / len(column_values) >= 0.8:
+            if (
+                numeric_count / len(column_values) >= 0.8
+                or text_count / len(column_values) >= 0.8
+            ):
                 consistent_columns += 1
-
+
     return consistent_columns / total_columns >= 0.6 if total_columns > 0 else False
 
 
@@ -342,4 +382,4 @@ def get_column_letter(column_index: int) -> str:
         column_index, remainder = divmod(column_index, 26)
         result = chr(65 + remainder) + result
         column_index -= 1
-    return result
+    return result
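
The google_sheet/helper.py hunks above are dominated by reformatting, but they also document the contract the helpers expect: the injected get_values_func is called with spreadsheetId= and range= keywords and must return a Sheets-style {"values": [...]} payload. A minimal sketch of driving analyze_sheet_for_tables with a stub getter (mine, not part of the package; the id and sample data are made up):

from typing import Any

from universal_mcp.applications.google_sheet.helper import analyze_sheet_for_tables


def fake_get_values(*, spreadsheetId: str, range: str) -> dict[str, Any]:
    # Stub for GoogleSheetApp's values getter; mimics the shape of the
    # Sheets API values.get response that the helper reads.
    return {
        "values": [
            ["name", "score"],  # header row
            ["alice", "10"],
            ["bob", "12"],
            ["carol", "9"],
        ]
    }


tables = analyze_sheet_for_tables(
    fake_get_values,
    spreadsheet_id="example-spreadsheet-id",  # hypothetical id
    sheet_id=0,
    sheet_title="Sheet1",
    min_rows=3,
    min_columns=2,
    min_confidence=0.5,
)
# With the logic shown above, one region is found: full data density earns
# 0.6 and the header row earns the 0.2 bonus, while the 0.2 consistency bonus
# is missed because the header row itself is included in the per-column check.
print([(t["range"], round(t["confidence"], 2)) for t in tables])
# [('Sheet1!A1:B4', 0.8)]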
universal_mcp/applications/http_tools/app.py

@@ -1,5 +1,6 @@
 import httpx
 from loguru import logger
+
 from universal_mcp.applications.application import APIApplication
 
 
@@ -15,7 +16,7 @@ class HttpToolsApp(APIApplication):
         Args:
             **kwargs: Additional keyword arguments for the parent APIApplication.
         """
-        super().__init__(name="
+        super().__init__(name="http_tools", **kwargs)
 
     def _handle_response(self, response: httpx.Response):
         """
universal_mcp/applications/hubspot/app.py

@@ -4,20 +4,23 @@ from typing import Any
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
 
+
 class CrmApi:
     def __init__(self, api_client):
         pass
+
     def list_tools(self):
         return []
 
+
 class MarketingApi:
     def __init__(self, api_client):
         pass
+
     def list_tools(self):
         return []
 
 
-
 class HubspotApp(APIApplication):
     def __init__(self, integration: Integration = None, **kwargs) -> None:
         super().__init__(name="hubspot", integration=integration, **kwargs)
universal_mcp/applications/jira/app.py

@@ -1,6 +1,7 @@
 from typing import Any
 
 import httpx
+
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
 
@@ -7710,9 +7711,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def archive_issues(
-        self, issueIdsOrKeys: list[str] | None = None
-    ) -> dict[str, Any]:
+    def archive_issues(self, issueIdsOrKeys: list[str] | None = None) -> dict[str, Any]:
         """
         Archives Jira issues via the specified issue IDs/keys using the PUT method, handling bulk operations and returning status/error details.
 
@@ -8424,9 +8423,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def delete_issue(
-        self, issueIdOrKey: str, deleteSubtasks: str | None = None
-    ) -> Any:
+    def delete_issue(self, issueIdOrKey: str, deleteSubtasks: str | None = None) -> Any:
         """
         Deletes a Jira issue identified by its ID or key, optionally deleting associated subtasks if the `deleteSubtasks` query parameter is set to `true`.
 
@@ -13865,9 +13862,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def migrate_queries(
-        self, queryStrings: list[str] | None = None
-    ) -> dict[str, Any]:
+    def migrate_queries(self, queryStrings: list[str] | None = None) -> dict[str, Any]:
         """
         Converts JQL queries containing usernames or user keys to equivalent queries with account IDs, handling unknown users appropriately.
 
@@ -14850,9 +14845,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def get_all_permission_schemes(
-        self, expand: str | None = None
-    ) -> dict[str, Any]:
+    def get_all_permission_schemes(self, expand: str | None = None) -> dict[str, Any]:
         """
         Retrieves a list of all permission schemes in Jira Cloud, optionally expanding the response to include additional details such as groups by using the "expand" query parameter.
 
@@ -15389,9 +15382,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def get_plan(
-        self, planId: str, useGroupId: bool | None = None
-    ) -> dict[str, Any]:
+    def get_plan(self, planId: str, useGroupId: bool | None = None) -> dict[str, Any]:
         """
         Retrieves the details of a specific plan identified by its planId using a GET request.
 
@@ -21679,9 +21670,7 @@ class JiraApp(APIApplication):
         except ValueError:
             return None
 
-    def get_statuses_by_id(
-        self, id: list[str], expand: str | None = None
-    ) -> list[Any]:
+    def get_statuses_by_id(self, id: list[str], expand: str | None = None) -> list[Any]:
         """
         Retrieves a list of statuses in Jira using the "/rest/api/3/statuses" endpoint, allowing you to fetch details of statuses based on query parameters like expansion and ID, though specific details about what statuses are returned are not provided.
 
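
The Jira hunks are formatting-only: signatures that fit on one line are joined back together, so call sites are unaffected. For orientation, a sketch of calling one of the touched methods (mine; it assumes JiraApp takes the same integration keyword as the other apps in this diff, and the issue key is made up):

from universal_mcp.applications.jira.app import JiraApp

# A configured Integration would normally be passed here; None keeps the
# sketch self-contained but will not authenticate real requests.
app = JiraApp(integration=None)

# Per the docstring above, deleteSubtasks is a string query parameter.
result = app.delete_issue("PROJ-123", deleteSubtasks="true")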
universal_mcp/applications/markitdown/app.py

@@ -1,13 +1,12 @@
 import re
 
-from universal_mcp.applications.application import BaseApplication
-
 from markitdown import MarkItDown
+from universal_mcp.applications.application import BaseApplication
 
 
 class MarkitdownApp(BaseApplication):
     def __init__(self, **kwargs):
-        super().__init__(name="markitdown"
+        super().__init__(name="markitdown")
         self.markitdown = MarkItDown(enable_plugins=True)
 
     async def convert_to_markdown(self, uri: str) -> str:
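
For completeness, a minimal usage sketch of the convert_to_markdown coroutine visible in the markitdown hunk (mine; the URI is a placeholder):

import asyncio

from universal_mcp.applications.markitdown.app import MarkitdownApp


async def main() -> None:
    app = MarkitdownApp()
    markdown = await app.convert_to_markdown("https://example.com/index.html")
    print(markdown)


asyncio.run(main())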