universal-mcp-applications 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of universal-mcp-applications has been flagged as potentially problematic.

Files changed (43)
  1. universal_mcp/applications/{aws-s3 → aws_s3}/app.py +1 -1
  2. universal_mcp/applications/cal_com_v2/__init__.py +1 -0
  3. universal_mcp/applications/{cal-com-v2 → cal_com_v2}/app.py +137 -139
  4. universal_mcp/applications/clickup/app.py +2 -2
  5. universal_mcp/applications/github/app.py +1 -2
  6. universal_mcp/applications/{google-sheet → google_sheet}/app.py +1 -1
  7. universal_mcp/applications/google_sheet/helper.py +345 -0
  8. universal_mcp/applications/hashnode/app.py +1 -1
  9. universal_mcp/applications/hubspot/app.py +13 -2
  10. universal_mcp/applications/markitdown/app.py +1 -1
  11. universal_mcp/applications/{ms-teams → ms_teams}/app.py +1 -1
  12. universal_mcp/applications/semrush/app.py +479 -1468
  13. universal_mcp/applications/sharepoint/app.py +1 -1
  14. {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.2.dist-info}/METADATA +1 -1
  15. {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.2.dist-info}/RECORD +42 -41
  16. universal_mcp/applications/cal-com-v2/__init__.py +0 -1
  17. /universal_mcp/applications/{aws-s3 → aws_s3}/README.md +0 -0
  18. /universal_mcp/applications/{aws-s3 → aws_s3}/__init__.py +0 -0
  19. /universal_mcp/applications/{cal-com-v2 → cal_com_v2}/README.md +0 -0
  20. /universal_mcp/applications/{google-calendar → google_calendar}/README.md +0 -0
  21. /universal_mcp/applications/{google-calendar → google_calendar}/__init__.py +0 -0
  22. /universal_mcp/applications/{google-calendar → google_calendar}/app.py +0 -0
  23. /universal_mcp/applications/{google-docs → google_docs}/README.md +0 -0
  24. /universal_mcp/applications/{google-docs → google_docs}/__init__.py +0 -0
  25. /universal_mcp/applications/{google-docs → google_docs}/app.py +0 -0
  26. /universal_mcp/applications/{google-drive → google_drive}/README.md +0 -0
  27. /universal_mcp/applications/{google-drive → google_drive}/__init__.py +0 -0
  28. /universal_mcp/applications/{google-drive → google_drive}/app.py +0 -0
  29. /universal_mcp/applications/{google-gemini → google_gemini}/README.md +0 -0
  30. /universal_mcp/applications/{google-gemini → google_gemini}/__init__.py +0 -0
  31. /universal_mcp/applications/{google-gemini → google_gemini}/app.py +0 -0
  32. /universal_mcp/applications/{google-mail → google_mail}/README.md +0 -0
  33. /universal_mcp/applications/{google-mail → google_mail}/__init__.py +0 -0
  34. /universal_mcp/applications/{google-mail → google_mail}/app.py +0 -0
  35. /universal_mcp/applications/{google-sheet → google_sheet}/README.md +0 -0
  36. /universal_mcp/applications/{google-sheet → google_sheet}/__init__.py +0 -0
  37. /universal_mcp/applications/{ms-teams → ms_teams}/README.md +0 -0
  38. /universal_mcp/applications/{ms-teams → ms_teams}/__init__.py +0 -0
  39. /universal_mcp/applications/{whatsapp-business → whatsapp_business}/README.md +0 -0
  40. /universal_mcp/applications/{whatsapp-business → whatsapp_business}/__init__.py +0 -0
  41. /universal_mcp/applications/{whatsapp-business → whatsapp_business}/app.py +0 -0
  42. {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.2.dist-info}/WHEEL +0 -0
  43. {universal_mcp_applications-0.1.1.dist-info → universal_mcp_applications-0.1.2.dist-info}/licenses/LICENSE +0 -0
--- a/universal_mcp/applications/clickup/app.py
+++ b/universal_mcp/applications/clickup/app.py
@@ -1901,7 +1901,7 @@ class ClickupApp(APIApplication):
         response.raise_for_status()
         return response.json()
 
-    def lists_update_list_info_due_date_priority_assignee_color(
+    def lists_update_list(
         self,
         list_id,
         name,
@@ -4933,7 +4933,7 @@ class ClickupApp(APIApplication):
             self.lists_get_folderless,
             self.lists_create_folderless_list,
             self.lists_get_list_details,
-            self.lists_update_list_info_due_date_priority_assignee_color,
+            self.lists_update_list,
             self.lists_remove_list,
             self.lists_add_task_to_list,
             self.lists_remove_task_from_list,
--- a/universal_mcp/applications/github/app.py
+++ b/universal_mcp/applications/github/app.py
@@ -49574,7 +49574,7 @@ class GithubApp(APIApplication):
             self.update_issue,
             self.list_repo_activities,
             # Auto Generated from open api spec
-            self.meta_root,
+            self.meta_get,
             self.list_advisories,
             self.get_advisory_by_id,
             self.apps_get_authenticated,
@@ -49654,7 +49654,6 @@ class GithubApp(APIApplication):
             self.get_stubbed_account,
             self.apps_list_plans_stubbed,
             self.get_plan_accounts,
-            self.meta_get,
             self.get_network_repo_events,
             self.get_notifications,
             self.update_notification,
--- a/universal_mcp/applications/google-sheet/app.py
+++ b/universal_mcp/applications/google_sheet/app.py
@@ -2,7 +2,7 @@ from typing import Any
 
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
-from universal_mcp_google_sheet.helper import (
+from .helper import (
     analyze_sheet_for_tables,
     analyze_table_schema,
 )
--- /dev/null
+++ b/universal_mcp/applications/google_sheet/helper.py
@@ -0,0 +1,345 @@
+"""
+Helper functions for Google Sheets table detection and analysis.
+"""
+
+from typing import Any, List, Dict, Tuple
+
+
+def analyze_sheet_for_tables(
+    get_values_func,
+    spreadsheet_id: str,
+    sheet_id: int,
+    sheet_title: str,
+    min_rows: int,
+    min_columns: int,
+    min_confidence: float
+) -> List[Dict]:
+    """Analyze a sheet to find potential tables."""
+    tables = []
+
+    try:
+        # Get sample data from the sheet (first 100 rows)
+        sample_range = f"{sheet_title}!A1:Z100"
+        sample_data = get_values_func(
+            spreadsheetId=spreadsheet_id,
+            range=sample_range
+        )
+
+        values = sample_data.get("values", [])
+        if not values or len(values) < min_rows:
+            return tables
+
+        # Find potential table regions
+        table_regions = find_table_regions(values, min_rows, min_columns)
+
+        for i, region in enumerate(table_regions):
+            confidence = calculate_table_confidence(values, region)
+
+            if confidence >= min_confidence:
+                table_info = {
+                    "table_id": f"{sheet_title}_table_{i+1}",
+                    "table_name": f"{sheet_title}_Table_{i+1}",
+                    "sheet_id": sheet_id,
+                    "sheet_name": sheet_title,
+                    "start_row": region["start_row"],
+                    "end_row": region["end_row"],
+                    "start_column": region["start_column"],
+                    "end_column": region["end_column"],
+                    "rows": region["end_row"] - region["start_row"] + 1,
+                    "columns": region["end_column"] - region["start_column"] + 1,
+                    "confidence": confidence,
+                    "range": f"{sheet_title}!{get_column_letter(region['start_column'])}{region['start_row']+1}:{get_column_letter(region['end_column'])}{region['end_row']+1}"
+                }
+                tables.append(table_info)
+
+    except Exception as e:
+        # If analysis fails for a sheet, continue with other sheets
+        pass
+
+    return tables
+
+
+def analyze_table_schema(
+    get_values_func,
+    spreadsheet_id: str,
+    table_info: Dict,
+    sample_size: int = 50
+) -> Dict[str, Any]:
+    """
+    Analyze table structure and infer column names, types, and constraints.
+
+    Args:
+        get_values_func: Function to get values from spreadsheet
+        spreadsheet_id: The spreadsheet ID
+        table_info: Dictionary containing table information from list_tables
+        sample_size: Number of rows to sample for type inference
+
+    Returns:
+        Dictionary containing the table schema with column analysis
+    """
+    try:
+        # Get sample data from the table
+        sample_range = table_info["range"]
+        sample_data = get_values_func(
+            spreadsheetId=spreadsheet_id,
+            range=sample_range
+        )
+
+        values = sample_data.get("values", [])
+        if not values:
+            raise ValueError("No data found in the specified table")
+
+        # Limit sample size to available data
+        actual_sample_size = min(sample_size, len(values))
+        sample_values = values[:actual_sample_size]
+
+        # Analyze column structure
+        columns = analyze_columns(sample_values)
+
+        return {
+            "spreadsheet_id": spreadsheet_id,
+            "table_name": table_info["table_name"],
+            "sheet_name": table_info["sheet_name"],
+            "table_range": table_info["range"],
+            "total_rows": table_info["rows"],
+            "total_columns": table_info["columns"],
+            "sample_size": actual_sample_size,
+            "columns": columns,
+            "schema_version": "1.0"
+        }
+
+    except Exception as e:
+        raise ValueError(f"Failed to analyze table schema: {str(e)}")
+
+
+def analyze_columns(sample_values: List[List[Any]]) -> List[Dict]:
+    """Analyze column structure and infer types."""
+    if not sample_values:
+        return []
+
+    # Get headers (first row)
+    headers = sample_values[0] if sample_values else []
+    data_rows = sample_values[1:] if len(sample_values) > 1 else []
+
+    columns = []
+
+    for col_idx in range(len(headers)):
+        column_name = str(headers[col_idx]) if col_idx < len(headers) else f"Column_{col_idx + 1}"
+
+        # Extract column values
+        column_values = []
+        for row in data_rows:
+            if col_idx < len(row):
+                column_values.append(row[col_idx])
+
+        # Analyze column type
+        column_type, constraints = infer_column_type(column_values)
+
+        column_info = {
+            "name": column_name,
+            "index": col_idx,
+            "type": column_type,
+            "constraints": constraints,
+            "sample_values": column_values[:5],  # First 5 sample values
+            "null_count": sum(1 for val in column_values if not val or str(val).strip() == ""),
+            "unique_count": len(set(str(val) for val in column_values if val and str(val).strip()))
+        }
+
+        columns.append(column_info)
+
+    return columns
+
+
+def infer_column_type(values: List[Any]) -> Tuple[str, Dict]:
+    """Infer the most likely data type for a column."""
+    if not values:
+        return "TEXT", {}
+
+    # Remove empty values
+    non_empty_values = [val for val in values if val and str(val).strip()]
+
+    if not non_empty_values:
+        return "TEXT", {}
+
+    # Check for boolean values
+    boolean_count = sum(1 for val in non_empty_values if str(val).lower() in ['true', 'false', 'yes', 'no', '1', '0'])
+    if boolean_count / len(non_empty_values) >= 0.8:
+        return "BOOLEAN", {}
+
+    # Check for numeric values
+    numeric_count = 0
+    decimal_count = 0
+    date_count = 0
+
+    for val in non_empty_values:
+        val_str = str(val)
+
+        # Check for dates (basic patterns)
+        if any(pattern in val_str.lower() for pattern in ['/', '-', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']):
+            date_count += 1
+
+        # Check for numbers
+        if val_str.replace('.', '').replace('-', '').replace(',', '').isdigit():
+            numeric_count += 1
+            if '.' in val_str:
+                decimal_count += 1
+
+    # Determine type based on analysis
+    if date_count / len(non_empty_values) >= 0.6:
+        return "DATE", {}
+    elif numeric_count / len(non_empty_values) >= 0.8:
+        if decimal_count / numeric_count >= 0.3:
+            return "DECIMAL", {"precision": 2}
+        else:
+            return "INTEGER", {}
+    else:
+        return "TEXT", {}
+
+
+def find_table_regions(values: List[List], min_rows: int, min_columns: int) -> List[Dict]:
+    """Find potential table regions in the data."""
+    regions = []
+
+    if not values or len(values) < min_rows:
+        return regions
+
+    rows = len(values)
+    cols = max(len(row) for row in values) if values else 0
+
+    if cols < min_columns:
+        return regions
+
+    # Simple heuristic: look for regions with consistent data
+    current_start = -1
+
+    for i in range(rows):
+        # Check if this row has enough data
+        row_data_count = sum(1 for cell in values[i] if cell and str(cell).strip())
+
+        if row_data_count >= min_columns:
+            # Continue current region
+            if current_start == -1:
+                current_start = i
+        else:
+            # End current region if it's valid
+            if current_start != -1 and i - current_start >= min_rows:
+                regions.append({
+                    "start_row": current_start,
+                    "end_row": i - 1,
+                    "start_column": 0,
+                    "end_column": cols - 1
+                })
+            current_start = -1
+
+    # Handle region that extends to end
+    if current_start != -1 and rows - current_start >= min_rows:
+        regions.append({
+            "start_row": current_start,
+            "end_row": rows - 1,
+            "start_column": 0,
+            "end_column": cols - 1
+        })
+
+    return regions
+
+
+def calculate_table_confidence(values: List[List], region: Dict) -> float:
+    """Calculate confidence score for a potential table region."""
+    if not values:
+        return 0.0
+
+    start_row = region["start_row"]
+    end_row = region["end_row"]
+    start_col = region["start_column"]
+    end_col = region["end_column"]
+
+    # Extract region data
+    region_data = []
+    for i in range(start_row, min(end_row + 1, len(values))):
+        row = values[i]
+        if len(row) > start_col:
+            region_data.append(row[start_col:min(end_col + 1, len(row))])
+
+    if not region_data:
+        return 0.0
+
+    # Calculate confidence based on data consistency
+    total_cells = sum(len(row) for row in region_data)
+    non_empty_cells = sum(sum(1 for cell in row if cell and str(cell).strip()) for row in region_data)
+
+    if total_cells == 0:
+        return 0.0
+
+    # Base confidence on data density
+    data_density = non_empty_cells / total_cells
+
+    # Additional factors
+    has_headers = has_header_row(region_data)
+    consistent_columns = has_consistent_columns(region_data)
+
+    confidence = data_density * 0.6  # 60% weight to data density
+
+    if has_headers:
+        confidence += 0.2  # 20% bonus for headers
+
+    if consistent_columns:
+        confidence += 0.2  # 20% bonus for consistent structure
+
+    return min(confidence, 1.0)
+
+
+def has_header_row(data: List[List]) -> bool:
+    """Check if the first row looks like a header."""
+    if not data or len(data) < 2:
+        return False
+
+    header_row = data[0]
+    data_rows = data[1:]
+
+    if not header_row or not data_rows:
+        return False
+
+    # Check if header row has mostly text values
+    header_text_count = sum(1 for cell in header_row if cell and isinstance(cell, str) and not cell.replace('.', '').replace('-', '').isdigit())
+
+    # Check if data rows have different data types than header
+    data_numeric_count = 0
+    for row in data_rows[:3]:  # Check first 3 data rows
+        for cell in row:
+            if cell and str(cell).replace('.', '').replace('-', '').isdigit():
+                data_numeric_count += 1
+
+    return header_text_count > len(header_row) * 0.5 and data_numeric_count > 0
+
+
+def has_consistent_columns(data: List[List]) -> bool:
+    """Check if columns have consistent data types."""
+    if not data or len(data) < 2:
+        return False
+
+    # Check if most columns have consistent data types
+    consistent_columns = 0
+    total_columns = max(len(row) for row in data)
+
+    for col in range(total_columns):
+        column_values = [row[col] for row in data if col < len(row) and row[col]]
+        if len(column_values) >= 2:
+            # Check if column has consistent type
+            numeric_count = sum(1 for val in column_values if str(val).replace('.', '').replace('-', '').isdigit())
+            text_count = len(column_values) - numeric_count
+
+            # If 80% of values are same type, consider consistent
+            if numeric_count / len(column_values) >= 0.8 or text_count / len(column_values) >= 0.8:
+                consistent_columns += 1
+
+    return consistent_columns / total_columns >= 0.6 if total_columns > 0 else False
+
+
+def get_column_letter(column_index: int) -> str:
+    """Convert column index to A1 notation letter."""
+    result = ""
+    while column_index >= 0:
+        column_index, remainder = divmod(column_index, 26)
+        result = chr(65 + remainder) + result
+        column_index -= 1
+    return result
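
Note: the new helper uses zero-based column indices in get_column_letter, and calculate_table_confidence combines 60% data density with two 20% bonuses (detected header row, consistent column types), capped at 1.0. A minimal sanity check in plain Python (get_column_letter is copied verbatim from the hunk above; the arithmetic simply restates the weighting):

def get_column_letter(column_index: int) -> str:
    # Zero-based: 0 -> "A", 25 -> "Z", 26 -> "AA", 701 -> "ZZ"
    result = ""
    while column_index >= 0:
        column_index, remainder = divmod(column_index, 26)
        result = chr(65 + remainder) + result
        column_index -= 1
    return result

assert get_column_letter(0) == "A"
assert get_column_letter(25) == "Z"
assert get_column_letter(26) == "AA"
assert get_column_letter(701) == "ZZ"

# Confidence weighting from calculate_table_confidence: a region with 90%
# of its cells filled, a header row, and consistent column types scores
# 0.9 * 0.6 + 0.2 + 0.2 = 0.94 (anything above 1.0 would be capped).
assert abs(min(0.9 * 0.6 + 0.2 + 0.2, 1.0) - 0.94) < 1e-9
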
--- a/universal_mcp/applications/hashnode/app.py
+++ b/universal_mcp/applications/hashnode/app.py
@@ -1,5 +1,5 @@
 from gql import gql
-from universal_mcp.applications import GraphQLApplication
+from universal_mcp.applications.application import GraphQLApplication
 from universal_mcp.integrations import Integration
 
 
--- a/universal_mcp/applications/hubspot/app.py
+++ b/universal_mcp/applications/hubspot/app.py
@@ -3,8 +3,19 @@ from typing import Any
 
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
-from universal_mcp_hubspot.api_segments.crm_api import CrmApi
-from universal_mcp_hubspot.api_segments.marketing_api import MarketingApi
+
+class CrmApi:
+    def __init__(self, api_client):
+        pass
+    def list_tools(self):
+        return []
+
+class MarketingApi:
+    def __init__(self, api_client):
+        pass
+    def list_tools(self):
+        return []
+
 
 
 class HubspotApp(APIApplication):
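
Note that CrmApi and MarketingApi are now inline no-op stubs rather than imports from the separate universal_mcp_hubspot package: each accepts an api_client and returns an empty tool list, so whatever CRM and Marketing tools the old segments contributed are presumably absent from HubspotApp in 0.1.2. A minimal sketch of the observable behavior (stub copied from the hunk above; how HubspotApp consumes it is not shown in this diff):

# The stub accepts an api_client but registers nothing.
class CrmApi:
    def __init__(self, api_client):
        pass

    def list_tools(self):
        return []

# Any aggregation over segment tools now yields an empty list.
assert CrmApi(api_client=None).list_tools() == []
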
--- a/universal_mcp/applications/markitdown/app.py
+++ b/universal_mcp/applications/markitdown/app.py
@@ -1,6 +1,6 @@
 import re
 
-from universal_mcp.applications import BaseApplication
+from universal_mcp.applications.application import BaseApplication
 
 from markitdown import MarkItDown
 
--- a/universal_mcp/applications/ms-teams/app.py
+++ b/universal_mcp/applications/ms_teams/app.py
@@ -6,7 +6,7 @@ from universal_mcp.integrations import Integration
 
 
 class MsTeamsApp(APIApplication):
     def __init__(self, integration: Integration = None, **kwargs) -> None:
-        super().__init__(name="microsoft-teams", integration=integration, **kwargs)
+        super().__init__(name="ms-teams", integration=integration, **kwargs)
         self.base_url = "https://graph.microsoft.com/v1.0"
 
     def list_chats(