universal-mcp 0.1.8rc1__py3-none-any.whl → 0.1.8rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/__init__.py +0 -2
- universal_mcp/analytics.py +75 -0
- universal_mcp/applications/application.py +28 -5
- universal_mcp/applications/calendly/README.md +78 -0
- universal_mcp/applications/calendly/app.py +1207 -0
- universal_mcp/applications/coda/README.md +133 -0
- universal_mcp/applications/coda/__init__.py +0 -0
- universal_mcp/applications/coda/app.py +3704 -0
- universal_mcp/applications/e2b/app.py +12 -7
- universal_mcp/applications/firecrawl/app.py +27 -0
- universal_mcp/applications/github/app.py +127 -85
- universal_mcp/applications/google_calendar/app.py +62 -127
- universal_mcp/applications/google_docs/app.py +48 -35
- universal_mcp/applications/google_drive/app.py +119 -96
- universal_mcp/applications/google_mail/app.py +124 -34
- universal_mcp/applications/google_sheet/app.py +90 -74
- universal_mcp/applications/markitdown/app.py +9 -8
- universal_mcp/applications/notion/app.py +254 -134
- universal_mcp/applications/perplexity/app.py +16 -14
- universal_mcp/applications/reddit/app.py +94 -85
- universal_mcp/applications/resend/app.py +12 -5
- universal_mcp/applications/serpapi/app.py +11 -4
- universal_mcp/applications/tavily/app.py +11 -8
- universal_mcp/applications/wrike/README.md +71 -0
- universal_mcp/applications/wrike/__init__.py +0 -0
- universal_mcp/applications/wrike/app.py +1384 -0
- universal_mcp/applications/youtube/README.md +82 -0
- universal_mcp/applications/youtube/__init__.py +0 -0
- universal_mcp/applications/youtube/app.py +1446 -0
- universal_mcp/applications/zenquotes/app.py +12 -2
- universal_mcp/exceptions.py +9 -2
- universal_mcp/integrations/__init__.py +24 -1
- universal_mcp/integrations/integration.py +133 -28
- universal_mcp/logger.py +3 -56
- universal_mcp/servers/__init__.py +6 -14
- universal_mcp/servers/server.py +205 -150
- universal_mcp/stores/__init__.py +7 -2
- universal_mcp/stores/store.py +103 -40
- universal_mcp/tools/__init__.py +3 -0
- universal_mcp/tools/adapters.py +43 -0
- universal_mcp/tools/func_metadata.py +213 -0
- universal_mcp/tools/tools.py +342 -0
- universal_mcp/utils/docgen.py +325 -119
- universal_mcp/utils/docstring_parser.py +179 -0
- universal_mcp/utils/dump_app_tools.py +33 -23
- universal_mcp/utils/openapi.py +229 -46
- {universal_mcp-0.1.8rc1.dist-info → universal_mcp-0.1.8rc3.dist-info}/METADATA +8 -4
- universal_mcp-0.1.8rc3.dist-info/RECORD +75 -0
- universal_mcp-0.1.8rc1.dist-info/RECORD +0 -58
- /universal_mcp/{utils/bridge.py → applications/calendly/__init__.py} +0 -0
- {universal_mcp-0.1.8rc1.dist-info → universal_mcp-0.1.8rc3.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.8rc1.dist-info → universal_mcp-0.1.8rc3.dist-info}/entry_points.txt +0 -0
universal_mcp/utils/docstring_parser.py
ADDED
@@ -0,0 +1,179 @@
+import re
+from typing import Any
+
+
+def parse_docstring(docstring: str | None) -> dict[str, Any]:
+    """
+    Parses a standard Python docstring into summary, args, returns, raises, and tags.
+
+    Args:
+        docstring: The docstring to parse.
+
+    Returns:
+        A dictionary with keys 'summary', 'args', 'returns', 'raises', 'tags'.
+        'args' is a dict mapping arg names to descriptions.
+        'raises' is a dict mapping exception type names to descriptions.
+        'tags' is a list of strings extracted from the 'Tags:' section, comma-separated.
+    """
+    if not docstring:
+        return {"summary": "", "args": {}, "returns": "", "raises": {}, "tags": []}
+
+    lines = docstring.strip().splitlines()
+    if not lines:
+        return {"summary": "", "args": {}, "returns": "", "raises": {}, "tags": []}
+
+    summary = lines[0].strip()
+    args = {}
+    returns = ""
+    raises = {}
+    tags: list[str] = []  # Final list of parsed tags
+    current_section = None
+    current_key = None
+    current_desc_lines = []  # Accumulator for multi-line descriptions/tag content
+    key_pattern = re.compile(r"^\s*([\w\.]+)\s*(?:\(.*\))?:\s*(.*)")
+
+    def finalize_current_item():
+        """Helper function to finalize the currently parsed item."""
+        nonlocal returns, tags  # Allow modification of outer scope variables
+        desc = " ".join(current_desc_lines).strip()
+        if current_section == "args" and current_key:
+            args[current_key] = desc
+        elif current_section == "raises" and current_key:
+            raises[current_key] = desc
+        elif current_section == "returns":
+            returns = desc
+        # SIM102 applied: Combine nested if
+        elif current_section == "tags" and desc:  # Only process if there's content
+            tags = [tag.strip() for tag in desc.split(",") if tag.strip()]
+
+    # B007 applied: Rename unused loop variable i to _
+    for _, line in enumerate(lines[1:]):
+        stripped_line = line.strip()
+        original_indentation = len(line) - len(line.lstrip(" "))
+
+        section_line = stripped_line.lower()
+        is_new_section_header = False
+        new_section_type = None
+        header_content = ""
+
+        if section_line in ("args:", "arguments:", "parameters:"):
+            new_section_type = "args"
+            is_new_section_header = True
+        elif section_line in ("returns:", "yields:"):
+            new_section_type = "returns"
+            is_new_section_header = True
+        elif section_line.startswith(("raises ", "raises:", "errors:", "exceptions:")):
+            new_section_type = "raises"
+            is_new_section_header = True
+        elif section_line.startswith(
+            ("tags:", "tags")
+        ):  # Match "Tags:" or "Tags" potentially followed by content
+            new_section_type = "tags"
+            is_new_section_header = True
+            if ":" in stripped_line:
+                header_content = stripped_line.split(":", 1)[1].strip()
+        elif section_line.endswith(":") and section_line[:-1] in (
+            "attributes",
+            "see also",
+            "example",
+            "examples",
+            "notes",
+        ):
+            new_section_type = "other"
+            is_new_section_header = True
+
+        finalize_previous = False
+        if is_new_section_header:
+            finalize_previous = True
+        elif current_section in ["args", "raises"] and current_key:
+            if key_pattern.match(line) or (original_indentation == 0 and stripped_line):
+                finalize_previous = True
+        elif current_section in ["returns", "tags"] and current_desc_lines:
+            if original_indentation == 0 and stripped_line:
+                finalize_previous = True
+        # SIM102 applied: Combine nested if/elif
+        elif (
+            not stripped_line
+            and current_desc_lines
+            and current_section in ["args", "raises", "returns", "tags"]
+            and (current_section not in ["args", "raises"] or current_key)
+        ):
+            finalize_previous = True
+
+        if finalize_previous:
+            finalize_current_item()
+            current_key = None
+            current_desc_lines = []
+            if not is_new_section_header or new_section_type == "other":
+                current_section = None
+
+        if is_new_section_header and new_section_type != "other":
+            current_section = new_section_type
+            # If Tags header had content, start accumulating it
+            if new_section_type == "tags" and header_content:
+                current_desc_lines.append(header_content)
+            # Don't process the header line itself further
+            continue
+
+        if not stripped_line:
+            continue
+
+        if current_section == "args" or current_section == "raises":
+            match = key_pattern.match(line)
+            if match:
+                current_key = match.group(1)
+                current_desc_lines = [match.group(2).strip()]  # Start new description
+            elif (
+                current_key and original_indentation > 0
+            ):  # Check for indentation for continuation
+                current_desc_lines.append(stripped_line)
+
+        elif current_section == "returns":
+            if not current_desc_lines or original_indentation > 0:
+                current_desc_lines.append(stripped_line)
+
+        elif current_section == "tags":
+            if (
+                original_indentation > 0 or not current_desc_lines
+            ):  # Indented or first line
+                current_desc_lines.append(stripped_line)
+
+    finalize_current_item()
+    return {
+        "summary": summary,
+        "args": args,
+        "returns": returns,
+        "raises": raises,
+        "tags": tags,
+    }
+
+
+docstring_example = """
+Starts a crawl job for a given URL using Firecrawl. Returns the job ID immediately.
+
+Args:
+    url: The starting URL for the crawl.
+       It can be a very long url that spans multiple lines if needed.
+    params: Optional dictionary of parameters to customize the crawl.
+        See API docs for details.
+    idempotency_key: Optional unique key to prevent duplicate jobs.
+
+Returns:
+    A dictionary containing the job initiation response on success,
+    or a string containing an error message on failure. This description
+    can also span multiple lines.
+
+Raises:
+    ValueError: If the URL is invalid.
+    requests.exceptions.ConnectionError: If connection fails.
+
+Tags:
+    crawl, async_job, start, api, long_tag_example , another
+    , final_tag
+"""
+
+if __name__ == "__main__":
+    parsed = parse_docstring(docstring_example)
+    import json
+
+    print(json.dumps(parsed, indent=4))
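For orientation, the __main__ block above should print roughly the following structure for docstring_example. This is a sketch of the expected result traced by hand from the parser logic, not output captured from an actual run:

{
    "summary": "Starts a crawl job for a given URL using Firecrawl. Returns the job ID immediately.",
    "args": {
        "url": "The starting URL for the crawl. It can be a very long url that spans multiple lines if needed.",
        "params": "Optional dictionary of parameters to customize the crawl. See API docs for details.",
        "idempotency_key": "Optional unique key to prevent duplicate jobs."
    },
    "returns": "A dictionary containing the job initiation response on success, or a string containing an error message on failure. This description can also span multiple lines.",
    "raises": {
        "ValueError": "If the URL is invalid.",
        "requests.exceptions.ConnectionError": "If connection fails."
    },
    "tags": ["crawl", "async_job", "start", "api", "long_tag_example", "another", "final_tag"]
}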
universal_mcp/utils/dump_app_tools.py
CHANGED
@@ -7,62 +7,72 @@ from universal_mcp.applications import app_from_slug
 def discover_available_app_slugs():
     apps_dir = Path(__file__).resolve().parent.parent / "applications"
     app_slugs = []
-
+
     for item in apps_dir.iterdir():
-        if not item.is_dir() or item.name.startswith(
+        if not item.is_dir() or item.name.startswith("_"):
             continue
-
+
         if (item / "app.py").exists():
            slug = item.name.replace("_", "-")
            app_slugs.append(slug)
-
+
     return app_slugs

+
 def extract_app_tools(app_slugs):
     all_apps_tools = []
-
+
     for slug in app_slugs:
         try:
             print(f"Loading app: {slug}")
             app_class = app_from_slug(slug)
-
+
             app_instance = app_class(integration=None)
-
+
             tools = app_instance.list_tools()
-
+
             for tool in tools:
                 tool_name = tool.__name__
-                description =
-
-
-                "
-
-
-
-
+                description = (
+                    tool.__doc__.strip().split("\n")[0]
+                    if tool.__doc__
+                    else "No description"
+                )
+
+                all_apps_tools.append(
+                    {
+                        "app_name": slug,
+                        "tool_name": tool_name,
+                        "description": description,
+                    }
+                )
+
         except Exception as e:
             print(f"Error loading app {slug}: {e}")
-
+
     return all_apps_tools

+
 def write_to_csv(app_tools, output_file="app_tools.csv"):
     fieldnames = ["app_name", "tool_name", "description"]
-
-    with open(output_file,
+
+    with open(output_file, "w", newline="") as csvfile:
         writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
         writer.writeheader()
         writer.writerows(app_tools)
-
+
     print(f"CSV file created: {output_file}")

+
 def main():
     app_slugs = discover_available_app_slugs()
     print(f"Found {len(app_slugs)} app slugs: {', '.join(app_slugs)}")
-
+
     app_tools = extract_app_tools(app_slugs)
     print(f"Extracted {len(app_tools)} tools from all apps")
-
+
     write_to_csv(app_tools)

+
 if __name__ == "__main__":
-    main()
+    main()
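The refactor above leaves the CSV shape unchanged: each tool becomes one row keyed by app_name, tool_name, and description. A minimal sketch of that output path, using hypothetical app and tool names purely for illustration:

# Illustration only: the row shape extract_app_tools() builds and write_to_csv() serializes.
# The app/tool names below are invented examples, not taken from the package.
import csv

sample_rows = [
    {"app_name": "tavily", "tool_name": "search", "description": "Search the web"},
    {"app_name": "resend", "tool_name": "send_email", "description": "Send an email"},
]

with open("app_tools.csv", "w", newline="") as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=["app_name", "tool_name", "description"])
    writer.writeheader()
    writer.writerows(sample_rows)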
universal_mcp/utils/openapi.py
CHANGED
@@ -70,6 +70,33 @@ def determine_return_type(operation: dict[str, Any]) -> str:
     return "Any"


+def resolve_schema_reference(reference, schema):
+    """
+    Resolve a JSON schema reference to its target schema.
+
+    Args:
+        reference (str): The reference string (e.g., '#/components/schemas/User')
+        schema (dict): The complete OpenAPI schema that contains the reference
+
+    Returns:
+        dict: The resolved schema, or None if not found
+    """
+    if not reference.startswith("#/"):
+        return None
+
+    # Split the reference path and navigate through the schema
+    parts = reference[2:].split("/")
+    current = schema
+
+    for part in parts:
+        if part in current:
+            current = current[part]
+        else:
+            return None
+
+    return current
+
+
 def generate_api_client(schema):
     """
     Generate a Python API client class from an OpenAPI schema.
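The new resolve_schema_reference helper only follows local '#/' pointers, walking the schema dict key by key. A minimal usage sketch against a toy schema (not taken from the package's tests):

# Toy OpenAPI document for illustration; the helper only resolves local "#/" references.
openapi_doc = {
    "components": {
        "schemas": {
            "User": {"type": "object", "properties": {"id": {"type": "integer"}}}
        }
    }
}

resolved = resolve_schema_reference("#/components/schemas/User", openapi_doc)
# resolved == {"type": "object", "properties": {"id": {"type": "integer"}}}

# Non-local or missing references return None:
assert resolve_schema_reference("https://example.com/schema.json", openapi_doc) is None
assert resolve_schema_reference("#/components/schemas/Missing", openapi_doc) is None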
@@ -130,7 +157,7 @@ def generate_api_client(schema):
             if method in ["get", "post", "put", "delete", "patch", "options", "head"]:
                 operation = path_info[method]
                 method_code, func_name = generate_method_code(
-                    path, method, operation, tool_name
+                    path, method, operation, schema, tool_name
                 )
                 methods.append(method_code)
                 method_names.append(func_name)
@@ -164,7 +191,7 @@ def generate_api_client(schema):
     return class_code


-def generate_method_code(path, method, operation, tool_name=None):
+def generate_method_code(path, method, operation, full_schema, tool_name=None):
     """
     Generate the code for a single API method.

@@ -172,14 +199,15 @@ def generate_method_code(path, method, operation, tool_name=None):
         path (str): The API path (e.g., '/users/{user_id}').
         method (str): The HTTP method (e.g., 'get').
         operation (dict): The operation details from the schema.
+        full_schema (dict): The complete OpenAPI schema, used for reference resolution.
         tool_name (str, optional): The name of the tool/app to prefix the function name with.

     Returns:
         tuple: (method_code, func_name) - The Python code for the method and its name.
     """
     # Extract path parameters from the URL path
-    path_params_in_url = re.findall(r
-
+    path_params_in_url = re.findall(r"{([^}]+)}", path)
+
     # Determine function name
     if "operationId" in operation:
         raw_name = operation["operationId"]
@@ -195,42 +223,179 @@ def generate_method_code(path, method, operation, tool_name=None):
             else:
                 name_parts.append(part)
     func_name = "_".join(name_parts).replace("-", "_").lower()
-
-
-
-
-
+
+    # Only fix isolated 'a' and 'an' as articles, not when they're part of words
+    func_name = re.sub(
+        r"_a([^_a-z])", r"_a_\1", func_name
+    )  # Fix for patterns like retrieve_ablock -> retrieve_a_block
+    func_name = re.sub(
+        r"_a$", r"_a", func_name
+    )  # Don't change if 'a' is at the end of the name
+    func_name = re.sub(
+        r"_an([^_a-z])", r"_an_\1", func_name
+    )  # Fix for patterns like create_anitem -> create_an_item
+    func_name = re.sub(
+        r"_an$", r"_an", func_name
+    )  # Don't change if 'an' is at the end of the name

     # Get parameters and request body
-    #
-
+    # Resolve parameter references before processing
+    resolved_parameters = []
+    for param in operation.get("parameters", []):
+        if "$ref" in param:
+            # Resolve reference to actual parameter object
+            ref_param = resolve_schema_reference(param["$ref"], full_schema)
+            if ref_param:
+                resolved_parameters.append(ref_param)
+            else:
+                print(
+                    f"Warning: Could not resolve parameter reference: {param['$ref']}"
+                )
+        else:
+            resolved_parameters.append(param)
+
+    # Filter out header parameters from the resolved parameters
+    parameters = [param for param in resolved_parameters if param.get("in") != "header"]
+
     has_body = "requestBody" in operation
     body_required = has_body and operation["requestBody"].get("required", False)

+    # Check if the requestBody has actual content or is empty
+    has_empty_body = False
+    if has_body:
+        request_body_content = operation["requestBody"].get("content", {})
+        if not request_body_content or all(
+            not content for content_type, content in request_body_content.items()
+        ):
+            has_empty_body = True
+        else:
+            # Handle empty properties with additionalProperties:true
+            for content_type, content in request_body_content.items():
+                if content_type.startswith("application/json") and "schema" in content:
+                    schema = content["schema"]
+
+                    # Resolve schema reference if present
+                    if "$ref" in schema:
+                        ref_schema = resolve_schema_reference(
+                            schema["$ref"], full_schema
+                        )
+                        if ref_schema:
+                            schema = ref_schema
+
+                    # Check if properties is empty and additionalProperties is true
+                    if (
+                        schema.get("type") == "object"
+                        and schema.get("additionalProperties", False) is True
+                    ):
+                        properties = schema.get("properties", {})
+                        if not properties or len(properties) == 0:
+                            has_empty_body = True
+
+    # Extract request body schema properties and required fields
+    required_fields = []
+    request_body_properties = {}
+    is_array_body = False
+    array_items_schema = None
+
+    if has_body:
+        for content_type, content in (
+            operation["requestBody"].get("content", {}).items()
+        ):
+            if content_type.startswith("application/json") and "schema" in content:
+                schema = content["schema"]
+
+                # Resolve schema reference if present
+                if "$ref" in schema:
+                    ref_schema = resolve_schema_reference(schema["$ref"], full_schema)
+                    if ref_schema:
+                        schema = ref_schema
+
+                # Check if the schema is an array type
+                if schema.get("type") == "array":
+                    is_array_body = True
+                    array_items_schema = schema.get("items", {})
+                    # Try to resolve any reference in items
+                    if "$ref" in array_items_schema:
+                        array_items_schema = resolve_schema_reference(
+                            array_items_schema["$ref"], full_schema
+                        )
+                else:
+                    # Extract required fields from schema
+                    if "required" in schema:
+                        required_fields = schema["required"]
+                    # Extract properties from schema
+                    if "properties" in schema:
+                        request_body_properties = schema["properties"]
+
+                        # Check for nested references in properties
+                        for prop_name, prop_schema in request_body_properties.items():
+                            if "$ref" in prop_schema:
+                                ref_prop_schema = resolve_schema_reference(
+                                    prop_schema["$ref"], full_schema
+                                )
+                                if ref_prop_schema:
+                                    request_body_properties[prop_name] = ref_prop_schema
+
+                    # Handle schemas with empty properties but additionalProperties: true
+                    # by treating them similar to empty bodies
+                    if (
+                        not request_body_properties or len(request_body_properties) == 0
+                    ) and schema.get("additionalProperties") is True:
+                        has_empty_body = True
+
     # Build function arguments
     required_args = []
     optional_args = []
-
-
+
+    # Add path parameters
     for param_name in path_params_in_url:
         if param_name not in required_args:
             required_args.append(param_name)

-
+    # Add query parameters
     for param in parameters:
         param_name = param["name"]
-        if param_name not in required_args:
+        if param_name not in required_args:
             if param.get("required", False):
                 required_args.append(param_name)
             else:
                 optional_args.append(f"{param_name}=None")

-    #
+    # Handle array type request body differently
+    request_body_params = []
     if has_body:
-        if
-
-
-
+        if is_array_body:
+            # For array request bodies, add a single parameter for the entire array
+            array_param_name = "items"
+            # Try to get a better name from the operation or path
+            if func_name.endswith("_list_input"):
+                array_param_name = func_name.replace("_list_input", "")
+            elif "List" in func_name:
+                array_param_name = func_name.split("List")[0].lower() + "_list"
+
+            # Make the array parameter required if the request body is required
+            if body_required:
+                required_args.append(array_param_name)
+            else:
+                optional_args.append(f"{array_param_name}=None")
+
+            # Remember this is an array param
+            request_body_params = [array_param_name]
+        elif request_body_properties:
+            # For object request bodies, add individual properties as parameters
+            for prop_name in request_body_properties:
+                if prop_name in required_fields:
+                    request_body_params.append(prop_name)
+                    if prop_name not in required_args:
+                        required_args.append(prop_name)
+                else:
+                    request_body_params.append(prop_name)
+                    if f"{prop_name}=None" not in optional_args:
+                        optional_args.append(f"{prop_name}=None")
+
+    # If request body is present but empty (content: {}), add a generic request_body parameter
+    if has_empty_body and "request_body=None" not in optional_args:
+        optional_args.append("request_body=None")

     # Combine required and optional arguments
     args = required_args + optional_args
@@ -245,19 +410,32 @@ def generate_method_code(path, method, operation, tool_name=None):

     # Validate required parameters including path parameters
     for param_name in required_args:
-        if param_name
-            body_lines.append(f"        if {param_name} is None:")
-            body_lines.append(
-                f"            raise ValueError(\"Missing required parameter '{param_name}'\")"
-            )
-
-    # Validate required body
-    if has_body and body_required:
-        body_lines.append("        if request_body is None:")
+        body_lines.append(f"        if {param_name} is None:")
         body_lines.append(
-
+            f"            raise ValueError(\"Missing required parameter '{param_name}'\")"
         )

+    # Build request body (handle array and object types differently)
+    if has_body:
+        if is_array_body:
+            # For array request bodies, use the array parameter directly
+            body_lines.append("        # Use items array directly as request body")
+            body_lines.append(f"        request_body = {request_body_params[0]}")
+        elif request_body_properties:
+            # For object request bodies, build the request body from individual parameters
+
+            body_lines.append("        request_body = {")
+
+            for prop_name in request_body_params:
+                # Only include non-None values in the request body
+                body_lines.append(f"            '{prop_name}': {prop_name},")
+
+            body_lines.append("        }")
+
+            body_lines.append(
+                "        request_body = {k: v for k, v in request_body.items() if v is not None}"
+            )
+
     # Format URL directly with path parameters
     url_line = f'        url = f"{{self.base_url}}{path}"'
     body_lines.append(url_line)
@@ -276,30 +454,35 @@ def generate_method_code(path, method, operation, tool_name=None):

     # Make HTTP request using the proper method
     method_lower = method.lower()
+
+    # Determine what to use as the request body argument
+    if has_empty_body:
+        request_body_arg = "request_body"
+    elif not has_body:
+        request_body_arg = "{}"
+    else:
+        request_body_arg = "request_body"
+
     if method_lower == "get":
         body_lines.append("        response = self._get(url, params=query_params)")
     elif method_lower == "post":
-
-
-
-        body_lines.append("        response = self._post(url, data={}, params=query_params)")
+        body_lines.append(
+            f"        response = self._post(url, data={request_body_arg}, params=query_params)"
+        )
     elif method_lower == "put":
-
-
-
-        body_lines.append("        response = self._put(url, data={}, params=query_params)")
+        body_lines.append(
+            f"        response = self._put(url, data={request_body_arg}, params=query_params)"
+        )
     elif method_lower == "patch":
-
-
-
-        body_lines.append("        response = self._patch(url, data={}, params=query_params)")
+        body_lines.append(
+            f"        response = self._patch(url, data={request_body_arg}, params=query_params)"
+        )
     elif method_lower == "delete":
         body_lines.append("        response = self._delete(url, params=query_params)")
     else:
-
-
-
-        body_lines.append(f"        response = self._{method_lower}(url, data={{}}, params=query_params)")
+        body_lines.append(
+            f"        response = self._{method_lower}(url, data={request_body_arg}, params=query_params)"
+        )

     # Handle response
     body_lines.append("        response.raise_for_status()")
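Taken together, these changes mean a POST operation with a JSON object body now generates a method that assembles the body from individual parameters instead of always sending data={}. A rough sketch of the generated output for a hypothetical operation with a required name property and an optional email property; the query-parameter setup and return handling fall outside the hunks shown and are assumed:

# Hypothetical POST /users operation; "users_create_user" is an invented name.
# Rendered as it would appear inside the generated API client class.
    def users_create_user(self, name, email=None):
        if name is None:
            raise ValueError("Missing required parameter 'name'")
        request_body = {
            'name': name,
            'email': email,
        }
        request_body = {k: v for k, v in request_body.items() if v is not None}
        url = f"{self.base_url}/users"
        query_params = {}  # assumed; query-param handling is outside the hunks shown
        response = self._post(url, data=request_body, params=query_params)
        response.raise_for_status()
        return response.json()  # assumed; the return statement is outside the hunks shown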