swarms-7.7.9-py3-none-any.whl → swarms-7.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swarms/agents/self_agent_builder.py +40 -0
- swarms/prompts/agent_self_builder_prompt.py +103 -0
- swarms/schemas/__init__.py +6 -1
- swarms/schemas/agent_class_schema.py +91 -0
- swarms/schemas/agent_mcp_errors.py +18 -0
- swarms/schemas/agent_tool_schema.py +13 -0
- swarms/schemas/llm_agent_schema.py +92 -0
- swarms/schemas/mcp_schemas.py +43 -0
- swarms/structs/__init__.py +4 -0
- swarms/structs/agent.py +305 -262
- swarms/structs/aop.py +3 -1
- swarms/structs/batch_agent_execution.py +64 -0
- swarms/structs/conversation.py +33 -19
- swarms/structs/council_judge.py +179 -93
- swarms/structs/long_agent.py +424 -0
- swarms/structs/ma_utils.py +11 -8
- swarms/structs/malt.py +1 -1
- swarms/structs/swarm_router.py +68 -13
- swarms/tools/__init__.py +12 -0
- swarms/tools/base_tool.py +2840 -264
- swarms/tools/create_agent_tool.py +104 -0
- swarms/tools/mcp_client_call.py +504 -0
- swarms/tools/py_func_to_openai_func_str.py +43 -5
- swarms/tools/pydantic_to_json.py +10 -27
- swarms/utils/audio_processing.py +343 -0
- swarms/utils/index.py +226 -0
- swarms/utils/litellm_wrapper.py +65 -67
- {swarms-7.7.9.dist-info → swarms-7.8.0.dist-info}/METADATA +2 -2
- {swarms-7.7.9.dist-info → swarms-7.8.0.dist-info}/RECORD +32 -21
- swarms/tools/mcp_client.py +0 -246
- swarms/tools/mcp_integration.py +0 -340
- {swarms-7.7.9.dist-info → swarms-7.8.0.dist-info}/LICENSE +0 -0
- {swarms-7.7.9.dist-info → swarms-7.8.0.dist-info}/WHEEL +0 -0
- {swarms-7.7.9.dist-info → swarms-7.8.0.dist-info}/entry_points.txt +0 -0
swarms/tools/pydantic_to_json.py
CHANGED
@@ -39,7 +39,6 @@ def check_pydantic_name(pydantic_type: type[BaseModel]) -> str:
 
 def base_model_to_openai_function(
     pydantic_type: type[BaseModel],
-    output_str: bool = False,
 ) -> dict[str, Any]:
     """
     Convert a Pydantic model to a dictionary representation of functions.
@@ -86,34 +85,18 @@ def base_model_to_openai_function(
     _remove_a_key(parameters, "title")
     _remove_a_key(parameters, "additionalProperties")
 
-    if output_str:
-        out = {
-            "function_call": {
-                "name": name,
-            },
-            "functions": [
-                {
-                    "name": name,
-                    "description": schema["description"],
-                    "parameters": parameters,
-                },
-            ],
-        }
-        return str(out)
-
-    else:
-        return {
-            "function_call": {
+    return {
+        "function_call": {
+            "name": name,
+        },
+        "functions": [
+            {
                 "name": name,
+                "description": schema["description"],
+                "parameters": parameters,
             },
-            "functions": [
-                {
-                    "name": name,
-                    "description": schema["description"],
-                    "parameters": parameters,
-                },
-            ],
-        }
+        ],
+    }
 
 
 def multi_base_model_to_openai_function(
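For context, here is a minimal sketch of calling the helper after this change. It assumes a Pydantic model with a docstring (the schema description is read from it); the model below is made up for illustration. Callers that previously passed the removed output_str=True flag now always get a dict and can stringify it themselves.

from pydantic import BaseModel, Field
from swarms.tools.pydantic_to_json import base_model_to_openai_function


class SearchQuery(BaseModel):
    """Search the web for a query."""  # hypothetical example model

    query: str = Field(..., description="Text to search for")
    max_results: int = Field(5, description="Number of results to return")


spec = base_model_to_openai_function(SearchQuery)
# spec["function_call"]["name"] and spec["functions"][0]["parameters"] hold the
# OpenAI-style function spec; str(spec) replaces the old output_str=True path.
print(spec["functions"][0]["name"])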
swarms/utils/audio_processing.py
ADDED
@@ -0,0 +1,343 @@
+import base64
+from typing import Union, Dict, Any, Tuple
+import requests
+from pathlib import Path
+import wave
+import numpy as np
+
+
+def encode_audio_to_base64(audio_path: Union[str, Path]) -> str:
+    """
+    Encode a WAV file to base64 string.
+
+    Args:
+        audio_path (Union[str, Path]): Path to the WAV file
+
+    Returns:
+        str: Base64 encoded string of the audio file
+
+    Raises:
+        FileNotFoundError: If the audio file doesn't exist
+        ValueError: If the file is not a valid WAV file
+    """
+    try:
+        audio_path = Path(audio_path)
+        if not audio_path.exists():
+            raise FileNotFoundError(
+                f"Audio file not found: {audio_path}"
+            )
+
+        if not audio_path.suffix.lower() == ".wav":
+            raise ValueError("File must be a WAV file")
+
+        with open(audio_path, "rb") as audio_file:
+            audio_data = audio_file.read()
+            return base64.b64encode(audio_data).decode("utf-8")
+    except Exception as e:
+        raise Exception(f"Error encoding audio file: {str(e)}")
+
+
+def decode_base64_to_audio(
+    base64_string: str, output_path: Union[str, Path]
+) -> None:
+    """
+    Decode a base64 string to a WAV file.
+
+    Args:
+        base64_string (str): Base64 encoded audio data
+        output_path (Union[str, Path]): Path where the WAV file should be saved
+
+    Raises:
+        ValueError: If the base64 string is invalid
+        IOError: If there's an error writing the file
+    """
+    try:
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        audio_data = base64.b64decode(base64_string)
+        with open(output_path, "wb") as audio_file:
+            audio_file.write(audio_data)
+    except Exception as e:
+        raise Exception(f"Error decoding audio data: {str(e)}")
+
+
+def download_audio_from_url(
+    url: str, output_path: Union[str, Path]
+) -> None:
+    """
+    Download an audio file from a URL and save it locally.
+
+    Args:
+        url (str): URL of the audio file
+        output_path (Union[str, Path]): Path where the audio file should be saved
+
+    Raises:
+        requests.RequestException: If there's an error downloading the file
+        IOError: If there's an error saving the file
+    """
+    try:
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        response = requests.get(url)
+        response.raise_for_status()
+
+        with open(output_path, "wb") as audio_file:
+            audio_file.write(response.content)
+    except Exception as e:
+        raise Exception(f"Error downloading audio file: {str(e)}")
+
+
+def process_audio_with_model(
+    audio_path: Union[str, Path],
+    model: str,
+    prompt: str,
+    voice: str = "alloy",
+    format: str = "wav",
+) -> Dict[str, Any]:
+    """
+    Process an audio file with a model that supports audio input/output.
+
+    Args:
+        audio_path (Union[str, Path]): Path to the input WAV file
+        model (str): Model name to use for processing
+        prompt (str): Text prompt to accompany the audio
+        voice (str, optional): Voice to use for audio output. Defaults to "alloy"
+        format (str, optional): Audio format. Defaults to "wav"
+
+    Returns:
+        Dict[str, Any]: Model response containing both text and audio if applicable
+
+    Raises:
+        ImportError: If litellm is not installed
+        ValueError: If the model doesn't support audio processing
+    """
+    try:
+        from litellm import (
+            completion,
+            supports_audio_input,
+            supports_audio_output,
+        )
+
+        if not supports_audio_input(model):
+            raise ValueError(
+                f"Model {model} does not support audio input"
+            )
+
+        # Encode the audio file
+        encoded_audio = encode_audio_to_base64(audio_path)
+
+        # Prepare the messages
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": prompt},
+                    {
+                        "type": "input_audio",
+                        "input_audio": {
+                            "data": encoded_audio,
+                            "format": format,
+                        },
+                    },
+                ],
+            }
+        ]
+
+        # Make the API call
+        response = completion(
+            model=model,
+            modalities=["text", "audio"],
+            audio={"voice": voice, "format": format},
+            messages=messages,
+        )
+
+        return response
+    except ImportError:
+        raise ImportError(
+            "Please install litellm: pip install litellm"
+        )
+    except Exception as e:
+        raise Exception(
+            f"Error processing audio with model: {str(e)}"
+        )
+
+
+def read_wav_file(
+    file_path: Union[str, Path],
+) -> Tuple[np.ndarray, int]:
+    """
+    Read a WAV file and return its audio data and sample rate.
+
+    Args:
+        file_path (Union[str, Path]): Path to the WAV file
+
+    Returns:
+        Tuple[np.ndarray, int]: Audio data as numpy array and sample rate
+
+    Raises:
+        FileNotFoundError: If the file doesn't exist
+        ValueError: If the file is not a valid WAV file
+    """
+    try:
+        file_path = Path(file_path)
+        if not file_path.exists():
+            raise FileNotFoundError(
+                f"Audio file not found: {file_path}"
+            )
+
+        with wave.open(str(file_path), "rb") as wav_file:
+            # Get audio parameters
+            n_channels = wav_file.getnchannels()
+            sample_width = wav_file.getsampwidth()
+            frame_rate = wav_file.getframerate()
+            n_frames = wav_file.getnframes()
+
+            # Read audio data
+            frames = wav_file.readframes(n_frames)
+
+            # Convert to numpy array
+            dtype = np.int16 if sample_width == 2 else np.int8
+            audio_data = np.frombuffer(frames, dtype=dtype)
+
+            # Reshape if stereo
+            if n_channels == 2:
+                audio_data = audio_data.reshape(-1, 2)
+
+            return audio_data, frame_rate
+
+    except Exception as e:
+        raise Exception(f"Error reading WAV file: {str(e)}")
+
+
+def write_wav_file(
+    audio_data: np.ndarray,
+    file_path: Union[str, Path],
+    sample_rate: int,
+    sample_width: int = 2,
+) -> None:
+    """
+    Write audio data to a WAV file.
+
+    Args:
+        audio_data (np.ndarray): Audio data as numpy array
+        file_path (Union[str, Path]): Path where to save the WAV file
+        sample_rate (int): Sample rate of the audio
+        sample_width (int, optional): Sample width in bytes. Defaults to 2 (16-bit)
+
+    Raises:
+        ValueError: If the audio data is invalid
+        IOError: If there's an error writing the file
+    """
+    try:
+        file_path = Path(file_path)
+        file_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Ensure audio data is in the correct format
+        if audio_data.dtype != np.int16 and sample_width == 2:
+            audio_data = (audio_data * 32767).astype(np.int16)
+        elif audio_data.dtype != np.int8 and sample_width == 1:
+            audio_data = (audio_data * 127).astype(np.int8)
+
+        # Determine number of channels
+        n_channels = (
+            2
+            if len(audio_data.shape) > 1 and audio_data.shape[1] == 2
+            else 1
+        )
+
+        with wave.open(str(file_path), "wb") as wav_file:
+            wav_file.setnchannels(n_channels)
+            wav_file.setsampwidth(sample_width)
+            wav_file.setframerate(sample_rate)
+            wav_file.writeframes(audio_data.tobytes())
+
+    except Exception as e:
+        raise Exception(f"Error writing WAV file: {str(e)}")
+
+
+def normalize_audio(audio_data: np.ndarray) -> np.ndarray:
+    """
+    Normalize audio data to have maximum amplitude of 1.0.
+
+    Args:
+        audio_data (np.ndarray): Input audio data
+
+    Returns:
+        np.ndarray: Normalized audio data
+    """
+    return audio_data / np.max(np.abs(audio_data))
+
+
+def convert_to_mono(audio_data: np.ndarray) -> np.ndarray:
+    """
+    Convert stereo audio to mono by averaging channels.
+
+    Args:
+        audio_data (np.ndarray): Input audio data (stereo)
+
+    Returns:
+        np.ndarray: Mono audio data
+    """
+    if len(audio_data.shape) == 1:
+        return audio_data
+    return np.mean(audio_data, axis=1)
+
+
+def encode_wav_to_base64(
+    audio_data: np.ndarray, sample_rate: int
+) -> str:
+    """
+    Convert audio data to base64 encoded WAV string.
+
+    Args:
+        audio_data (np.ndarray): Audio data
+        sample_rate (int): Sample rate of the audio
+
+    Returns:
+        str: Base64 encoded WAV data
+    """
+    # Create a temporary WAV file in memory
+    with wave.open("temp.wav", "wb") as wav_file:
+        wav_file.setnchannels(1 if len(audio_data.shape) == 1 else 2)
+        wav_file.setsampwidth(2)  # 16-bit
+        wav_file.setframerate(sample_rate)
+        wav_file.writeframes(audio_data.tobytes())
+
+    # Read the file and encode to base64
+    with open("temp.wav", "rb") as f:
+        wav_bytes = f.read()
+
+    # Clean up temporary file
+    Path("temp.wav").unlink()
+
+    return base64.b64encode(wav_bytes).decode("utf-8")
+
+
+def decode_base64_to_wav(
+    base64_string: str,
+) -> Tuple[np.ndarray, int]:
+    """
+    Convert base64 encoded WAV string to audio data and sample rate.
+
+    Args:
+        base64_string (str): Base64 encoded WAV data
+
+    Returns:
+        Tuple[np.ndarray, int]: Audio data and sample rate
+    """
+    # Decode base64 string
+    wav_bytes = base64.b64decode(base64_string)
+
+    # Write to temporary file
+    with open("temp.wav", "wb") as f:
+        f.write(wav_bytes)
+
+    # Read the WAV file
+    audio_data, sample_rate = read_wav_file("temp.wav")
+
+    # Clean up temporary file
+    Path("temp.wav").unlink()
+
+    return audio_data, sample_rate
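A brief usage sketch of the new audio helpers follows. It assumes a local sample.wav exists; the file names are illustrative only, and only functions defined in the module above are used.

from swarms.utils.audio_processing import (
    read_wav_file,
    convert_to_mono,
    normalize_audio,
    write_wav_file,
    encode_audio_to_base64,
)

# Read a recording, downmix stereo to mono, normalize, and write it back out.
audio, sample_rate = read_wav_file("sample.wav")
mono = normalize_audio(convert_to_mono(audio))
write_wav_file(mono, "sample_mono.wav", sample_rate)

# The base64 helper produces the payload that process_audio_with_model()
# embeds in the litellm "input_audio" message content.
encoded = encode_audio_to_base64("sample_mono.wav")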
swarms/utils/index.py
ADDED
@@ -0,0 +1,226 @@
+def exists(val):
+    return val is not None
+
+
+def format_dict_to_string(data: dict, indent_level=0, use_colon=True):
+    """
+    Recursively formats a dictionary into a multi-line string.
+
+    Args:
+        data (dict): The dictionary to format
+        indent_level (int): Current indentation level for nested structures
+        use_colon (bool): Whether to use "key: value" or "key value" format
+
+    Returns:
+        str: Formatted string representation of the dictionary
+    """
+    if not isinstance(data, dict):
+        return str(data)
+
+    lines = []
+    indent = "  " * indent_level  # 2 spaces per indentation level
+    separator = ": " if use_colon else " "
+
+    for key, value in data.items():
+        if isinstance(value, dict):
+            # Recursive case: nested dictionary
+            lines.append(f"{indent}{key}:")
+            nested_string = format_dict_to_string(
+                value, indent_level + 1, use_colon
+            )
+            lines.append(nested_string)
+        else:
+            # Base case: simple key-value pair
+            lines.append(f"{indent}{key}{separator}{value}")
+
+    return "\n".join(lines)
+
+
+def format_data_structure(
+    data: any, indent_level: int = 0, max_depth: int = 10
+) -> str:
+    """
+    Fast formatter for any Python data structure into readable new-line format.
+
+    Args:
+        data: Any Python data structure to format
+        indent_level (int): Current indentation level for nested structures
+        max_depth (int): Maximum depth to prevent infinite recursion
+
+    Returns:
+        str: Formatted string representation with new lines
+    """
+    if indent_level >= max_depth:
+        return f"{'  ' * indent_level}... (max depth reached)"
+
+    indent = "  " * indent_level
+    data_type = type(data)
+
+    # Fast type checking using type() instead of isinstance() for speed
+    if data_type is dict:
+        if not data:
+            return f"{indent}{{}} (empty dict)"
+
+        lines = []
+        for key, value in data.items():
+            if type(value) in (dict, list, tuple, set):
+                lines.append(f"{indent}{key}:")
+                lines.append(
+                    format_data_structure(
+                        value, indent_level + 1, max_depth
+                    )
+                )
+            else:
+                lines.append(f"{indent}{key}: {value}")
+        return "\n".join(lines)
+
+    elif data_type is list:
+        if not data:
+            return f"{indent}[] (empty list)"
+
+        lines = []
+        for i, item in enumerate(data):
+            if type(item) in (dict, list, tuple, set):
+                lines.append(f"{indent}[{i}]:")
+                lines.append(
+                    format_data_structure(
+                        item, indent_level + 1, max_depth
+                    )
+                )
+            else:
+                lines.append(f"{indent}{item}")
+        return "\n".join(lines)
+
+    elif data_type is tuple:
+        if not data:
+            return f"{indent}() (empty tuple)"
+
+        lines = []
+        for i, item in enumerate(data):
+            if type(item) in (dict, list, tuple, set):
+                lines.append(f"{indent}({i}):")
+                lines.append(
+                    format_data_structure(
+                        item, indent_level + 1, max_depth
+                    )
+                )
+            else:
+                lines.append(f"{indent}{item}")
+        return "\n".join(lines)
+
+    elif data_type is set:
+        if not data:
+            return f"{indent}set() (empty set)"
+
+        lines = []
+        for item in sorted(
+            data, key=str
+        ):  # Sort for consistent output
+            if type(item) in (dict, list, tuple, set):
+                lines.append(f"{indent}set item:")
+                lines.append(
+                    format_data_structure(
+                        item, indent_level + 1, max_depth
+                    )
+                )
+            else:
+                lines.append(f"{indent}{item}")
+        return "\n".join(lines)
+
+    elif data_type is str:
+        # Handle multi-line strings
+        if "\n" in data:
+            lines = data.split("\n")
+            return "\n".join(f"{indent}{line}" for line in lines)
+        return f"{indent}{data}"
+
+    elif data_type in (int, float, bool, type(None)):
+        return f"{indent}{data}"
+
+    else:
+        # Handle other types (custom objects, etc.)
+        if hasattr(data, "__dict__"):
+            # Object with attributes
+            lines = [f"{indent}{data_type.__name__} object:"]
+            for attr, value in data.__dict__.items():
+                if not attr.startswith(
+                    "_"
+                ):  # Skip private attributes
+                    if type(value) in (dict, list, tuple, set):
+                        lines.append(f"{indent}  {attr}:")
+                        lines.append(
+                            format_data_structure(
+                                value, indent_level + 2, max_depth
+                            )
+                        )
+                    else:
+                        lines.append(f"{indent}  {attr}: {value}")
+            return "\n".join(lines)
+        else:
+            # Fallback for other types
+            return f"{indent}{data} ({data_type.__name__})"
+
+
+# test_dict = {
+#     "name": "John",
+#     "age": 30,
+#     "address": {
+#         "street": "123 Main St",
+#         "city": "Anytown",
+#         "state": "CA",
+#         "zip": "12345"
+#     }
+# }
+
+# print(format_dict_to_string(test_dict))
+
+
+# # Example usage of format_data_structure:
+# if __name__ == "__main__":
+#     # Test different data structures
+
+#     # Dictionary
+#     test_dict = {
+#         "name": "John",
+#         "age": 30,
+#         "address": {
+#             "street": "123 Main St",
+#             "city": "Anytown"
+#         }
+#     }
+#     print("=== Dictionary ===")
+#     print(format_data_structure(test_dict))
+#     print()
+
+#     # List
+#     test_list = ["apple", "banana", {"nested": "dict"}, [1, 2, 3]]
+#     print("=== List ===")
+#     print(format_data_structure(test_list))
+#     print()
+
+#     # Tuple
+#     test_tuple = ("first", "second", {"key": "value"}, (1, 2))
+#     print("=== Tuple ===")
+#     print(format_data_structure(test_tuple))
+#     print()
+
+#     # Set
+#     test_set = {"apple", "banana", "cherry"}
+#     print("=== Set ===")
+#     print(format_data_structure(test_set))
+#     print()
+
+#     # Mixed complex structure
+#     complex_data = {
+#         "users": [
+#             {"name": "Alice", "scores": [95, 87, 92]},
+#             {"name": "Bob", "scores": [88, 91, 85]}
+#         ],
+#         "metadata": {
+#             "total_users": 2,
+#             "categories": ("students", "teachers"),
+#             "settings": {"debug": True, "version": "1.0"}
+#         }
+#     }
+#     print("=== Complex Structure ===")
+#     print(format_data_structure(complex_data))