swarms 7.6.1__py3-none-any.whl → 7.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. swarms/__init__.py +1 -0
  2. swarms/agents/__init__.py +4 -5
  3. swarms/agents/flexion_agent.py +2 -1
  4. swarms/agents/reasoning_agents.py +10 -0
  5. swarms/client/__init__.py +15 -0
  6. swarms/prompts/multi_agent_collab_prompt.py +313 -0
  7. swarms/structs/__init__.py +10 -17
  8. swarms/structs/agent.py +178 -262
  9. swarms/structs/base_swarm.py +0 -7
  10. swarms/structs/concurrent_workflow.py +2 -2
  11. swarms/structs/conversation.py +16 -2
  12. swarms/structs/de_hallucination_swarm.py +8 -4
  13. swarms/structs/dynamic_conversational_swarm.py +226 -0
  14. swarms/structs/groupchat.py +80 -84
  15. swarms/structs/hiearchical_swarm.py +1 -1
  16. swarms/structs/hybrid_hiearchical_peer_swarm.py +256 -0
  17. swarms/structs/majority_voting.py +1 -1
  18. swarms/structs/mixture_of_agents.py +1 -1
  19. swarms/structs/multi_agent_exec.py +63 -139
  20. swarms/structs/multi_agent_orchestrator.py +1 -1
  21. swarms/structs/output_types.py +3 -0
  22. swarms/structs/rearrange.py +66 -205
  23. swarms/structs/sequential_workflow.py +34 -47
  24. swarms/structs/swarm_router.py +3 -2
  25. swarms/telemetry/bootup.py +19 -38
  26. swarms/telemetry/main.py +62 -22
  27. swarms/tools/tool_schema_base_model.py +57 -0
  28. swarms/utils/auto_download_check_packages.py +2 -2
  29. swarms/utils/disable_logging.py +0 -17
  30. swarms/utils/history_output_formatter.py +8 -3
  31. swarms/utils/litellm_wrapper.py +117 -1
  32. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/METADATA +1 -5
  33. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/RECORD +37 -37
  34. swarms/structs/agent_security.py +0 -318
  35. swarms/structs/airflow_swarm.py +0 -430
  36. swarms/structs/output_type.py +0 -18
  37. swarms/utils/agent_ops_check.py +0 -26
  38. swarms/utils/pandas_utils.py +0 -92
  39. /swarms/{structs/swarms_api.py → client/main.py} +0 -0
  40. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/LICENSE +0 -0
  41. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/WHEEL +0 -0
  42. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/entry_points.txt +0 -0
swarms/structs/rearrange.py

@@ -2,20 +2,21 @@ import asyncio
 import json
 import uuid
 from concurrent.futures import ThreadPoolExecutor
-from datetime import datetime
 from typing import Any, Callable, Dict, List, Optional
 
-from pydantic import BaseModel, Field
 
-from swarms.schemas.agent_step_schemas import ManySteps
 from swarms.structs.agent import Agent
-from swarms.structs.agents_available import showcase_available_agents
 from swarms.structs.base_swarm import BaseSwarm
 
+from swarms.utils.any_to_str import any_to_str
+from swarms.utils.history_output_formatter import (
+    history_output_formatter,
+)
 from swarms.utils.loguru_logger import initialize_logger
 from swarms.telemetry.main import log_agent_data
 from swarms.structs.conversation import Conversation
-from swarms.structs.output_type import OutputType
+from swarms.structs.output_types import OutputType
+from swarms.structs.multi_agent_exec import get_agents_info
 
 logger = initialize_logger(log_folder="rearrange")
 
@@ -24,35 +25,6 @@ def swarm_id():
     return uuid.uuid4().hex
 
 
-class AgentRearrangeInput(BaseModel):
-    swarm_id: Optional[str] = None
-    name: Optional[str] = None
-    description: Optional[str] = None
-    flow: Optional[str] = None
-    max_loops: Optional[int] = None
-    time: str = Field(
-        default_factory=lambda: datetime.now().strftime(
-            "%Y-%m-%d %H:%M:%S"
-        ),
-        description="The time the agent was created.",
-    )
-    output_type: OutputType = Field(default="final")
-
-
-class AgentRearrangeOutput(BaseModel):
-    output_id: str = Field(
-        default=swarm_id(), description="Output-UUID"
-    )
-    input: Optional[AgentRearrangeInput] = None
-    outputs: Optional[List[ManySteps]] = None
-    time: str = Field(
-        default_factory=lambda: datetime.now().strftime(
-            "%Y-%m-%d %H:%M:%S"
-        ),
-        description="The time the agent was created.",
-    )
-
-
 class AgentRearrange(BaseSwarm):
     """
     A class representing a swarm of agents for rearranging tasks.
@@ -117,6 +89,7 @@ class AgentRearrange(BaseSwarm):
         autosave: bool = True,
         return_entire_history: bool = False,
         rules: str = None,
+        team_awareness: bool = False,
         *args,
         **kwargs,
     ):
@@ -146,56 +119,18 @@ class AgentRearrange(BaseSwarm):
         self.no_use_clusterops = no_use_clusterops
         self.autosave = autosave
         self.return_entire_history = return_entire_history
-        self.output_schema = AgentRearrangeOutput(
-            input=AgentRearrangeInput(
-                swarm_id=id,
-                name=name,
-                description=description,
-                flow=flow,
-                max_loops=max_loops,
-            ),
-            outputs=[],
-        )
 
-        self.conversation = Conversation()
+        self.conversation = Conversation(
+            time_enabled=False, token_count=False
+        )
 
         if rules:
-            self.conversation.add("user", rules)
-
-    def showcase_agents(self):
-        # Get formatted agent info once
-        agents_available = showcase_available_agents(
-            name=self.name,
-            description=self.description,
-            agents=self.agents,
-            format="Table",
-        )
+            self.conversation.add("User", rules)
 
-        return agents_available
+        if team_awareness is True:
+            agents_info = get_agents_info(self.agents, self.name)
 
-    def rearrange_prompt_prep(self) -> str:
-        """Prepares a formatted prompt describing the swarm configuration.
-
-        Returns:
-            str: A formatted string containing the swarm's name, description,
-            flow pattern, and participating agents.
-        """
-        agents_available = self.showcase_agents()
-        prompt = f"""
-        ===== Swarm Configuration =====
-
-        Name: {self.name}
-        Description: {self.description}
-
-        ===== Execution Flow =====
-        {self.flow}
-
-        ===== Participating Agents =====
-        {agents_available}
-
-        ===========================
-        """
-        return prompt
+            self.conversation.add("Your Swarm", agents_info)
 
     def set_custom_flow(self, flow: str):
         self.flow = flow
@@ -313,7 +248,7 @@ class AgentRearrange(BaseSwarm):
             Exception: For any other errors during execution
         """
         try:
-            self.conversation.add("user", task)
+            self.conversation.add("User", task)
 
             if not self.validate_flow():
                 logger.error("Flow validation failed")
@@ -321,15 +256,13 @@ class AgentRearrange(BaseSwarm):
 
             tasks = self.flow.split("->")
            current_task = task
-            all_responses = []
            response_dict = {}
-            previous_agent = None
 
            logger.info(
                f"Starting task execution with {len(tasks)} steps"
            )
 
-            # Handle custom tasks
+            # # Handle custom tasks
            if custom_tasks is not None:
                logger.info("Processing custom tasks")
                c_agent_name, c_task = next(
@@ -354,150 +287,65 @@ class AgentRearrange(BaseSwarm):
                         name.strip() for name in task.split(",")
                     ]
 
-                    # Prepare prompt with previous agent info
-                    prompt_prefix = ""
-                    if previous_agent and task_idx > 0:
-                        prompt_prefix = f"Previous agent {previous_agent} output: {current_task}\n"
-                    elif task_idx == 0:
-                        prompt_prefix = "Initial task: "
-
                     if len(agent_names) > 1:
                         # Parallel processing
                         logger.info(
                             f"Running agents in parallel: {agent_names}"
                         )
-                        results = []
 
                         for agent_name in agent_names:
-                            if agent_name == "H":
-                                if (
-                                    self.human_in_the_loop
-                                    and self.custom_human_in_the_loop
-                                ):
-                                    current_task = (
-                                        self.custom_human_in_the_loop(
-                                            prompt_prefix
-                                            + str(current_task)
-                                        )
-                                    )
-                                else:
-                                    current_task = input(
-                                        prompt_prefix
-                                        + "Enter your response: "
-                                    )
-                                results.append(current_task)
-                                response_dict[agent_name] = (
-                                    current_task
-                                )
-                            else:
-                                agent = self.agents[agent_name]
-                                task_with_context = (
-                                    prompt_prefix + str(current_task)
-                                    if current_task
-                                    else prompt_prefix
-                                )
-                                result = agent.run(
-                                    task=task_with_context,
-                                    img=img,
-                                    is_last=is_last,
-                                    *args,
-                                    **kwargs,
-                                )
-                                result = str(result)
-                                self.conversation.add(
-                                    agent.agent_name, result
-                                )
-                                results.append(result)
-                                response_dict[agent_name] = result
-                                self.output_schema.outputs.append(
-                                    agent.agent_output
-                                )
-                                logger.debug(
-                                    f"Agent {agent_name} output: {result}"
-                                )
-
-                        current_task = "; ".join(results)
-                        all_responses.extend(results)
-                        previous_agent = ",".join(agent_names)
-
-                    else:
-                        # Sequential processing
-                        logger.info(
-                            f"Running agent sequentially: {agent_names[0]}"
-                        )
-                        agent_name = agent_names[0]
-
-                        if agent_name == "H":
-                            if (
-                                self.human_in_the_loop
-                                and self.custom_human_in_the_loop
-                            ):
-                                current_task = (
-                                    self.custom_human_in_the_loop(
-                                        prompt_prefix
-                                        + str(current_task)
-                                    )
-                                )
-                            else:
-                                current_task = input(
-                                    prompt_prefix
-                                    + "Enter the next task: "
-                                )
-                            response_dict[agent_name] = current_task
-                        else:
                             agent = self.agents[agent_name]
-                            task_with_context = (
-                                prompt_prefix + str(current_task)
-                                if current_task
-                                else prompt_prefix
-                            )
-                            current_task = agent.run(
-                                task=task_with_context,
+                            result = agent.run(
+                                task=self.conversation.get_str(),
                                 img=img,
                                 is_last=is_last,
                                 *args,
                                 **kwargs,
                             )
-                            current_task = str(current_task)
+                            result = any_to_str(result)
+
                             self.conversation.add(
-                                agent.agent_name, current_task
-                            )
-                            response_dict[agent_name] = current_task
-                            self.output_schema.outputs.append(
-                                agent.agent_output
+                                agent.agent_name, result
                             )
+
+                            response_dict[agent_name] = result
                             logger.debug(
-                                f"Agent {agent_name} output: {current_task}"
+                                f"Agent {agent_name} output: {result}"
                             )
 
-                        all_responses.append(
-                            f"Agent Name: {agent.agent_name} \n Output: {current_task} "
+                        ",".join(agent_names)
+
+                    else:
+                        # Sequential processing
+                        logger.info(
+                            f"Running agent sequentially: {agent_names[0]}"
                        )
-                        previous_agent = agent_name
+                        agent_name = agent_names[0]
 
-                loop_count += 1
+                        agent = self.agents[agent_name]
 
-            logger.info("Task execution completed")
+                        current_task = agent.run(
+                            task=self.conversation.get_str(),
+                            img=img,
+                            is_last=is_last,
+                            *args,
+                            **kwargs,
+                        )
+                        current_task = any_to_str(current_task)
 
-            if self.return_json:
-                return self.output_schema.model_dump_json(indent=4)
+                        self.conversation.add(
+                            agent.agent_name, current_task
+                        )
 
-            if self.return_entire_history:
-                return self.output_schema.model_dump_json(indent=4)
+                        response_dict[agent_name] = current_task
 
-            # Handle different output types
-            if self.output_type == "all":
-                output = " ".join(all_responses)
-            elif self.output_type == "list":
-                output = all_responses
-            elif self.output_type == "dict":
-                output = (
-                    self.conversation.return_messages_as_dictionary()
-                )
-            else:  # "final"
-                output = current_task
+                loop_count += 1
+
+            logger.info("Task execution completed")
 
-            return output
+            return history_output_formatter(
+                self.conversation, self.output_type
+            )
 
         except Exception as e:
             self._catch_error(e)
@@ -542,13 +390,19 @@ class AgentRearrange(BaseSwarm):
             The result from executing the task through the cluster operations wrapper.
         """
         try:
-            return self._run(
+            log_agent_data(self.to_dict())
+
+            out = self._run(
                 task=task,
                 img=img,
                 *args,
                 **kwargs,
             )
 
+            log_agent_data(self.to_dict())
+
+            return out
+
         except Exception as e:
             self._catch_error(e)
 
@@ -780,6 +634,8 @@ class AgentRearrange(BaseSwarm):
 
 
 def rearrange(
+    name: str = None,
+    description: str = None,
     agents: List[Agent] = None,
     flow: str = None,
     task: str = None,
@@ -807,6 +663,11 @@ def rearrange(
        rearrange(agents, flow, task)
     """
     agent_system = AgentRearrange(
-        agents=agents, flow=flow, *args, **kwargs
+        name=name,
+        description=description,
+        agents=agents,
+        flow=flow,
+        *args,
+        **kwargs,
     )
-    return agent_system.run(task, img=img, *args, **kwargs)
+    return agent_system.run(task=task, img=img)
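
Taken together, the rearrange.py changes drop the Pydantic input/output schemas in favor of the shared conversation: each agent now receives self.conversation.get_str() as its task, results pass through any_to_str, and the return value is produced by history_output_formatter according to output_type. The sketch below exercises the 7.6.4 surface visible in this diff; the agent names, model_name parameter, top-level Agent import, and flow string are illustrative assumptions drawn from the wider 7.6.x API, not from this diff.

# Minimal usage sketch of the 7.6.4 AgentRearrange surface shown above.
# Agent names, model_name, and the flow string are illustrative assumptions.
from swarms import Agent
from swarms.structs.rearrange import AgentRearrange, rearrange

researcher = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
writer = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)

swarm = AgentRearrange(
    name="demo-rearrange",
    description="Research, then write",
    agents=[researcher, writer],
    flow="Researcher -> Writer",   # "->" separates steps; commas run agents in parallel
    max_loops=1,
    team_awareness=True,           # new in 7.6.4: posts get_agents_info() into the conversation
    output_type="dict",            # result now comes from history_output_formatter
)
print(swarm.run(task="Summarize the 7.6.4 changes"))

# The functional helper now forwards name/description and calls run(task=..., img=...).
print(
    rearrange(
        name="demo-fn",
        description="One-shot helper",
        agents=[researcher, writer],
        flow="Researcher -> Writer",
        task="Summarize the 7.6.4 changes",
    )
)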
swarms/structs/sequential_workflow.py

@@ -1,8 +1,9 @@
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from typing import List, Optional
+
 from swarms.structs.agent import Agent
+from swarms.structs.output_types import OutputType
 from swarms.structs.rearrange import AgentRearrange
-from swarms.structs.output_type import OutputType
-from concurrent.futures import ThreadPoolExecutor, as_completed
 from swarms.utils.loguru_logger import initialize_logger
 
 logger = initialize_logger(log_folder="sequential_workflow")
@@ -10,18 +11,20 @@ logger = initialize_logger(log_folder="sequential_workflow")
 
 class SequentialWorkflow:
     """
-    Initializes a SequentialWorkflow object, which orchestrates the execution of a sequence of agents.
+    A class that orchestrates the execution of a sequence of agents in a defined workflow.
 
     Args:
         name (str, optional): The name of the workflow. Defaults to "SequentialWorkflow".
         description (str, optional): A description of the workflow. Defaults to "Sequential Workflow, where agents are executed in a sequence."
-        agents (List[Agent], optional): The list of agents in the workflow. Defaults to None.
-        max_loops (int, optional): The maximum number of loops to execute the workflow. Defaults to 1.
-        *args: Variable length argument list.
-        **kwargs: Arbitrary keyword arguments.
+        agents (List[Agent], optional): A list of agents that will be part of the workflow. Defaults to an empty list.
+        max_loops (int, optional): The maximum number of times to execute the workflow. Defaults to 1.
+        output_type (OutputType, optional): The format of the output from the workflow. Defaults to "dict".
+        shared_memory_system (callable, optional): A callable for managing shared memory between agents. Defaults to None.
+        *args: Additional positional arguments.
+        **kwargs: Additional keyword arguments.
 
     Raises:
-        ValueError: If agents list is None or empty, or if max_loops is 0
+        ValueError: If the agents list is None or empty, or if max_loops is set to 0.
     """
 
     def __init__(
@@ -31,9 +34,7 @@ class SequentialWorkflow:
         agents: List[Agent] = [],
         max_loops: int = 1,
         output_type: OutputType = "dict",
-        return_json: bool = False,
         shared_memory_system: callable = None,
-        return_entire_history: bool = False,
         *args,
         **kwargs,
     ):
@@ -42,9 +43,7 @@ class SequentialWorkflow:
         self.agents = agents
         self.max_loops = max_loops
         self.output_type = output_type
-        self.return_json = return_json
         self.shared_memory_system = shared_memory_system
-        self.return_entire_history = return_entire_history
 
         self.reliability_check()
         self.flow = self.sequential_flow()
@@ -56,7 +55,6 @@ class SequentialWorkflow:
             flow=self.flow,
             max_loops=max_loops,
             output_type=output_type,
-            return_json=return_json,
             shared_memory_system=shared_memory_system,
             *args,
             **kwargs,
@@ -101,7 +99,7 @@ class SequentialWorkflow:
         if self.max_loops == 0:
             raise ValueError("max_loops cannot be 0")
 
-        logger.info("Checks completed your swarm is ready.")
+        logger.info("Checks completed; your swarm is ready.")
 
     def run(
         self,
@@ -114,25 +112,25 @@ class SequentialWorkflow:
         no_use_clusterops: bool = True,
         *args,
         **kwargs,
-    ) -> str:
+    ):
         """
-        Executes a task through the agents in the dynamically constructed flow.
+        Executes a specified task through the agents in the dynamically constructed flow.
 
         Args:
             task (str): The task for the agents to execute.
-            device (str): The device to use for the agents to execute.
-            all_cores (bool): Whether to use all cores.
-            all_gpus (bool): Whether to use all gpus.
-            device_id (int): The device id to use for the agents to execute.
-            no_use_clusterops (bool): Whether to use clusterops.
-
+            img (Optional[str]): An optional image input for the agents.
+            device (str): The device to use for the agents to execute. Defaults to "cpu".
+            all_cores (bool): Whether to utilize all CPU cores. Defaults to False.
+            all_gpus (bool): Whether to utilize all available GPUs. Defaults to False.
+            device_id (int): The specific device ID to use for execution. Defaults to 0.
+            no_use_clusterops (bool): Whether to avoid using cluster operations. Defaults to True.
 
         Returns:
             str: The final result after processing through all agents.
 
         Raises:
-            ValueError: If task is None or empty
-            Exception: If any error occurs during task execution
+            ValueError: If the task is None or empty.
+            Exception: If any error occurs during task execution.
         """
 
         try:
@@ -143,17 +141,6 @@ class SequentialWorkflow:
                 **kwargs,
             )
 
-            if self.output_type == "dict":
-                result = (
-                    self.agent_rearrange.conversation.return_messages_as_dictionary()
-                )
-            elif self.output_type == "list":
-                result = (
-                    self.agent_rearrange.conversation.return_messages_as_list()
-                )
-            elif self.output_type == "str" or self.return_json:
-                result = self.agent_rearrange.conversation.get_str()
-
             return result
         except Exception as e:
             logger.error(
@@ -161,7 +148,7 @@ class SequentialWorkflow:
             )
             raise e
 
-    def __call__(self, task: str, *args, **kwargs) -> str:
+    def __call__(self, task: str, *args, **kwargs):
         return self.run(task, *args, **kwargs)
 
     def run_batched(self, tasks: List[str]) -> List[str]:
@@ -169,14 +156,14 @@ class SequentialWorkflow:
         Executes a batch of tasks through the agents in the dynamically constructed flow.
 
         Args:
-            tasks (List[str]): The tasks for the agents to execute.
+            tasks (List[str]): A list of tasks for the agents to execute.
 
         Returns:
-            List[str]: The final results after processing through all agents.
+            List[str]: A list of final results after processing through all agents.
 
         Raises:
-            ValueError: If tasks is None or empty
-            Exception: If any error occurs during task execution
+            ValueError: If tasks is None or empty.
+            Exception: If any error occurs during task execution.
         """
         if not tasks or not all(
             isinstance(task, str) for task in tasks
@@ -195,7 +182,7 @@ class SequentialWorkflow:
 
     async def run_async(self, task: str) -> str:
         """
-        Executes the task through the agents in the dynamically constructed flow asynchronously.
+        Executes the specified task through the agents in the dynamically constructed flow asynchronously.
 
         Args:
             task (str): The task for the agents to execute.
@@ -204,8 +191,8 @@ class SequentialWorkflow:
             str: The final result after processing through all agents.
 
         Raises:
-            ValueError: If task is None or empty
-            Exception: If any error occurs during task execution
+            ValueError: If task is None or empty.
+            Exception: If any error occurs during task execution.
         """
         if not task or not isinstance(task, str):
             raise ValueError("Task must be a non-empty string")
@@ -223,14 +210,14 @@ class SequentialWorkflow:
         Executes a batch of tasks through the agents in the dynamically constructed flow concurrently.
 
         Args:
-            tasks (List[str]): The tasks for the agents to execute.
+            tasks (List[str]): A list of tasks for the agents to execute.
 
         Returns:
-            List[str]: The final results after processing through all agents.
+            List[str]: A list of final results after processing through all agents.
 
         Raises:
-            ValueError: If tasks is None or empty
-            Exception: If any error occurs during task execution
+            ValueError: If tasks is None or empty.
+            Exception: If any error occurs during task execution.
         """
         if not tasks or not all(
             isinstance(task, str) for task in tasks
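
With return_json and return_entire_history removed, SequentialWorkflow now delegates all result formatting to its inner AgentRearrange through output_type. A minimal sketch of the 7.6.4 constructor and run call follows; the agent names and model_name are illustrative assumptions, not part of this diff.

# Minimal sketch of the 7.6.4 SequentialWorkflow surface shown above.
# Agent names and model_name are illustrative assumptions.
from swarms import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow

workflow = SequentialWorkflow(
    name="doc-pipeline",
    agents=[
        Agent(agent_name="Outliner", model_name="gpt-4o-mini", max_loops=1),
        Agent(agent_name="Drafter", model_name="gpt-4o-mini", max_loops=1),
    ],
    max_loops=1,
    output_type="dict",  # replaces the removed return_json / return_entire_history flags
)

result = workflow.run(task="Draft a short changelog entry for 7.6.4")
print(result)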
swarms/structs/swarm_router.py

@@ -18,7 +18,7 @@ from swarms.structs.rearrange import AgentRearrange
 from swarms.structs.sequential_workflow import SequentialWorkflow
 from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
 from swarms.structs.swarm_matcher import swarm_matcher
-from swarms.structs.output_type import OutputType
+from swarms.structs.output_types import OutputType
 from swarms.utils.loguru_logger import initialize_logger
 from swarms.structs.malt import MALT
 from swarms.structs.deep_research_swarm import DeepResearchSwarm
@@ -193,7 +193,8 @@ class SwarmRouter:
         )
 
         # Handle Automated Prompt Engineering
-        self.activate_ape()
+        if self.auto_generate_prompts is True:
+            self.activate_ape()
 
         # Handle shared memory
         if self.shared_memory_system is not None:
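
The SwarmRouter change is behavioral: activate_ape() now runs only when auto_generate_prompts is enabled, instead of unconditionally at construction. A hedged sketch follows; apart from agents, swarm_type, and auto_generate_prompts, the constructor arguments are assumptions based on the broader 7.6.x API, not on this diff.

# Sketch of the gated Automated Prompt Engineering behavior shown above.
# Constructor arguments other than agents/swarm_type/auto_generate_prompts are assumptions.
from swarms import Agent
from swarms.structs.swarm_router import SwarmRouter

router = SwarmRouter(
    name="router-demo",
    agents=[Agent(agent_name="Analyst", model_name="gpt-4o-mini", max_loops=1)],
    swarm_type="SequentialWorkflow",
    auto_generate_prompts=True,  # 7.6.1 always ran activate_ape(); 7.6.4 runs it only when True
)
print(router.run("Review the quarterly numbers"))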
swarms/telemetry/bootup.py

@@ -1,59 +1,40 @@
 import os
-import logging
 import warnings
-import concurrent.futures
-from dotenv import load_dotenv
-from loguru import logger
+from pathlib import Path
 from swarms.utils.disable_logging import disable_logging
+from loguru import logger
 
 
 def bootup():
-    """Initialize swarms environment and configuration
-
-    Handles environment setup, logging configuration, telemetry,
-    and workspace initialization.
-    """
+    """Super-fast initialization of swarms environment"""
     try:
-        # Load environment variables
-        load_dotenv()
-
-        # Configure logging
-        if (
-            os.getenv("SWARMS_VERBOSE_GLOBAL", "False").lower()
-            == "false"
-        ):
-            logger.disable("")
-            logging.disable(logging.CRITICAL)
+        # Cache env vars
+        verbose = os.getenv("SWARMS_VERBOSE_GLOBAL", "False").lower()
+        workspace_path = Path.cwd() / "agent_workspace"
 
+        # Configure logging early
+        if verbose == "false":
+            logger.disable("CRITICAL")
         else:
             logger.enable("")
 
-        # Silent wandb
+        # Silence wandb
         os.environ["WANDB_SILENT"] = "true"
 
-        # Configure workspace
-        workspace_dir = os.path.join(os.getcwd(), "agent_workspace")
-        os.makedirs(workspace_dir, exist_ok=True)
-        os.environ["WORKSPACE_DIR"] = workspace_dir
+        # Setup workspace dir only if needed
+        if not workspace_path.exists():
+            workspace_path.mkdir(parents=True, exist_ok=True)
+        os.environ["WORKSPACE_DIR"] = str(workspace_path)
 
-        # Suppress warnings
+        # Suppress deprecation warnings
         warnings.filterwarnings("ignore", category=DeprecationWarning)
 
-        # Run telemetry functions concurrently
+        # Run lightweight telemetry
         try:
-            with concurrent.futures.ThreadPoolExecutor(
-                max_workers=2
-            ) as executor:
-
-                future_disable_logging = executor.submit(
-                    disable_logging
-                )
-
-                # Wait for completion and check for exceptions
-                future_disable_logging.result()
+            disable_logging()
         except Exception as e:
-            logger.error(f"Error running telemetry functions: {e}")
+            logger.error(f"Telemetry error: {e}")
 
     except Exception as e:
-        logger.error(f"Error during bootup: {str(e)}")
+        logger.error(f"Bootup error: {str(e)}")
         raise
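
The rewritten bootup() drops dotenv loading and the thread pool, and is driven by two pieces of environment state: SWARMS_VERBOSE_GLOBAL gates loguru output, and WORKSPACE_DIR is pointed at ./agent_workspace, created only if missing. A small sketch of how that plays out, assuming bootup() is invoked explicitly rather than via package import:

# Sketch of the environment-driven behavior of the 7.6.4 bootup() shown above.
import os

# Any value other than "false" (case-insensitive) keeps loguru output enabled.
os.environ["SWARMS_VERBOSE_GLOBAL"] = "True"

from swarms.telemetry.bootup import bootup

bootup()

# bootup() sets WORKSPACE_DIR to <cwd>/agent_workspace, creating the directory on first use.
print(os.environ["WORKSPACE_DIR"])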