swarms 7.7.7.tar.gz → 7.7.9.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {swarms-7.7.7 → swarms-7.7.9}/PKG-INFO +4 -3
- {swarms-7.7.7 → swarms-7.7.9}/pyproject.toml +1 -8
- {swarms-7.7.7 → swarms-7.7.9}/swarms/__init__.py +0 -1
- swarms-7.7.9/swarms/agents/cort_agent.py +206 -0
- swarms-7.7.9/swarms/agents/react_agent.py +173 -0
- swarms-7.7.9/swarms/communication/base_communication.py +290 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/communication/duckdb_wrap.py +369 -72
- swarms-7.7.9/swarms/communication/pulsar_struct.py +691 -0
- swarms-7.7.9/swarms/communication/redis_wrap.py +1362 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/communication/sqlite_wrap.py +547 -44
- swarms-7.7.9/swarms/prompts/safety_prompt.py +50 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/agent.py +13 -8
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/concurrent_workflow.py +56 -242
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/conversation.py +228 -38
- swarms-7.7.9/swarms/structs/council_judge.py +456 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/deep_research_swarm.py +19 -22
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/malt.py +30 -28
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/multi_model_gpu_manager.py +1 -1
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/output_types.py +1 -1
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/swarm_router.py +2 -2
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/mcp_client.py +1 -1
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/py_func_to_openai_func_str.py +2 -2
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/history_output_formatter.py +5 -5
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/try_except_wrapper.py +2 -2
- swarms-7.7.9/swarms/utils/xml_utils.py +42 -0
- swarms-7.7.7/swarms/client/__init__.py +0 -15
- swarms-7.7.7/swarms/client/main.py +0 -407
- {swarms-7.7.7 → swarms-7.7.9}/LICENSE +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/README.md +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/agent_judge.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/agent_print.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/ape_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/auto_generate_swarm_config.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/consistency_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/create_agents_from_yaml.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/flexion_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/gkp_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/i_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/openai_assistant.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/reasoning_agents.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/reasoning_duo.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/agents/tool_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/artifacts/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/artifacts/main_artifact.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/cli/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/cli/create_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/cli/main.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/cli/onboarding_process.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/communication/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/accountant_swarm_prompts.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/ag_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/aga.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/agent_judge_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/agent_prompts.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/agent_system_prompts.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/ai_research_team.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/aot_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/autobloggen.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/autoswarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/chat_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/code_interpreter.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/code_spawner.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/debate.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/documentation.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/education.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/finance_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/finance_agent_sys_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/growth_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/idea2img.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/legal_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/logistics.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/max_loop_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/meta_system_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/multi_agent_collab_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/multi_modal_autonomous_instruction_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/multi_modal_prompts.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/multi_modal_visual_prompts.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/operations_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/paper_idea_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/personal_stylist.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/product_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/programming.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/project_manager.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/prompt_generator.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/prompt_generator_optimizer.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/python.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/react.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/react_base_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/reasoning_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/refiner_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/sales.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/sales_prompts.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/security_team.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/self_operating_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/sop_generator_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/summaries_prompts.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/support_agent_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/swarm_manager_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/task_assignment_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/tests.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/tools.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/urban_planning.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/visual_cot.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/worker_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/prompts/xray_swarm_prompt.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/schemas/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/schemas/agent_step_schemas.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/schemas/base_schemas.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/agent_builder.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/agent_registry.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/agent_roles.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/agent_router.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/aop.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/auto_swarm_builder.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/base_structure.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/base_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/base_workflow.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/concat.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/csv_to_agent.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/de_hallucination_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/dynamic_conversational_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/graph_workflow.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/groupchat.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/hiearchical_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/hybrid_hiearchical_peer_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/ma_utils.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/majority_voting.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/matrix_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/meme_agent_persona_generator.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/mixture_of_agents.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/model_router.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/multi_agent_collab.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/multi_agent_exec.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/multi_agent_router.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/omni_agent_types.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/rearrange.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/round_robin.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/safe_loading.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/sequential_workflow.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/spreadsheet_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/stopping_conditions.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/swarm_arange.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/swarm_eval.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/swarm_id_generator.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/swarm_matcher.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/swarm_registry.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/swarming_architectures.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/tree_swarm.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/utils.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/structs/various_alt_swarms.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/telemetry/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/telemetry/bootup.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/telemetry/main.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/base_tool.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/cohere_func_call_schema.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/func_calling_utils.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/func_to_str.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/function_util.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/json_former.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/json_utils.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/logits_processor.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/mcp_integration.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/openai_func_calling_schema_pydantic.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/openai_tool_creator_decorator.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/pydantic_to_json.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/tool_parse_exec.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/tool_registry.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/tool_schema_base_model.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/tools/tool_utils.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/__init__.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/any_to_str.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/auto_download_check_packages.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/calculate_func_metrics.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/data_to_text.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/disable_logging.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/file_processing.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/formatter.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/function_caller_model.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/generate_keys.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/litellm_tokenizer.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/litellm_wrapper.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/loguru_logger.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/markdown_message.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/parse_code.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/pdf_to_text.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/str_to_dict.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/visualizer.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/vllm_wrapper.py +0 -0
- {swarms-7.7.7 → swarms-7.7.9}/swarms/utils/wrapper_clusterop.py +0 -0

{swarms-7.7.7 → swarms-7.7.9}/PKG-INFO

@@ -1,8 +1,7 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
 Name: swarms
-Version: 7.7.7
+Version: 7.7.9
 Summary: Swarms - TGSC
-Home-page: https://github.com/kyegomez/swarms
 License: MIT
 Keywords: artificial intelligence,deep learning,optimizers,Prompt Engineering,swarms,agents,llms,transformers,multi-agent,swarms of agents,Enterprise-Grade Agents,Production-Grade Agents,Agents,Multi-Grade-Agents,Swarms,Transformers,LLMs,Prompt Engineering,Agents,Generative Agents,Generative AI,Agent Marketplace,Agent Store,quant,finance,algorithmic trading,portfolio optimization,risk management,financial modeling,machine learning for finance,natural language processing for finance
 Author: Kye Gomez
@@ -15,6 +14,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Requires-Dist: PyYAML
 Requires-Dist: aiofiles
@@ -38,6 +38,7 @@ Requires-Dist: tenacity
 Requires-Dist: toml
 Requires-Dist: torch
 Project-URL: Documentation, https://docs.swarms.world
+Project-URL: Homepage, https://github.com/kyegomez/swarms
 Project-URL: Repository, https://github.com/kyegomez/swarms
 Description-Content-Type: text/markdown

{swarms-7.7.7 → swarms-7.7.9}/pyproject.toml

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "7.7.7"
+version = "7.7.9"
 description = "Swarms - TGSC"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -119,10 +119,3 @@ exclude = '''
 )/
 '''
 
-
-
-[tool.maturin]
-module-name = "swarms_rust"
-
-[tool.maturin.build]
-features = ["extension-module"]

swarms-7.7.9/swarms/agents/cort_agent.py (new file)

@@ -0,0 +1,206 @@
+# AI generate initial response
+# AI decides how many "thinking rounds" it needs
+# For each round:
+# Generates 3 alternative responses
+# Evaluates all responses
+# Picks the best one
+# Final response is the survivor of this AI battle royale
+from swarms import Agent
+
+
+# OpenAI function schema for determining thinking rounds
+thinking_rounds_schema = {
+    "name": "determine_thinking_rounds",
+    "description": "Determines the optimal number of thinking rounds needed for a task",
+    "parameters": {
+        "type": "object",
+        "properties": {
+            "num_rounds": {
+                "type": "integer",
+                "description": "The number of thinking rounds needed (1-5)",
+                "minimum": 1,
+                "maximum": 5,
+            }
+        },
+        "required": ["num_rounds"],
+    },
+}
+
+# System prompt for determining thinking rounds
+THINKING_ROUNDS_PROMPT = """You are an expert at determining the optimal number of thinking rounds needed for complex tasks. Your role is to analyze the task and determine how many rounds of thinking and evaluation would be most beneficial.
+
+Consider the following factors when determining the number of rounds:
+1. Task Complexity: More complex tasks may require more rounds
+2. Potential for Multiple Valid Approaches: Tasks with multiple valid solutions need more rounds
+3. Risk of Error: Higher-stakes tasks may benefit from more rounds
+4. Time Sensitivity: Balance thoroughness with efficiency
+
+Guidelines for number of rounds:
+- 1 round: Simple, straightforward tasks with clear solutions
+- 2-3 rounds: Moderately complex tasks with some ambiguity
+- 4-5 rounds: Highly complex tasks with multiple valid approaches or high-stakes decisions
+
+Your response should be a single number between 1 and 5, representing the optimal number of thinking rounds needed."""
+
+# Schema for generating alternative responses
+alternative_responses_schema = {
+    "name": "generate_alternatives",
+    "description": "Generates multiple alternative responses to a task",
+    "parameters": {
+        "type": "object",
+        "properties": {
+            "alternatives": {
+                "type": "array",
+                "description": "List of alternative responses",
+                "items": {
+                    "type": "object",
+                    "properties": {
+                        "response": {
+                            "type": "string",
+                            "description": "The alternative response",
+                        },
+                        "reasoning": {
+                            "type": "string",
+                            "description": "Explanation of why this approach was chosen",
+                        },
+                    },
+                    "required": ["response", "reasoning"],
+                },
+                "minItems": 3,
+                "maxItems": 3,
+            }
+        },
+        "required": ["alternatives"],
+    },
+}
+
+# Schema for evaluating responses
+evaluation_schema = {
+    "name": "evaluate_responses",
+    "description": "Evaluates and ranks alternative responses",
+    "parameters": {
+        "type": "object",
+        "properties": {
+            "evaluation": {
+                "type": "object",
+                "properties": {
+                    "best_response": {
+                        "type": "string",
+                        "description": "The selected best response",
+                    },
+                    "ranking": {
+                        "type": "array",
+                        "description": "Ranked list of responses from best to worst",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "response": {
+                                    "type": "string",
+                                    "description": "The response",
+                                },
+                                "score": {
+                                    "type": "number",
+                                    "description": "Score from 0-100",
+                                },
+                                "reasoning": {
+                                    "type": "string",
+                                    "description": "Explanation of the score",
+                                },
+                            },
+                            "required": [
+                                "response",
+                                "score",
+                                "reasoning",
+                            ],
+                        },
+                    },
+                },
+                "required": ["best_response", "ranking"],
+            }
+        },
+        "required": ["evaluation"],
+    },
+}
+
+# System prompt for generating alternatives
+ALTERNATIVES_PROMPT = """You are an expert at generating diverse and creative alternative responses to tasks. Your role is to generate 3 distinct approaches to solving the given task.
+
+For each alternative:
+1. Consider a different perspective or approach
+2. Provide clear reasoning for why this approach might be effective
+3. Ensure alternatives are meaningfully different from each other
+4. Maintain high quality and relevance to the task
+
+Your response should include 3 alternatives, each with its own reasoning."""
+
+# System prompt for evaluation
+EVALUATION_PROMPT = """You are an expert at evaluating and comparing different responses to tasks. Your role is to critically analyze each response and determine which is the most effective.
+
+Consider the following criteria when evaluating:
+1. Relevance to the task
+2. Completeness of the solution
+3. Creativity and innovation
+4. Practicality and feasibility
+5. Clarity and coherence
+
+Your response should include:
+1. The best response selected
+2. A ranked list of all responses with scores and reasoning"""
+
+
+class CortAgent:
+    def __init__(
+        self,
+        alternative_responses: int = 3,
+    ):
+        self.thinking_rounds = Agent(
+            agent_name="CortAgent",
+            agent_description="CortAgent is a multi-step agent that uses a battle royale approach to determine the best response to a task.",
+            model_name="gpt-4o-mini",
+            max_loops=1,
+            dynamic_temperature_enabled=True,
+            tools_list_dictionary=thinking_rounds_schema,
+            system_prompt=THINKING_ROUNDS_PROMPT,
+        )
+
+        self.alternatives_agent = Agent(
+            agent_name="CortAgentAlternatives",
+            agent_description="Generates multiple alternative responses to a task",
+            model_name="gpt-4o-mini",
+            max_loops=1,
+            dynamic_temperature_enabled=True,
+            tools_list_dictionary=alternative_responses_schema,
+            system_prompt=ALTERNATIVES_PROMPT,
+        )
+
+        self.evaluation_agent = Agent(
+            agent_name="CortAgentEvaluation",
+            agent_description="Evaluates and ranks alternative responses",
+            model_name="gpt-4o-mini",
+            max_loops=1,
+            dynamic_temperature_enabled=True,
+            tools_list_dictionary=evaluation_schema,
+            system_prompt=EVALUATION_PROMPT,
+        )
+
+    def run(self, task: str):
+        # First determine number of thinking rounds
+        num_rounds = self.thinking_rounds.run(task)
+
+        # Initialize with the task
+        current_task = task
+        best_response = None
+
+        # Run the battle royale for the determined number of rounds
+        for round_num in range(num_rounds):
+            # Generate alternatives
+            alternatives = self.alternatives_agent.run(current_task)
+
+            # Evaluate alternatives
+            evaluation = self.evaluation_agent.run(alternatives)
+
+            # Update best response and current task for next round
+            best_response = evaluation["evaluation"]["best_response"]
+            current_task = f"Previous best response: {best_response}\nOriginal task: {task}"
+
+        return best_response
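
For orientation, here is a minimal usage sketch of the new CortAgent. It is not part of the diff: the import path follows the file location above, the task string is invented, and it assumes OpenAI-compatible credentials are configured for the gpt-4o-mini agents. Note that run() treats the sub-agent outputs as already-parsed values (an integer round count and an evaluation dict), so in practice the raw responses may need additional parsing.

# Hedged sketch, not from the package: exercises CortAgent as added in
# swarms-7.7.9/swarms/agents/cort_agent.py. Assumes an OpenAI API key is set;
# the task below is a made-up example.
from swarms.agents.cort_agent import CortAgent

agent = CortAgent(alternative_responses=3)
best = agent.run(
    "Propose a migration plan for moving a monolith to microservices."
)
print(best)
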
swarms-7.7.9/swarms/agents/react_agent.py (new file)

@@ -0,0 +1,173 @@
+from swarms import Agent
+from typing import List
+
+
+# System prompt for REACT agent
+REACT_AGENT_PROMPT = """
+You are a REACT (Reason, Act, Observe) agent designed to solve tasks through an iterative process of reasoning and action. You maintain memory of previous steps to build upon past actions and observations.
+
+Your process follows these key components:
+
+1. MEMORY: Review and utilize previous steps
+   - Access and analyze previous observations
+   - Build upon past thoughts and plans
+   - Learn from previous actions
+   - Use historical context to make better decisions
+
+2. OBSERVE: Analyze current state
+   - Consider both new information and memory
+   - Identify relevant patterns from past steps
+   - Note any changes or progress made
+   - Evaluate success of previous actions
+
+3. THINK: Process and reason
+   - Combine new observations with historical knowledge
+   - Consider how past steps influence current decisions
+   - Identify patterns and learning opportunities
+   - Plan improvements based on previous outcomes
+
+4. PLAN: Develop next steps
+   - Create strategies that build on previous success
+   - Avoid repeating unsuccessful approaches
+   - Consider long-term goals and progress
+   - Maintain consistency with previous actions
+
+5. ACT: Execute with context
+   - Implement actions that progress from previous steps
+   - Build upon successful past actions
+   - Adapt based on learned experiences
+   - Maintain continuity in approach
+
+For each step, you should:
+- Reference relevant previous steps
+- Show how current decisions relate to past actions
+- Demonstrate learning and adaptation
+- Maintain coherent progression toward the goal
+
+Your responses should be structured, logical, and show clear reasoning that builds upon previous steps."""
+
+# Schema for REACT agent responses
+react_agent_schema = {
+    "type": "function",
+    "function": {
+        "name": "generate_react_response",
+        "description": "Generates a structured REACT agent response with memory of previous steps",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "memory_reflection": {
+                    "type": "string",
+                    "description": "Analysis of previous steps and their influence on current thinking",
+                },
+                "observation": {
+                    "type": "string",
+                    "description": "Current state observation incorporating both new information and historical context",
+                },
+                "thought": {
+                    "type": "string",
+                    "description": "Reasoning that builds upon previous steps and current observation",
+                },
+                "plan": {
+                    "type": "string",
+                    "description": "Structured plan that shows progression from previous actions",
+                },
+                "action": {
+                    "type": "string",
+                    "description": "Specific action that builds upon previous steps and advances toward the goal",
+                },
+            },
+            "required": [
+                "memory_reflection",
+                "observation",
+                "thought",
+                "plan",
+                "action",
+            ],
+        },
+    },
+}
+
+
+class ReactAgent:
+    def __init__(
+        self,
+        name: str = "react-agent-o1",
+        description: str = "A react agent that uses o1 preview to solve tasks",
+        model_name: str = "openai/gpt-4o",
+        max_loops: int = 1,
+    ):
+        self.name = name
+        self.description = description
+        self.model_name = model_name
+        self.max_loops = max_loops
+
+        self.agent = Agent(
+            agent_name=self.name,
+            agent_description=self.description,
+            model_name=self.model_name,
+            max_loops=1,
+            tools_list_dictionary=[react_agent_schema],
+            output_type="final",
+        )
+
+        # Initialize memory for storing steps
+        self.memory: List[str] = []
+
+    def step(self, task: str) -> str:
+        """Execute a single step of the REACT process.
+
+        Args:
+            task: The task description or current state
+
+        Returns:
+            String response from the agent
+        """
+        response = self.agent.run(task)
+        print(response)
+        return response
+
+    def run(self, task: str, *args, **kwargs) -> List[str]:
+        """Run the REACT agent for multiple steps with memory.
+
+        Args:
+            task: The initial task description
+            *args: Additional positional arguments
+            **kwargs: Additional keyword arguments
+
+        Returns:
+            List of all steps taken as strings
+        """
+        # Reset memory at the start of a new run
+        self.memory = []
+
+        current_task = task
+        for i in range(self.max_loops):
+            print(f"\nExecuting step {i+1}/{self.max_loops}")
+            step_result = self.step(current_task)
+            print(step_result)
+
+            # Store step in memory
+            self.memory.append(step_result)
+
+            # Update task with previous response and memory context
+            memory_context = (
+                "\n\nMemory of previous steps:\n"
+                + "\n".join(
+                    f"Step {j+1}:\n{step}"
+                    for j, step in enumerate(self.memory)
+                )
+            )
+
+            current_task = f"Previous response:\n{step_result}\n{memory_context}\n\nContinue with the original task: {task}"
+
+        return self.memory
+
+
+# if __name__ == "__main__":
+#     agent = ReactAgent(
+#         max_loops=1
+#     )  # Increased max_loops to see the iteration
+#     result = agent.run(
+#         "Write a short story about a robot that can fly."
+#     )
+#     print(result)
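
Similarly, a minimal usage sketch for the new ReactAgent, based on the constructor and run() signature shown above. It is not part of the diff: the task is invented and valid credentials for the configured "openai/gpt-4o" model are assumed. run() returns the list of per-step responses accumulated in memory.

# Hedged sketch, not from the package: exercises ReactAgent as added in
# swarms-7.7.9/swarms/agents/react_agent.py. Assumes an OpenAI API key is set;
# the task below is a made-up example.
from swarms.agents.react_agent import ReactAgent

agent = ReactAgent(max_loops=2)  # two reason/act iterations with memory
steps = agent.run("Outline a plan for adding integration tests to a legacy service.")
for n, step in enumerate(steps, start=1):
    print(f"--- Step {n} ---\n{step}")
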