camel-ai 0.2.59__py3-none-any.whl → 0.2.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +3 -3
- camel/agents/__init__.py +2 -2
- camel/agents/_types.py +9 -4
- camel/agents/_utils.py +40 -2
- camel/agents/base.py +2 -2
- camel/agents/chat_agent.py +5012 -902
- camel/agents/critic_agent.py +2 -2
- camel/agents/deductive_reasoner_agent.py +56 -56
- camel/agents/embodied_agent.py +2 -2
- camel/agents/knowledge_graph_agent.py +20 -20
- camel/agents/mcp_agent.py +39 -36
- camel/agents/multi_hop_generator_agent.py +3 -3
- camel/agents/programmed_agent_instruction.py +2 -2
- camel/agents/repo_agent.py +4 -3
- camel/agents/role_assignment_agent.py +2 -2
- camel/agents/search_agent.py +2 -2
- camel/agents/task_agent.py +2 -2
- camel/agents/tool_agents/__init__.py +2 -2
- camel/agents/tool_agents/base.py +2 -2
- camel/agents/tool_agents/hugging_face_tool_agent.py +3 -3
- camel/benchmarks/__init__.py +2 -2
- camel/benchmarks/apibank.py +5 -5
- camel/benchmarks/apibench.py +2 -2
- camel/benchmarks/base.py +2 -2
- camel/benchmarks/browsecomp.py +44 -33
- camel/benchmarks/gaia.py +17 -13
- camel/benchmarks/mock_website/README.md +94 -0
- camel/benchmarks/mock_website/mock_web.py +299 -0
- camel/benchmarks/mock_website/requirements.txt +3 -0
- camel/benchmarks/mock_website/shopping_mall/app.py +465 -0
- camel/benchmarks/mock_website/task.json +104 -0
- camel/benchmarks/nexus.py +3 -3
- camel/benchmarks/ragbench.py +2 -2
- camel/bots/__init__.py +2 -2
- camel/bots/discord/__init__.py +2 -2
- camel/bots/discord/discord_app.py +2 -2
- camel/bots/discord/discord_installation.py +2 -2
- camel/bots/discord/discord_store.py +3 -3
- camel/bots/slack/__init__.py +2 -2
- camel/bots/slack/models.py +4 -4
- camel/bots/slack/slack_app.py +2 -2
- camel/bots/telegram_bot.py +2 -2
- camel/configs/__init__.py +26 -2
- camel/configs/aihubmix_config.py +90 -0
- camel/configs/aiml_config.py +2 -2
- camel/configs/amd_config.py +70 -0
- camel/configs/anthropic_config.py +8 -7
- camel/configs/base_config.py +2 -2
- camel/configs/bedrock_config.py +5 -3
- camel/configs/cerebras_config.py +98 -0
- camel/configs/cohere_config.py +3 -3
- camel/configs/cometapi_config.py +106 -0
- camel/configs/crynux_config.py +94 -0
- camel/configs/deepseek_config.py +9 -8
- camel/configs/gemini_config.py +6 -4
- camel/configs/groq_config.py +6 -4
- camel/configs/internlm_config.py +6 -4
- camel/configs/litellm_config.py +2 -2
- camel/configs/lmstudio_config.py +6 -4
- camel/configs/minimax_config.py +95 -0
- camel/configs/mistral_config.py +3 -3
- camel/configs/modelscope_config.py +5 -3
- camel/configs/moonshot_config.py +2 -2
- camel/configs/nebius_config.py +105 -0
- camel/configs/netmind_config.py +2 -2
- camel/configs/novita_config.py +2 -2
- camel/configs/nvidia_config.py +2 -2
- camel/configs/ollama_config.py +2 -2
- camel/configs/openai_config.py +8 -3
- camel/configs/openrouter_config.py +6 -4
- camel/configs/ppio_config.py +2 -2
- camel/configs/qianfan_config.py +85 -0
- camel/configs/qwen_config.py +2 -2
- camel/configs/reka_config.py +3 -3
- camel/configs/samba_config.py +8 -6
- camel/configs/sglang_config.py +2 -2
- camel/configs/siliconflow_config.py +2 -2
- camel/configs/togetherai_config.py +2 -2
- camel/configs/vllm_config.py +4 -2
- camel/configs/watsonx_config.py +2 -2
- camel/configs/yi_config.py +6 -4
- camel/configs/zhipuai_config.py +6 -4
- camel/{data_collector → data_collectors}/__init__.py +2 -2
- camel/{data_collector → data_collectors}/alpaca_collector.py +19 -10
- camel/{data_collector → data_collectors}/base.py +2 -2
- camel/{data_collector → data_collectors}/sharegpt_collector.py +3 -3
- camel/datagen/__init__.py +2 -2
- camel/datagen/cot_datagen.py +32 -37
- camel/datagen/evol_instruct/__init__.py +2 -2
- camel/datagen/evol_instruct/evol_instruct.py +2 -2
- camel/datagen/evol_instruct/scorer.py +24 -25
- camel/datagen/evol_instruct/templates.py +48 -48
- camel/datagen/self_improving_cot.py +5 -5
- camel/datagen/self_instruct/__init__.py +2 -2
- camel/datagen/self_instruct/filter/__init__.py +2 -2
- camel/datagen/self_instruct/filter/filter_function.py +2 -2
- camel/datagen/self_instruct/filter/filter_registry.py +2 -2
- camel/datagen/self_instruct/filter/instruction_filter.py +2 -2
- camel/datagen/self_instruct/self_instruct.py +2 -2
- camel/datagen/self_instruct/templates.py +47 -47
- camel/datagen/source2synth/__init__.py +2 -2
- camel/datagen/source2synth/data_processor.py +2 -2
- camel/datagen/source2synth/models.py +2 -2
- camel/datagen/source2synth/user_data_processor_config.py +2 -2
- camel/datahubs/__init__.py +2 -2
- camel/datahubs/base.py +2 -2
- camel/datahubs/huggingface.py +2 -2
- camel/datahubs/models.py +2 -2
- camel/datasets/__init__.py +2 -2
- camel/datasets/base_generator.py +41 -12
- camel/datasets/few_shot_generator.py +18 -18
- camel/datasets/models.py +3 -3
- camel/datasets/self_instruct_generator.py +2 -2
- camel/datasets/static_dataset.py +152 -2
- camel/embeddings/__init__.py +2 -2
- camel/embeddings/azure_embedding.py +2 -2
- camel/embeddings/base.py +2 -2
- camel/embeddings/gemini_embedding.py +2 -2
- camel/embeddings/jina_embedding.py +10 -3
- camel/embeddings/mistral_embedding.py +2 -2
- camel/embeddings/openai_compatible_embedding.py +2 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/embeddings/sentence_transformers_embeddings.py +4 -4
- camel/embeddings/together_embedding.py +2 -2
- camel/embeddings/vlm_embedding.py +11 -4
- camel/environments/__init__.py +14 -2
- camel/environments/models.py +2 -2
- camel/environments/multi_step.py +2 -2
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +30 -5
- camel/environments/tic_tac_toe.py +3 -3
- camel/extractors/__init__.py +2 -2
- camel/extractors/base.py +2 -2
- camel/extractors/python_strategies.py +2 -2
- camel/generators.py +2 -2
- camel/human.py +2 -2
- camel/interpreters/__init__.py +4 -2
- camel/interpreters/base.py +16 -3
- camel/interpreters/docker/Dockerfile +53 -7
- camel/interpreters/docker_interpreter.py +70 -11
- camel/interpreters/e2b_interpreter.py +59 -11
- camel/interpreters/internal_python_interpreter.py +81 -4
- camel/interpreters/interpreter_error.py +2 -2
- camel/interpreters/ipython_interpreter.py +23 -5
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/interpreters/subprocess_interpreter.py +36 -4
- camel/loaders/__init__.py +17 -5
- camel/loaders/apify_reader.py +2 -2
- camel/loaders/base_io.py +2 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +128 -93
- camel/loaders/crawl4ai_reader.py +2 -2
- camel/loaders/firecrawl_reader.py +6 -6
- camel/loaders/jina_url_reader.py +2 -2
- camel/loaders/markitdown.py +2 -2
- camel/loaders/mineru_extractor.py +2 -2
- camel/loaders/mistral_reader.py +148 -0
- camel/loaders/scrapegraph_reader.py +2 -2
- camel/loaders/unstructured_io.py +2 -2
- camel/logger.py +5 -5
- camel/memories/__init__.py +2 -2
- camel/memories/agent_memories.py +86 -3
- camel/memories/base.py +36 -2
- camel/memories/blocks/__init__.py +2 -2
- camel/memories/blocks/chat_history_block.py +126 -9
- camel/memories/blocks/vectordb_block.py +10 -3
- camel/memories/context_creators/__init__.py +2 -2
- camel/memories/context_creators/score_based.py +31 -239
- camel/memories/records.py +98 -13
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +193 -46
- camel/messages/conversion/__init__.py +2 -2
- camel/messages/conversion/alpaca.py +2 -2
- camel/messages/conversion/conversation_models.py +2 -2
- camel/messages/conversion/sharegpt/__init__.py +2 -2
- camel/messages/conversion/sharegpt/function_call_formatter.py +2 -2
- camel/messages/conversion/sharegpt/hermes/__init__.py +2 -2
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +2 -2
- camel/messages/func_message.py +54 -17
- camel/models/__init__.py +18 -2
- camel/models/_utils.py +3 -3
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +11 -18
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +127 -20
- camel/models/aws_bedrock_model.py +12 -35
- camel/models/azure_openai_model.py +263 -63
- camel/models/base_audio_model.py +5 -3
- camel/models/base_model.py +195 -26
- camel/models/cerebras_model.py +83 -0
- camel/models/cohere_model.py +81 -21
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +87 -0
- camel/models/deepseek_model.py +61 -59
- camel/models/fish_audio_model.py +8 -2
- camel/models/gemini_model.py +439 -30
- camel/models/groq_model.py +11 -19
- camel/models/internlm_model.py +11 -18
- camel/models/litellm_model.py +94 -34
- camel/models/lmstudio_model.py +17 -20
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +84 -19
- camel/models/model_factory.py +49 -6
- camel/models/model_manager.py +33 -11
- camel/models/modelscope_model.py +13 -193
- camel/models/moonshot_model.py +195 -21
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +19 -9
- camel/models/netmind_model.py +11 -18
- camel/models/novita_model.py +11 -18
- camel/models/nvidia_model.py +11 -18
- camel/models/ollama_model.py +14 -21
- camel/models/openai_audio_models.py +2 -2
- camel/models/openai_compatible_model.py +234 -27
- camel/models/openai_model.py +255 -39
- camel/models/openrouter_model.py +11 -19
- camel/models/ppio_model.py +11 -18
- camel/models/qianfan_model.py +89 -0
- camel/models/qwen_model.py +13 -193
- camel/models/reka_model.py +90 -21
- camel/models/reward/__init__.py +2 -2
- camel/models/reward/base_reward_model.py +2 -2
- camel/models/reward/evaluator.py +2 -2
- camel/models/reward/nemotron_model.py +2 -2
- camel/models/reward/skywork_model.py +2 -2
- camel/models/samba_model.py +117 -49
- camel/models/sglang_model.py +162 -42
- camel/models/siliconflow_model.py +12 -35
- camel/models/stub_model.py +10 -7
- camel/models/togetherai_model.py +11 -18
- camel/models/vllm_model.py +10 -18
- camel/models/volcano_model.py +16 -20
- camel/models/watsonx_model.py +69 -19
- camel/models/yi_model.py +11 -18
- camel/models/zhipuai_model.py +70 -18
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/personas/__init__.py +2 -2
- camel/personas/persona.py +2 -2
- camel/personas/persona_hub.py +2 -2
- camel/prompts/__init__.py +2 -2
- camel/prompts/ai_society.py +2 -2
- camel/prompts/base.py +2 -2
- camel/prompts/code.py +2 -2
- camel/prompts/evaluation.py +2 -2
- camel/prompts/generate_text_embedding_data.py +2 -2
- camel/prompts/image_craft.py +2 -2
- camel/prompts/misalignment.py +2 -2
- camel/prompts/multi_condition_image_craft.py +2 -2
- camel/prompts/object_recognition.py +2 -2
- camel/prompts/persona_hub.py +3 -3
- camel/prompts/prompt_templates.py +2 -2
- camel/prompts/role_description_prompt_template.py +2 -2
- camel/prompts/solution_extraction.py +8 -8
- camel/prompts/task_prompt_template.py +2 -2
- camel/prompts/translation.py +2 -2
- camel/prompts/video_description_prompt.py +3 -3
- camel/responses/__init__.py +2 -2
- camel/responses/agent_responses.py +2 -2
- camel/retrievers/__init__.py +2 -2
- camel/retrievers/auto_retriever.py +23 -3
- camel/retrievers/base.py +2 -2
- camel/retrievers/bm25_retriever.py +3 -4
- camel/retrievers/cohere_rerank_retriever.py +2 -2
- camel/retrievers/hybrid_retrival.py +4 -4
- camel/retrievers/vector_retriever.py +2 -2
- camel/runtimes/Dockerfile.multi-toolkit +90 -0
- camel/{runtime → runtimes}/__init__.py +2 -2
- camel/runtimes/api.py +153 -0
- camel/{runtime → runtimes}/base.py +2 -2
- camel/{runtime → runtimes}/configs.py +13 -13
- camel/{runtime → runtimes}/daytona_runtime.py +18 -19
- camel/{runtime → runtimes}/docker_runtime.py +13 -13
- camel/{runtime → runtimes}/llm_guard_runtime.py +28 -28
- camel/{runtime → runtimes}/remote_http_runtime.py +12 -12
- camel/{runtime → runtimes}/ubuntu_docker_runtime.py +3 -3
- camel/{runtime → runtimes}/utils/__init__.py +2 -2
- camel/{runtime → runtimes}/utils/function_risk_toolkit.py +2 -2
- camel/{runtime → runtimes}/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/__init__.py +2 -2
- camel/schemas/base.py +2 -2
- camel/schemas/openai_converter.py +3 -3
- camel/schemas/outlines_converter.py +2 -2
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +4 -2
- camel/societies/babyagi_playing.py +2 -2
- camel/societies/role_playing.py +201 -80
- camel/societies/workforce/__init__.py +10 -3
- camel/societies/workforce/base.py +9 -5
- camel/societies/workforce/events.py +143 -0
- camel/societies/workforce/prompts.py +258 -33
- camel/societies/workforce/role_playing_worker.py +95 -30
- camel/societies/workforce/single_agent_worker.py +659 -30
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +182 -38
- camel/societies/workforce/utils.py +784 -18
- camel/societies/workforce/worker.py +96 -28
- camel/societies/workforce/workflow_memory_manager.py +1746 -0
- camel/societies/workforce/workforce.py +5730 -366
- camel/societies/workforce/workforce_callback.py +103 -0
- camel/societies/workforce/workforce_logger.py +647 -0
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +10 -2
- camel/storages/graph_storages/__init__.py +2 -2
- camel/storages/graph_storages/base.py +2 -2
- camel/storages/graph_storages/graph_element.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +4 -4
- camel/storages/graph_storages/neo4j_graph.py +7 -7
- camel/storages/key_value_storages/__init__.py +2 -2
- camel/storages/key_value_storages/base.py +2 -2
- camel/storages/key_value_storages/in_memory.py +2 -2
- camel/storages/key_value_storages/json.py +17 -4
- camel/storages/key_value_storages/mem0_cloud.py +50 -49
- camel/storages/key_value_storages/redis.py +2 -2
- camel/storages/object_storages/__init__.py +2 -2
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/base.py +2 -2
- camel/storages/object_storages/google_cloud.py +3 -3
- camel/storages/vectordb_storages/__init__.py +12 -2
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/faiss.py +712 -0
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/oceanbase.py +16 -17
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +6 -6
- camel/storages/vectordb_storages/surreal.py +372 -0
- camel/storages/vectordb_storages/tidb.py +11 -8
- camel/storages/vectordb_storages/weaviate.py +714 -0
- camel/tasks/__init__.py +2 -2
- camel/tasks/task.py +366 -27
- camel/tasks/task_prompt.py +3 -3
- camel/terminators/__init__.py +2 -2
- camel/terminators/base.py +2 -2
- camel/terminators/response_terminator.py +2 -2
- camel/terminators/token_limit_terminator.py +2 -2
- camel/toolkits/__init__.py +58 -10
- camel/toolkits/aci_toolkit.py +66 -21
- camel/toolkits/arxiv_toolkit.py +8 -8
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/async_browser_toolkit.py +174 -575
- camel/toolkits/audio_analysis_toolkit.py +3 -3
- camel/toolkits/base.py +65 -7
- camel/toolkits/bohrium_toolkit.py +318 -0
- camel/toolkits/browser_toolkit.py +306 -566
- camel/toolkits/browser_toolkit_commons.py +568 -0
- camel/toolkits/code_execution.py +67 -11
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +12 -8
- camel/toolkits/data_commons_toolkit.py +2 -2
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/earth_science_toolkit.py +5367 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +910 -70
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +128 -20
- camel/toolkits/github_toolkit.py +148 -43
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +40 -6
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/google_maps_toolkit.py +2 -2
- camel/toolkits/google_scholar_toolkit.py +2 -2
- camel/toolkits/human_toolkit.py +36 -12
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +4589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +33 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1929 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +129 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +27 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1037 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_analysis_toolkit.py +3 -3
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +195 -79
- camel/toolkits/klavis_toolkit.py +7 -3
- camel/toolkits/linkedin_toolkit.py +2 -2
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +66 -12
- camel/toolkits/mcp_toolkit.py +841 -600
- camel/toolkits/memory_toolkit.py +7 -3
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/mineru_toolkit.py +2 -2
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/networkx_toolkit.py +2 -2
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/notion_toolkit.py +2 -2
- camel/toolkits/open_api_specs/biztoc/__init__.py +2 -2
- camel/toolkits/open_api_specs/biztoc/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/coursera/__init__.py +2 -2
- camel/toolkits/open_api_specs/create_qr_code/__init__.py +2 -2
- camel/toolkits/open_api_specs/klarna/__init__.py +2 -2
- camel/toolkits/open_api_specs/nasa_apod/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/outschool/openapi.yaml +1 -1
- camel/toolkits/open_api_specs/outschool/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/get_classes.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +2 -2
- camel/toolkits/open_api_specs/security_config.py +2 -2
- camel/toolkits/open_api_specs/speak/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +2 -2
- camel/toolkits/open_api_toolkit.py +2 -2
- camel/toolkits/openbb_toolkit.py +7 -3
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/page_script.js +86 -74
- camel/toolkits/playwright_mcp_toolkit.py +27 -32
- camel/toolkits/pptx_toolkit.py +790 -0
- camel/toolkits/pubmed_toolkit.py +2 -2
- camel/toolkits/pulse_mcp_search_toolkit.py +2 -2
- camel/toolkits/pyautogui_toolkit.py +2 -2
- camel/toolkits/reddit_toolkit.py +2 -2
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/retrieval_toolkit.py +2 -2
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +539 -146
- camel/toolkits/searxng_toolkit.py +2 -2
- camel/toolkits/semantic_scholar_toolkit.py +2 -2
- camel/toolkits/slack_toolkit.py +108 -58
- camel/toolkits/sql_toolkit.py +712 -0
- camel/toolkits/stripe_toolkit.py +2 -2
- camel/toolkits/sympy_toolkit.py +3 -3
- camel/toolkits/task_planning_toolkit.py +134 -0
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +1070 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +3 -3
- camel/toolkits/twitter_toolkit.py +8 -3
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +112 -29
- camel/toolkits/video_download_toolkit.py +22 -16
- camel/toolkits/weather_toolkit.py +2 -2
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/whatsapp_toolkit.py +2 -2
- camel/toolkits/wolfram_alpha_toolkit.py +53 -25
- camel/toolkits/zapier_toolkit.py +7 -3
- camel/types/__init__.py +4 -4
- camel/types/agents/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +6 -3
- camel/types/enums.py +454 -35
- camel/types/mcp_registries.py +2 -2
- camel/types/openai_types.py +4 -4
- camel/types/unified_model_type.py +43 -6
- camel/utils/__init__.py +20 -2
- camel/utils/async_func.py +2 -2
- camel/utils/chunker/__init__.py +2 -2
- camel/utils/chunker/base.py +2 -2
- camel/utils/chunker/code_chunker.py +2 -2
- camel/utils/chunker/uio_chunker.py +2 -2
- camel/utils/commons.py +65 -7
- camel/utils/constants.py +5 -2
- camel/utils/context_utils.py +1134 -0
- camel/utils/deduplication.py +2 -2
- camel/utils/filename.py +2 -2
- camel/utils/langfuse.py +258 -0
- camel/utils/mcp.py +140 -6
- camel/utils/mcp_client.py +1056 -0
- camel/utils/message_summarizer.py +148 -0
- camel/utils/response_format.py +2 -2
- camel/utils/token_counting.py +45 -22
- camel/utils/tool_result.py +44 -0
- camel/verifiers/__init__.py +2 -2
- camel/verifiers/base.py +2 -2
- camel/verifiers/math_verifier.py +2 -2
- camel/verifiers/models.py +2 -2
- camel/verifiers/physics_verifier.py +2 -2
- camel/verifiers/python_verifier.py +2 -2
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/METADATA +349 -108
- camel_ai-0.2.82.dist-info/RECORD +507 -0
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/WHEEL +1 -1
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/licenses/LICENSE +1 -1
- camel/loaders/pandas_reader.py +0 -368
- camel/runtime/api.py +0 -97
- camel/toolkits/dalle_toolkit.py +0 -171
- camel/toolkits/file_write_toolkit.py +0 -395
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- camel_ai-0.2.59.dist-info/RECORD +0 -410
|
@@ -0,0 +1,1746 @@
|
|
|
1
|
+
# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
|
|
15
|
+
import glob
|
|
16
|
+
import os
|
|
17
|
+
import re
|
|
18
|
+
from enum import Enum
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any, Dict, List, Optional
|
|
21
|
+
|
|
22
|
+
from camel.agents import ChatAgent
|
|
23
|
+
from camel.logger import get_logger
|
|
24
|
+
from camel.societies.workforce.structured_output_handler import (
|
|
25
|
+
StructuredOutputHandler,
|
|
26
|
+
)
|
|
27
|
+
from camel.societies.workforce.utils import (
|
|
28
|
+
WorkflowConfig,
|
|
29
|
+
WorkflowMetadata,
|
|
30
|
+
is_generic_role_name,
|
|
31
|
+
)
|
|
32
|
+
from camel.types import OpenAIBackendRole
|
|
33
|
+
from camel.utils.context_utils import ContextUtility, WorkflowSummary
|
|
34
|
+
|
|
35
|
+
logger = get_logger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class WorkflowSelectionMethod(Enum):
|
|
39
|
+
r"""Enum representing the method used to select workflows.
|
|
40
|
+
|
|
41
|
+
Attributes:
|
|
42
|
+
AGENT_SELECTED: Agent-based intelligent selection using metadata.
|
|
43
|
+
ROLE_NAME_MATCH: Pattern matching by role_name.
|
|
44
|
+
MOST_RECENT: Fallback to most recent workflows.
|
|
45
|
+
ALL_AVAILABLE: Returned all workflows (fewer than max requested).
|
|
46
|
+
NONE: No workflows available.
|
|
47
|
+
"""
|
|
48
|
+
|
|
49
|
+
AGENT_SELECTED = "agent_selected"
|
|
50
|
+
ROLE_NAME_MATCH = "role_name_match"
|
|
51
|
+
MOST_RECENT = "most_recent"
|
|
52
|
+
ALL_AVAILABLE = "all_available"
|
|
53
|
+
NONE = "none"
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class WorkflowMemoryManager:
|
|
57
|
+
r"""Manages workflow memory operations for workforce workers.
|
|
58
|
+
|
|
59
|
+
This class encapsulates all workflow memory functionality including
|
|
60
|
+
intelligent loading, saving, and selection of workflows. It separates
|
|
61
|
+
workflow management concerns from the core worker task processing logic.
|
|
62
|
+
|
|
63
|
+
Args:
|
|
64
|
+
worker (ChatAgent): The worker agent that will use workflows.
|
|
65
|
+
description (str): Description of the worker's role.
|
|
66
|
+
context_utility (Optional[ContextUtility]): Shared context utility
|
|
67
|
+
for workflow operations. If None, creates a new instance.
|
|
68
|
+
role_identifier (Optional[str]): Role identifier for organizing
|
|
69
|
+
workflows by role. If provided, workflows will be stored in
|
|
70
|
+
role-based folders. If None, uses default workforce context.
|
|
71
|
+
config (Optional[WorkflowConfig]): Configuration for workflow
|
|
72
|
+
management. If None, uses default configuration.
|
|
73
|
+
"""
|
|
74
|
+
|
|
75
|
+
def __init__(
|
|
76
|
+
self,
|
|
77
|
+
worker: ChatAgent,
|
|
78
|
+
description: str,
|
|
79
|
+
context_utility: Optional[ContextUtility] = None,
|
|
80
|
+
role_identifier: Optional[str] = None,
|
|
81
|
+
config: Optional[WorkflowConfig] = None,
|
|
82
|
+
):
|
|
83
|
+
# validate worker type at initialization
|
|
84
|
+
if not isinstance(worker, ChatAgent):
|
|
85
|
+
raise TypeError(
|
|
86
|
+
f"Worker must be a ChatAgent instance, "
|
|
87
|
+
f"got {type(worker).__name__}"
|
|
88
|
+
)
|
|
89
|
+
|
|
90
|
+
self.worker = worker
|
|
91
|
+
self.description = description
|
|
92
|
+
self._context_utility = context_utility
|
|
93
|
+
self._role_identifier = role_identifier
|
|
94
|
+
self.config = config if config is not None else WorkflowConfig()
|
|
95
|
+
|
|
96
|
+
# mapping of loaded workflow filenames to their full file paths
|
|
97
|
+
# populated when workflows are loaded, used to resolve update targets
|
|
98
|
+
self._loaded_workflow_paths: Dict[str, str] = {}
|
|
99
|
+
|
|
100
|
+
# cached loaded workflow contents for reuse in prompt preparation
|
|
101
|
+
# list of dicts with 'filename' and 'content' keys
|
|
102
|
+
self._loaded_workflow_contents: List[Dict[str, str]] = []
|
|
103
|
+
|
|
104
|
+
def _get_context_utility(self) -> ContextUtility:
|
|
105
|
+
r"""Get context utility with lazy initialization.
|
|
106
|
+
|
|
107
|
+
Uses role-based context if role_identifier is set, otherwise falls
|
|
108
|
+
back to default workforce shared context.
|
|
109
|
+
"""
|
|
110
|
+
if self._context_utility is None:
|
|
111
|
+
if self._role_identifier:
|
|
112
|
+
self._context_utility = (
|
|
113
|
+
ContextUtility.get_workforce_shared_by_role(
|
|
114
|
+
self._role_identifier
|
|
115
|
+
)
|
|
116
|
+
)
|
|
117
|
+
else:
|
|
118
|
+
self._context_utility = ContextUtility.get_workforce_shared()
|
|
119
|
+
return self._context_utility
|
|
120
|
+
|
|
121
|
+
def _extract_existing_workflow_metadata(
|
|
122
|
+
self, file_path: Path
|
|
123
|
+
) -> Optional[WorkflowMetadata]:
|
|
124
|
+
r"""Extract metadata from an existing workflow file for versioning.
|
|
125
|
+
|
|
126
|
+
This method reads the metadata section from an existing workflow
|
|
127
|
+
markdown file to retrieve version number and creation timestamp,
|
|
128
|
+
enabling proper version tracking when updating workflows.
|
|
129
|
+
|
|
130
|
+
Args:
|
|
131
|
+
file_path (Path): Path to the existing workflow file.
|
|
132
|
+
|
|
133
|
+
Returns:
|
|
134
|
+
Optional[WorkflowMetadata]: WorkflowMetadata instance if file
|
|
135
|
+
exists and metadata is successfully parsed, None otherwise.
|
|
136
|
+
"""
|
|
137
|
+
try:
|
|
138
|
+
# check if parent directory exists first
|
|
139
|
+
if not file_path.parent.exists():
|
|
140
|
+
return None
|
|
141
|
+
|
|
142
|
+
if not file_path.exists():
|
|
143
|
+
return None
|
|
144
|
+
|
|
145
|
+
with open(file_path, 'r', encoding='utf-8') as f:
|
|
146
|
+
content = f.read()
|
|
147
|
+
|
|
148
|
+
# extract metadata section
|
|
149
|
+
metadata_match = re.search(
|
|
150
|
+
r'## Metadata\s*\n(.*?)(?:\n##|$)', content, re.DOTALL
|
|
151
|
+
)
|
|
152
|
+
|
|
153
|
+
if not metadata_match:
|
|
154
|
+
return None
|
|
155
|
+
|
|
156
|
+
metadata_section = metadata_match.group(1).strip()
|
|
157
|
+
|
|
158
|
+
# parse metadata lines (format: "- key: value")
|
|
159
|
+
metadata_dict: Dict[str, Any] = {}
|
|
160
|
+
for line in metadata_section.split('\n'):
|
|
161
|
+
line = line.strip()
|
|
162
|
+
if line.startswith('-'):
|
|
163
|
+
# remove leading "- " and split on first ":"
|
|
164
|
+
line = line[1:].strip()
|
|
165
|
+
if ':' in line:
|
|
166
|
+
key, value = line.split(':', 1)
|
|
167
|
+
key = key.strip()
|
|
168
|
+
value = value.strip()
|
|
169
|
+
|
|
170
|
+
# convert workflow_version to int
|
|
171
|
+
if key == 'workflow_version':
|
|
172
|
+
try:
|
|
173
|
+
metadata_dict[key] = int(value)
|
|
174
|
+
except ValueError:
|
|
175
|
+
metadata_dict[key] = 1
|
|
176
|
+
# convert message_count to int
|
|
177
|
+
elif key == 'message_count':
|
|
178
|
+
try:
|
|
179
|
+
metadata_dict[key] = int(value)
|
|
180
|
+
except ValueError:
|
|
181
|
+
metadata_dict[key] = 0
|
|
182
|
+
else:
|
|
183
|
+
metadata_dict[key] = value
|
|
184
|
+
|
|
185
|
+
# create WorkflowMetadata instance if we have required fields
|
|
186
|
+
required_fields = {
|
|
187
|
+
'session_id',
|
|
188
|
+
'working_directory',
|
|
189
|
+
'created_at',
|
|
190
|
+
'agent_id',
|
|
191
|
+
}
|
|
192
|
+
if not required_fields.issubset(metadata_dict.keys()):
|
|
193
|
+
logger.warning(
|
|
194
|
+
f"Existing workflow missing required metadata fields: "
|
|
195
|
+
f"{file_path}"
|
|
196
|
+
)
|
|
197
|
+
return None
|
|
198
|
+
|
|
199
|
+
# ensure we have updated_at and workflow_version
|
|
200
|
+
if 'updated_at' not in metadata_dict:
|
|
201
|
+
metadata_dict['updated_at'] = metadata_dict['created_at']
|
|
202
|
+
if 'workflow_version' not in metadata_dict:
|
|
203
|
+
metadata_dict['workflow_version'] = 1
|
|
204
|
+
if 'message_count' not in metadata_dict:
|
|
205
|
+
metadata_dict['message_count'] = 0
|
|
206
|
+
|
|
207
|
+
return WorkflowMetadata(**metadata_dict)
|
|
208
|
+
|
|
209
|
+
except Exception as e:
|
|
210
|
+
logger.warning(
|
|
211
|
+
f"Error extracting workflow metadata from {file_path}: {e}"
|
|
212
|
+
)
|
|
213
|
+
return None
|
|
214
|
+
|
|
215
|
+
def _try_role_based_loading(
|
|
216
|
+
self,
|
|
217
|
+
role_name: str,
|
|
218
|
+
pattern: Optional[str],
|
|
219
|
+
max_files_to_load: int,
|
|
220
|
+
use_smart_selection: bool,
|
|
221
|
+
) -> bool:
|
|
222
|
+
r"""Try loading workflows from role-based directory structure.
|
|
223
|
+
|
|
224
|
+
Args:
|
|
225
|
+
role_name (str): Role name to load workflows from.
|
|
226
|
+
pattern (Optional[str]): Custom search pattern for workflow files.
|
|
227
|
+
max_files_to_load (int): Maximum number of workflow files to load.
|
|
228
|
+
use_smart_selection (bool): Whether to use agent-based selection.
|
|
229
|
+
|
|
230
|
+
Returns:
|
|
231
|
+
bool: True if workflows were successfully loaded, False otherwise.
|
|
232
|
+
"""
|
|
233
|
+
logger.info(f"Attempting to load workflows for role: {role_name}")
|
|
234
|
+
|
|
235
|
+
loaded = self.load_workflows_by_role(
|
|
236
|
+
role_name=role_name,
|
|
237
|
+
pattern=pattern,
|
|
238
|
+
max_files_to_load=max_files_to_load,
|
|
239
|
+
use_smart_selection=use_smart_selection,
|
|
240
|
+
)
|
|
241
|
+
|
|
242
|
+
return loaded
|
|
243
|
+
|
|
244
|
+
def _try_session_based_loading(
|
|
245
|
+
self,
|
|
246
|
+
session_id: str,
|
|
247
|
+
role_name: str,
|
|
248
|
+
pattern: Optional[str],
|
|
249
|
+
max_files_to_load: int,
|
|
250
|
+
use_smart_selection: bool,
|
|
251
|
+
) -> bool:
|
|
252
|
+
r"""Try loading workflows from session-based directory (deprecated).
|
|
253
|
+
|
|
254
|
+
Args:
|
|
255
|
+
session_id (str): Workforce session ID to load from.
|
|
256
|
+
role_name (str): Role name (for deprecation warning).
|
|
257
|
+
pattern (Optional[str]): Custom search pattern for workflow files.
|
|
258
|
+
max_files_to_load (int): Maximum number of workflow files to load.
|
|
259
|
+
use_smart_selection (bool): Whether to use agent-based selection.
|
|
260
|
+
|
|
261
|
+
Returns:
|
|
262
|
+
bool: True if workflows were successfully loaded, False otherwise.
|
|
263
|
+
"""
|
|
264
|
+
import warnings
|
|
265
|
+
|
|
266
|
+
warnings.warn(
|
|
267
|
+
f"Session-based workflow loading "
|
|
268
|
+
f"(session_id={session_id}) is deprecated. "
|
|
269
|
+
f"Workflows are now organized by role in "
|
|
270
|
+
f"workforce_workflows/{{role_name}}/ folders. "
|
|
271
|
+
f"No workflows found for role '{role_name}'.",
|
|
272
|
+
FutureWarning,
|
|
273
|
+
stacklevel=2,
|
|
274
|
+
)
|
|
275
|
+
|
|
276
|
+
logger.info(
|
|
277
|
+
f"Falling back to session-based loading for "
|
|
278
|
+
f"session_id={session_id}"
|
|
279
|
+
)
|
|
280
|
+
|
|
281
|
+
if use_smart_selection:
|
|
282
|
+
return self._session_based_smart_loading(
|
|
283
|
+
session_id, max_files_to_load
|
|
284
|
+
)
|
|
285
|
+
else:
|
|
286
|
+
return self._session_based_pattern_loading(
|
|
287
|
+
pattern, session_id, max_files_to_load
|
|
288
|
+
)
|
|
289
|
+
|
|
290
|
+
def _session_based_smart_loading(
|
|
291
|
+
self, session_id: str, max_files_to_load: int
|
|
292
|
+
) -> bool:
|
|
293
|
+
r"""Load workflows from session using smart selection.
|
|
294
|
+
|
|
295
|
+
Args:
|
|
296
|
+
session_id (str): Session ID to load from.
|
|
297
|
+
max_files_to_load (int): Maximum number of files to load.
|
|
298
|
+
|
|
299
|
+
Returns:
|
|
300
|
+
bool: True if workflows were loaded, False otherwise.
|
|
301
|
+
"""
|
|
302
|
+
context_util = self._get_context_utility()
|
|
303
|
+
workflows_metadata = context_util.get_all_workflows_info(session_id)
|
|
304
|
+
|
|
305
|
+
if workflows_metadata:
|
|
306
|
+
selected_files, selection_method = self._select_relevant_workflows(
|
|
307
|
+
workflows_metadata,
|
|
308
|
+
max_files_to_load,
|
|
309
|
+
session_id,
|
|
310
|
+
)
|
|
311
|
+
|
|
312
|
+
if selected_files:
|
|
313
|
+
logger.info(
|
|
314
|
+
f"Workflow selection method: {selection_method.value}"
|
|
315
|
+
)
|
|
316
|
+
loaded_count = self._load_workflow_files(
|
|
317
|
+
selected_files, max_files_to_load
|
|
318
|
+
)
|
|
319
|
+
return loaded_count > 0
|
|
320
|
+
|
|
321
|
+
return False
|
|
322
|
+
|
|
323
|
+
def _session_based_pattern_loading(
|
|
324
|
+
self,
|
|
325
|
+
pattern: Optional[str],
|
|
326
|
+
session_id: str,
|
|
327
|
+
max_files_to_load: int,
|
|
328
|
+
) -> bool:
|
|
329
|
+
r"""Load workflows from session using pattern matching.
|
|
330
|
+
|
|
331
|
+
Args:
|
|
332
|
+
pattern (Optional[str]): Pattern for file matching.
|
|
333
|
+
session_id (str): Session ID to load from.
|
|
334
|
+
max_files_to_load (int): Maximum number of files to load.
|
|
335
|
+
|
|
336
|
+
Returns:
|
|
337
|
+
bool: True if workflows were loaded, False otherwise.
|
|
338
|
+
"""
|
|
339
|
+
workflow_files = self._find_workflow_files(pattern, session_id)
|
|
340
|
+
if workflow_files:
|
|
341
|
+
loaded_count = self._load_workflow_files(
|
|
342
|
+
workflow_files, max_files_to_load
|
|
343
|
+
)
|
|
344
|
+
return loaded_count > 0
|
|
345
|
+
|
|
346
|
+
return False
|
|
347
|
+
|
|
348
|
+
def load_workflows(
|
|
349
|
+
self,
|
|
350
|
+
pattern: Optional[str] = None,
|
|
351
|
+
max_files_to_load: Optional[int] = None,
|
|
352
|
+
session_id: Optional[str] = None,
|
|
353
|
+
use_smart_selection: bool = True,
|
|
354
|
+
) -> bool:
|
|
355
|
+
r"""Load workflow memories using intelligent agent-based selection.
|
|
356
|
+
|
|
357
|
+
This method first tries to load workflows from the role-based folder
|
|
358
|
+
structure. If no workflows are found and session_id is provided, falls
|
|
359
|
+
back to session-based loading (deprecated).
|
|
360
|
+
|
|
361
|
+
Args:
|
|
362
|
+
pattern (Optional[str]): Legacy parameter for backward
|
|
363
|
+
compatibility. When use_smart_selection=False, uses this
|
|
364
|
+
pattern for file matching. Ignored when smart selection
|
|
365
|
+
is enabled.
|
|
366
|
+
max_files_to_load (Optional[int]): Maximum number of workflow files
|
|
367
|
+
to load. If None, uses config.default_max_files_to_load.
|
|
368
|
+
(default: :obj:`None`)
|
|
369
|
+
session_id (Optional[str]): Deprecated. Specific workforce session
|
|
370
|
+
ID to load from using legacy session-based organization.
|
|
371
|
+
(default: :obj:`None`)
|
|
372
|
+
use_smart_selection (bool): Whether to use agent-based
|
|
373
|
+
intelligent workflow selection. When True, uses workflow
|
|
374
|
+
information and LLM to select most relevant workflows. When
|
|
375
|
+
False, falls back to pattern matching. (default: :obj:`True`)
|
|
376
|
+
|
|
377
|
+
Returns:
|
|
378
|
+
bool: True if workflow memories were successfully loaded, False
|
|
379
|
+
otherwise. Check logs for detailed error messages.
|
|
380
|
+
"""
|
|
381
|
+
try:
|
|
382
|
+
# use config default if not specified
|
|
383
|
+
if max_files_to_load is None:
|
|
384
|
+
max_files_to_load = self.config.default_max_files_to_load
|
|
385
|
+
|
|
386
|
+
# reset system message to original state before loading
|
|
387
|
+
# this prevents duplicate workflow context on multiple calls
|
|
388
|
+
self.worker.reset_to_original_system_message()
|
|
389
|
+
|
|
390
|
+
# determine role name to use
|
|
391
|
+
role_name = (
|
|
392
|
+
self._role_identifier
|
|
393
|
+
if self._role_identifier
|
|
394
|
+
else self._get_sanitized_role_name()
|
|
395
|
+
)
|
|
396
|
+
|
|
397
|
+
# try role-based loading first
|
|
398
|
+
loaded = self._try_role_based_loading(
|
|
399
|
+
role_name, pattern, max_files_to_load, use_smart_selection
|
|
400
|
+
)
|
|
401
|
+
|
|
402
|
+
if loaded:
|
|
403
|
+
logger.info(
|
|
404
|
+
f"Successfully loaded workflows for role '{role_name}'"
|
|
405
|
+
)
|
|
406
|
+
return True
|
|
407
|
+
|
|
408
|
+
# fallback to session-based if session_id is provided
|
|
409
|
+
if session_id is not None:
|
|
410
|
+
loaded = self._try_session_based_loading(
|
|
411
|
+
session_id,
|
|
412
|
+
role_name,
|
|
413
|
+
pattern,
|
|
414
|
+
max_files_to_load,
|
|
415
|
+
use_smart_selection,
|
|
416
|
+
)
|
|
417
|
+
if loaded:
|
|
418
|
+
logger.info(
|
|
419
|
+
f"Successfully loaded workflows from session "
|
|
420
|
+
f"'{session_id}' (deprecated)"
|
|
421
|
+
)
|
|
422
|
+
return True
|
|
423
|
+
|
|
424
|
+
logger.info(
|
|
425
|
+
f"No workflow files found for role '{role_name}'. "
|
|
426
|
+
f"This may be expected if no workflows have been saved yet."
|
|
427
|
+
)
|
|
428
|
+
return False
|
|
429
|
+
|
|
430
|
+
except Exception as e:
|
|
431
|
+
logger.error(
|
|
432
|
+
f"Error loading workflow memories for "
|
|
433
|
+
f"{self.description}: {e!s}",
|
|
434
|
+
exc_info=True,
|
|
435
|
+
)
|
|
436
|
+
return False
|
|
437
|
+
|
|
438
|
+
def load_workflows_by_role(
|
|
439
|
+
self,
|
|
440
|
+
role_name: Optional[str] = None,
|
|
441
|
+
pattern: Optional[str] = None,
|
|
442
|
+
max_files_to_load: Optional[int] = None,
|
|
443
|
+
use_smart_selection: bool = True,
|
|
444
|
+
) -> bool:
|
|
445
|
+
r"""Load workflow memories from role-based directory structure.
|
|
446
|
+
|
|
447
|
+
This method loads workflows from the new role-based folder structure:
|
|
448
|
+
workforce_workflows/{role_name}/*.md
|
|
449
|
+
|
|
450
|
+
Args:
|
|
451
|
+
role_name (Optional[str]): Role name to load workflows from. If
|
|
452
|
+
None, uses the worker's role_name or role_identifier.
|
|
453
|
+
pattern (Optional[str]): Custom search pattern for workflow files.
|
|
454
|
+
Ignored when use_smart_selection=True.
|
|
455
|
+
max_files_to_load (Optional[int]): Maximum number of workflow files
|
|
456
|
+
to load. If None, uses config.default_max_files_to_load.
|
|
457
|
+
(default: :obj:`None`)
|
|
458
|
+
use_smart_selection (bool): Whether to use agent-based
|
|
459
|
+
intelligent workflow selection. When True, uses workflow
|
|
460
|
+
information and LLM to select most relevant workflows. When
|
|
461
|
+
False, falls back to pattern matching. (default: :obj:`True`)
|
|
462
|
+
|
|
463
|
+
Returns:
|
|
464
|
+
bool: True if workflow memories were successfully loaded, False
|
|
465
|
+
otherwise.
|
|
466
|
+
"""
|
|
467
|
+
try:
|
|
468
|
+
# use config default if not specified
|
|
469
|
+
if max_files_to_load is None:
|
|
470
|
+
max_files_to_load = self.config.default_max_files_to_load
|
|
471
|
+
|
|
472
|
+
# reset system message to original state before loading
|
|
473
|
+
self.worker.reset_to_original_system_message()
|
|
474
|
+
|
|
475
|
+
# determine role name to use
|
|
476
|
+
if role_name is None:
|
|
477
|
+
role_name = (
|
|
478
|
+
self._role_identifier or self._get_sanitized_role_name()
|
|
479
|
+
)
|
|
480
|
+
|
|
481
|
+
# determine which selection method to use
|
|
482
|
+
if use_smart_selection:
|
|
483
|
+
# smart selection: use workflow information and agent
|
|
484
|
+
# intelligence
|
|
485
|
+
context_util = self._get_context_utility()
|
|
486
|
+
|
|
487
|
+
# find workflow files in role-based directory
|
|
488
|
+
workflow_files = self._find_workflow_files_by_role(
|
|
489
|
+
role_name, pattern
|
|
490
|
+
)
|
|
491
|
+
|
|
492
|
+
# get workflow metadata for smart selection
|
|
493
|
+
workflows_metadata = []
|
|
494
|
+
for file_path in workflow_files:
|
|
495
|
+
metadata = context_util.extract_workflow_info(file_path)
|
|
496
|
+
if metadata:
|
|
497
|
+
workflows_metadata.append(metadata)
|
|
498
|
+
|
|
499
|
+
if not workflows_metadata:
|
|
500
|
+
logger.info(
|
|
501
|
+
f"No workflow files found for role: {role_name}"
|
|
502
|
+
)
|
|
503
|
+
return False
|
|
504
|
+
|
|
505
|
+
# use agent to select most relevant workflows
|
|
506
|
+
selected_files, selection_method = (
|
|
507
|
+
self._select_relevant_workflows(
|
|
508
|
+
workflows_metadata, max_files_to_load
|
|
509
|
+
)
|
|
510
|
+
)
|
|
511
|
+
|
|
512
|
+
if not selected_files:
|
|
513
|
+
logger.info(
|
|
514
|
+
f"No workflows selected for role {role_name} "
|
|
515
|
+
f"(method: {selection_method.value})"
|
|
516
|
+
)
|
|
517
|
+
return False
|
|
518
|
+
|
|
519
|
+
# log selection method used
|
|
520
|
+
logger.info(
|
|
521
|
+
f"Workflow selection method: {selection_method.value}"
|
|
522
|
+
)
|
|
523
|
+
|
|
524
|
+
# load selected workflows
|
|
525
|
+
loaded_count = self._load_workflow_files(
|
|
526
|
+
selected_files, max_files_to_load
|
|
527
|
+
)
|
|
528
|
+
|
|
529
|
+
else:
|
|
530
|
+
# legacy pattern matching approach
|
|
531
|
+
workflow_files = self._find_workflow_files_by_role(
|
|
532
|
+
role_name, pattern
|
|
533
|
+
)
|
|
534
|
+
|
|
535
|
+
if not workflow_files:
|
|
536
|
+
logger.info(
|
|
537
|
+
f"No workflow files found for role: {role_name}"
|
|
538
|
+
)
|
|
539
|
+
return False
|
|
540
|
+
|
|
541
|
+
loaded_count = self._load_workflow_files(
|
|
542
|
+
workflow_files, max_files_to_load
|
|
543
|
+
)
|
|
544
|
+
|
|
545
|
+
# report results
|
|
546
|
+
if loaded_count > 0:
|
|
547
|
+
logger.info(
|
|
548
|
+
f"Successfully loaded {loaded_count} workflow file(s) for "
|
|
549
|
+
f"role {role_name}"
|
|
550
|
+
)
|
|
551
|
+
return loaded_count > 0
|
|
552
|
+
|
|
553
|
+
except Exception as e:
|
|
554
|
+
logger.warning(
|
|
555
|
+
f"Error loading workflow memories for role {role_name}: {e!s}"
|
|
556
|
+
)
|
|
557
|
+
return False
|
|
558
|
+
|
|
559
|
+
def save_workflow(
|
|
560
|
+
self, conversation_accumulator: Optional[ChatAgent] = None
|
|
561
|
+
) -> Dict[str, Any]:
|
|
562
|
+
r"""Save the worker's current workflow memories using agent
|
|
563
|
+
summarization.
|
|
564
|
+
|
|
565
|
+
This method uses a two-pass approach: first generates the workflow
|
|
566
|
+
summary to determine operation_mode (update vs create), then saves
|
|
567
|
+
to the appropriate file path based on that decision.
|
|
568
|
+
|
|
569
|
+
Args:
|
|
570
|
+
conversation_accumulator (Optional[ChatAgent]): Optional
|
|
571
|
+
accumulator agent with collected conversations. If provided,
|
|
572
|
+
uses this instead of the main worker agent.
|
|
573
|
+
|
|
574
|
+
Returns:
|
|
575
|
+
Dict[str, Any]: Result dictionary with keys:
|
|
576
|
+
- status (str): "success" or "error"
|
|
577
|
+
- summary (str): Generated workflow summary
|
|
578
|
+
- file_path (str): Path to saved file
|
|
579
|
+
- worker_description (str): Worker description used
|
|
580
|
+
"""
|
|
581
|
+
try:
|
|
582
|
+
# pass 1: generate workflow summary (without saving to disk)
|
|
583
|
+
summary_result = self.generate_workflow_summary(
|
|
584
|
+
conversation_accumulator=conversation_accumulator
|
|
585
|
+
)
|
|
586
|
+
|
|
587
|
+
if summary_result["status"] != "success":
|
|
588
|
+
return {
|
|
589
|
+
"status": "error",
|
|
590
|
+
"summary": "",
|
|
591
|
+
"file_path": None,
|
|
592
|
+
"worker_description": self.description,
|
|
593
|
+
"message": f"Failed to generate summary: "
|
|
594
|
+
f"{summary_result['status']}",
|
|
595
|
+
}
|
|
596
|
+
|
|
597
|
+
workflow_summary = summary_result["structured_summary"]
|
|
598
|
+
if not workflow_summary:
|
|
599
|
+
return {
|
|
600
|
+
"status": "error",
|
|
601
|
+
"summary": "",
|
|
602
|
+
"file_path": None,
|
|
603
|
+
"worker_description": self.description,
|
|
604
|
+
"message": "No structured summary generated",
|
|
605
|
+
}
|
|
606
|
+
|
|
607
|
+
# pass 2: save using save_workflow_content which handles
|
|
608
|
+
# operation_mode branching
|
|
609
|
+
context_util = self._get_context_utility()
|
|
610
|
+
result = self.save_workflow_content(
|
|
611
|
+
workflow_summary=workflow_summary,
|
|
612
|
+
context_utility=context_util,
|
|
613
|
+
conversation_accumulator=conversation_accumulator,
|
|
614
|
+
)
|
|
615
|
+
|
|
616
|
+
return result
|
|
617
|
+
|
|
618
|
+
except Exception as e:
|
|
619
|
+
return {
|
|
620
|
+
"status": "error",
|
|
621
|
+
"summary": "",
|
|
622
|
+
"file_path": None,
|
|
623
|
+
"worker_description": self.description,
|
|
624
|
+
"message": f"Failed to save workflow memories: {e!s}",
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
def generate_workflow_summary(
|
|
628
|
+
self,
|
|
629
|
+
conversation_accumulator: Optional[ChatAgent] = None,
|
|
630
|
+
) -> Dict[str, Any]:
|
|
631
|
+
r"""Generate a workflow summary without saving to disk.
|
|
632
|
+
|
|
633
|
+
This method generates a workflow summary by calling a dedicated
|
|
634
|
+
summarizer agent. It does NOT save to disk - only generates the
|
|
635
|
+
summary content and structured output. Use this when you need to
|
|
636
|
+
inspect the summary (e.g., extract operation_mode) before determining
|
|
637
|
+
where to save it.
|
|
638
|
+
|
|
639
|
+
Args:
|
|
640
|
+
conversation_accumulator (Optional[ChatAgent]): Optional
|
|
641
|
+
accumulator agent with collected conversations. If provided,
|
|
642
|
+
uses this instead of the main worker agent.
|
|
643
|
+
|
|
644
|
+
Returns:
|
|
645
|
+
Dict[str, Any]: Result dictionary with:
|
|
646
|
+
- structured_summary: WorkflowSummary instance or None
|
|
647
|
+
- summary_content: Raw text content
|
|
648
|
+
- status: "success" or error message
|
|
649
|
+
"""
|
|
650
|
+
|
|
651
|
+
result: Dict[str, Any] = {
|
|
652
|
+
"structured_summary": None,
|
|
653
|
+
"summary_content": "",
|
|
654
|
+
"status": "",
|
|
655
|
+
}
|
|
656
|
+
|
|
657
|
+
try:
|
|
658
|
+
# setup context utility
|
|
659
|
+
context_util = self._get_context_utility()
|
|
660
|
+
self.worker.set_context_utility(context_util)
|
|
661
|
+
|
|
662
|
+
# prepare workflow summarization prompt
|
|
663
|
+
structured_prompt = self._prepare_workflow_prompt()
|
|
664
|
+
|
|
665
|
+
# select agent for summarization
|
|
666
|
+
agent_to_summarize = self.worker
|
|
667
|
+
if conversation_accumulator is not None:
|
|
668
|
+
accumulator_messages, _ = (
|
|
669
|
+
conversation_accumulator.memory.get_context()
|
|
670
|
+
)
|
|
671
|
+
if accumulator_messages:
|
|
672
|
+
conversation_accumulator.set_context_utility(context_util)
|
|
673
|
+
agent_to_summarize = conversation_accumulator
|
|
674
|
+
logger.info(
|
|
675
|
+
f"Using conversation accumulator with "
|
|
676
|
+
f"{len(accumulator_messages)} messages for workflow "
|
|
677
|
+
f"summary"
|
|
678
|
+
)
|
|
679
|
+
|
|
680
|
+
# get conversation from agent's memory
|
|
681
|
+
source_agent = (
|
|
682
|
+
conversation_accumulator
|
|
683
|
+
if conversation_accumulator
|
|
684
|
+
else self.worker
|
|
685
|
+
)
|
|
686
|
+
messages, _ = source_agent.memory.get_context()
|
|
687
|
+
|
|
688
|
+
if not messages:
|
|
689
|
+
result["status"] = "No conversation context available"
|
|
690
|
+
return result
|
|
691
|
+
|
|
692
|
+
# build conversation text
|
|
693
|
+
conversation_text, _ = (
|
|
694
|
+
agent_to_summarize._build_conversation_text_from_messages(
|
|
695
|
+
messages, include_summaries=False
|
|
696
|
+
)
|
|
697
|
+
)
|
|
698
|
+
|
|
699
|
+
if not conversation_text:
|
|
700
|
+
result["status"] = "Conversation context is empty"
|
|
701
|
+
return result
|
|
702
|
+
|
|
703
|
+
# create or reuse summarizer agent
|
|
704
|
+
if agent_to_summarize._context_summary_agent is None:
|
|
705
|
+
agent_to_summarize._context_summary_agent = ChatAgent(
|
|
706
|
+
system_message=(
|
|
707
|
+
"You are a helpful assistant that summarizes "
|
|
708
|
+
"conversations"
|
|
709
|
+
),
|
|
710
|
+
model=agent_to_summarize.model_backend,
|
|
711
|
+
agent_id=f"{agent_to_summarize.agent_id}_context_summarizer",
|
|
712
|
+
)
|
|
713
|
+
else:
|
|
714
|
+
agent_to_summarize._context_summary_agent.reset()
|
|
715
|
+
|
|
716
|
+
# prepare prompt
|
|
717
|
+
prompt_text = (
|
|
718
|
+
f"{structured_prompt.rstrip()}\n\n"
|
|
719
|
+
f"AGENT CONVERSATION TO BE SUMMARIZED:\n"
|
|
720
|
+
f"{conversation_text}"
|
|
721
|
+
)
|
|
722
|
+
|
|
723
|
+
# call summarizer agent with structured output
|
|
724
|
+
response = agent_to_summarize._context_summary_agent.step(
|
|
725
|
+
prompt_text, response_format=WorkflowSummary
|
|
726
|
+
)
|
|
727
|
+
|
|
728
|
+
if not response.msgs:
|
|
729
|
+
result["status"] = "Failed to generate summary"
|
|
730
|
+
return result
|
|
731
|
+
|
|
732
|
+
summary_content = response.msgs[-1].content.strip()
|
|
733
|
+
structured_output = None
|
|
734
|
+
if response.msgs[-1].parsed:
|
|
735
|
+
structured_output = response.msgs[-1].parsed
|
|
736
|
+
|
|
737
|
+
result.update(
|
|
738
|
+
{
|
|
739
|
+
"structured_summary": structured_output,
|
|
740
|
+
"summary_content": summary_content,
|
|
741
|
+
"status": "success",
|
|
742
|
+
}
|
|
743
|
+
)
|
|
744
|
+
return result
|
|
745
|
+
|
|
746
|
+
except Exception as exc:
|
|
747
|
+
error_message = f"Failed to generate summary: {exc}"
|
|
748
|
+
logger.error(error_message)
|
|
749
|
+
result["status"] = error_message
|
|
750
|
+
return result
|
|
751
|
+
|
|
752
|
+
async def generate_workflow_summary_async(
|
|
753
|
+
self,
|
|
754
|
+
conversation_accumulator: Optional[ChatAgent] = None,
|
|
755
|
+
) -> Dict[str, Any]:
|
|
756
|
+
r"""Asynchronously generate a workflow summary without saving to disk.
|
|
757
|
+
|
|
758
|
+
        This is the async version of generate_workflow_summary() that uses
        astep() for non-blocking LLM calls. It does NOT save to disk - only
        generates the summary content and structured output.

        Args:
            conversation_accumulator (Optional[ChatAgent]): Optional
                accumulator agent with collected conversations. If provided,
                uses this instead of the main worker agent.

        Returns:
            Dict[str, Any]: Result dictionary with:
                - structured_summary: WorkflowSummary instance or None
                - summary_content: Raw text content
                - status: "success" or error message
        """
        result: Dict[str, Any] = {
            "structured_summary": None,
            "summary_content": "",
            "status": "",
        }

        try:
            # setup context utility
            context_util = self._get_context_utility()
            self.worker.set_context_utility(context_util)

            # prepare workflow summarization prompt
            structured_prompt = self._prepare_workflow_prompt()

            # select agent for summarization
            agent_to_summarize = self.worker
            if conversation_accumulator is not None:
                accumulator_messages, _ = (
                    conversation_accumulator.memory.get_context()
                )
                if accumulator_messages:
                    conversation_accumulator.set_context_utility(context_util)
                    agent_to_summarize = conversation_accumulator
                    logger.info(
                        f"Using conversation accumulator with "
                        f"{len(accumulator_messages)} messages for workflow "
                        f"summary"
                    )

            # get conversation from agent's memory
            source_agent = (
                conversation_accumulator
                if conversation_accumulator
                else self.worker
            )
            messages, _ = source_agent.memory.get_context()

            if not messages:
                result["status"] = "No conversation context available"
                return result

            # build conversation text
            conversation_text, _ = (
                agent_to_summarize._build_conversation_text_from_messages(
                    messages, include_summaries=False
                )
            )

            if not conversation_text:
                result["status"] = "Conversation context is empty"
                return result

            # create or reuse summarizer agent
            if agent_to_summarize._context_summary_agent is None:
                agent_to_summarize._context_summary_agent = ChatAgent(
                    system_message=(
                        "You are a helpful assistant that summarizes "
                        "conversations"
                    ),
                    model=agent_to_summarize.model_backend,
                    agent_id=f"{agent_to_summarize.agent_id}_context_summarizer",
                )
            else:
                agent_to_summarize._context_summary_agent.reset()

            # prepare prompt
            prompt_text = (
                f"{structured_prompt.rstrip()}\n\n"
                f"AGENT CONVERSATION TO BE SUMMARIZED:\n"
                f"{conversation_text}"
            )

            # call summarizer agent with structured output (async)
            response = await agent_to_summarize._context_summary_agent.astep(
                prompt_text, response_format=WorkflowSummary
            )

            if not response.msgs:
                result["status"] = "Failed to generate summary"
                return result

            summary_content = response.msgs[-1].content.strip()
            structured_output = None
            if response.msgs[-1].parsed:
                structured_output = response.msgs[-1].parsed

            result.update(
                {
                    "structured_summary": structured_output,
                    "summary_content": summary_content,
                    "status": "success",
                }
            )
            return result

        except Exception as exc:
            error_message = f"Failed to generate summary: {exc}"
            logger.error(error_message)
            result["status"] = error_message
            return result

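A rough usage sketch (not part of the diff): the async generator can be awaited directly and its result dict inspected. Here `worker` is a placeholder for an instance of this worker class; it is not a name defined in this file.

import asyncio

async def preview_workflow_summary(worker):
    # generate the structured summary without writing anything to disk
    result = await worker.generate_workflow_summary_async()
    if result["status"] == "success":
        print(result["structured_summary"])  # WorkflowSummary instance or None
        print(result["summary_content"])     # raw text returned by the LLM
    else:
        print("summary not available:", result["status"])

# asyncio.run(preview_workflow_summary(worker))
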
    def save_workflow_content(
        self,
        workflow_summary: 'WorkflowSummary',
        context_utility: Optional[ContextUtility] = None,
        conversation_accumulator: Optional[ChatAgent] = None,
    ) -> Dict[str, Any]:
        r"""Save a pre-generated workflow summary to disk.

        This method takes a pre-generated WorkflowSummary object and saves
        it to disk using the provided context utility. It does NOT call the
        LLM - just formats and saves the content. Use this for two-pass
        workflows where the summary is generated first, then saved to a
        location determined by the summary content.

        Args:
            workflow_summary (WorkflowSummary): Pre-generated workflow summary
                object containing task_title, agent_title, etc.
            context_utility (Optional[ContextUtility]): Context utility with
                correct working directory. If None, uses default.
            conversation_accumulator (Optional[ChatAgent]): An optional agent
                that holds accumulated conversation history. Used to get
                accurate message_count metadata. (default: :obj:`None`)

        Returns:
            Dict[str, Any]: Result dictionary with keys:
                - status (str): "success" or "error"
                - summary (str): Formatted workflow summary
                - file_path (str): Path to saved file
                - worker_description (str): Worker description used
        """

        def _create_error_result(message: str) -> Dict[str, Any]:
            """helper to create error result dict."""
            return {
                "status": "error",
                "summary": "",
                "file_path": None,
                "worker_description": self.description,
                "message": message,
            }

        try:
            # validate workflow_summary input
            if not workflow_summary:
                return _create_error_result("workflow_summary is required")

            # validate required fields exist
            if not hasattr(workflow_summary, 'task_title'):
                return _create_error_result(
                    "workflow_summary must have task_title field"
                )

            if not hasattr(workflow_summary, 'agent_title'):
                return _create_error_result(
                    "workflow_summary must have agent_title field"
                )

            # validate agent_title is not empty
            agent_title = getattr(workflow_summary, 'agent_title', '').strip()
            if not agent_title:
                return _create_error_result(
                    "workflow_summary.agent_title cannot be empty"
                )

            # use provided context utility or get default
            if context_utility is None:
                context_utility = self._get_context_utility()

            # set context utility on worker
            self.worker.set_context_utility(context_utility)

            # determine file path based on operation mode
            operation_mode = getattr(
                workflow_summary, 'operation_mode', 'create'
            )
            target_filename = getattr(
                workflow_summary, 'target_workflow_filename', None
            )

            # validate operation_mode - default to create for unexpected values
            if operation_mode not in ("create", "update"):
                logger.warning(
                    f"Unexpected operation_mode '{operation_mode}', "
                    "defaulting to 'create'."
                )
                operation_mode = "create"

            if operation_mode == "update":
                # if only one workflow loaded and no target specified,
                # assume agent meant that one
                has_single_workflow = len(self._loaded_workflow_paths) == 1
                if not target_filename and has_single_workflow:
                    target_filename = next(
                        iter(self._loaded_workflow_paths.keys())
                    )
                    logger.info(
                        f"Auto-selecting single loaded workflow: "
                        f"{target_filename}"
                    )

                # validate target filename exists in loaded workflows
                if (
                    target_filename
                    and target_filename in self._loaded_workflow_paths
                ):
                    # use the stored path for the target workflow
                    file_path = Path(
                        self._loaded_workflow_paths[target_filename]
                    )
                    base_filename = target_filename
                    logger.info(f"Updating existing workflow: {file_path}")
                else:
                    # invalid or missing target, fall back to create mode
                    available = list(self._loaded_workflow_paths.keys())
                    logger.warning(
                        f"Invalid target_workflow_filename "
                        f"'{target_filename}', available: {available}. "
                        "Falling back to create mode."
                    )
                    operation_mode = "create"

            if operation_mode == "create":
                # use task_title from summary for filename
                task_title = workflow_summary.task_title
                clean_title = ContextUtility.sanitize_workflow_filename(
                    task_title
                )
                base_filename = (
                    f"{clean_title}{self.config.workflow_filename_suffix}"
                    if clean_title
                    else "workflow"
                )

                file_path = (
                    context_utility.get_working_directory()
                    / f"{base_filename}.md"
                )
                logger.info(f"Creating new workflow: {file_path}")

            # check if workflow file already exists to handle versioning
            existing_metadata = self._extract_existing_workflow_metadata(
                file_path
            )

            # build metadata - get message count from accumulator if available
            source_agent = (
                conversation_accumulator
                if conversation_accumulator
                else self.worker
            )

            # determine version and created_at based on existing metadata
            # only increment version if versioning is enabled
            if self.config.enable_versioning and existing_metadata:
                workflow_version = existing_metadata.workflow_version + 1
                created_at = existing_metadata.created_at
            else:
                workflow_version = 1
                created_at = None

            metadata = context_utility.get_session_metadata(
                workflow_version=workflow_version, created_at=created_at
            )
            metadata.update(
                {
                    "agent_id": self.worker.agent_id,
                    "message_count": len(source_agent.memory.get_context()[0]),
                }
            )

            # convert WorkflowSummary to markdown
            # exclude operation_mode and target_workflow_filename as they're
            # only used for save logic, not persisted in the workflow file
            summary_content = context_utility.structured_output_to_markdown(
                structured_data=workflow_summary,
                metadata=metadata,
                exclude_fields=['operation_mode', 'target_workflow_filename'],
            )

            # save to disk
            save_status = context_utility.save_markdown_file(
                base_filename,
                summary_content,
            )

            # format summary with context prefix
            formatted_summary = (
                f"[CONTEXT_SUMMARY] The following is a summary of our "
                f"conversation from a previous session: {summary_content}"
            )

            status = "success" if save_status == "success" else save_status
            return {
                "status": status,
                "summary": formatted_summary,
                "file_path": str(file_path),
                "worker_description": self.description,
            }

        except Exception as e:
            return _create_error_result(
                f"Failed to save workflow content: {e!s}"
            )

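A minimal two-pass sketch built only from the methods shown above: generate the summary first, then persist it. `worker` is again a placeholder instance of this class, not a name from the diff.

import asyncio

async def two_pass_save(worker):
    # pass 1: generate the summary (async LLM call, no disk writes)
    generated = await worker.generate_workflow_summary_async()
    if generated["status"] != "success" or not generated["structured_summary"]:
        return generated

    # pass 2: persist the pre-generated WorkflowSummary; no further LLM calls
    return worker.save_workflow_content(
        workflow_summary=generated["structured_summary"],
    )

# saved = asyncio.run(two_pass_save(worker))
# saved["file_path"] then points at the markdown file that was written
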
    async def save_workflow_content_async(
        self,
        workflow_summary: 'WorkflowSummary',
        context_utility: Optional[ContextUtility] = None,
        conversation_accumulator: Optional[ChatAgent] = None,
    ) -> Dict[str, Any]:
        r"""Async wrapper for save_workflow_content.

        Delegates to sync version since file I/O operations are synchronous.
        This method exists for API consistency with save_workflow_async().

        Args:
            workflow_summary (WorkflowSummary): Pre-generated workflow summary
                object containing task_title, agent_title, etc.
            context_utility (Optional[ContextUtility]): Context utility with
                correct working directory. If None, uses default.
            conversation_accumulator (Optional[ChatAgent]): An optional agent
                that holds accumulated conversation history. Used to get
                accurate message_count metadata. (default: :obj:`None`)

        Returns:
            Dict[str, Any]: Result dictionary with keys:
                - status (str): "success" or "error"
                - summary (str): Formatted workflow summary
                - file_path (str): Path to saved file
                - worker_description (str): Worker description used
        """
        return self.save_workflow_content(
            workflow_summary=workflow_summary,
            context_utility=context_utility,
            conversation_accumulator=conversation_accumulator,
        )

    async def save_workflow_async(
        self, conversation_accumulator: Optional[ChatAgent] = None
    ) -> Dict[str, Any]:
        r"""Asynchronously save the worker's current workflow memories using
        agent summarization.

        This is the async version of save_workflow() that uses a two-pass
        approach: first generate the workflow summary (async LLM call), then
        save to disk using the appropriate file path based on operation_mode
        (update vs create).

        Args:
            conversation_accumulator (Optional[ChatAgent]): Optional
                accumulator agent with collected conversations. If provided,
                uses this instead of the main worker agent.

        Returns:
            Dict[str, Any]: Result dictionary with keys:
                - status (str): "success" or "error"
                - summary (str): Generated workflow summary
                - file_path (str): Path to saved file
                - worker_description (str): Worker description used
        """
        try:
            # pass 1: generate workflow summary (without saving to disk)
            summary_result = await self.generate_workflow_summary_async(
                conversation_accumulator=conversation_accumulator
            )

            if summary_result["status"] != "success":
                return {
                    "status": "error",
                    "summary": "",
                    "file_path": None,
                    "worker_description": self.description,
                    "message": f"Failed to generate summary: "
                    f"{summary_result['status']}",
                }

            workflow_summary = summary_result["structured_summary"]
            if not workflow_summary:
                return {
                    "status": "error",
                    "summary": "",
                    "file_path": None,
                    "worker_description": self.description,
                    "message": "No structured summary generated",
                }

            # pass 2: save using save_workflow_content which handles
            # operation_mode branching (sync - file I/O doesn't need async)
            context_util = self._get_context_utility()
            return self.save_workflow_content(
                workflow_summary=workflow_summary,
                context_utility=context_util,
                conversation_accumulator=conversation_accumulator,
            )

        except Exception as e:
            return {
                "status": "error",
                "summary": "",
                "file_path": None,
                "worker_description": self.description,
                "message": f"Failed to save workflow memories: {e!s}",
            }

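The single-call variant performs both passes internally; a hedged sketch of calling it (with `worker` once more a placeholder instance of this class):

import asyncio

async def save_worker_workflow(worker):
    # generate + save happen inside save_workflow_async()
    result = await worker.save_workflow_async()
    if result["status"] == "success":
        print("workflow saved to", result["file_path"])
    else:
        print("save failed:", result.get("message", result["status"]))
    return result

# asyncio.run(save_worker_workflow(worker))
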
    def _select_relevant_workflows(
        self,
        workflows_metadata: List[Dict[str, Any]],
        max_files: int,
        session_id: Optional[str] = None,
    ) -> tuple[List[str], WorkflowSelectionMethod]:
        r"""Use worker agent to select most relevant workflows.

        This method creates a prompt with all available workflow information
        and uses the worker agent to intelligently select the most relevant
        workflows based on the worker's role and description.

        Args:
            workflows_metadata (List[Dict[str, Any]]): List of workflow
                information dicts (contains title, description, tags,
                file_path).
            max_files (int): Maximum number of workflows to select.
            session_id (Optional[str]): Specific workforce session ID to
                search in for fallback pattern matching. If None, searches
                across all sessions. (default: :obj:`None`)

        Returns:
            tuple[List[str], WorkflowSelectionMethod]: Tuple of (selected
                workflow file paths, selection method used).
        """
        if not workflows_metadata:
            return [], WorkflowSelectionMethod.NONE

        # format workflows for selection
        workflows_str = self._format_workflows_for_selection(
            workflows_metadata
        )

        # create selection prompt
        selection_prompt = (
            f"You are a {self.description}. "
            f"Review the following {len(workflows_metadata)} available "
            f"workflow memories and select the {max_files} most relevant "
            f"ones for your current role. Consider:\n"
            f"1. Task similarity to your role\n"
            f"2. Domain relevance\n"
            f"3. Tool and capability overlap\n\n"
            f"Available workflows:\n{workflows_str}\n\n"
            f"Respond with ONLY the workflow numbers you selected "
            f"(e.g., '1, 3, 5'), separated by commas. "
            f"Select exactly {max_files} workflows."
        )

        try:
            # use worker agent for selection
            from camel.messages import BaseMessage

            selection_msg = BaseMessage.make_user_message(
                role_name="user", content=selection_prompt
            )

            response = self.worker.step(selection_msg)

            # parse response to extract workflow numbers
            numbers_str = response.msgs[0].content
            numbers = re.findall(r'\d+', numbers_str)
            selected_indices = [int(n) - 1 for n in numbers[:max_files]]

            # validate indices and get file paths
            selected_paths = []
            for idx in selected_indices:
                if 0 <= idx < len(workflows_metadata):
                    selected_paths.append(workflows_metadata[idx]['file_path'])
                else:
                    logger.warning(
                        f"Agent selected invalid workflow index {idx + 1}, "
                        f"only {len(workflows_metadata)} workflows available"
                    )

            if selected_paths:
                logger.info(
                    f"Agent selected {len(selected_paths)} workflow(s) for "
                    f"{self.description}"
                )
                return selected_paths, WorkflowSelectionMethod.AGENT_SELECTED

            # agent returned empty results
            logger.warning(
                "Agent selection returned no valid workflows, "
                "falling back to role-based pattern matching"
            )

        except Exception as e:
            logger.warning(
                f"Error during agent selection: {e!s}. "
                f"Falling back to role-based pattern matching"
            )

        finally:
            # clean up selection conversation from memory to prevent
            # pollution. this runs whether selection succeeded, failed,
            # or raised exception
            self.worker.memory.clear()
            if self.worker._system_message is not None:
                self.worker.update_memory(
                    self.worker._system_message, OpenAIBackendRole.SYSTEM
                )

        # fallback: try pattern matching by role_name
        pattern_matched_files = self._find_workflow_files(
            pattern=None, session_id=session_id
        )
        if pattern_matched_files:
            return (
                pattern_matched_files[:max_files],
                WorkflowSelectionMethod.ROLE_NAME_MATCH,
            )

        # last resort: return most recent workflows
        logger.info(
            "No role-matched workflows found, using most recent workflows"
        )
        return (
            [wf['file_path'] for wf in workflows_metadata[:max_files]],
            WorkflowSelectionMethod.MOST_RECENT,
        )

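The selection reply is parsed leniently: any digits in the agent's message are taken as 1-based workflow numbers. A standalone re-creation of that parsing step, using the same re.findall call as above:

import re

reply = "I would pick workflows 1, 3 and 5."
max_files = 2

numbers = re.findall(r'\d+', reply)                        # ['1', '3', '5']
selected_indices = [int(n) - 1 for n in numbers[:max_files]]
print(selected_indices)                                    # [0, 2]
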
    def _format_workflows_for_selection(
        self, workflows_metadata: List[Dict[str, Any]]
    ) -> str:
        r"""Format workflow information into a readable prompt for selection.

        Args:
            workflows_metadata (List[Dict[str, Any]]): List of workflow
                information dicts (contains title, description, tags,
                file_path).

        Returns:
            str: Formatted string presenting workflows for LLM selection.
        """
        if not workflows_metadata:
            return "No workflows available."

        formatted_lines = []
        for i, workflow in enumerate(workflows_metadata, 1):
            formatted_lines.append(f"\nWorkflow {i}:")
            formatted_lines.append(f"- Title: {workflow.get('title', 'N/A')}")
            formatted_lines.append(
                f"- Description: {workflow.get('description', 'N/A')}"
            )
            tags = workflow.get('tags', [])
            tags_str = ', '.join(tags) if tags else 'No tags'
            formatted_lines.append(f"- Tags: {tags_str}")
            formatted_lines.append(
                f"- File: {workflow.get('file_path', 'N/A')}"
            )

        return '\n'.join(formatted_lines)

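For reference, the listing format produced above looks roughly like this for one entry; a standalone re-creation with made-up sample metadata (the field values are illustrative only):

workflow = {
    "title": "CSV cleanup",                     # made-up sample values
    "description": "Normalize and deduplicate rows",
    "tags": ["pandas", "cleaning"],
    "file_path": "workforce_workflows/session_x/csv_cleanup_workflow.md",
}

lines = [
    "\nWorkflow 1:",
    f"- Title: {workflow.get('title', 'N/A')}",
    f"- Description: {workflow.get('description', 'N/A')}",
    f"- Tags: {', '.join(workflow.get('tags', [])) or 'No tags'}",
    f"- File: {workflow.get('file_path', 'N/A')}",
]
print("\n".join(lines))
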
    def _find_workflow_files(
        self, pattern: Optional[str], session_id: Optional[str] = None
    ) -> List[str]:
        r"""Find and return sorted workflow files matching the pattern.

        .. note::
            Session-based workflow search will be deprecated in a future
            version. Consider using :meth:`_find_workflow_files_by_role` for
            role-based organization instead.

        Args:
            pattern (Optional[str]): Custom search pattern for workflow files.
                If None, uses worker role_name to generate pattern.
            session_id (Optional[str]): Specific session ID to search in.
                If None, searches across all sessions.

        Returns:
            List[str]: Sorted list of workflow file paths (empty if
                validation fails).
        """
        import warnings

        warnings.warn(
            "Session-based workflow search is deprecated and will be removed "
            "in a future version. Consider using load_workflows_by_role() for "
            "role-based organization instead.",
            FutureWarning,
            stacklevel=2,
        )

        # generate filename-safe search pattern from worker role name
        if pattern is None:
            # get sanitized role name
            clean_name = self._get_sanitized_role_name()

            # check if role_name is generic
            if is_generic_role_name(clean_name):
                # for generic role names, search for all workflow files
                # since filename is based on task_title
                pattern = f"*{self.config.workflow_filename_suffix}*.md"
            else:
                # for explicit role names, search for role-specific files
                pattern = (
                    f"{clean_name}{self.config.workflow_filename_suffix}*.md"
                )

        # get the base workflow directory from config
        camel_workdir = os.environ.get("CAMEL_WORKDIR")
        if camel_workdir:
            base_dir = os.path.join(
                camel_workdir, self.config.workflow_folder_name
            )
        else:
            base_dir = self.config.workflow_folder_name

        # search for workflow files in specified or all session directories
        if session_id:
            search_path = str(Path(base_dir) / session_id / pattern)
        else:
            # search across all session directories using wildcard pattern
            search_path = str(Path(base_dir) / "*" / pattern)
        workflow_files = glob.glob(search_path)

        if not workflow_files:
            logger.info(f"No workflow files found for pattern: {pattern}")
            return []

        # prioritize most recent sessions by session timestamp in
        # directory name
        def extract_session_timestamp(filepath: str) -> str:
            match = re.search(r'session_(\d{8}_\d{6}_\d{6})', filepath)
            return match.group(1) if match else ""

        workflow_files.sort(key=extract_session_timestamp, reverse=True)
        return workflow_files

    def _find_workflow_files_by_role(
        self, role_name: Optional[str] = None, pattern: Optional[str] = None
    ) -> List[str]:
        r"""Find workflow files in role-based directory structure.

        This method searches for workflows in the new role-based folder
        structure: workforce_workflows/{role_name}/*.md

        Args:
            role_name (Optional[str]): Role name to search for. If None,
                uses the worker's role_name or role_identifier.
            pattern (Optional[str]): Custom search pattern for workflow files.
                If None, searches for all workflow files in the role directory.

        Returns:
            List[str]: Sorted list of workflow file paths by modification time
                (most recent first).
        """
        # determine role name to use
        if role_name is None:
            role_name = (
                self._role_identifier or self._get_sanitized_role_name()
            )

        # sanitize role name for filesystem use
        clean_role = ContextUtility.sanitize_workflow_filename(role_name)
        if not clean_role:
            clean_role = "unknown_role"

        # get the base workflow directory from config
        camel_workdir = os.environ.get("CAMEL_WORKDIR")
        if camel_workdir:
            base_dir = os.path.join(
                camel_workdir, self.config.workflow_folder_name, clean_role
            )
        else:
            base_dir = os.path.join(
                self.config.workflow_folder_name, clean_role
            )

        # use provided pattern or default to all workflow files
        if pattern is None:
            pattern = f"*{self.config.workflow_filename_suffix}*.md"

        # search for workflow files in role directory
        search_path = str(Path(base_dir) / pattern)
        workflow_files = glob.glob(search_path)

        if not workflow_files:
            logger.info(
                f"No workflow files found in role directory: {base_dir}"
            )
            return []

        # sort by file modification time (most recent first)
        workflow_files.sort(key=os.path.getmtime, reverse=True)
        return workflow_files

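The role-based lookup above is a plain glob over workforce_workflows/{role_name}/ sorted by modification time. A standalone sketch of the same idea; the role "data_analyst" and the "_workflow" suffix are hypothetical stand-ins for the sanitized role name and self.config.workflow_filename_suffix:

import glob
import os
from pathlib import Path

# hypothetical layout: workforce_workflows/<role>/<title>_workflow.md
base_dir = Path("workforce_workflows") / "data_analyst"
search_path = str(base_dir / "*_workflow*.md")

workflow_files = glob.glob(search_path)
# newest files first, mirroring the sort used by the method above
workflow_files.sort(key=os.path.getmtime, reverse=True)
print(workflow_files)
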
    def _collect_workflow_contents(
        self, workflow_files: List[str]
    ) -> List[Dict[str, str]]:
        r"""Collect and load workflow file contents.

        Also populates the _loaded_workflow_paths mapping for use during
        workflow save operations (to support update mode).

        Args:
            workflow_files (List[str]): List of workflow file paths to load.

        Returns:
            List[Dict[str, str]]: List of dicts with 'filename' and
                'content' keys.
        """
        workflows_to_load = []
        for file_path in workflow_files:
            try:
                # extract file and session info from full path
                filename = os.path.basename(file_path).replace('.md', '')
                session_dir = os.path.dirname(file_path)
                session_id = os.path.basename(session_dir)

                # create context utility for the specific session
                temp_utility = ContextUtility.get_workforce_shared(session_id)

                # load the workflow content
                content = temp_utility.load_markdown_file(filename)

                if content and content.strip():
                    # filter out metadata section
                    content = temp_utility._filter_metadata_from_content(
                        content
                    )
                    workflows_to_load.append(
                        {'filename': filename, 'content': content}
                    )
                    # store filename -> full path mapping for update mode
                    self._loaded_workflow_paths[filename] = file_path
                    logger.info(f"Loaded workflow content: {filename}")
                else:
                    logger.warning(
                        f"Workflow file empty or not found: {filename}"
                    )

            except Exception as e:
                logger.warning(
                    f"Failed to load workflow file {file_path}: {e!s}"
                )
                continue

        return workflows_to_load

    def _format_workflow_list(
        self, workflows_to_load: List[Dict[str, str]]
    ) -> str:
        r"""Format a list of workflows into a readable string.

        This is a helper method that formats workflow content without
        adding outer headers/footers. Used by _format_workflows_for_context
        and _prepare_workflow_prompt.

        Args:
            workflows_to_load (List[Dict[str, str]]): List of workflow
                dicts with 'filename' and 'content' keys.

        Returns:
            str: Formatted workflow list string.
        """
        if not workflows_to_load:
            return ""

        formatted_content = ""
        for i, workflow_data in enumerate(workflows_to_load, 1):
            formatted_content += (
                f"\n\n{'=' * 60}\n"
                f"Workflow {i}: {workflow_data['filename']}\n"
                f"{'=' * 60}\n\n"
                f"{workflow_data['content']}"
            )

        return formatted_content

    def _format_workflows_for_context(
        self, workflows_to_load: List[Dict[str, str]]
    ) -> str:
        r"""Format workflows into a context string for the agent.

        Args:
            workflows_to_load (List[Dict[str, str]]): List of workflow
                dicts with 'filename' and 'content' keys.

        Returns:
            str: Formatted workflow context string with header and all
                workflows.
        """
        # create single header for all workflows
        if len(workflows_to_load) == 1:
            prefix_prompt = (
                "The following is the context from a previous "
                "session or workflow which might be useful for "
                "the current task. This information might help you "
                "understand the background, choose which tools to use, "
                "and plan your next steps."
            )
        else:
            prefix_prompt = (
                f"The following are {len(workflows_to_load)} previous "
                "workflows which might be useful for "
                "the current task. These workflows provide context about "
                "similar tasks, tools used, and approaches taken. "
                "Review them to understand patterns and make informed "
                "decisions for your current task."
            )

        # combine header, formatted workflows, and footer
        combined_content = f"\n\n--- Previous Workflows ---\n{prefix_prompt}"
        combined_content += self._format_workflow_list(workflows_to_load)
        combined_content += "\n\n--- End of Previous Workflows ---\n"

        return combined_content

    def _add_workflows_to_system_message(self, workflow_context: str) -> bool:
        r"""Add workflow context to agent's system message.

        Args:
            workflow_context (str): The formatted workflow context to add.

        Returns:
            bool: True if successful, False otherwise.
        """
        # check if agent has a system message
        if self.worker._original_system_message is None:
            logger.error(
                f"Agent {self.worker.agent_id} has no system message. "
                "Cannot append workflow memories."
            )
            return False

        # update the current system message
        current_system_message = self.worker._system_message
        if current_system_message is not None:
            new_sys_content = current_system_message.content + workflow_context
            self.worker._system_message = (
                current_system_message.create_new_instance(new_sys_content)
            )

        # replace the system message in memory
        self.worker.memory.clear()
        self.worker.update_memory(
            self.worker._system_message, OpenAIBackendRole.SYSTEM
        )

        return True

    def _load_workflow_files(
        self, workflow_files: List[str], max_workflows: int
    ) -> int:
        r"""Load workflow files and return count of successful loads.

        Loads all workflows together with a single header to avoid repetition.
        Clears and repopulates the _loaded_workflow_paths mapping.

        Args:
            workflow_files (List[str]): List of workflow file paths to load.
            max_workflows (int): Maximum number of workflows to load.

        Returns:
            int: Number of successfully loaded workflow files.
        """
        if not workflow_files:
            return 0

        # clear previous mapping and cached contents before loading
        self._loaded_workflow_paths.clear()
        self._loaded_workflow_contents.clear()

        # collect workflow contents from files (also populates the mapping)
        workflows_to_load = self._collect_workflow_contents(
            workflow_files[:max_workflows]
        )

        if not workflows_to_load:
            return 0

        # cache loaded contents for reuse in prompt preparation
        self._loaded_workflow_contents = workflows_to_load

        # format workflows into context string
        try:
            workflow_context = self._format_workflows_for_context(
                workflows_to_load
            )

            # add workflow context to agent's system message
            if not self._add_workflows_to_system_message(workflow_context):
                return 0

            char_count = len(workflow_context)
            logger.info(
                f"Appended {len(workflows_to_load)} workflow(s) to agent "
                f"{self.worker.agent_id} ({char_count} characters)"
            )

            return len(workflows_to_load)

        except Exception as e:
            logger.error(
                f"Failed to append workflows to system message: {e!s}"
            )
            return 0

    def _get_sanitized_role_name(self) -> str:
        r"""Get the sanitized role name for the worker.

        Returns:
            str: Sanitized role name suitable for use in filenames.
        """
        role_name = getattr(self.worker, 'role_name', 'assistant')
        return ContextUtility.sanitize_workflow_filename(role_name)

    def _generate_workflow_filename(self) -> str:
        r"""Generate a filename for the workflow based on worker role name.

        Uses the worker's explicit role_name when available.

        Returns:
            str: Sanitized filename without timestamp and without .md
                extension. Format: {role_name}{workflow_filename_suffix}
        """
        clean_name = self._get_sanitized_role_name()
        return f"{clean_name}{self.config.workflow_filename_suffix}"

    def _prepare_workflow_prompt(self) -> str:
        r"""Prepare the structured prompt for workflow summarization.

        Includes operation mode instructions if workflows were loaded,
        guiding the agent to decide whether to update an existing
        workflow or create a new one.

        Returns:
            str: Structured prompt for workflow summary.
        """
        workflow_prompt = WorkflowSummary.get_instruction_prompt()

        # add operation mode instructions based on loaded workflows
        if self._loaded_workflow_paths:
            loaded_filenames = list(self._loaded_workflow_paths.keys())

            workflow_prompt += (
                "\n\nOPERATION MODE SELECTION:\n"
                "You have previously loaded workflow(s). Review them below "
                "and decide whether to update one or create a new workflow."
                "\n\nDecision rules:\n"
                "- If this task is a continuation, improvement, or refinement "
                "of a loaded workflow → set operation_mode='update' and "
                "target_workflow_filename to that workflow's exact filename\n"
                "- If this is a distinctly different task with different "
                "goals/tools → set operation_mode='create'\n\n"
                "When choosing 'update', select the single most relevant "
                "workflow filename. The updated workflow should incorporate "
                "learnings from this session.\n\n"
                f"Available workflow filenames: {loaded_filenames}"
            )

            # include formatted workflow content for reference
            if self._loaded_workflow_contents:
                workflow_prompt += "\n\n--- Loaded Workflows Reference ---"
                workflow_prompt += self._format_workflow_list(
                    self._loaded_workflow_contents
                )
                workflow_prompt += "\n\n--- End of Loaded Workflows ---"
        else:
            workflow_prompt += (
                "\n\nOPERATION MODE:\n"
                "No workflows were loaded. Set operation_mode='create'."
            )

        return StructuredOutputHandler.generate_structured_prompt(
            base_prompt=workflow_prompt, schema=WorkflowSummary
        )