camel-ai 0.2.72a4__tar.gz → 0.2.72a6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/PKG-INFO +3 -3
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/__init__.py +1 -1
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/chat_agent.py +126 -154
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/gemini_model.py +30 -2
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/openai_model.py +29 -15
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/__init__.py +2 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/browser_session.py +124 -123
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +19 -19
- camel_ai-0.2.72a6/camel/toolkits/origene_mcp_toolkit.py +95 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/terminal_toolkit.py +36 -34
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/types/enums.py +6 -3
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/types/unified_model_type.py +16 -4
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/mcp_client.py +8 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/pyproject.toml +3 -3
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/.gitignore +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/LICENSE +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/README.md +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/_types.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/_utils.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/critic_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/deductive_reasoner_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/embodied_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/knowledge_graph_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/mcp_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/multi_hop_generator_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/programmed_agent_instruction.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/repo_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/role_assignment_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/search_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/task_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/apibank.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/apibench.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/browsecomp.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/gaia.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/mock_website/README.md +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/mock_website/mock_web.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/mock_website/requirements.txt +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/mock_website/shopping_mall/app.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/mock_website/task.json +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/nexus.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/benchmarks/ragbench.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/discord/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/discord/discord_app.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/discord/discord_installation.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/discord/discord_store.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/slack/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/slack/models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/slack/slack_app.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/bots/telegram_bot.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/aiml_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/base_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/bedrock_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/cohere_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/crynux_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/deepseek_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/gemini_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/groq_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/internlm_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/litellm_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/lmstudio_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/mistral_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/modelscope_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/moonshot_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/netmind_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/novita_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/nvidia_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/ollama_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/openai_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/openrouter_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/ppio_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/qianfan_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/qwen_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/reka_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/samba_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/sglang_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/siliconflow_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/togetherai_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/vllm_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/watsonx_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/yi_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/configs/zhipuai_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/data_collectors/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/data_collectors/alpaca_collector.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/data_collectors/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/data_collectors/sharegpt_collector.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/cot_datagen.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/evol_instruct/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/evol_instruct/evol_instruct.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/evol_instruct/scorer.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/evol_instruct/templates.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_improving_cot.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_instruct/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_instruct/filter/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_instruct/filter/filter_function.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_instruct/filter/filter_registry.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_instruct/filter/instruction_filter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_instruct/self_instruct.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/self_instruct/templates.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/source2synth/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/source2synth/data_processor.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/source2synth/models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datagen/source2synth/user_data_processor_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datahubs/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datahubs/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datahubs/huggingface.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datahubs/models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datasets/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datasets/base_generator.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datasets/few_shot_generator.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datasets/models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datasets/self_instruct_generator.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/datasets/static_dataset.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/azure_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/gemini_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/jina_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/mistral_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/openai_compatible_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/openai_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/together_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/environments/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/environments/models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/environments/multi_step.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/environments/rlcards_env.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/environments/single_step.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/environments/tic_tac_toe.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/extractors/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/extractors/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/extractors/python_strategies.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/generators.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/human.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/docker/Dockerfile +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/docker_interpreter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/e2b_interpreter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/ipython_interpreter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/apify_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/base_loader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/chunkr_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/crawl4ai_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/firecrawl_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/jina_url_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/markitdown.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/mineru_extractor.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/mistral_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/pandas_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/scrapegraph_reader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/logger.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/agent_memories.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/memories/records.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/conversion/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/conversion/alpaca.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/conversion/conversation_models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/conversion/sharegpt/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/conversion/sharegpt/function_call_formatter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/conversion/sharegpt/hermes/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/messages/func_message.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/_utils.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/aiml_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/anthropic_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/aws_bedrock_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/azure_openai_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/base_audio_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/base_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/cohere_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/crynux_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/deepseek_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/fish_audio_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/groq_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/internlm_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/litellm_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/lmstudio_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/mistral_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/model_factory.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/model_manager.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/modelscope_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/moonshot_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/nemotron_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/netmind_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/novita_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/nvidia_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/ollama_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/openai_compatible_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/openrouter_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/ppio_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/qianfan_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/qwen_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/reka_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/reward/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/reward/base_reward_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/reward/evaluator.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/reward/nemotron_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/reward/skywork_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/samba_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/sglang_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/siliconflow_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/stub_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/togetherai_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/vllm_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/volcano_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/watsonx_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/yi_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/models/zhipuai_model.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/personas/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/personas/persona.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/personas/persona_hub.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/code.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/image_craft.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/multi_condition_image_craft.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/persona_hub.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/task_prompt_template.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/translation.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/prompts/video_description_prompt.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/py.typed +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/responses/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/retrievers/auto_retriever.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/retrievers/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/retrievers/hybrid_retrival.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/retrievers/vector_retriever.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/api.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/configs.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/daytona_runtime.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/docker_runtime.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/llm_guard_runtime.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/remote_http_runtime.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/ubuntu_docker_runtime.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/utils/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/utils/function_risk_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/runtimes/utils/ignore_risk_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/schemas/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/schemas/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/schemas/openai_converter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/schemas/outlines_converter.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/services/agent_openapi_server.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/babyagi_playing.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/role_playing.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/prompts.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/role_playing_worker.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/single_agent_worker.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/structured_output_handler.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/task_channel.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/utils.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/worker.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/workforce.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/societies/workforce/workforce_logger.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/graph_storages/graph_element.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/graph_storages/nebula_graph.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/graph_storages/neo4j_graph.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/key_value_storages/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/key_value_storages/mem0_cloud.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/key_value_storages/redis.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/object_storages/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/object_storages/amazon_s3.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/object_storages/azure_blob.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/object_storages/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/object_storages/google_cloud.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/chroma.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/faiss.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/milvus.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/oceanbase.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/pgvector.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/qdrant.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/tidb.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/storages/vectordb_storages/weaviate.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/tasks/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/tasks/task.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/tasks/task_prompt.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/terminators/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/aci_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/arxiv_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/ask_news_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/async_browser_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/audio_analysis_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/bohrium_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/browser_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/browser_toolkit_commons.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/code_execution.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/craw4ai_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/dappier_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/data_commons_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/edgeone_pages_mcp_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/excel_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/file_write_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/function_tool.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/github_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/google_calendar_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/google_drive_mcp_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/google_maps_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/google_scholar_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/human_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/actions.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/agent.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/config_loader.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/snapshot.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/stealth_script.js +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/hybrid_browser_toolkit/unified_analyzer.js +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/image_analysis_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/jina_reranker_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/klavis_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/linkedin_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/markitdown_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/math_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/mcp_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/memory_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/meshy_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/message_agent_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/mineru_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/networkx_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/note_taking_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/notion_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/security_config.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/open_api_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/openai_agent_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/openai_image_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/openbb_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/page_script.js +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/playwright_mcp_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/pptx_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/pubmed_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/pulse_mcp_search_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/pyautogui_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/reddit_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/retrieval_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/search_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/searxng_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/semantic_scholar_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/slack_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/stripe_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/sympy_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/task_planning_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/thinking_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/twitter_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/video_analysis_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/video_download_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/weather_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/whatsapp_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/wolfram_alpha_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/toolkits/zapier_toolkit.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/types/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/types/agents/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/types/agents/tool_calling_record.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/types/mcp_registries.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/types/openai_types.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/async_func.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/chunker/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/chunker/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/chunker/code_chunker.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/chunker/uio_chunker.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/commons.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/constants.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/deduplication.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/filename.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/langfuse.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/mcp.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/message_summarizer.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/response_format.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/token_counting.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/utils/tool_result.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/verifiers/__init__.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/verifiers/base.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/verifiers/math_verifier.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/verifiers/models.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/verifiers/physics_verifier.py +0 -0
- {camel_ai-0.2.72a4 → camel_ai-0.2.72a6}/camel/verifiers/python_verifier.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.72a4
+Version: 0.2.72a6
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel
@@ -127,7 +127,7 @@ Requires-Dist: sympy<2,>=1.13.3; extra == 'all'
 Requires-Dist: tabulate>=0.9.0; extra == 'all'
 Requires-Dist: tavily-python<0.6,>=0.5.0; extra == 'all'
 Requires-Dist: textblob<0.18,>=0.17.1; extra == 'all'
-Requires-Dist: traceroot==0.0.
+Requires-Dist: traceroot==0.0.4a2; extra == 'all'
 Requires-Dist: transformers<5,>=4; extra == 'all'
 Requires-Dist: tree-sitter-python<0.24,>=0.23.6; extra == 'all'
 Requires-Dist: tree-sitter<0.24,>=0.23.2; extra == 'all'
@@ -191,7 +191,7 @@ Requires-Dist: ipykernel<7,>=6.0.0; extra == 'dev-tools'
 Requires-Dist: jupyter-client<9,>=8.6.2; extra == 'dev-tools'
 Requires-Dist: langfuse>=2.60.5; extra == 'dev-tools'
 Requires-Dist: mcp>=1.3.0; extra == 'dev-tools'
-Requires-Dist: traceroot==0.0.
+Requires-Dist: traceroot==0.0.4a2; extra == 'dev-tools'
 Requires-Dist: tree-sitter-python<0.24,>=0.23.6; extra == 'dev-tools'
 Requires-Dist: tree-sitter<0.24,>=0.23.2; extra == 'dev-tools'
 Requires-Dist: typer>=0.15.2; extra == 'dev-tools'
@@ -1264,7 +1264,11 @@ class ChatAgent(BaseAgent):
             openai_message: OpenAIMessage = {"role": "user", "content": prompt}
             # Explicitly set the tools to empty list to avoid calling tools
             response = self._get_model_response(
-                [openai_message],
+                openai_messages=[openai_message],
+                num_tokens=0,
+                response_format=response_format,
+                tool_schemas=[],
+                prev_num_openai_messages=0,
             )
             message.content = response.output_messages[0].content
             if not self._try_format_message(message, response_format):
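The hunk above switches this internal call from a single positional argument to explicit keyword arguments. A minimal standalone sketch (illustrative only; the function below is a hypothetical stand-in, not code from the package) of why keyword arguments keep such call sites correct as optional parameters are added to the signature:

```python
from typing import Any, Dict, List, Optional


def get_model_response(
    openai_messages: List[Dict[str, Any]],
    num_tokens: int,
    current_iteration: int = 0,
    response_format: Optional[type] = None,
    tool_schemas: Optional[List[Dict[str, Any]]] = None,
    prev_num_openai_messages: int = 0,
) -> str:
    # Stand-in for the real model call: just report what it received.
    return (
        f"{len(openai_messages)} message(s), "
        f"iteration {current_iteration}, "
        f"{len(tool_schemas or [])} tool schema(s)"
    )


# Calling with keywords, as the new code does, stays correct even when new
# parameters are inserted between existing ones; a positional call would
# silently bind values to the wrong parameters.
print(
    get_model_response(
        openai_messages=[{"role": "user", "content": "hi"}],
        num_tokens=0,
        response_format=None,
        tool_schemas=[],
        prev_num_openai_messages=0,
    )
)
```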
@@ -1292,7 +1296,11 @@ class ChatAgent(BaseAgent):
             prompt = SIMPLE_FORMAT_PROMPT.format(content=message.content)
             openai_message: OpenAIMessage = {"role": "user", "content": prompt}
             response = await self._aget_model_response(
-                [openai_message],
+                openai_messages=[openai_message],
+                num_tokens=0,
+                response_format=response_format,
+                tool_schemas=[],
+                prev_num_openai_messages=0,
             )
             message.content = response.output_messages[0].content
             self._try_format_message(message, response_format)
@@ -1374,7 +1382,8 @@ class ChatAgent(BaseAgent):

         # Initialize token usage tracker
         step_token_usage = self._create_token_usage_tracker()
-        iteration_count = 0
+        iteration_count: int = 0
+        prev_num_openai_messages: int = 0

         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
@@ -1391,10 +1400,13 @@ class ChatAgent(BaseAgent):
             # Get response from model backend
             response = self._get_model_response(
                 openai_messages,
-
-
-
+                num_tokens=num_tokens,
+                current_iteration=iteration_count,
+                response_format=response_format,
+                tool_schemas=self._get_full_tool_schemas(),
+                prev_num_openai_messages=prev_num_openai_messages,
             )
+            prev_num_openai_messages = len(openai_messages)
             iteration_count += 1

             # Accumulate API token usage
@@ -1405,6 +1417,9 @@ class ChatAgent(BaseAgent):
             # Terminate Agent if stop_event is set
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
+                logger.info(
+                    f"Termination triggered at iteration " f"{iteration_count}"
+                )
                 return self._step_terminate(
                     accumulated_context_tokens,
                     tool_call_records,
@@ -1439,6 +1454,7 @@ class ChatAgent(BaseAgent):
                 self.max_iteration is not None
                 and iteration_count >= self.max_iteration
             ):
+                logger.info(f"Max iteration reached: {iteration_count}")
                 break

             # If we're still here, continue the loop
@@ -1564,7 +1580,8 @@ class ChatAgent(BaseAgent):

         # Initialize token usage tracker
         step_token_usage = self._create_token_usage_tracker()
-        iteration_count = 0
+        iteration_count: int = 0
+        prev_num_openai_messages: int = 0
         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
                 await self.pause_event.wait()
@@ -1578,10 +1595,13 @@ class ChatAgent(BaseAgent):

             response = await self._aget_model_response(
                 openai_messages,
-
-
-
+                num_tokens=num_tokens,
+                current_iteration=iteration_count,
+                response_format=response_format,
+                tool_schemas=self._get_full_tool_schemas(),
+                prev_num_openai_messages=prev_num_openai_messages,
             )
+            prev_num_openai_messages = len(openai_messages)
             iteration_count += 1

             # Accumulate API token usage
@@ -1592,6 +1612,9 @@ class ChatAgent(BaseAgent):
             # Terminate Agent if stop_event is set
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
+                logger.info(
+                    f"Termination triggered at iteration " f"{iteration_count}"
+                )
                 return self._step_terminate(
                     accumulated_context_tokens,
                     tool_call_records,
@@ -1900,14 +1923,32 @@ class ChatAgent(BaseAgent):
                 stripped_messages.append(msg)
         return stripped_messages

+    @observe()
     def _get_model_response(
         self,
         openai_messages: List[OpenAIMessage],
         num_tokens: int,
+        current_iteration: int = 0,
         response_format: Optional[Type[BaseModel]] = None,
         tool_schemas: Optional[List[Dict[str, Any]]] = None,
+        prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent step model response.
+        r"""Internal function for agent step model response.
+        Args:
+            openai_messages (List[OpenAIMessage]): The OpenAI
+                messages to process.
+            num_tokens (int): The number of tokens in the context.
+            current_iteration (int): The current iteration of the step.
+            response_format (Optional[Type[BaseModel]]): The response
+                format to use.
+            tool_schemas (Optional[List[Dict[str, Any]]]): The tool
+                schemas to use.
+            prev_num_openai_messages (int): The number of openai messages
+                logged in the previous iteration.
+
+        Returns:
+            ModelResponse: The model response.
+        """

         response = None
         try:
@@ -1920,7 +1961,8 @@ class ChatAgent(BaseAgent):
                 openai_messages
             ):
                 logger.warning(
-                    "Model appears to not support vision.
+                    "Model appears to not support vision."
+                    "Retrying without images."
                 )
                 try:
                     stripped_messages = self._strip_images_from_messages(
@@ -1937,6 +1979,7 @@ class ChatAgent(BaseAgent):
         if not response:
             logger.error(
                 f"An error occurred while running model "
+                f"iteration {current_iteration}, "
                 f"{self.model_backend.model_type}, "
                 f"index: {self.model_backend.current_model_index}",
                 exc_info=exc,
@@ -1955,11 +1998,12 @@ class ChatAgent(BaseAgent):
         )

         sanitized_messages = self._sanitize_messages_for_logging(
-            openai_messages
+            openai_messages, prev_num_openai_messages
         )
         logger.info(
             f"Model {self.model_backend.model_type}, "
             f"index {self.model_backend.current_model_index}, "
+            f"iteration {current_iteration}, "
             f"processed these messages: {sanitized_messages}"
         )
         if not isinstance(response, ChatCompletion):
@@ -1973,10 +2017,27 @@ class ChatAgent(BaseAgent):
         self,
         openai_messages: List[OpenAIMessage],
         num_tokens: int,
+        current_iteration: int = 0,
         response_format: Optional[Type[BaseModel]] = None,
         tool_schemas: Optional[List[Dict[str, Any]]] = None,
+        prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent step model response.
+        r"""Internal function for agent async step model response.
+        Args:
+            openai_messages (List[OpenAIMessage]): The OpenAI messages
+                to process.
+            num_tokens (int): The number of tokens in the context.
+            current_iteration (int): The current iteration of the step.
+            response_format (Optional[Type[BaseModel]]): The response
+                format to use.
+            tool_schemas (Optional[List[Dict[str, Any]]]): The tool schemas
+                to use.
+            prev_num_openai_messages (int): The number of openai messages
+                logged in the previous iteration.
+
+        Returns:
+            ModelResponse: The model response.
+        """

         response = None
         try:
@@ -2024,11 +2085,12 @@ class ChatAgent(BaseAgent):
         )

         sanitized_messages = self._sanitize_messages_for_logging(
-            openai_messages
+            openai_messages, prev_num_openai_messages
         )
         logger.info(
             f"Model {self.model_backend.model_type}, "
             f"index {self.model_backend.current_model_index}, "
+            f"iteration {current_iteration}, "
             f"processed these messages: {sanitized_messages}"
         )
         if not isinstance(response, ChatCompletion):
@@ -2038,12 +2100,16 @@ class ChatAgent(BaseAgent):
             )
         return self._handle_batch_response(response)

-    def _sanitize_messages_for_logging(
+    def _sanitize_messages_for_logging(
+        self, messages, prev_num_openai_messages: int
+    ):
         r"""Sanitize OpenAI messages for logging by replacing base64 image
         data with a simple message and a link to view the image.

         Args:
             messages (List[OpenAIMessage]): The OpenAI messages to sanitize.
+            prev_num_openai_messages (int): The number of openai messages
+                logged in the previous iteration.

         Returns:
             List[OpenAIMessage]: The sanitized OpenAI messages.
@@ -2056,7 +2122,7 @@ class ChatAgent(BaseAgent):
         # Create a copy of messages for logging to avoid modifying the
         # original messages
         sanitized_messages = []
-        for msg in messages:
+        for msg in messages[prev_num_openai_messages:]:
             if isinstance(msg, dict):
                 sanitized_msg = msg.copy()
                 # Check if content is a list (multimodal content with images)
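The slice introduced above makes log sanitization incremental: only messages appended since the previous iteration are processed and logged again. A small self-contained sketch of the same idea (illustrative only; names are hypothetical, not the package's API):

```python
from typing import Any, Dict, List


def sanitize_for_logging(
    messages: List[Dict[str, Any]], prev_num_messages: int
) -> List[Dict[str, Any]]:
    # Only look at messages added since the last logging pass; earlier
    # entries were already logged in a previous iteration.
    sanitized: List[Dict[str, Any]] = []
    for msg in messages[prev_num_messages:]:
        sanitized.append({**msg, "content": str(msg.get("content"))[:80]})
    return sanitized


history: List[Dict[str, Any]] = [{"role": "user", "content": "first turn"}]
logged_so_far = 0

# Iteration 1: everything is new, so everything is logged.
print(sanitize_for_logging(history, logged_so_far))
logged_so_far = len(history)

# Iteration 2: only the newly appended messages are sanitized and logged.
history.append({"role": "assistant", "content": "tool call result ..."})
print(sanitize_for_logging(history, logged_so_far))
```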
@@ -2339,6 +2405,7 @@ class ChatAgent(BaseAgent):
             info=info,
         )

+    @observe()
     def _execute_tool(
         self,
         tool_call_request: ToolCallRequest,
@@ -2373,7 +2440,7 @@ class ChatAgent(BaseAgent):
             error_msg = f"Error executing tool '{func_name}': {e!s}"
             result = f"Tool execution failed: {error_msg}"
             mask_flag = False
-
+            logger.warning(f"{error_msg} with result: {result}")

         # Check if result is a ToolResult with images
         images_to_attach = None
@@ -2384,6 +2451,7 @@ class ChatAgent(BaseAgent):
         tool_record = self._record_tool_calling(
             func_name, args, result, tool_call_id, mask_output=mask_flag
         )
+        logger.info(f"Tool calling record:\n{tool_record}")

         # Store images for later attachment to next user message
         if images_to_attach:
@@ -2594,6 +2662,9 @@ class ChatAgent(BaseAgent):
         while True:
             # Check termination condition
             if self.stop_event and self.stop_event.is_set():
+                logger.info(
+                    f"Termination triggered at iteration " f"{iteration_count}"
+                )
                 yield self._step_terminate(
                     num_tokens, tool_call_records, "termination_triggered"
                 )
@@ -2825,21 +2896,13 @@ class ChatAgent(BaseAgent):
                     status_response
                 ) in self._execute_tools_sync_with_status_accumulator(
                     accumulated_tool_calls,
-                    content_accumulator,
-                    step_token_usage,
                     tool_call_records,
                 ):
                     yield status_response

-                #
+                # Log sending status instead of adding to content
                 if tool_call_records:
-
-                        content_accumulator,
-                        "\n------\n\nSending back result to model\n\n",
-                        "tool_sending",
-                        step_token_usage,
-                    )
-                    yield sending_status
+                    logger.info("Sending back result to model")

                 # Record final message only if we have content AND no tool
                 # calls. If there are tool calls, _record_tool_calling
@@ -2937,15 +3000,13 @@ class ChatAgent(BaseAgent):
     def _execute_tools_sync_with_status_accumulator(
         self,
         accumulated_tool_calls: Dict[str, Any],
-        content_accumulator: StreamContentAccumulator,
-        step_token_usage: Dict[str, int],
         tool_call_records: List[ToolCallingRecord],
     ) -> Generator[ChatAgentResponse, None, None]:
         r"""Execute multiple tools synchronously with
         proper content accumulation, using threads+queue for
         non-blocking status streaming."""

-        def tool_worker(
+        def tool_worker(result_queue, tool_call_data):
             try:
                 tool_call_record = self._execute_tool_from_stream_data(
                     tool_call_data
@@ -2981,36 +3042,22 @@ class ChatAgent(BaseAgent):
             )
             thread.start()

-
-
-                f"with arguments
-            )
-            status_status = self._create_tool_status_response_with_accumulator(
-                content_accumulator,
-                status_message,
-                "tool_calling",
-                step_token_usage,
+            # Log debug info instead of adding to content
+            logger.info(
+                f"Calling function: {function_name} with arguments: {args}"
             )
-
+
             # wait for tool thread to finish with optional timeout
             thread.join(self.tool_execution_timeout)

             # If timeout occurred, mark as error and continue
             if thread.is_alive():
-
-
-                    f"{
-
-                timeout_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        timeout_msg,
-                        "tool_timeout",
-                        step_token_usage,
-                    )
+                # Log timeout info instead of adding to content
+                logger.warning(
+                    f"Function '{function_name}' timed out after "
+                    f"{self.tool_execution_timeout} seconds"
                 )
-
-                logger.error(timeout_msg.strip())
+
                 # Detach thread (it may still finish later). Skip recording.
                 continue

@@ -3020,23 +3067,17 @@ class ChatAgent(BaseAgent):
                 tool_call_records.append(tool_call_record)
                 raw_result = tool_call_record.result
                 result_str = str(raw_result)
-
-
-                )
-                output_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        status_message,
-                        "tool_output",
-                        step_token_usage,
-                        [tool_call_record],
-                    )
-                )
-                yield output_status
+
+                # Log debug info instead of adding to content
+                logger.info(f"Function output: {result_str}")
             else:
                 # Error already logged
                 continue

+        # Ensure this function remains a generator (required by type signature)
+        return
+        yield  # This line is never reached but makes this a generator function
+
     def _execute_tool_from_stream_data(
         self, tool_call_data: Dict[str, Any]
     ) -> Optional[ToolCallingRecord]:
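The `return` followed by an unreachable `yield` added above is a standard Python idiom: a function is a generator only if a `yield` appears somewhere in its body, so the dead `yield` keeps the method's declared `Generator[...]` return type valid now that status updates are logged instead of yielded. A minimal sketch of the idiom (illustrative, not package code):

```python
from typing import Generator, List


def run_tools_quietly(names: List[str]) -> Generator[str, None, None]:
    # Do the work with plain logging; no status strings are yielded.
    for name in names:
        print(f"executed {name}")
    # Keep this function a generator so existing callers can still iterate.
    return
    yield  # Never reached, but its presence makes this a generator function.


# Calling the function still returns a generator object that simply yields
# nothing, so existing `for status in ...:` call sites keep working unchanged.
for status in run_tools_quietly(["search", "calculator"]):
    print(status)
print("done")
```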
@@ -3252,6 +3293,9 @@ class ChatAgent(BaseAgent):
         while True:
             # Check termination condition
             if self.stop_event and self.stop_event.is_set():
+                logger.info(
+                    f"Termination triggered at iteration " f"{iteration_count}"
+                )
                 yield self._step_terminate(
                     num_tokens, tool_call_records, "termination_triggered"
                 )
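Note (illustrative): the added log line sits on the existing stop_event check. A minimal sketch of that cooperative-termination pattern, assuming a threading.Event and a hypothetical run_until_stopped loop:

import logging
import threading

logger = logging.getLogger(__name__)


def run_until_stopped(stop_event, max_iterations=100):
    # Cooperative termination: check the event once per iteration and
    # log the iteration at which the stop was observed.
    iteration_count = 0
    while iteration_count < max_iterations:
        if stop_event is not None and stop_event.is_set():
            logger.info(f"Termination triggered at iteration {iteration_count}")
            return
        iteration_count += 1


event = threading.Event()
event.set()
run_until_stopped(event)  # logs "Termination triggered at iteration 0"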
@@ -3540,15 +3584,9 @@ class ChatAgent(BaseAgent):
         ):
             yield status_response

-        #
+        # Log sending status instead of adding to content
         if tool_call_records:
-            sending_status = self._create_tool_status_response_with_accumulator(
-                content_accumulator,
-                "\n------\n\nSending back result to model\n\n",
-                "tool_sending",
-                step_token_usage,
-            )
-            yield sending_status
+            logger.info("Sending back result to model")

         # Record final message only if we have content AND no tool
         # calls. If there are tool calls, _record_tool_calling
@@ -3595,22 +3633,11 @@ class ChatAgent(BaseAgent):
                 except json.JSONDecodeError:
                     args = tool_call_data['function']['arguments']

-                status_message = (
-                    f"\nCalling function: {function_name} "
-                    f"with arguments: {args}\n"
+                # Log debug info instead of adding to content
+                logger.info(
+                    f"Calling function: {function_name} with arguments: {args}"
                 )

-                # Immediately yield "Calling function" status
-                calling_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        status_message,
-                        "tool_calling",
-                        step_token_usage,
-                    )
-                )
-                yield calling_status
-
                 # Start tool execution asynchronously (non-blocking)
                 if self.tool_execution_timeout is not None:
                     task = asyncio.create_task(
@@ -3642,80 +3669,25 @@ class ChatAgent(BaseAgent):
                 # Create output status message
                 raw_result = tool_call_record.result
                 result_str = str(raw_result)
-                status_message = (
-                    f"\nFunction output: {result_str}\n---------\n"
-                )

-                #
-
-                output_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        status_message,
-                        "tool_output",
-                        step_token_usage,
-                        [tool_call_record],
-                    )
-                )
-                yield output_status
+                # Log debug info instead of adding to content
+                logger.info(f"Function output: {result_str}")

             except Exception as e:
                 if isinstance(e, asyncio.TimeoutError):
-
-                    timeout_msg = (
-                        f"Function timed out after "
-                        f"{self.tool_execution_timeout} seconds"
-                    )
-                    timeout_status = (
-                        self._create_tool_status_response_with_accumulator(
-                            content_accumulator,
-                            timeout_msg,
-                            "tool_timeout",
-                            step_token_usage,
-                        )
+                    # Log timeout info instead of adding to content
+                    logger.warning(
+                        f"Function timed out after "
+                        f"{self.tool_execution_timeout} seconds"
                     )
-                    yield timeout_status
-                    logger.error("Async tool execution timeout")
                 else:
                     logger.error(f"Error in async tool execution: {e}")
                 continue

-    def _create_tool_status_response_with_accumulator(
-        self,
-        accumulator: StreamContentAccumulator,
-        status_message: str,
-        status_type: str,
-        step_token_usage: Dict[str, int],
-        tool_calls: Optional[List[ToolCallingRecord]] = None,
-    ) -> ChatAgentResponse:
-        r"""Create a tool status response using content accumulator."""
-
-        # Add this status message to accumulator and get full content
-        accumulator.add_tool_status(status_message)
-        full_content = accumulator.get_full_content()
-
-        message = BaseMessage(
-            role_name=self.role_name,
-            role_type=self.role_type,
-            meta_dict={},
-            content=full_content,
-        )
-
-        return ChatAgentResponse(
-            msgs=[message],
-            terminated=False,
-            info={
-                "id": "",
-                "usage": step_token_usage.copy(),
-                "finish_reasons": [status_type],
-                "num_tokens": self._get_token_count(full_content),
-                "tool_calls": tool_calls or [],
-                "external_tool_requests": None,
-                "streaming": True,
-                "tool_status": status_type,
-                "partial": True,
-            },
-        )
+        # Ensure this function remains an async generator
+        return
+        # This line is never reached but makes this an async generator function
+        yield

     def _create_streaming_response_with_accumulator(
         self,
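Note (illustrative): the async path above logs timeouts instead of yielding a timeout status; the hunk itself schedules the call with asyncio.create_task and catches asyncio.TimeoutError. A minimal sketch of bounding an async tool call, assuming asyncio.wait_for supplies the timeout (call_tool_with_timeout and slow_tool are hypothetical):

import asyncio
import logging

logger = logging.getLogger(__name__)


async def call_tool_with_timeout(make_call, timeout):
    # Bound an async tool call by `timeout` seconds; on expiry, log a
    # warning instead of streaming a timeout status back to the caller.
    try:
        return await asyncio.wait_for(make_call(), timeout)
    except asyncio.TimeoutError:
        logger.warning(f"Function timed out after {timeout} seconds")
        return None


async def _demo():
    async def slow_tool():
        await asyncio.sleep(1)
        return "done"

    return await call_tool_with_timeout(slow_tool, timeout=0.1)


print(asyncio.run(_demo()))  # -> None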
@@ -243,7 +243,7 @@ class GeminiModel(OpenAICompatibleModel):
             function_dict = tool.get('function', {})
             function_dict.pop("strict", None)

-            # Process parameters to remove anyOf
+            # Process parameters to remove anyOf and handle enum/format
             if 'parameters' in function_dict:
                 params = function_dict['parameters']
                 if 'properties' in params:
@@ -260,6 +260,20 @@ class GeminiModel(OpenAICompatibleModel):
                                 'description'
                             ] = prop_value['description']

+                        # Handle enum and format restrictions for Gemini
+                        # API enum: only allowed for string type
+                        if prop_value.get('type') != 'string':
+                            prop_value.pop('enum', None)
+
+                        # format: only allowed for string, integer, and
+                        # number types
+                        if prop_value.get('type') not in [
+                            'string',
+                            'integer',
+                            'number',
+                        ]:
+                            prop_value.pop('format', None)
+
             request_config["tools"] = tools

         return self._client.chat.completions.create(
@@ -283,7 +297,7 @@ class GeminiModel(OpenAICompatibleModel):
             function_dict = tool.get('function', {})
             function_dict.pop("strict", None)

-            # Process parameters to remove anyOf
+            # Process parameters to remove anyOf and handle enum/format
             if 'parameters' in function_dict:
                 params = function_dict['parameters']
                 if 'properties' in params:
@@ -300,6 +314,20 @@ class GeminiModel(OpenAICompatibleModel):
                                 'description'
                             ] = prop_value['description']

+                        # Handle enum and format restrictions for Gemini
+                        # API enum: only allowed for string type
+                        if prop_value.get('type') != 'string':
+                            prop_value.pop('enum', None)
+
+                        # format: only allowed for string, integer, and
+                        # number types
+                        if prop_value.get('type') not in [
+                            'string',
+                            'integer',
+                            'number',
+                        ]:
+                            prop_value.pop('format', None)
+
             request_config["tools"] = tools

         return await self._async_client.chat.completions.create(
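Note (illustrative): both GeminiModel hunks strip 'enum' from non-string properties and 'format' from properties that are not string/integer/number before sending tool schemas. A standalone sketch of that filtering rule with a hypothetical helper:

from typing import Any, Dict


def sanitize_gemini_property(prop_value: Dict[str, Any]) -> Dict[str, Any]:
    # Gemini's function-calling schema only accepts `enum` on string
    # properties and `format` on string/integer/number properties,
    # so strip them everywhere else.
    if prop_value.get('type') != 'string':
        prop_value.pop('enum', None)
    if prop_value.get('type') not in ['string', 'integer', 'number']:
        prop_value.pop('format', None)
    return prop_value


print(sanitize_gemini_property({'type': 'boolean', 'enum': [True], 'format': 'x'}))
# -> {'type': 'boolean'}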