camel-ai 0.2.34__tar.gz → 0.2.35__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- {camel_ai-0.2.34 → camel_ai-0.2.35}/PKG-INFO +1 -1
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/__init__.py +1 -1
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/_types.py +1 -1
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/_utils.py +4 -4
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/chat_agent.py +174 -29
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/openai_config.py +20 -16
- camel_ai-0.2.35/camel/datasets/base_generator.py +279 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/agent_memories.py +47 -1
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/base.py +23 -1
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/records.py +5 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/stub_model.py +25 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/retrievers/vector_retriever.py +12 -7
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/key_value_storages/__init__.py +2 -1
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/key_value_storages/json.py +3 -7
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/vectordb_storages/base.py +5 -1
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/__init__.py +2 -1
- camel_ai-0.2.35/camel/toolkits/memory_toolkit.py +129 -0
- camel_ai-0.2.35/camel/utils/chunker/__init__.py +22 -0
- camel_ai-0.2.35/camel/utils/chunker/base.py +24 -0
- camel_ai-0.2.35/camel/utils/chunker/code_chunker.py +193 -0
- camel_ai-0.2.35/camel/utils/chunker/uio_chunker.py +66 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/utils/token_counting.py +133 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/pyproject.toml +2 -2
- camel_ai-0.2.34/camel/datasets/base_generator.py +0 -118
- {camel_ai-0.2.34 → camel_ai-0.2.35}/.gitignore +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/LICENSE +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/README.md +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/critic_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/deductive_reasoner_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/embodied_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/knowledge_graph_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/multi_hop_generator_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/programmed_agent_instruction.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/role_assignment_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/search_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/task_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/benchmarks/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/benchmarks/apibank.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/benchmarks/apibench.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/benchmarks/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/benchmarks/gaia.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/benchmarks/nexus.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/benchmarks/ragbench.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/discord/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/discord/discord_app.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/discord/discord_installation.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/discord/discord_store.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/slack/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/slack/models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/slack/slack_app.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/bots/telegram_bot.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/aiml_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/base_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/cohere_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/deepseek_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/gemini_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/groq_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/internlm_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/litellm_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/mistral_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/moonshot_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/nvidia_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/ollama_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/qwen_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/reka_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/samba_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/sglang_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/siliconflow_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/togetherai_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/vllm_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/yi_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/zhipuai_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/data_collector/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/data_collector/alpaca_collector.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/data_collector/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/data_collector/sharegpt_collector.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/cot_datagen.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_improving_cot.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_instruct/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_instruct/filter/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_instruct/filter/filter_function.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_instruct/filter/filter_registry.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_instruct/filter/instruction_filter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_instruct/self_instruct.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/self_instruct/templates.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/source2synth/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/source2synth/data_processor.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/source2synth/models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datagen/source2synth/user_data_processor_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datahubs/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datahubs/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datahubs/huggingface.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datahubs/models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datasets/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datasets/few_shot_generator.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datasets/models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/datasets/static_dataset.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/jina_embedding.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/mistral_embedding.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/openai_compatible_embedding.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/openai_embedding.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/environments/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/environments/models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/environments/multi_step.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/environments/single_step.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/extractors/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/extractors/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/extractors/python_strategies.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/generators.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/human.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/docker/Dockerfile +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/docker_interpreter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/e2b_interpreter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/ipython_interpreter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/apify_reader.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/chunkr_reader.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/firecrawl_reader.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/jina_url_reader.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/mineru_extractor.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/panda_reader.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/logger.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/conversion/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/conversion/alpaca.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/conversion/conversation_models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/conversion/sharegpt/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/conversion/sharegpt/function_call_formatter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/conversion/sharegpt/hermes/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/messages/func_message.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/_utils.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/aiml_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/anthropic_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/azure_openai_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/base_audio_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/base_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/cohere_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/deepseek_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/fish_audio_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/gemini_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/groq_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/internlm_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/litellm_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/mistral_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/model_factory.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/model_manager.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/moonshot_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/nemotron_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/nvidia_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/ollama_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/openai_compatible_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/openai_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/qwen_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/reka_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/reward/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/reward/base_reward_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/reward/evaluator.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/reward/nemotron_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/reward/skywork_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/samba_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/sglang_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/siliconflow_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/togetherai_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/vllm_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/volcano_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/yi_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/models/zhipuai_model.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/personas/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/personas/persona.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/personas/persona_hub.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/code.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/image_craft.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/multi_condition_image_craft.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/persona_hub.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/task_prompt_template.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/translation.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/prompts/video_description_prompt.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/py.typed +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/responses/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/retrievers/auto_retriever.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/retrievers/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/retrievers/hybrid_retrival.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/api.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/configs.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/docker_runtime.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/llm_guard_runtime.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/remote_http_runtime.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/utils/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/utils/function_risk_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/runtime/utils/ignore_risk_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/schemas/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/schemas/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/schemas/openai_converter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/schemas/outlines_converter.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/babyagi_playing.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/role_playing.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/prompts.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/role_playing_worker.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/single_agent_worker.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/task_channel.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/utils.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/worker.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/societies/workforce/workforce.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/graph_storages/graph_element.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/graph_storages/nebula_graph.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/graph_storages/neo4j_graph.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/key_value_storages/redis.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/object_storages/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/object_storages/amazon_s3.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/object_storages/azure_blob.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/object_storages/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/object_storages/google_cloud.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/vectordb_storages/milvus.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/storages/vectordb_storages/qdrant.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/tasks/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/tasks/task.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/tasks/task_prompt.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/terminators/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/arxiv_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/ask_news_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/audio_analysis_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/browser_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/code_execution.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/dalle_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/dappier_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/data_commons_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/excel_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/file_write_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/function_tool.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/github_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/google_maps_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/google_scholar_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/human_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/image_analysis_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/linkedin_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/math_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/mcp_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/meshy_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/mineru_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/networkx_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/notion_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/security_config.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/open_api_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/openbb_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/page_script.js +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/pubmed_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/reddit_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/retrieval_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/search_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/semantic_scholar_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/slack_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/stripe_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/sympy_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/terminal_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/twitter_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/video_analysis_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/video_download_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/weather_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/whatsapp_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/toolkits/zapier_toolkit.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/types/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/types/agents/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/types/agents/tool_calling_record.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/types/enums.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/types/openai_types.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/types/unified_model_type.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/utils/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/utils/async_func.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/utils/commons.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/utils/constants.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/utils/deduplication.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/utils/response_format.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/verifiers/__init__.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/verifiers/base.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/verifiers/models.py +0 -0
- {camel_ai-0.2.34 → camel_ai-0.2.35}/camel/verifiers/python_verifier.py +0 -0
{camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/_types.py

@@ -34,7 +34,7 @@ class ModelResponse(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)

     response: Union[ChatCompletion, Stream, AsyncStream]
-
+    tool_call_requests: Optional[List[ToolCallRequest]]
     output_messages: List[BaseMessage]
     finish_reasons: List[str]
     usage_dict: Dict[str, Any]
{camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/_utils.py

@@ -136,7 +136,7 @@ def get_info_dict(
     termination_reasons: List[str],
     num_tokens: int,
     tool_calls: List[ToolCallingRecord],
-
+    external_tool_call_requests: Optional[List[ToolCallRequest]] = None,
 ) -> Dict[str, Any]:
     r"""Returns a dictionary containing information about the chat session.

@@ -149,8 +149,8 @@ def get_info_dict(
         num_tokens (int): The number of tokens used in the chat session.
         tool_calls (List[ToolCallingRecord]): The list of function
             calling records, containing the information of called tools.
-
-
+        external_tool_call_requests (Optional[List[ToolCallRequest]]): The
+            requests for external tool calls.


     Returns:
@@ -162,7 +162,7 @@ def get_info_dict(
         "termination_reasons": termination_reasons,
         "num_tokens": num_tokens,
         "tool_calls": tool_calls,
-        "
+        "external_tool_call_requests": external_tool_call_requests,
     }

{camel_ai-0.2.34 → camel_ai-0.2.35}/camel/agents/chat_agent.py

@@ -16,7 +16,10 @@ from __future__ import annotations
 import json
 import logging
 import textwrap
+import uuid
 from collections import defaultdict
+from datetime import datetime
+from pathlib import Path
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -59,6 +62,7 @@ from camel.models import (
 )
 from camel.prompts import TextPrompt
 from camel.responses import ChatAgentResponse
+from camel.storages import JsonStorage
 from camel.toolkits import FunctionTool
 from camel.types import (
     ChatCompletion,
@@ -138,6 +142,8 @@ class ChatAgent(BaseAgent):
            the next model in ModelManager. (default: :str:`round_robin`)
        single_iteration (bool): Whether to let the agent perform only one
            model calling at each step. (default: :obj:`False`)
+        agent_id (str, optional): The ID of the agent. If not provided, a
+            random UUID will be generated. (default: :obj:`None`)
    """

    def __init__(
@@ -157,6 +163,7 @@ class ChatAgent(BaseAgent):
        response_terminators: Optional[List[ResponseTerminator]] = None,
        scheduling_strategy: str = "round_robin",
        single_iteration: bool = False,
+        agent_id: Optional[str] = None,
    ) -> None:
        # Set up model backend
        self.model_backend = ModelManager(
@@ -171,16 +178,25 @@ class ChatAgent(BaseAgent):
            scheduling_strategy=scheduling_strategy,
        )
        self.model_type = self.model_backend.model_type
+        # Assign unique ID
+        self.agent_id = agent_id if agent_id else str(uuid.uuid4())

        # Set up memory
        context_creator = ScoreBasedContextCreator(
            self.model_backend.token_counter,
            token_limit or self.model_backend.token_limit,
        )
+
        self.memory: AgentMemory = memory or ChatHistoryMemory(
-            context_creator,
+            context_creator,
+            window_size=message_window_size,
+            agent_id=self.agent_id,
        )

+        # So we don't have to pass agent_id when we define memory
+        if memory is not None:
+            memory.agent_id = self.agent_id
+
        # Set up system message and initialize messages
        self._original_system_message = (
            BaseMessage.make_assistant_message(
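With the constructor changes above, every ChatAgent carries an agent_id and the memory records it writes are tagged with it. A minimal usage sketch, not taken from the diff; it assumes the default model backend and that ChatAgent accepts a plain-string system message, as in recent camel releases:

    from camel.agents import ChatAgent

    # Passing agent_id pins a stable identifier; omitting it falls back to a
    # random UUID, matching the __init__ change above.
    agent = ChatAgent(
        system_message="You are a helpful assistant.",
        agent_id="support-agent-001",  # hypothetical ID; any string works
    )
    print(agent.agent_id)  # -> "support-agent-001"
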
@@ -321,9 +337,101 @@ class ChatAgent(BaseAgent):
            role (OpenAIBackendRole): The backend role type.
        """
        self.memory.write_record(
-            MemoryRecord(
+            MemoryRecord(
+                message=message,
+                role_at_backend=role,
+                timestamp=datetime.now().timestamp(),
+                agent_id=self.agent_id,
+            )
        )

+    def load_memory(self, memory: AgentMemory) -> None:
+        r"""Load the provided memory into the agent.
+
+        Args:
+            memory (AgentMemory): The memory to load into the agent.
+
+        Returns:
+            None
+        """
+
+        for context_record in memory.retrieve():
+            self.memory.write_record(context_record.memory_record)
+        logger.info(f"Memory loaded from {memory}")
+
+    def load_memory_from_path(self, path: str) -> None:
+        r"""Loads memory records from a JSON file filtered by this agent's ID.
+
+        Args:
+            path (str): The file path to a JSON memory file that uses
+                JsonStorage.
+
+        Raises:
+            ValueError: If no matching records for the agent_id are found
+                (optional check; commented out below).
+        """
+        json_store = JsonStorage(Path(path))
+        all_records = json_store.load()
+
+        if not all_records:
+            raise ValueError(
+                f"No records found for agent_id={self.agent_id} in {path}"
+            )
+
+        for record_dict in all_records:
+            # Validate the record dictionary before conversion
+            required_keys = ['message', 'role_at_backend', 'agent_id']
+            if not all(key in record_dict for key in required_keys):
+                logger.warning(
+                    f"Skipping invalid record: missing required "
+                    f"keys in {record_dict}"
+                )
+                continue
+
+            # Validate message structure in the record
+            if (
+                not isinstance(record_dict['message'], dict)
+                or '__class__' not in record_dict['message']
+            ):
+                logger.warning(
+                    f"Skipping invalid record: malformed message "
+                    f"structure in {record_dict}"
+                )
+                continue
+
+            try:
+                record = MemoryRecord.from_dict(record_dict)
+                self.memory.write_records([record])
+            except Exception as e:
+                logger.warning(
+                    f"Error converting record to MemoryRecord: {e}. "
+                    f"Record: {record_dict}"
+                )
+        logger.info(f"Memory loaded from {path}")
+
+    def save_memory(self, path: str) -> None:
+        r"""Retrieves the current conversation data from memory and writes it
+        into a JSON file using JsonStorage.
+
+        Args:
+            path (str): Target file path to store JSON data.
+        """
+        json_store = JsonStorage(Path(path))
+        context_records = self.memory.retrieve()
+        to_save = [cr.memory_record.to_dict() for cr in context_records]
+        json_store.save(to_save)
+        logger.info(f"Memory saved to {path}")
+
+    def clear_memory(self) -> None:
+        r"""Clear the agent's memory and reset to initial state.
+
+        Returns:
+            None
+        """
+        self.memory.clear()
+        if self.system_message is not None:
+            self.update_memory(self.system_message, OpenAIBackendRole.SYSTEM)
+
    def _generate_system_message_for_output_language(
        self,
    ) -> Optional[BaseMessage]:
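The new persistence helpers round-trip conversation memory through JsonStorage. A rough usage sketch; paths and messages are illustrative, and it assumes a configured default model backend so that step() can run:

    from camel.agents import ChatAgent

    agent = ChatAgent("You are a terse assistant.", agent_id="demo-agent")
    agent.step("Remember that my favourite colour is green.")

    # Persist the current conversation; each record carries agent_id.
    agent.save_memory("./demo_agent_memory.json")

    # Later, a fresh agent with the same agent_id can reload the file.
    # Records missing required keys are skipped with a warning, and an
    # empty file raises ValueError, per load_memory_from_path above.
    restored = ChatAgent("You are a terse assistant.", agent_id="demo-agent")
    restored.load_memory_from_path("./demo_agent_memory.json")

    # clear_memory() wipes the history and re-seeds the system message.
    restored.clear_memory()
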
@@ -474,7 +582,7 @@ class ChatAgent(BaseAgent):
        self.update_memory(input_message, OpenAIBackendRole.USER)

        tool_call_records: List[ToolCallingRecord] = []
-
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None

        while True:
            try:
@@ -494,12 +602,26 @@ class ChatAgent(BaseAgent):
            if self.single_iteration:
                break

-            if
-
-
+            if tool_call_requests := response.tool_call_requests:
+                # Process all tool calls
+                for tool_call_request in tool_call_requests:
+                    if (
+                        tool_call_request.tool_name
+                        in self._external_tool_schemas
+                    ):
+                        if external_tool_call_requests is None:
+                            external_tool_call_requests = []
+                        external_tool_call_requests.append(tool_call_request)
+                    else:
+                        tool_call_records.append(
+                            self._execute_tool(tool_call_request)
+                        )
+
+                # If we found external tool calls, break the loop
+                if external_tool_call_requests:
                    break

-
+                # If we're still here, continue the loop
                continue

            break
@@ -508,7 +630,10 @@ class ChatAgent(BaseAgent):
        self._record_final_output(response.output_messages)

        return self._convert_to_chatagent_response(
-            response,
+            response,
+            tool_call_records,
+            num_tokens,
+            external_tool_call_requests,
        )

    @property
@@ -550,7 +675,7 @@ class ChatAgent(BaseAgent):
        self.update_memory(input_message, OpenAIBackendRole.USER)

        tool_call_records: List[ToolCallingRecord] = []
-
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None
        while True:
            try:
                openai_messages, num_tokens = self.memory.get_context()
@@ -569,13 +694,27 @@ class ChatAgent(BaseAgent):
            if self.single_iteration:
                break

-            if
-
-
+            if tool_call_requests := response.tool_call_requests:
+                # Process all tool calls
+                for tool_call_request in tool_call_requests:
+                    if (
+                        tool_call_request.tool_name
+                        in self._external_tool_schemas
+                    ):
+                        if external_tool_call_requests is None:
+                            external_tool_call_requests = []
+                        external_tool_call_requests.append(tool_call_request)
+
+                    tool_call_record = await self._aexecute_tool(
+                        tool_call_request
+                    )
+                    tool_call_records.append(tool_call_record)
+
+                # If we found an external tool call, break the loop
+                if external_tool_call_requests:
                    break

-
-                tool_call_records.append(tool_call_record)
+                # If we're still here, continue the loop
                continue

            break
@@ -584,7 +723,10 @@ class ChatAgent(BaseAgent):
        self._record_final_output(response.output_messages)

        return self._convert_to_chatagent_response(
-            response,
+            response,
+            tool_call_records,
+            num_tokens,
+            external_tool_call_requests,
        )

    def _convert_to_chatagent_response(
@@ -592,7 +734,7 @@ class ChatAgent(BaseAgent):
        response: ModelResponse,
        tool_call_records: List[ToolCallingRecord],
        num_tokens: int,
-
+        external_tool_call_requests: Optional[List[ToolCallRequest]],
    ) -> ChatAgentResponse:
        r"""Parse the final model response into the chat agent response."""
        info = self._step_get_info(
@@ -602,7 +744,7 @@ class ChatAgent(BaseAgent):
            response.response_id,
            tool_call_records,
            num_tokens,
-
+            external_tool_call_requests,
        )

        return ChatAgentResponse(
@@ -853,7 +995,7 @@ class ChatAgent(BaseAgent):
        response_id: str,
        tool_calls: List[ToolCallingRecord],
        num_tokens: int,
-
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None,
    ) -> Dict[str, Any]:
        r"""Process the output of a chat step and gather information about the
        step.
@@ -910,7 +1052,7 @@ class ChatAgent(BaseAgent):
            finish_reasons,
            num_tokens,
            tool_calls,
-
+            external_tool_call_requests,
        )

    def _handle_batch_response(
@@ -949,18 +1091,21 @@ class ChatAgent(BaseAgent):
        if response.usage is not None:
            usage = safe_model_dump(response.usage)

-
+        tool_call_requests: Optional[List[ToolCallRequest]] = None
        if tool_calls := response.choices[0].message.tool_calls:
-
-
-
-
-
-
+            tool_call_requests = []
+            for tool_call in tool_calls:
+                tool_name = tool_call.function.name
+                tool_call_id = tool_call.id
+                args = json.loads(tool_call.function.arguments)
+                tool_call_request = ToolCallRequest(
+                    tool_name=tool_name, args=args, tool_call_id=tool_call_id
+                )
+                tool_call_requests.append(tool_call_request)

        return ModelResponse(
            response=response,
-
+            tool_call_requests=tool_call_requests,
            output_messages=output_messages,
            finish_reasons=finish_reasons,
            usage_dict=usage,
@@ -1000,7 +1145,7 @@ class ChatAgent(BaseAgent):
        # TODO: Handle tool calls
        return ModelResponse(
            response=response,
-
+            tool_call_requests=None,
            output_messages=output_messages,
            finish_reasons=finish_reasons,
            usage_dict=usage_dict,
@@ -1040,7 +1185,7 @@ class ChatAgent(BaseAgent):
        # TODO: Handle tool calls
        return ModelResponse(
            response=response,
-
+            tool_call_requests=None,
            output_messages=output_messages,
            finish_reasons=finish_reasons,
            usage_dict=usage_dict,
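Taken together, the step()/astep() changes above mean tools registered as external are no longer executed inside the agent; their requests are collected and surfaced so the caller can run them. A sketch of the consumer side, with the get_weather stub purely illustrative and the external_tools argument and "external_tool_call_requests" info key taken from the code above:

    from camel.agents import ChatAgent
    from camel.toolkits import FunctionTool

    def get_weather(city: str) -> str:
        """Illustrative stub; a real deployment would call a weather API."""
        return f"Sunny in {city}"

    agent = ChatAgent(
        "You are a helpful assistant.",
        external_tools=[FunctionTool(get_weather)],  # declared, not executed by the agent
    )

    response = agent.step("What is the weather in Paris?")
    # Requests for external tools come back in the step info instead of being
    # executed; each ToolCallRequest now also carries tool_call_id.
    for req in response.info.get("external_tool_call_requests") or []:
        print(req.tool_name, req.args, req.tool_call_id)
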
{camel_ai-0.2.34 → camel_ai-0.2.35}/camel/configs/openai_config.py

@@ -15,7 +15,7 @@ from __future__ import annotations

 from typing import Dict, Optional, Sequence, Type, Union

-from pydantic import BaseModel
+from pydantic import BaseModel

 from camel.configs.base_config import BaseConfig

@@ -28,14 +28,14 @@ class ChatGPTConfig(BaseConfig):
        temperature (float, optional): Sampling temperature to use, between
            :obj:`0` and :obj:`2`. Higher values make the output more random,
            while lower values make it more focused and deterministic.
-            (default: :obj:`
+            (default: :obj:`None`)
        top_p (float, optional): An alternative to sampling with temperature,
            called nucleus sampling, where the model considers the results of
            the tokens with top_p probability mass. So :obj:`0.1` means only
            the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`
+            (default: :obj:`None`)
        n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`
+            each input message. (default: :obj:`None`)
        response_format (object, optional): An object specifying the format
            that the model must output. Compatible with GPT-4 Turbo and all
            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -51,7 +51,7 @@ class ChatGPTConfig(BaseConfig):
            max context length.
        stream (bool, optional): If True, partial message deltas will be sent
            as data-only server-sent events as they become available.
-            (default: :obj:`
+            (default: :obj:`None`)
        stop (str or list, optional): Up to :obj:`4` sequences where the API
            will stop generating further tokens. (default: :obj:`None`)
        max_tokens (int, optional): The maximum number of tokens to generate
@@ -62,12 +62,12 @@ class ChatGPTConfig(BaseConfig):
            :obj:`2.0`. Positive values penalize new tokens based on whether
            they appear in the text so far, increasing the model's likelihood
            to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`
+            presence penalties. (default: :obj:`None`)
        frequency_penalty (float, optional): Number between :obj:`-2.0` and
            :obj:`2.0`. Positive values penalize new tokens based on their
            existing frequency in the text so far, decreasing the model's
            likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`
+            about frequency and presence penalties. (default: :obj:`None`)
        logit_bias (dict, optional): Modify the likelihood of specified tokens
            appearing in the completion. Accepts a json object that maps tokens
            (specified by their token ID in the tokenizer) to an associated
@@ -76,7 +76,7 @@ class ChatGPTConfig(BaseConfig):
            The exact effect will vary per model, but values between:obj:` -1`
            and :obj:`1` should decrease or increase likelihood of selection;
            values like :obj:`-100` or :obj:`100` should result in a ban or
-            exclusive selection of the relevant token. (default: :obj:`
+            exclusive selection of the relevant token. (default: :obj:`None`)
        user (str, optional): A unique identifier representing your end-user,
            which can help OpenAI to monitor and detect abuse.
            (default: :obj:`""`)
@@ -101,21 +101,25 @@ class ChatGPTConfig(BaseConfig):
            :obj:`o1mini`, :obj:`o1preview`, :obj:`o3mini`). If not provided
            or if the model type does not support it, this parameter is
            ignored. (default: :obj:`None`)
+        parallel_tool_calls (bool, optional): A parameter specifying whether
+            the model should call tools in parallel or not. (default:
+            :obj:`None`)
    """

-    temperature: float =
-    top_p: float =
-    n: int =
-    stream: bool =
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
    stop: Optional[Union[str, Sequence[str]]] = None
    max_tokens: Optional[int] = None
-    presence_penalty: float =
+    presence_penalty: Optional[float] = None
    response_format: Optional[Union[Type[BaseModel], Dict]] = None
-    frequency_penalty: float =
-    logit_bias: Dict =
-    user: str =
+    frequency_penalty: Optional[float] = None
+    logit_bias: Optional[Dict] = None
+    user: Optional[str] = None
    tool_choice: Optional[Union[Dict[str, str], str]] = None
    reasoning_effort: Optional[str] = None
+    parallel_tool_calls: Optional[bool] = None


 OPENAI_API_PARAMS = {param for param in ChatGPTConfig.model_fields.keys()}