camel-ai 0.2.65__py3-none-any.whl → 0.2.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +3 -3
- camel/agents/__init__.py +2 -2
- camel/agents/_types.py +9 -4
- camel/agents/_utils.py +40 -2
- camel/agents/base.py +2 -2
- camel/agents/chat_agent.py +4835 -947
- camel/agents/critic_agent.py +2 -2
- camel/agents/deductive_reasoner_agent.py +56 -56
- camel/agents/embodied_agent.py +2 -2
- camel/agents/knowledge_graph_agent.py +20 -20
- camel/agents/mcp_agent.py +35 -36
- camel/agents/multi_hop_generator_agent.py +3 -3
- camel/agents/programmed_agent_instruction.py +2 -2
- camel/agents/repo_agent.py +4 -3
- camel/agents/role_assignment_agent.py +2 -2
- camel/agents/search_agent.py +2 -2
- camel/agents/task_agent.py +2 -2
- camel/agents/tool_agents/__init__.py +2 -2
- camel/agents/tool_agents/base.py +2 -2
- camel/agents/tool_agents/hugging_face_tool_agent.py +3 -3
- camel/benchmarks/__init__.py +2 -2
- camel/benchmarks/apibank.py +5 -5
- camel/benchmarks/apibench.py +2 -2
- camel/benchmarks/base.py +2 -2
- camel/benchmarks/browsecomp.py +44 -33
- camel/benchmarks/gaia.py +17 -13
- camel/benchmarks/mock_website/README.md +1 -3
- camel/benchmarks/mock_website/mock_web.py +2 -2
- camel/benchmarks/mock_website/requirements.txt +1 -1
- camel/benchmarks/mock_website/shopping_mall/app.py +2 -2
- camel/benchmarks/mock_website/task.json +1 -1
- camel/benchmarks/nexus.py +3 -3
- camel/benchmarks/ragbench.py +2 -2
- camel/bots/__init__.py +2 -2
- camel/bots/discord/__init__.py +2 -2
- camel/bots/discord/discord_app.py +2 -2
- camel/bots/discord/discord_installation.py +2 -2
- camel/bots/discord/discord_store.py +3 -3
- camel/bots/slack/__init__.py +2 -2
- camel/bots/slack/models.py +4 -4
- camel/bots/slack/slack_app.py +2 -2
- camel/bots/telegram_bot.py +2 -2
- camel/configs/__init__.py +23 -2
- camel/configs/aihubmix_config.py +90 -0
- camel/configs/aiml_config.py +2 -2
- camel/configs/amd_config.py +70 -0
- camel/configs/anthropic_config.py +2 -2
- camel/configs/base_config.py +2 -2
- camel/configs/bedrock_config.py +5 -3
- camel/configs/cerebras_config.py +98 -0
- camel/configs/cohere_config.py +2 -2
- camel/configs/cometapi_config.py +106 -0
- camel/configs/crynux_config.py +2 -2
- camel/configs/deepseek_config.py +9 -8
- camel/configs/gemini_config.py +6 -4
- camel/configs/groq_config.py +6 -4
- camel/configs/internlm_config.py +6 -4
- camel/configs/litellm_config.py +2 -2
- camel/configs/lmstudio_config.py +6 -4
- camel/configs/minimax_config.py +95 -0
- camel/configs/mistral_config.py +2 -2
- camel/configs/modelscope_config.py +5 -3
- camel/configs/moonshot_config.py +2 -2
- camel/configs/nebius_config.py +105 -0
- camel/configs/netmind_config.py +2 -2
- camel/configs/novita_config.py +2 -2
- camel/configs/nvidia_config.py +2 -2
- camel/configs/ollama_config.py +2 -2
- camel/configs/openai_config.py +5 -3
- camel/configs/openrouter_config.py +6 -4
- camel/configs/ppio_config.py +2 -2
- camel/configs/qianfan_config.py +85 -0
- camel/configs/qwen_config.py +2 -2
- camel/configs/reka_config.py +2 -2
- camel/configs/samba_config.py +6 -4
- camel/configs/sglang_config.py +2 -2
- camel/configs/siliconflow_config.py +2 -2
- camel/configs/togetherai_config.py +2 -2
- camel/configs/vllm_config.py +4 -2
- camel/configs/watsonx_config.py +2 -2
- camel/configs/yi_config.py +6 -4
- camel/configs/zhipuai_config.py +6 -4
- camel/data_collectors/__init__.py +2 -2
- camel/data_collectors/alpaca_collector.py +18 -9
- camel/data_collectors/base.py +2 -2
- camel/data_collectors/sharegpt_collector.py +2 -2
- camel/datagen/__init__.py +2 -2
- camel/datagen/cot_datagen.py +3 -3
- camel/datagen/evol_instruct/__init__.py +2 -2
- camel/datagen/evol_instruct/evol_instruct.py +2 -2
- camel/datagen/evol_instruct/scorer.py +12 -12
- camel/datagen/evol_instruct/templates.py +16 -16
- camel/datagen/self_improving_cot.py +5 -5
- camel/datagen/self_instruct/__init__.py +2 -2
- camel/datagen/self_instruct/filter/__init__.py +2 -2
- camel/datagen/self_instruct/filter/filter_function.py +2 -2
- camel/datagen/self_instruct/filter/filter_registry.py +2 -2
- camel/datagen/self_instruct/filter/instruction_filter.py +2 -2
- camel/datagen/self_instruct/self_instruct.py +2 -2
- camel/datagen/self_instruct/templates.py +47 -47
- camel/datagen/source2synth/__init__.py +2 -2
- camel/datagen/source2synth/data_processor.py +2 -2
- camel/datagen/source2synth/models.py +2 -2
- camel/datagen/source2synth/user_data_processor_config.py +2 -2
- camel/datahubs/__init__.py +2 -2
- camel/datahubs/base.py +2 -2
- camel/datahubs/huggingface.py +2 -2
- camel/datahubs/models.py +2 -2
- camel/datasets/__init__.py +2 -2
- camel/datasets/base_generator.py +41 -12
- camel/datasets/few_shot_generator.py +18 -18
- camel/datasets/models.py +2 -2
- camel/datasets/self_instruct_generator.py +2 -2
- camel/datasets/static_dataset.py +2 -2
- camel/embeddings/__init__.py +2 -2
- camel/embeddings/azure_embedding.py +2 -2
- camel/embeddings/base.py +2 -2
- camel/embeddings/gemini_embedding.py +2 -2
- camel/embeddings/jina_embedding.py +2 -2
- camel/embeddings/mistral_embedding.py +2 -2
- camel/embeddings/openai_compatible_embedding.py +2 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/embeddings/sentence_transformers_embeddings.py +2 -2
- camel/embeddings/together_embedding.py +2 -2
- camel/embeddings/vlm_embedding.py +2 -2
- camel/environments/__init__.py +14 -2
- camel/environments/models.py +2 -2
- camel/environments/multi_step.py +2 -2
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +30 -5
- camel/environments/tic_tac_toe.py +3 -3
- camel/extractors/__init__.py +2 -2
- camel/extractors/base.py +2 -2
- camel/extractors/python_strategies.py +2 -2
- camel/generators.py +2 -2
- camel/human.py +2 -2
- camel/interpreters/__init__.py +4 -2
- camel/interpreters/base.py +2 -2
- camel/interpreters/docker/Dockerfile +14 -24
- camel/interpreters/docker_interpreter.py +5 -4
- camel/interpreters/e2b_interpreter.py +36 -3
- camel/interpreters/internal_python_interpreter.py +53 -4
- camel/interpreters/interpreter_error.py +2 -2
- camel/interpreters/ipython_interpreter.py +2 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/interpreters/subprocess_interpreter.py +2 -2
- camel/loaders/__init__.py +13 -4
- camel/loaders/apify_reader.py +2 -2
- camel/loaders/base_io.py +2 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +11 -2
- camel/loaders/crawl4ai_reader.py +2 -2
- camel/loaders/firecrawl_reader.py +6 -6
- camel/loaders/jina_url_reader.py +2 -2
- camel/loaders/markitdown.py +2 -2
- camel/loaders/mineru_extractor.py +2 -2
- camel/loaders/mistral_reader.py +2 -2
- camel/loaders/scrapegraph_reader.py +2 -2
- camel/loaders/unstructured_io.py +2 -2
- camel/logger.py +5 -5
- camel/memories/__init__.py +2 -2
- camel/memories/agent_memories.py +86 -3
- camel/memories/base.py +36 -2
- camel/memories/blocks/__init__.py +2 -2
- camel/memories/blocks/chat_history_block.py +125 -7
- camel/memories/blocks/vectordb_block.py +10 -3
- camel/memories/context_creators/__init__.py +2 -2
- camel/memories/context_creators/score_based.py +31 -239
- camel/memories/records.py +90 -10
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +178 -43
- camel/messages/conversion/__init__.py +2 -2
- camel/messages/conversion/alpaca.py +2 -2
- camel/messages/conversion/conversation_models.py +2 -2
- camel/messages/conversion/sharegpt/__init__.py +2 -2
- camel/messages/conversion/sharegpt/function_call_formatter.py +2 -2
- camel/messages/conversion/sharegpt/hermes/__init__.py +2 -2
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +2 -2
- camel/messages/func_message.py +54 -17
- camel/models/__init__.py +16 -2
- camel/models/_utils.py +3 -3
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +11 -18
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +127 -20
- camel/models/aws_bedrock_model.py +12 -35
- camel/models/azure_openai_model.py +212 -89
- camel/models/base_audio_model.py +5 -3
- camel/models/base_model.py +195 -26
- camel/models/cerebras_model.py +83 -0
- camel/models/cohere_model.py +16 -21
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +11 -18
- camel/models/deepseek_model.py +18 -58
- camel/models/fish_audio_model.py +8 -2
- camel/models/gemini_model.py +389 -26
- camel/models/groq_model.py +11 -19
- camel/models/internlm_model.py +11 -18
- camel/models/litellm_model.py +56 -34
- camel/models/lmstudio_model.py +17 -20
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +18 -19
- camel/models/model_factory.py +37 -3
- camel/models/model_manager.py +26 -8
- camel/models/modelscope_model.py +13 -193
- camel/models/moonshot_model.py +195 -21
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +19 -9
- camel/models/netmind_model.py +11 -18
- camel/models/novita_model.py +11 -18
- camel/models/nvidia_model.py +11 -18
- camel/models/ollama_model.py +14 -21
- camel/models/openai_audio_models.py +2 -2
- camel/models/openai_compatible_model.py +188 -45
- camel/models/openai_model.py +216 -71
- camel/models/openrouter_model.py +11 -19
- camel/models/ppio_model.py +11 -18
- camel/models/qianfan_model.py +89 -0
- camel/models/qwen_model.py +13 -193
- camel/models/reka_model.py +21 -21
- camel/models/reward/__init__.py +2 -2
- camel/models/reward/base_reward_model.py +2 -2
- camel/models/reward/evaluator.py +2 -2
- camel/models/reward/nemotron_model.py +2 -2
- camel/models/reward/skywork_model.py +2 -2
- camel/models/samba_model.py +48 -47
- camel/models/sglang_model.py +88 -40
- camel/models/siliconflow_model.py +12 -35
- camel/models/stub_model.py +10 -7
- camel/models/togetherai_model.py +11 -18
- camel/models/vllm_model.py +10 -18
- camel/models/volcano_model.py +16 -20
- camel/models/watsonx_model.py +7 -19
- camel/models/yi_model.py +11 -18
- camel/models/zhipuai_model.py +70 -18
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/personas/__init__.py +2 -2
- camel/personas/persona.py +2 -2
- camel/personas/persona_hub.py +2 -2
- camel/prompts/__init__.py +2 -2
- camel/prompts/ai_society.py +2 -2
- camel/prompts/base.py +2 -2
- camel/prompts/code.py +2 -2
- camel/prompts/evaluation.py +2 -2
- camel/prompts/generate_text_embedding_data.py +2 -2
- camel/prompts/image_craft.py +2 -2
- camel/prompts/misalignment.py +2 -2
- camel/prompts/multi_condition_image_craft.py +2 -2
- camel/prompts/object_recognition.py +2 -2
- camel/prompts/persona_hub.py +3 -3
- camel/prompts/prompt_templates.py +2 -2
- camel/prompts/role_description_prompt_template.py +2 -2
- camel/prompts/solution_extraction.py +8 -8
- camel/prompts/task_prompt_template.py +2 -2
- camel/prompts/translation.py +2 -2
- camel/prompts/video_description_prompt.py +3 -3
- camel/responses/__init__.py +2 -2
- camel/responses/agent_responses.py +2 -2
- camel/retrievers/__init__.py +2 -2
- camel/retrievers/auto_retriever.py +3 -2
- camel/retrievers/base.py +2 -2
- camel/retrievers/bm25_retriever.py +2 -2
- camel/retrievers/cohere_rerank_retriever.py +2 -2
- camel/retrievers/hybrid_retrival.py +2 -2
- camel/retrievers/vector_retriever.py +2 -2
- camel/runtimes/Dockerfile.multi-toolkit +90 -0
- camel/runtimes/__init__.py +2 -2
- camel/runtimes/api.py +79 -23
- camel/runtimes/base.py +2 -2
- camel/runtimes/configs.py +13 -13
- camel/runtimes/daytona_runtime.py +17 -18
- camel/runtimes/docker_runtime.py +12 -12
- camel/runtimes/llm_guard_runtime.py +26 -26
- camel/runtimes/remote_http_runtime.py +11 -11
- camel/runtimes/ubuntu_docker_runtime.py +2 -2
- camel/runtimes/utils/__init__.py +2 -2
- camel/runtimes/utils/function_risk_toolkit.py +2 -2
- camel/runtimes/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/__init__.py +2 -2
- camel/schemas/base.py +2 -2
- camel/schemas/openai_converter.py +3 -3
- camel/schemas/outlines_converter.py +2 -2
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +4 -2
- camel/societies/babyagi_playing.py +2 -2
- camel/societies/role_playing.py +201 -80
- camel/societies/workforce/__init__.py +10 -3
- camel/societies/workforce/base.py +2 -2
- camel/societies/workforce/events.py +143 -0
- camel/societies/workforce/prompts.py +258 -33
- camel/societies/workforce/role_playing_worker.py +88 -31
- camel/societies/workforce/single_agent_worker.py +638 -40
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +182 -38
- camel/societies/workforce/utils.py +780 -65
- camel/societies/workforce/worker.py +92 -26
- camel/societies/workforce/workflow_memory_manager.py +1746 -0
- camel/societies/workforce/workforce.py +5276 -355
- camel/societies/workforce/workforce_callback.py +103 -0
- camel/societies/workforce/workforce_logger.py +647 -0
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +6 -2
- camel/storages/graph_storages/__init__.py +2 -2
- camel/storages/graph_storages/base.py +2 -2
- camel/storages/graph_storages/graph_element.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +4 -4
- camel/storages/graph_storages/neo4j_graph.py +7 -7
- camel/storages/key_value_storages/__init__.py +2 -2
- camel/storages/key_value_storages/base.py +2 -2
- camel/storages/key_value_storages/in_memory.py +2 -2
- camel/storages/key_value_storages/json.py +17 -4
- camel/storages/key_value_storages/mem0_cloud.py +50 -49
- camel/storages/key_value_storages/redis.py +2 -2
- camel/storages/object_storages/__init__.py +2 -2
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/base.py +2 -2
- camel/storages/object_storages/google_cloud.py +3 -3
- camel/storages/vectordb_storages/__init__.py +8 -2
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/faiss.py +2 -2
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/oceanbase.py +15 -15
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +6 -6
- camel/storages/vectordb_storages/surreal.py +372 -0
- camel/storages/vectordb_storages/tidb.py +11 -8
- camel/storages/vectordb_storages/weaviate.py +2 -2
- camel/tasks/__init__.py +2 -2
- camel/tasks/task.py +348 -26
- camel/tasks/task_prompt.py +3 -3
- camel/terminators/__init__.py +2 -2
- camel/terminators/base.py +2 -2
- camel/terminators/response_terminator.py +2 -2
- camel/terminators/token_limit_terminator.py +2 -2
- camel/toolkits/__init__.py +54 -10
- camel/toolkits/aci_toolkit.py +66 -21
- camel/toolkits/arxiv_toolkit.py +8 -8
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/async_browser_toolkit.py +4 -4
- camel/toolkits/audio_analysis_toolkit.py +3 -3
- camel/toolkits/base.py +65 -7
- camel/toolkits/bohrium_toolkit.py +2 -2
- camel/toolkits/browser_toolkit.py +34 -21
- camel/toolkits/browser_toolkit_commons.py +4 -4
- camel/toolkits/code_execution.py +31 -4
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +12 -8
- camel/toolkits/data_commons_toolkit.py +2 -2
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/earth_science_toolkit.py +5367 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +905 -71
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +126 -18
- camel/toolkits/github_toolkit.py +109 -22
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +40 -6
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/google_maps_toolkit.py +2 -2
- camel/toolkits/google_scholar_toolkit.py +2 -2
- camel/toolkits/human_toolkit.py +36 -12
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +4589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +33 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1929 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +129 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +27 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1037 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_analysis_toolkit.py +3 -6
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +5 -6
- camel/toolkits/klavis_toolkit.py +7 -3
- camel/toolkits/linkedin_toolkit.py +2 -2
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +66 -12
- camel/toolkits/mcp_toolkit.py +412 -36
- camel/toolkits/memory_toolkit.py +7 -3
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/mineru_toolkit.py +2 -2
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/networkx_toolkit.py +2 -2
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/notion_toolkit.py +2 -2
- camel/toolkits/open_api_specs/biztoc/__init__.py +2 -2
- camel/toolkits/open_api_specs/biztoc/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/coursera/__init__.py +2 -2
- camel/toolkits/open_api_specs/create_qr_code/__init__.py +2 -2
- camel/toolkits/open_api_specs/klarna/__init__.py +2 -2
- camel/toolkits/open_api_specs/nasa_apod/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/outschool/openapi.yaml +1 -1
- camel/toolkits/open_api_specs/outschool/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/get_classes.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +2 -2
- camel/toolkits/open_api_specs/security_config.py +2 -2
- camel/toolkits/open_api_specs/speak/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +2 -2
- camel/toolkits/open_api_toolkit.py +2 -2
- camel/toolkits/openbb_toolkit.py +7 -3
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/page_script.js +53 -53
- camel/toolkits/playwright_mcp_toolkit.py +13 -31
- camel/toolkits/pptx_toolkit.py +36 -23
- camel/toolkits/pubmed_toolkit.py +2 -2
- camel/toolkits/pulse_mcp_search_toolkit.py +2 -2
- camel/toolkits/pyautogui_toolkit.py +2 -2
- camel/toolkits/reddit_toolkit.py +2 -2
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/retrieval_toolkit.py +2 -2
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +539 -146
- camel/toolkits/searxng_toolkit.py +2 -2
- camel/toolkits/semantic_scholar_toolkit.py +2 -2
- camel/toolkits/slack_toolkit.py +108 -58
- camel/toolkits/sql_toolkit.py +712 -0
- camel/toolkits/stripe_toolkit.py +2 -2
- camel/toolkits/sympy_toolkit.py +3 -3
- camel/toolkits/task_planning_toolkit.py +5 -5
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +1070 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +3 -3
- camel/toolkits/twitter_toolkit.py +2 -2
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +109 -29
- camel/toolkits/video_download_toolkit.py +19 -16
- camel/toolkits/weather_toolkit.py +2 -2
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/whatsapp_toolkit.py +2 -2
- camel/toolkits/wolfram_alpha_toolkit.py +2 -2
- camel/toolkits/zapier_toolkit.py +7 -3
- camel/types/__init__.py +4 -4
- camel/types/agents/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +6 -3
- camel/types/enums.py +378 -39
- camel/types/mcp_registries.py +2 -2
- camel/types/openai_types.py +4 -4
- camel/types/unified_model_type.py +38 -6
- camel/utils/__init__.py +2 -2
- camel/utils/async_func.py +2 -2
- camel/utils/chunker/__init__.py +2 -2
- camel/utils/chunker/base.py +2 -2
- camel/utils/chunker/code_chunker.py +2 -2
- camel/utils/chunker/uio_chunker.py +2 -2
- camel/utils/commons.py +38 -7
- camel/utils/constants.py +5 -2
- camel/utils/context_utils.py +1134 -0
- camel/utils/deduplication.py +2 -2
- camel/utils/filename.py +2 -2
- camel/utils/langfuse.py +2 -2
- camel/utils/mcp.py +140 -6
- camel/utils/mcp_client.py +48 -38
- camel/utils/message_summarizer.py +148 -0
- camel/utils/response_format.py +2 -2
- camel/utils/token_counting.py +45 -22
- camel/utils/tool_result.py +44 -0
- camel/verifiers/__init__.py +2 -2
- camel/verifiers/base.py +2 -2
- camel/verifiers/math_verifier.py +2 -2
- camel/verifiers/models.py +2 -2
- camel/verifiers/physics_verifier.py +2 -2
- camel/verifiers/python_verifier.py +2 -2
- {camel_ai-0.2.65.dist-info → camel_ai-0.2.82.dist-info}/METADATA +327 -94
- camel_ai-0.2.82.dist-info/RECORD +507 -0
- {camel_ai-0.2.65.dist-info → camel_ai-0.2.82.dist-info}/WHEEL +1 -1
- {camel_ai-0.2.65.dist-info → camel_ai-0.2.82.dist-info}/licenses/LICENSE +1 -1
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- camel_ai-0.2.65.dist-info/RECORD +0 -426
camel/models/deepseek_model.py
CHANGED
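Reviewer note on the diff below: besides the 2023-2025 copyright refresh and the optional TRACEROOT_ENABLED tracing import, the functional changes are the removal of the `_post_handle_response` reasoning-content (`<think>` tag) rewriting and of the `check_model_config` validation, plus new `max_retries` and `**kwargs` constructor parameters forwarded to the underlying client. A minimal, hypothetical usage sketch of the new parameters follows; the model identifier and construction style are assumptions, not taken from this diff.

# Hedged sketch: only max_retries and the **kwargs pass-through are shown in the diff.
from camel.models import DeepSeekModel  # import path assumed

model = DeepSeekModel(
    model_type="deepseek-chat",  # assumed model identifier
    max_retries=5,               # new parameter in 0.2.82, defaults to 3
    # any extra keyword arguments are passed through to client initialization
)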
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,7 +10,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
 from typing import Any, Dict, List, Optional, Type, Union
@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from openai import AsyncStream, Stream
 from pydantic import BaseModel

-from camel.configs import
+from camel.configs import DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models._utils import try_modify_message_with_format
@@ -40,6 +40,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe

@@ -78,6 +83,10 @@ class DeepSeekModel(OpenAICompatibleModel):
             API calls. If not provided, will fall back to the MODEL_TIMEOUT
             environment variable or default to 180 seconds.
             (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.

     References:
         https://api-docs.deepseek.com/
@@ -96,6 +105,8 @@ class DeepSeekModel(OpenAICompatibleModel):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = DeepSeekConfig().as_dict()
@@ -112,6 +123,8 @@ class DeepSeekModel(OpenAICompatibleModel):
             url=url,
             token_counter=token_counter,
             timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
         )

     def _prepare_request(
@@ -152,44 +165,6 @@ class DeepSeekModel(OpenAICompatibleModel):

         return request_config

-    def _post_handle_response(
-        self, response: ChatCompletion
-    ) -> ChatCompletion:
-        r"""Handle reasoning content with <think> tags at the beginning."""
-        if (
-            self.model_type in [ModelType.DEEPSEEK_REASONER]
-            and os.environ.get("GET_REASONING_CONTENT", "false").lower()
-            == "true"
-        ):
-            reasoning_content = response.choices[0].message.reasoning_content  # type: ignore[attr-defined]
-            combined_content = (  # type: ignore[operator]
-                f"<think>\n{reasoning_content}\n</think>\n"
-                if reasoning_content
-                else ""
-            ) + response.choices[0].message.content
-
-            response = ChatCompletion.construct(
-                id=response.id,
-                choices=[
-                    dict(
-                        index=response.choices[0].index,
-                        message={
-                            "role": response.choices[0].message.role,
-                            "content": combined_content,
-                            "tool_calls": None,
-                        },
-                        finish_reason=response.choices[0].finish_reason
-                        if response.choices[0].finish_reason
-                        else None,
-                    )
-                ],
-                created=response.created,
-                model=response.model,
-                object="chat.completion",
-                usage=response.usage,
-            )
-        return response
-
     @observe()
     def _run(
         self,
@@ -231,7 +206,7 @@ class DeepSeekModel(OpenAICompatibleModel):
             **request_config,
         )

-        return
+        return response

     @observe()
     async def _arun(
@@ -273,19 +248,4 @@ class DeepSeekModel(OpenAICompatibleModel):
             **request_config,
         )

-        return
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to DeepSeek API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to DeepSeek API.
-        """
-        for param in self.model_config_dict:
-            if param not in DEEPSEEK_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into DeepSeek model backend."
-                )
+        return response
camel/models/fish_audio_model.py
CHANGED
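Reviewer note on the diff below: constructing `FishAudioModel` without credentials now fails fast with a `ValueError` instead of creating a session around a missing key. A hedged sketch of the two ways to satisfy the check; the `api_key` keyword and the `FISHAUDIO_API_KEY` variable are taken from the new error message, everything else is assumed.

import os
from camel.models import FishAudioModel  # import path assumed

os.environ["FISHAUDIO_API_KEY"] = "<your-key>"   # option 1: environment variable
tts = FishAudioModel()

tts = FishAudioModel(api_key="<your-key>")       # option 2: explicit argument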
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,7 +10,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
 from typing import Any, Optional
@@ -44,6 +44,12 @@ class FishAudioModel(BaseAudioModel):
         self._url = url or os.environ.get(
             "FISHAUDIO_API_BASE_URL", "https://api.fish.audio"
         )
+        if self._api_key is None:
+            raise ValueError(
+                "API key is required for FishAudio. Please provide it via "
+                "the 'api_key' parameter or set the 'FISHAUDIO_API_KEY' "
+                "environment variable."
+            )
         self.session = Session(apikey=self._api_key, base_url=self._url)

     def text_to_speech(
camel/models/gemini_model.py
CHANGED
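Reviewer note on the diff below: in addition to the copyright/tracing/`max_retries` changes mirrored from the other backends, `_process_messages` now merges consecutive assistant messages that each carry a single tool call into one assistant message with multiple `tool_calls` (the shape Gemini's OpenAI-compatible endpoint expects for parallel function calling), and new `_preserve_thought_signatures` wrappers keep Gemini thought signatures attached to streamed tool-call deltas. A hedged before/after sketch of the merging behaviour; the ids and function names are invented for illustration.

# Conversation history as an agent might build it: one tool call per message.
history = [
    {"role": "assistant", "content": "", "tool_calls": [
        {"id": "c1", "type": "function",
         "function": {"name": "get_weather", "arguments": "{}"}}]},
    {"role": "tool", "tool_call_id": "c1", "content": "sunny"},
    {"role": "assistant", "content": "", "tool_calls": [
        {"id": "c2", "type": "function",
         "function": {"name": "get_time", "arguments": "{}"}}]},
    {"role": "tool", "tool_call_id": "c2", "content": "12:00"},
]

# Expected shape after _process_messages: one merged assistant message with
# both tool calls (empty content replaced by 'null'), followed by the tool
# results in their original order.
merged = [
    {"role": "assistant", "content": "null", "tool_calls": [
        {"id": "c1", "type": "function",
         "function": {"name": "get_weather", "arguments": "{}"}},
        {"id": "c2", "type": "function",
         "function": {"name": "get_time", "arguments": "{}"}}]},
    {"role": "tool", "tool_call_id": "c1", "content": "sunny"},
    {"role": "tool", "tool_call_id": "c2", "content": "12:00"},
]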
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,14 +10,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import
+from typing import (
+    Any,
+    AsyncGenerator,
+    Dict,
+    Generator,
+    List,
+    Optional,
+    Type,
+    Union,
+)

 from openai import AsyncStream, Stream
 from pydantic import BaseModel

-from camel.configs import
+from camel.configs import GeminiConfig
 from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
@@ -37,6 +46,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe

@@ -64,6 +78,10 @@ class GeminiModel(OpenAICompatibleModel):
             API calls. If not provided, will fall back to the MODEL_TIMEOUT
             environment variable or default to 180 seconds.
             (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
     """

     @api_keys_required(
@@ -79,6 +97,8 @@ class GeminiModel(OpenAICompatibleModel):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = GeminiConfig().as_dict()
@@ -95,20 +115,344 @@ class GeminiModel(OpenAICompatibleModel):
             url=url,
             token_counter=token_counter,
             timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
         )

     def _process_messages(self, messages) -> List[OpenAIMessage]:
         r"""Process the messages for Gemini API to ensure no empty content,
-        which is not accepted by Gemini.
+        which is not accepted by Gemini. Also preserves thought signatures
+        required for Gemini 3 Pro function calling.
+
+        This method also merges consecutive assistant messages with single
+        tool calls into a single assistant message with multiple tool calls,
+        as required by Gemini's OpenAI-compatible API for parallel function
+        calling.
         """
-        processed_messages = []
-        for msg in messages:
-            msg_copy = msg.copy()
+        import copy
+
+        processed_messages: List[OpenAIMessage] = []
+        i = 0
+        n = len(messages)
+
+        while i < n:
+            msg = messages[i]
+
+            # Check if this is an assistant message with a single tool_call
+            # that might need to be merged with subsequent ones
+            if (
+                msg.get('role') == 'assistant'
+                and 'tool_calls' in msg
+                and isinstance(msg['tool_calls'], list)
+                and len(msg['tool_calls']) == 1
+            ):
+                # Look ahead to check if there are more assistant messages
+                # with single tool calls (interleaved with their tool results)
+                j = i + 1
+                has_more_tool_calls = False
+
+                # Collect tool_call_ids we've seen so far
+                first_tool_call_id = msg['tool_calls'][0].get('id')
+                seen_tool_call_ids = (
+                    {first_tool_call_id} if first_tool_call_id else set()
+                )
+
+                # Scan ahead to find pattern: tool_result, assistant,
+                # tool_result, ...
+                while j < n:
+                    next_msg = messages[j]
+                    next_role = next_msg.get('role')
+
+                    if next_role == 'tool':
+                        # Tool result - check if it belongs to our batch
+                        if next_msg.get('tool_call_id') in seen_tool_call_ids:
+                            j += 1
+                            continue
+                        else:
+                            # Tool result for unknown call, stop scanning
+                            break
+                    elif (
+                        next_role == 'assistant'
+                        and 'tool_calls' in next_msg
+                        and isinstance(next_msg['tool_calls'], list)
+                        and len(next_msg['tool_calls']) == 1
+                    ):
+                        # Another single tool call - mark for merging
+                        has_more_tool_calls = True
+                        tc_id = next_msg['tool_calls'][0].get('id')
+                        if tc_id:
+                            seen_tool_call_ids.add(tc_id)
+                        j += 1
+                        continue
+                    else:
+                        # Something else, stop scanning
+                        break
+
+                if has_more_tool_calls:
+                    # Need to merge: collect all tool calls and results
+                    merged_tool_calls = []
+                    tool_results = []
+                    is_first = True
+
+                    for k in range(i, j):
+                        m = messages[k]
+                        if m.get('role') == 'assistant' and 'tool_calls' in m:
+                            tc = m['tool_calls'][0]
+                            if is_first:
+                                # Keep extra_content only on first tool call
+                                merged_tool_calls.append(copy.deepcopy(tc))
+                                is_first = False
+                            else:
+                                # Remove extra_content from subsequent tool
+                                # calls
+                                tc_copy = {
+                                    k: v
+                                    for k, v in tc.items()
+                                    if k != 'extra_content'
+                                }
+                                merged_tool_calls.append(tc_copy)
+                        elif m.get('role') == 'tool':
+                            tool_results.append(copy.deepcopy(m))
+
+                    # Build merged assistant message
+                    merged_msg = copy.deepcopy(msg)
+                    merged_msg['tool_calls'] = merged_tool_calls
+                    if 'content' in merged_msg and merged_msg['content'] == '':
+                        merged_msg['content'] = 'null'
+
+                    processed_messages.append(merged_msg)
+                    processed_messages.extend(tool_results)
+                    i = j
+                    continue
+
+            # Regular message processing (no merging needed)
+            msg_copy = copy.deepcopy(msg)
             if 'content' in msg_copy and msg_copy['content'] == '':
                 msg_copy['content'] = 'null'
             processed_messages.append(msg_copy)
+            i += 1
+
         return processed_messages

+    def _preserve_thought_signatures(
+        self,
+        response: Union[
+            ChatCompletion,
+            Stream[ChatCompletionChunk],
+            AsyncStream[ChatCompletionChunk],
+        ],
+    ) -> Union[
+        ChatCompletion,
+        Generator[ChatCompletionChunk, None, None],
+        AsyncGenerator[ChatCompletionChunk, None],
+    ]:
+        r"""Preserve thought signatures from Gemini responses for future
+        requests.
+
+        According to the Gemini documentation, when a response contains tool
+        calls with thought signatures, these signatures must be preserved
+        exactly as received when the response is added to conversation history
+        for subsequent requests.
+
+        Args:
+            response: The response from Gemini API
+
+        Returns:
+            The response with thought signatures properly preserved.
+            For streaming responses, returns generators that preserve
+            signatures.
+        """
+        # For streaming responses, we need to wrap the stream to preserve
+        # thought signatures in tool calls as they come in
+        if isinstance(response, Stream):
+            return self._wrap_stream_with_thought_preservation(response)
+        elif isinstance(response, AsyncStream):
+            return self._wrap_async_stream_with_thought_preservation(response)
+
+        # For non-streaming responses, thought signatures are already preserved
+        # in _process_messages when the response becomes part of conversation
+        # history
+        return response
+
+    def _wrap_stream_with_thought_preservation(
+        self, stream: Stream[ChatCompletionChunk]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        r"""Wrap a streaming response to preserve thought signatures in tool
+        calls.
+
+        This method ensures that when Gemini streaming responses contain tool
+        calls with thought signatures, these are properly preserved in the
+        extra_content field for future conversation context.
+
+        Args:
+            stream: The original streaming response from Gemini
+
+        Returns:
+            A wrapped stream that preserves thought signatures
+        """
+
+        def thought_preserving_generator():
+            accumulated_signatures = {}  # Store signatures by tool call index
+
+            for chunk in stream:
+                # Process chunk normally first
+                processed_chunk = chunk
+
+                # Check if this chunk contains tool call deltas with thought
+                # signatures
+                if (
+                    hasattr(chunk, 'choices')
+                    and chunk.choices
+                    and hasattr(chunk.choices[0], 'delta')
+                    and hasattr(chunk.choices[0].delta, 'tool_calls')
+                ):
+                    delta_tool_calls = chunk.choices[0].delta.tool_calls
+                    if delta_tool_calls:
+                        for tool_call_delta in delta_tool_calls:
+                            index = tool_call_delta.index
+
+                            # Check for thought signatures in the tool call
+                            # response Gemini may include these in custom
+                            # fields
+                            if hasattr(tool_call_delta, 'extra_content'):
+                                extra_content = tool_call_delta.extra_content
+                                if (
+                                    isinstance(extra_content, dict)
+                                    and 'google' in extra_content
+                                ):
+                                    google_content = extra_content['google']
+                                    if 'thought_signature' in google_content:
+                                        # Store the thought signature for this
+                                        # tool call
+                                        accumulated_signatures[index] = (
+                                            extra_content
+                                        )
+
+                            # Also check if thought signature is in function
+                            # response
+                            elif hasattr(
+                                tool_call_delta, 'function'
+                            ) and hasattr(
+                                tool_call_delta.function, 'extra_content'
+                            ):
+                                func_extra = (
+                                    tool_call_delta.function.extra_content
+                                )
+                                if (
+                                    isinstance(func_extra, dict)
+                                    and 'google' in func_extra
+                                ):
+                                    accumulated_signatures[index] = func_extra
+
+                            # If we have accumulated signature for this tool
+                            # call, ensure it's preserved in the chunk
+                            if index in accumulated_signatures:
+                                # Add extra_content to tool call delta if it
+                                # doesn't exist
+                                if not hasattr(
+                                    tool_call_delta, 'extra_content'
+                                ):
+                                    tool_call_delta.extra_content = (
+                                        accumulated_signatures[index]
+                                    )
+                                elif tool_call_delta.extra_content is None:
+                                    tool_call_delta.extra_content = (
+                                        accumulated_signatures[index]
+                                    )
+
+                yield processed_chunk
+
+        return thought_preserving_generator()
+
+    def _wrap_async_stream_with_thought_preservation(
+        self, stream: AsyncStream[ChatCompletionChunk]
+    ) -> AsyncGenerator[ChatCompletionChunk, None]:
+        r"""Wrap an async streaming response to preserve thought signatures in
+        tool calls.
+
+        This method ensures that when Gemini async streaming responses contain
+        tool calls with thought signatures, these are properly preserved in
+        the extra_content field for future conversation context.
+
+        Args:
+            stream: The original async streaming response from Gemini
+
+        Returns:
+            A wrapped async stream that preserves thought signatures
+        """
+
+        async def async_thought_preserving_generator():
+            accumulated_signatures = {}  # Store signatures by tool call index
+
+            async for chunk in stream:
+                # Process chunk normally first
+                processed_chunk = chunk
+
+                # Check if this chunk contains tool call deltas with thought
+                # signatures
+                if (
+                    hasattr(chunk, 'choices')
+                    and chunk.choices
+                    and hasattr(chunk.choices[0], 'delta')
+                    and hasattr(chunk.choices[0].delta, 'tool_calls')
+                ):
+                    delta_tool_calls = chunk.choices[0].delta.tool_calls
+                    if delta_tool_calls:
+                        for tool_call_delta in delta_tool_calls:
+                            index = tool_call_delta.index
+
+                            # Check for thought signatures in the tool call
+                            # response
+                            if hasattr(tool_call_delta, 'extra_content'):
+                                extra_content = tool_call_delta.extra_content
+                                if (
+                                    isinstance(extra_content, dict)
+                                    and 'google' in extra_content
+                                ):
+                                    google_content = extra_content['google']
+                                    if 'thought_signature' in google_content:
+                                        # Store the thought signature for this
+                                        # tool call
+                                        accumulated_signatures[index] = (
+                                            extra_content
+                                        )
+
+                            # Also check if thought signature is in function
+                            # response
+                            elif hasattr(
+                                tool_call_delta, 'function'
+                            ) and hasattr(
+                                tool_call_delta.function, 'extra_content'
+                            ):
+                                func_extra = (
+                                    tool_call_delta.function.extra_content
+                                )
+                                if (
+                                    isinstance(func_extra, dict)
+                                    and 'google' in func_extra
+                                ):
+                                    accumulated_signatures[index] = func_extra
+
+                            # If we have accumulated signature for this tool
+                            # call, ensure it's preserved in the chunk
+                            if index in accumulated_signatures:
+                                # Add extra_content to tool call delta if it
+                                # doesn't exist
+                                if not hasattr(
+                                    tool_call_delta, 'extra_content'
+                                ):
+                                    tool_call_delta.extra_content = (
+                                        accumulated_signatures[index]
+                                    )
+                                elif tool_call_delta.extra_content is None:
+                                    tool_call_delta.extra_content = (
+                                        accumulated_signatures[index]
+                                    )
+
+                yield processed_chunk
+
+        return async_thought_preserving_generator()
+
     @observe()
     def _run(
         self,
@@ -230,7 +574,7 @@ class GeminiModel(OpenAICompatibleModel):
                 function_dict = tool.get('function', {})
                 function_dict.pop("strict", None)

-                # Process parameters to remove anyOf
+                # Process parameters to remove anyOf and handle enum/format
                 if 'parameters' in function_dict:
                     params = function_dict['parameters']
                     if 'properties' in params:
@@ -247,14 +591,31 @@ class GeminiModel(OpenAICompatibleModel):
                                     'description'
                                 ] = prop_value['description']

+                                # Handle enum and format restrictions for Gemini
+                                # API enum: only allowed for string type
+                                if prop_value.get('type') != 'string':
+                                    prop_value.pop('enum', None)
+
+                                # format: only allowed for string, integer, and
+                                # number types
+                                if prop_value.get('type') not in [
+                                    'string',
+                                    'integer',
+                                    'number',
+                                ]:
+                                    prop_value.pop('format', None)
+
             request_config["tools"] = tools

-        return self._client.chat.completions.create(
+        response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
             **request_config,
         )

+        # Preserve thought signatures from the response for future requests
+        return self._preserve_thought_signatures(response)  # type: ignore[return-value]
+
     async def _arequest_chat_completion(
         self,
         messages: List[OpenAIMessage],
@@ -270,7 +631,7 @@ class GeminiModel(OpenAICompatibleModel):
                 function_dict = tool.get('function', {})
                 function_dict.pop("strict", None)

-                # Process parameters to remove anyOf
+                # Process parameters to remove anyOf and handle enum/format
                 if 'parameters' in function_dict:
                     params = function_dict['parameters']
                     if 'properties' in params:
@@ -287,25 +648,27 @@ class GeminiModel(OpenAICompatibleModel):
                                     'description'
                                 ] = prop_value['description']

+                                # Handle enum and format restrictions for Gemini
+                                # API enum: only allowed for string type
+                                if prop_value.get('type') != 'string':
+                                    prop_value.pop('enum', None)
+
+                                # format: only allowed for string, integer, and
+                                # number types
+                                if prop_value.get('type') not in [
+                                    'string',
+                                    'integer',
+                                    'number',
+                                ]:
+                                    prop_value.pop('format', None)
+
             request_config["tools"] = tools

-        return await self._async_client.chat.completions.create(
+        response = await self._async_client.chat.completions.create(
             messages=messages,
             model=self.model_type,
             **request_config,
         )

-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Gemini API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Gemini API.
-        """
-        for param in self.model_config_dict:
-            if param not in Gemini_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Gemini model backend."
-                )
+        # Preserve thought signatures from the response for future requests
+        return self._preserve_thought_signatures(response)  # type: ignore[return-value]