praisonai-3.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonai/__init__.py +54 -0
- praisonai/__main__.py +15 -0
- praisonai/acp/__init__.py +54 -0
- praisonai/acp/config.py +159 -0
- praisonai/acp/server.py +587 -0
- praisonai/acp/session.py +219 -0
- praisonai/adapters/__init__.py +50 -0
- praisonai/adapters/readers.py +395 -0
- praisonai/adapters/rerankers.py +315 -0
- praisonai/adapters/retrievers.py +394 -0
- praisonai/adapters/vector_stores.py +409 -0
- praisonai/agent_scheduler.py +337 -0
- praisonai/agents_generator.py +903 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +1197 -0
- praisonai/capabilities/__init__.py +275 -0
- praisonai/capabilities/a2a.py +140 -0
- praisonai/capabilities/assistants.py +283 -0
- praisonai/capabilities/audio.py +320 -0
- praisonai/capabilities/batches.py +469 -0
- praisonai/capabilities/completions.py +336 -0
- praisonai/capabilities/container_files.py +155 -0
- praisonai/capabilities/containers.py +93 -0
- praisonai/capabilities/embeddings.py +158 -0
- praisonai/capabilities/files.py +467 -0
- praisonai/capabilities/fine_tuning.py +293 -0
- praisonai/capabilities/guardrails.py +182 -0
- praisonai/capabilities/images.py +330 -0
- praisonai/capabilities/mcp.py +190 -0
- praisonai/capabilities/messages.py +270 -0
- praisonai/capabilities/moderations.py +154 -0
- praisonai/capabilities/ocr.py +217 -0
- praisonai/capabilities/passthrough.py +204 -0
- praisonai/capabilities/rag.py +207 -0
- praisonai/capabilities/realtime.py +160 -0
- praisonai/capabilities/rerank.py +165 -0
- praisonai/capabilities/responses.py +266 -0
- praisonai/capabilities/search.py +109 -0
- praisonai/capabilities/skills.py +133 -0
- praisonai/capabilities/vector_store_files.py +334 -0
- praisonai/capabilities/vector_stores.py +304 -0
- praisonai/capabilities/videos.py +141 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/chat/__init__.py +106 -0
- praisonai/chat/app.py +125 -0
- praisonai/cli/__init__.py +26 -0
- praisonai/cli/app.py +213 -0
- praisonai/cli/commands/__init__.py +75 -0
- praisonai/cli/commands/acp.py +70 -0
- praisonai/cli/commands/completion.py +333 -0
- praisonai/cli/commands/config.py +166 -0
- praisonai/cli/commands/debug.py +142 -0
- praisonai/cli/commands/diag.py +55 -0
- praisonai/cli/commands/doctor.py +166 -0
- praisonai/cli/commands/environment.py +179 -0
- praisonai/cli/commands/lsp.py +112 -0
- praisonai/cli/commands/mcp.py +210 -0
- praisonai/cli/commands/profile.py +457 -0
- praisonai/cli/commands/run.py +228 -0
- praisonai/cli/commands/schedule.py +150 -0
- praisonai/cli/commands/serve.py +97 -0
- praisonai/cli/commands/session.py +212 -0
- praisonai/cli/commands/traces.py +145 -0
- praisonai/cli/commands/version.py +101 -0
- praisonai/cli/configuration/__init__.py +18 -0
- praisonai/cli/configuration/loader.py +353 -0
- praisonai/cli/configuration/paths.py +114 -0
- praisonai/cli/configuration/schema.py +164 -0
- praisonai/cli/features/__init__.py +268 -0
- praisonai/cli/features/acp.py +236 -0
- praisonai/cli/features/action_orchestrator.py +546 -0
- praisonai/cli/features/agent_scheduler.py +773 -0
- praisonai/cli/features/agent_tools.py +474 -0
- praisonai/cli/features/agents.py +375 -0
- praisonai/cli/features/at_mentions.py +471 -0
- praisonai/cli/features/auto_memory.py +182 -0
- praisonai/cli/features/autonomy_mode.py +490 -0
- praisonai/cli/features/background.py +356 -0
- praisonai/cli/features/base.py +168 -0
- praisonai/cli/features/capabilities.py +1326 -0
- praisonai/cli/features/checkpoints.py +338 -0
- praisonai/cli/features/code_intelligence.py +652 -0
- praisonai/cli/features/compaction.py +294 -0
- praisonai/cli/features/compare.py +534 -0
- praisonai/cli/features/cost_tracker.py +514 -0
- praisonai/cli/features/debug.py +810 -0
- praisonai/cli/features/deploy.py +517 -0
- praisonai/cli/features/diag.py +289 -0
- praisonai/cli/features/doctor/__init__.py +63 -0
- praisonai/cli/features/doctor/checks/__init__.py +24 -0
- praisonai/cli/features/doctor/checks/acp_checks.py +240 -0
- praisonai/cli/features/doctor/checks/config_checks.py +366 -0
- praisonai/cli/features/doctor/checks/db_checks.py +366 -0
- praisonai/cli/features/doctor/checks/env_checks.py +543 -0
- praisonai/cli/features/doctor/checks/lsp_checks.py +199 -0
- praisonai/cli/features/doctor/checks/mcp_checks.py +349 -0
- praisonai/cli/features/doctor/checks/memory_checks.py +268 -0
- praisonai/cli/features/doctor/checks/network_checks.py +251 -0
- praisonai/cli/features/doctor/checks/obs_checks.py +328 -0
- praisonai/cli/features/doctor/checks/performance_checks.py +235 -0
- praisonai/cli/features/doctor/checks/permissions_checks.py +259 -0
- praisonai/cli/features/doctor/checks/selftest_checks.py +322 -0
- praisonai/cli/features/doctor/checks/serve_checks.py +426 -0
- praisonai/cli/features/doctor/checks/skills_checks.py +231 -0
- praisonai/cli/features/doctor/checks/tools_checks.py +371 -0
- praisonai/cli/features/doctor/engine.py +266 -0
- praisonai/cli/features/doctor/formatters.py +310 -0
- praisonai/cli/features/doctor/handler.py +397 -0
- praisonai/cli/features/doctor/models.py +264 -0
- praisonai/cli/features/doctor/registry.py +239 -0
- praisonai/cli/features/endpoints.py +1019 -0
- praisonai/cli/features/eval.py +560 -0
- praisonai/cli/features/external_agents.py +231 -0
- praisonai/cli/features/fast_context.py +410 -0
- praisonai/cli/features/flow_display.py +566 -0
- praisonai/cli/features/git_integration.py +651 -0
- praisonai/cli/features/guardrail.py +171 -0
- praisonai/cli/features/handoff.py +185 -0
- praisonai/cli/features/hooks.py +583 -0
- praisonai/cli/features/image.py +384 -0
- praisonai/cli/features/interactive_runtime.py +585 -0
- praisonai/cli/features/interactive_tools.py +380 -0
- praisonai/cli/features/interactive_tui.py +603 -0
- praisonai/cli/features/jobs.py +632 -0
- praisonai/cli/features/knowledge.py +531 -0
- praisonai/cli/features/lite.py +244 -0
- praisonai/cli/features/lsp_cli.py +225 -0
- praisonai/cli/features/mcp.py +169 -0
- praisonai/cli/features/message_queue.py +587 -0
- praisonai/cli/features/metrics.py +211 -0
- praisonai/cli/features/n8n.py +673 -0
- praisonai/cli/features/observability.py +293 -0
- praisonai/cli/features/ollama.py +361 -0
- praisonai/cli/features/output_style.py +273 -0
- praisonai/cli/features/package.py +631 -0
- praisonai/cli/features/performance.py +308 -0
- praisonai/cli/features/persistence.py +636 -0
- praisonai/cli/features/profile.py +226 -0
- praisonai/cli/features/profiler/__init__.py +81 -0
- praisonai/cli/features/profiler/core.py +558 -0
- praisonai/cli/features/profiler/optimizations.py +652 -0
- praisonai/cli/features/profiler/suite.py +386 -0
- praisonai/cli/features/profiling.py +350 -0
- praisonai/cli/features/queue/__init__.py +73 -0
- praisonai/cli/features/queue/manager.py +395 -0
- praisonai/cli/features/queue/models.py +286 -0
- praisonai/cli/features/queue/persistence.py +564 -0
- praisonai/cli/features/queue/scheduler.py +484 -0
- praisonai/cli/features/queue/worker.py +372 -0
- praisonai/cli/features/recipe.py +1723 -0
- praisonai/cli/features/recipes.py +449 -0
- praisonai/cli/features/registry.py +229 -0
- praisonai/cli/features/repo_map.py +860 -0
- praisonai/cli/features/router.py +466 -0
- praisonai/cli/features/sandbox_executor.py +515 -0
- praisonai/cli/features/serve.py +829 -0
- praisonai/cli/features/session.py +222 -0
- praisonai/cli/features/skills.py +856 -0
- praisonai/cli/features/slash_commands.py +650 -0
- praisonai/cli/features/telemetry.py +179 -0
- praisonai/cli/features/templates.py +1384 -0
- praisonai/cli/features/thinking.py +305 -0
- praisonai/cli/features/todo.py +334 -0
- praisonai/cli/features/tools.py +680 -0
- praisonai/cli/features/tui/__init__.py +83 -0
- praisonai/cli/features/tui/app.py +580 -0
- praisonai/cli/features/tui/cli.py +566 -0
- praisonai/cli/features/tui/debug.py +511 -0
- praisonai/cli/features/tui/events.py +99 -0
- praisonai/cli/features/tui/mock_provider.py +328 -0
- praisonai/cli/features/tui/orchestrator.py +652 -0
- praisonai/cli/features/tui/screens/__init__.py +50 -0
- praisonai/cli/features/tui/screens/main.py +245 -0
- praisonai/cli/features/tui/screens/queue.py +174 -0
- praisonai/cli/features/tui/screens/session.py +124 -0
- praisonai/cli/features/tui/screens/settings.py +148 -0
- praisonai/cli/features/tui/widgets/__init__.py +56 -0
- praisonai/cli/features/tui/widgets/chat.py +261 -0
- praisonai/cli/features/tui/widgets/composer.py +224 -0
- praisonai/cli/features/tui/widgets/queue_panel.py +200 -0
- praisonai/cli/features/tui/widgets/status.py +167 -0
- praisonai/cli/features/tui/widgets/tool_panel.py +248 -0
- praisonai/cli/features/workflow.py +720 -0
- praisonai/cli/legacy.py +236 -0
- praisonai/cli/main.py +5559 -0
- praisonai/cli/schedule_cli.py +54 -0
- praisonai/cli/state/__init__.py +31 -0
- praisonai/cli/state/identifiers.py +161 -0
- praisonai/cli/state/sessions.py +313 -0
- praisonai/code/__init__.py +93 -0
- praisonai/code/agent_tools.py +344 -0
- praisonai/code/diff/__init__.py +21 -0
- praisonai/code/diff/diff_strategy.py +432 -0
- praisonai/code/tools/__init__.py +27 -0
- praisonai/code/tools/apply_diff.py +221 -0
- praisonai/code/tools/execute_command.py +275 -0
- praisonai/code/tools/list_files.py +274 -0
- praisonai/code/tools/read_file.py +206 -0
- praisonai/code/tools/search_replace.py +248 -0
- praisonai/code/tools/write_file.py +217 -0
- praisonai/code/utils/__init__.py +46 -0
- praisonai/code/utils/file_utils.py +307 -0
- praisonai/code/utils/ignore_utils.py +308 -0
- praisonai/code/utils/text_utils.py +276 -0
- praisonai/db/__init__.py +64 -0
- praisonai/db/adapter.py +531 -0
- praisonai/deploy/__init__.py +62 -0
- praisonai/deploy/api.py +231 -0
- praisonai/deploy/docker.py +454 -0
- praisonai/deploy/doctor.py +367 -0
- praisonai/deploy/main.py +327 -0
- praisonai/deploy/models.py +179 -0
- praisonai/deploy/providers/__init__.py +33 -0
- praisonai/deploy/providers/aws.py +331 -0
- praisonai/deploy/providers/azure.py +358 -0
- praisonai/deploy/providers/base.py +101 -0
- praisonai/deploy/providers/gcp.py +314 -0
- praisonai/deploy/schema.py +208 -0
- praisonai/deploy.py +185 -0
- praisonai/endpoints/__init__.py +53 -0
- praisonai/endpoints/a2u_server.py +410 -0
- praisonai/endpoints/discovery.py +165 -0
- praisonai/endpoints/providers/__init__.py +28 -0
- praisonai/endpoints/providers/a2a.py +253 -0
- praisonai/endpoints/providers/a2u.py +208 -0
- praisonai/endpoints/providers/agents_api.py +171 -0
- praisonai/endpoints/providers/base.py +231 -0
- praisonai/endpoints/providers/mcp.py +263 -0
- praisonai/endpoints/providers/recipe.py +206 -0
- praisonai/endpoints/providers/tools_mcp.py +150 -0
- praisonai/endpoints/registry.py +131 -0
- praisonai/endpoints/server.py +161 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +155 -0
- praisonai/integrations/__init__.py +56 -0
- praisonai/integrations/base.py +303 -0
- praisonai/integrations/claude_code.py +270 -0
- praisonai/integrations/codex_cli.py +255 -0
- praisonai/integrations/cursor_cli.py +195 -0
- praisonai/integrations/gemini_cli.py +222 -0
- praisonai/jobs/__init__.py +67 -0
- praisonai/jobs/executor.py +425 -0
- praisonai/jobs/models.py +230 -0
- praisonai/jobs/router.py +314 -0
- praisonai/jobs/server.py +186 -0
- praisonai/jobs/store.py +203 -0
- praisonai/llm/__init__.py +66 -0
- praisonai/llm/registry.py +382 -0
- praisonai/mcp_server/__init__.py +152 -0
- praisonai/mcp_server/adapters/__init__.py +74 -0
- praisonai/mcp_server/adapters/agents.py +128 -0
- praisonai/mcp_server/adapters/capabilities.py +168 -0
- praisonai/mcp_server/adapters/cli_tools.py +568 -0
- praisonai/mcp_server/adapters/extended_capabilities.py +462 -0
- praisonai/mcp_server/adapters/knowledge.py +93 -0
- praisonai/mcp_server/adapters/memory.py +104 -0
- praisonai/mcp_server/adapters/prompts.py +306 -0
- praisonai/mcp_server/adapters/resources.py +124 -0
- praisonai/mcp_server/adapters/tools_bridge.py +280 -0
- praisonai/mcp_server/auth/__init__.py +48 -0
- praisonai/mcp_server/auth/api_key.py +291 -0
- praisonai/mcp_server/auth/oauth.py +460 -0
- praisonai/mcp_server/auth/oidc.py +289 -0
- praisonai/mcp_server/auth/scopes.py +260 -0
- praisonai/mcp_server/cli.py +852 -0
- praisonai/mcp_server/elicitation.py +445 -0
- praisonai/mcp_server/icons.py +302 -0
- praisonai/mcp_server/recipe_adapter.py +573 -0
- praisonai/mcp_server/recipe_cli.py +824 -0
- praisonai/mcp_server/registry.py +703 -0
- praisonai/mcp_server/sampling.py +422 -0
- praisonai/mcp_server/server.py +490 -0
- praisonai/mcp_server/tasks.py +443 -0
- praisonai/mcp_server/transports/__init__.py +18 -0
- praisonai/mcp_server/transports/http_stream.py +376 -0
- praisonai/mcp_server/transports/stdio.py +132 -0
- praisonai/persistence/__init__.py +84 -0
- praisonai/persistence/config.py +238 -0
- praisonai/persistence/conversation/__init__.py +25 -0
- praisonai/persistence/conversation/async_mysql.py +427 -0
- praisonai/persistence/conversation/async_postgres.py +410 -0
- praisonai/persistence/conversation/async_sqlite.py +371 -0
- praisonai/persistence/conversation/base.py +151 -0
- praisonai/persistence/conversation/json_store.py +250 -0
- praisonai/persistence/conversation/mysql.py +387 -0
- praisonai/persistence/conversation/postgres.py +401 -0
- praisonai/persistence/conversation/singlestore.py +240 -0
- praisonai/persistence/conversation/sqlite.py +341 -0
- praisonai/persistence/conversation/supabase.py +203 -0
- praisonai/persistence/conversation/surrealdb.py +287 -0
- praisonai/persistence/factory.py +301 -0
- praisonai/persistence/hooks/__init__.py +18 -0
- praisonai/persistence/hooks/agent_hooks.py +297 -0
- praisonai/persistence/knowledge/__init__.py +26 -0
- praisonai/persistence/knowledge/base.py +144 -0
- praisonai/persistence/knowledge/cassandra.py +232 -0
- praisonai/persistence/knowledge/chroma.py +295 -0
- praisonai/persistence/knowledge/clickhouse.py +242 -0
- praisonai/persistence/knowledge/cosmosdb_vector.py +438 -0
- praisonai/persistence/knowledge/couchbase.py +286 -0
- praisonai/persistence/knowledge/lancedb.py +216 -0
- praisonai/persistence/knowledge/langchain_adapter.py +291 -0
- praisonai/persistence/knowledge/lightrag_adapter.py +212 -0
- praisonai/persistence/knowledge/llamaindex_adapter.py +256 -0
- praisonai/persistence/knowledge/milvus.py +277 -0
- praisonai/persistence/knowledge/mongodb_vector.py +306 -0
- praisonai/persistence/knowledge/pgvector.py +335 -0
- praisonai/persistence/knowledge/pinecone.py +253 -0
- praisonai/persistence/knowledge/qdrant.py +301 -0
- praisonai/persistence/knowledge/redis_vector.py +291 -0
- praisonai/persistence/knowledge/singlestore_vector.py +299 -0
- praisonai/persistence/knowledge/surrealdb_vector.py +309 -0
- praisonai/persistence/knowledge/upstash_vector.py +266 -0
- praisonai/persistence/knowledge/weaviate.py +223 -0
- praisonai/persistence/migrations/__init__.py +10 -0
- praisonai/persistence/migrations/manager.py +251 -0
- praisonai/persistence/orchestrator.py +406 -0
- praisonai/persistence/state/__init__.py +21 -0
- praisonai/persistence/state/async_mongodb.py +200 -0
- praisonai/persistence/state/base.py +107 -0
- praisonai/persistence/state/dynamodb.py +226 -0
- praisonai/persistence/state/firestore.py +175 -0
- praisonai/persistence/state/gcs.py +155 -0
- praisonai/persistence/state/memory.py +245 -0
- praisonai/persistence/state/mongodb.py +158 -0
- praisonai/persistence/state/redis.py +190 -0
- praisonai/persistence/state/upstash.py +144 -0
- praisonai/persistence/tests/__init__.py +3 -0
- praisonai/persistence/tests/test_all_backends.py +633 -0
- praisonai/profiler.py +1214 -0
- praisonai/recipe/__init__.py +134 -0
- praisonai/recipe/bridge.py +278 -0
- praisonai/recipe/core.py +893 -0
- praisonai/recipe/exceptions.py +54 -0
- praisonai/recipe/history.py +402 -0
- praisonai/recipe/models.py +266 -0
- praisonai/recipe/operations.py +440 -0
- praisonai/recipe/policy.py +422 -0
- praisonai/recipe/registry.py +849 -0
- praisonai/recipe/runtime.py +214 -0
- praisonai/recipe/security.py +711 -0
- praisonai/recipe/serve.py +859 -0
- praisonai/recipe/server.py +613 -0
- praisonai/scheduler/__init__.py +45 -0
- praisonai/scheduler/agent_scheduler.py +552 -0
- praisonai/scheduler/base.py +124 -0
- praisonai/scheduler/daemon_manager.py +225 -0
- praisonai/scheduler/state_manager.py +155 -0
- praisonai/scheduler/yaml_loader.py +193 -0
- praisonai/scheduler.py +194 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup.py +16 -0
- praisonai/templates/__init__.py +116 -0
- praisonai/templates/cache.py +364 -0
- praisonai/templates/dependency_checker.py +358 -0
- praisonai/templates/discovery.py +391 -0
- praisonai/templates/loader.py +564 -0
- praisonai/templates/registry.py +511 -0
- praisonai/templates/resolver.py +206 -0
- praisonai/templates/security.py +327 -0
- praisonai/templates/tool_override.py +498 -0
- praisonai/templates/tools_doctor.py +256 -0
- praisonai/test.py +105 -0
- praisonai/train.py +562 -0
- praisonai/train_vision.py +306 -0
- praisonai/ui/agents.py +824 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chainlit_compat.py +246 -0
- praisonai/ui/chat.py +532 -0
- praisonai/ui/code.py +717 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +284 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/database_config.py +56 -0
- praisonai/ui/db.py +294 -0
- praisonai/ui/realtime.py +488 -0
- praisonai/ui/realtimeclient/__init__.py +756 -0
- praisonai/ui/realtimeclient/tools.py +242 -0
- praisonai/ui/sql_alchemy.py +710 -0
- praisonai/upload_vision.py +140 -0
- praisonai/version.py +1 -0
- praisonai-3.0.0.dist-info/METADATA +3493 -0
- praisonai-3.0.0.dist-info/RECORD +393 -0
- praisonai-3.0.0.dist-info/WHEEL +5 -0
- praisonai-3.0.0.dist-info/entry_points.txt +4 -0
- praisonai-3.0.0.dist-info/top_level.txt +1 -0
praisonai/integrations/codex_cli.py
@@ -0,0 +1,255 @@
"""
OpenAI Codex CLI Integration.

Provides integration with OpenAI's Codex CLI for AI-powered coding tasks.

Features:
- Non-interactive execution with `codex exec`
- Full auto mode for file modifications
- Sandbox modes for security
- JSON streaming output
- Structured output with schemas

Usage:
    from praisonai.integrations import CodexCLIIntegration

    # Create integration
    codex = CodexCLIIntegration(workspace="/path/to/project")

    # Execute a coding task
    result = await codex.execute("Fix the authentication bug")

    # Execute with full auto (allows file modifications)
    codex_auto = CodexCLIIntegration(full_auto=True)
    result = await codex_auto.execute("Refactor the module")

    # Use as agent tool
    tool = codex.as_tool()
"""

import json
import os
from typing import AsyncIterator, Dict, Any, Optional, List

from .base import BaseCLIIntegration


class CodexCLIIntegration(BaseCLIIntegration):
    """
    Integration with OpenAI's Codex CLI.

    Attributes:
        full_auto: Whether to allow file modifications
        sandbox: Sandbox mode ("default", "danger-full-access")
        json_output: Whether to use JSON streaming output
        output_schema: Path to JSON schema for structured output
    """

    def __init__(
        self,
        workspace: str = ".",
        timeout: int = 300,
        full_auto: bool = False,
        sandbox: str = "default",
        json_output: bool = False,
        output_schema: Optional[str] = None,
        output_file: Optional[str] = None,
    ):
        """
        Initialize Codex CLI integration.

        Args:
            workspace: Working directory for CLI execution
            timeout: Timeout in seconds for CLI execution
            full_auto: Whether to allow file modifications (--full-auto)
            sandbox: Sandbox mode ("default", "danger-full-access")
            json_output: Whether to use JSON streaming output (--json)
            output_schema: Path to JSON schema for structured output
            output_file: Path to save the final output (-o)
        """
        super().__init__(workspace=workspace, timeout=timeout)

        self.full_auto = full_auto
        self.sandbox = sandbox
        self.json_output = json_output
        self.output_schema = output_schema
        self.output_file = output_file

    @property
    def cli_command(self) -> str:
        """Return the CLI command name."""
        return "codex"

    def _build_command(self, task: str, **options) -> List[str]:
        """
        Build the Codex CLI command.

        Args:
            task: The task to execute
            **options: Additional options

        Returns:
            List of command arguments
        """
        cmd = ["codex", "exec"]

        # Add working directory
        cmd.extend(["-C", self.workspace])

        # Add task
        cmd.append(task)

        # Add full auto flag if enabled
        if self.full_auto:
            cmd.append("--full-auto")

        # Add sandbox mode if not default
        if self.sandbox and self.sandbox != "default":
            cmd.extend(["--sandbox", self.sandbox])

        # Add JSON output flag if enabled
        if self.json_output:
            cmd.append("--json")

        # Add output schema if specified
        if self.output_schema:
            cmd.extend(["--output-schema", self.output_schema])

        # Add output file if specified
        if self.output_file:
            cmd.extend(["-o", self.output_file])

        return cmd

    async def execute(self, prompt: str, **options) -> str:
        """
        Execute Codex CLI and return the result.

        Args:
            prompt: The task/prompt to execute
            **options: Additional options

        Returns:
            str: The CLI output
        """
        cmd = self._build_command(prompt, **options)

        output = await self.execute_async(cmd)

        # Parse JSON Lines output if json_output is enabled
        if self.json_output:
            return self._parse_json_events(output)

        return output

    def _parse_json_events(self, output: str) -> str:
        """
        Parse JSON Lines output and extract the final result.

        Args:
            output: Raw JSON Lines output

        Returns:
            str: The extracted result
        """
        result_parts = []

        for line in output.strip().split('\n'):
            if not line.strip():
                continue

            try:
                event = json.loads(line)
                event_type = event.get("type", "")

                # Extract agent messages
                if event_type == "item.completed":
                    item = event.get("item", {})
                    if item.get("type") == "agent_message":
                        text = item.get("text", "")
                        if text:
                            result_parts.append(text)

            except json.JSONDecodeError:
                continue

        return '\n'.join(result_parts) if result_parts else output

    async def stream(self, prompt: str, **options) -> AsyncIterator[Dict[str, Any]]:
        """
        Stream output from Codex CLI.

        Args:
            prompt: The task/prompt to execute
            **options: Additional options

        Yields:
            dict: Parsed JSON events from the CLI
        """
        # Ensure JSON output is enabled for streaming
        original_json = self.json_output
        self.json_output = True

        try:
            cmd = self._build_command(prompt, **options)

            async for line in self.stream_async(cmd):
                if line.strip():
                    try:
                        event = json.loads(line)
                        yield event
                    except json.JSONDecodeError:
                        yield {"type": "text", "content": line}
        finally:
            self.json_output = original_json

    async def execute_with_schema(
        self,
        prompt: str,
        schema_path: str,
        output_path: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Execute Codex with structured output schema.

        Args:
            prompt: The task/prompt to execute
            schema_path: Path to the JSON schema file
            output_path: Optional path to save the output

        Returns:
            dict: Parsed structured output
        """
        original_schema = self.output_schema
        original_output = self.output_file

        self.output_schema = schema_path
        if output_path:
            self.output_file = output_path

        try:
            cmd = self._build_command(prompt)
            output = await self.execute_async(cmd)

            # If output file was specified, read from it
            if output_path and os.path.exists(output_path):
                with open(output_path, 'r') as f:
                    return json.load(f)

            # Otherwise parse the output
            try:
                return json.loads(output)
            except json.JSONDecodeError:
                return {"result": output}
        finally:
            self.output_schema = original_schema
            self.output_file = original_output

    def get_env(self) -> Dict[str, str]:
        """Get environment variables for CLI execution."""
        env = super().get_env()

        # Codex uses ChatGPT authentication or API key
        # The CLI handles authentication internally

        return env
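The constructor options above map one-to-one onto `codex exec` flags in `_build_command`. The following sketch is illustrative only (the workspace path, prompt, and file names are hypothetical) and simply shows the argument list the method above would return for a fully configured instance:

from praisonai.integrations import CodexCLIIntegration

# Hypothetical configuration; values are for illustration only.
codex = CodexCLIIntegration(
    workspace="/repo",
    full_auto=True,
    sandbox="danger-full-access",
    json_output=True,
    output_schema="schema.json",
    output_file="result.json",
)

# _build_command is internal; it is called here only to show the flag mapping.
print(codex._build_command("Fix the authentication bug"))
# ['codex', 'exec', '-C', '/repo', 'Fix the authentication bug',
#  '--full-auto', '--sandbox', 'danger-full-access', '--json',
#  '--output-schema', 'schema.json', '-o', 'result.json']

With `json_output=True`, `execute()` runs this command and `_parse_json_events` keeps only `item.completed` events whose item is an `agent_message`, joining their `text` fields into the returned string.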
praisonai/integrations/cursor_cli.py
@@ -0,0 +1,195 @@
"""
Cursor CLI Integration.

Provides integration with Cursor's CLI (cursor-agent) for AI-powered coding tasks.

Features:
- Headless mode execution with JSON output
- Force mode for file modifications
- Model selection
- Session resume support
- Streaming output with partial deltas

Usage:
    from praisonai.integrations import CursorCLIIntegration

    # Create integration
    cursor = CursorCLIIntegration(workspace="/path/to/project")

    # Execute a coding task
    result = await cursor.execute("Fix the authentication bug")

    # Execute with force mode (allows file modifications)
    cursor_force = CursorCLIIntegration(force=True)
    result = await cursor_force.execute("Refactor the module")

    # Use as agent tool
    tool = cursor.as_tool()
"""

import json
import os
from typing import AsyncIterator, Dict, Any, Optional, List

from .base import BaseCLIIntegration


class CursorCLIIntegration(BaseCLIIntegration):
    """
    Integration with Cursor CLI (cursor-agent).

    Attributes:
        output_format: Output format ("json", "text", "stream-json")
        force: Whether to allow file modifications
        model: Model to use (e.g., "gpt-5")
        stream_partial: Whether to stream partial output
        resume_session: Session ID to resume
    """

    def __init__(
        self,
        workspace: str = ".",
        timeout: int = 300,
        output_format: str = "json",
        force: bool = False,
        model: Optional[str] = None,
        stream_partial: bool = False,
        resume_session: Optional[str] = None,
    ):
        """
        Initialize Cursor CLI integration.

        Args:
            workspace: Working directory for CLI execution
            timeout: Timeout in seconds for CLI execution
            output_format: Output format ("json", "text", "stream-json")
            force: Whether to allow file modifications (--force)
            model: Model to use (e.g., "gpt-5")
            stream_partial: Whether to stream partial output (--stream-partial-output)
            resume_session: Session ID to resume (--resume)
        """
        super().__init__(workspace=workspace, timeout=timeout)

        self.output_format = output_format
        self.force = force
        self.model = model
        self.stream_partial = stream_partial
        self.resume_session = resume_session

    @property
    def cli_command(self) -> str:
        """Return the CLI command name."""
        return "cursor-agent"

    def _build_command(self, prompt: str, **options) -> List[str]:
        """
        Build the Cursor CLI command.

        Args:
            prompt: The prompt to send
            **options: Additional options

        Returns:
            List of command arguments
        """
        cmd = ["cursor-agent"]

        # Add print mode flag
        cmd.append("-p")

        # Add workspace
        cmd.extend(["--workspace", self.workspace])

        # Add force flag if enabled
        if self.force:
            cmd.append("--force")

        # Add model if specified
        if self.model:
            cmd.extend(["-m", self.model])

        # Add output format
        cmd.extend(["--output-format", self.output_format])

        # Add stream partial flag if enabled
        if self.stream_partial:
            cmd.append("--stream-partial-output")

        # Add resume session if specified
        if self.resume_session:
            cmd.extend(["--resume", self.resume_session])

        # Add prompt (must be last)
        cmd.append(prompt)

        return cmd

    async def execute(self, prompt: str, **options) -> str:
        """
        Execute Cursor CLI and return the result.

        Args:
            prompt: The prompt/query to send
            **options: Additional options

        Returns:
            str: The CLI output (parsed from JSON if output_format is "json")
        """
        cmd = self._build_command(prompt, **options)

        output = await self.execute_async(cmd)

        # Parse JSON output if applicable
        if self.output_format == "json":
            try:
                data = json.loads(output)
                # Extract the main result
                if isinstance(data, dict):
                    return data.get("result", data.get("content", str(data)))
                return str(data)
            except json.JSONDecodeError:
                return output

        return output

    async def stream(self, prompt: str, **options) -> AsyncIterator[Dict[str, Any]]:
        """
        Stream output from Cursor CLI.

        Args:
            prompt: The prompt/query to send
            **options: Additional options

        Yields:
            dict: Parsed JSON events from the CLI
        """
        # Use stream-json format for streaming
        original_format = self.output_format
        original_partial = self.stream_partial

        self.output_format = "stream-json"
        self.stream_partial = True

        try:
            cmd = self._build_command(prompt, **options)

            async for line in self.stream_async(cmd):
                if line.strip():
                    try:
                        event = json.loads(line)
                        yield event
                    except json.JSONDecodeError:
                        yield {"type": "text", "content": line}
        finally:
            self.output_format = original_format
            self.stream_partial = original_partial

    def get_env(self) -> Dict[str, str]:
        """Get environment variables for CLI execution."""
        env = super().get_env()

        # Add Cursor API key if available
        if "CURSOR_API_KEY" in os.environ:
            env["CURSOR_API_KEY"] = os.environ["CURSOR_API_KEY"]

        return env
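The Cursor integration follows the same pattern: constructor options map directly onto `cursor-agent` flags in `_build_command`, with the prompt always appended last. A purely illustrative configuration (the model name, path, and prompt are hypothetical) would produce:

from praisonai.integrations import CursorCLIIntegration

# Hypothetical configuration; values are for illustration only.
cursor = CursorCLIIntegration(workspace="/repo", force=True, model="gpt-5")

# _build_command is internal; it is called here only to show the flag mapping.
print(cursor._build_command("Refactor the module"))
# ['cursor-agent', '-p', '--workspace', '/repo', '--force',
#  '-m', 'gpt-5', '--output-format', 'json', 'Refactor the module']

For streaming, `stream()` temporarily switches `output_format` to "stream-json" and enables `--stream-partial-output`, restoring both settings in the `finally` block.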
praisonai/integrations/gemini_cli.py
@@ -0,0 +1,222 @@
"""
Gemini CLI Integration.

Provides integration with Google's Gemini CLI for AI-powered coding tasks.

Features:
- Headless mode execution with JSON output
- Model selection (gemini-2.5-pro, gemini-2.5-flash, etc.)
- Multi-directory context support
- Google Search grounding
- Usage statistics

Usage:
    from praisonai.integrations import GeminiCLIIntegration

    # Create integration
    gemini = GeminiCLIIntegration(workspace="/path/to/project")

    # Execute a coding task
    result = await gemini.execute("Analyze this codebase")

    # Get result with stats
    result, stats = await gemini.execute_with_stats("Explain the architecture")

    # Use as agent tool
    tool = gemini.as_tool()
"""

import json
import os
from typing import AsyncIterator, Dict, Any, Optional, List, Tuple

from .base import BaseCLIIntegration


class GeminiCLIIntegration(BaseCLIIntegration):
    """
    Integration with Google's Gemini CLI.

    Attributes:
        output_format: Output format ("json", "text", "stream-json")
        model: Gemini model to use
        include_directories: Additional directories to include in context
        sandbox: Whether to run in sandbox mode
    """

    def __init__(
        self,
        workspace: str = ".",
        timeout: int = 300,
        output_format: str = "json",
        model: str = "gemini-2.5-pro",
        include_directories: Optional[List[str]] = None,
        sandbox: bool = False,
    ):
        """
        Initialize Gemini CLI integration.

        Args:
            workspace: Working directory for CLI execution
            timeout: Timeout in seconds for CLI execution
            output_format: Output format ("json", "text", "stream-json")
            model: Gemini model to use (e.g., "gemini-2.5-pro", "gemini-2.5-flash")
            include_directories: Additional directories to include in context
            sandbox: Whether to run in sandbox mode
        """
        super().__init__(workspace=workspace, timeout=timeout)

        self.output_format = output_format
        self.model = model
        self.include_directories = include_directories
        self.sandbox = sandbox

        # Store last stats for retrieval
        self._last_stats: Optional[Dict[str, Any]] = None

    @property
    def cli_command(self) -> str:
        """Return the CLI command name."""
        return "gemini"

    def _build_command(self, prompt: str, **options) -> List[str]:
        """
        Build the Gemini CLI command.

        Args:
            prompt: The prompt to send
            **options: Additional options

        Returns:
            List of command arguments
        """
        cmd = ["gemini"]

        # Add model
        cmd.extend(["-m", self.model])

        # Add output format
        cmd.extend(["--output-format", self.output_format])

        # Add include directories if specified
        if self.include_directories:
            cmd.extend(["--include-directories", ",".join(self.include_directories)])

        # Add sandbox flag if enabled
        if self.sandbox:
            cmd.append("--sandbox")

        # Add YOLO mode for non-interactive execution
        cmd.append("--yolo")

        # Add prompt flag with prompt value
        cmd.extend(["-p", prompt])

        return cmd

    async def execute(self, prompt: str, **options) -> str:
        """
        Execute Gemini CLI and return the result.

        Args:
            prompt: The prompt/query to send
            **options: Additional options

        Returns:
            str: The CLI output (parsed from JSON if output_format is "json")
        """
        cmd = self._build_command(prompt, **options)

        output = await self.execute_async(cmd)

        # Parse JSON output if applicable
        if self.output_format == "json":
            try:
                data = json.loads(output)
                # Store stats for later retrieval
                self._last_stats = data.get("stats")
                # Extract the main response
                return data.get("response", str(data))
            except json.JSONDecodeError:
                return output

        return output

    async def execute_with_stats(self, prompt: str, **options) -> Tuple[str, Optional[Dict[str, Any]]]:
        """
        Execute Gemini CLI and return both result and usage stats.

        Args:
            prompt: The prompt/query to send
            **options: Additional options

        Returns:
            Tuple[str, dict]: (result, stats) where stats contains usage information
        """
        # Ensure JSON format for stats
        original_format = self.output_format
        self.output_format = "json"

        try:
            cmd = self._build_command(prompt, **options)
            output = await self.execute_async(cmd)

            try:
                data = json.loads(output)
                response = data.get("response", str(data))
                stats = data.get("stats")
                return response, stats
            except json.JSONDecodeError:
                return output, None
        finally:
            self.output_format = original_format

    async def stream(self, prompt: str, **options) -> AsyncIterator[Dict[str, Any]]:
        """
        Stream output from Gemini CLI.

        Args:
            prompt: The prompt/query to send
            **options: Additional options

        Yields:
            dict: Parsed JSON events from the CLI
        """
        # Use stream-json format for streaming
        original_format = self.output_format
        self.output_format = "stream-json"

        try:
            cmd = self._build_command(prompt, **options)

            async for line in self.stream_async(cmd):
                if line.strip():
                    try:
                        event = json.loads(line)
                        yield event
                    except json.JSONDecodeError:
                        yield {"type": "text", "content": line}
        finally:
            self.output_format = original_format

    def get_last_stats(self) -> Optional[Dict[str, Any]]:
        """
        Get the stats from the last execution.

        Returns:
            dict: Usage statistics or None if not available
        """
        return self._last_stats

    def get_env(self) -> Dict[str, str]:
        """Get environment variables for CLI execution."""
        env = super().get_env()

        # Add Google API key if available
        if "GOOGLE_API_KEY" in os.environ:
            env["GOOGLE_API_KEY"] = os.environ["GOOGLE_API_KEY"]
        elif "GEMINI_API_KEY" in os.environ:
            # Map GEMINI_API_KEY to GOOGLE_API_KEY
            env["GOOGLE_API_KEY"] = os.environ["GEMINI_API_KEY"]

        return env
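The Gemini integration builds its command the same way; note that `--yolo` is always appended for non-interactive runs, and the workspace is not passed as a CLI flag here (it is presumably applied as the working directory by the base class). An illustrative sketch with hypothetical values:

from praisonai.integrations import GeminiCLIIntegration

# Hypothetical configuration; values are for illustration only.
gemini = GeminiCLIIntegration(
    workspace="/repo",
    include_directories=["../shared-lib"],
    sandbox=True,
)

# _build_command is internal; it is called here only to show the flag mapping.
print(gemini._build_command("Analyze this codebase"))
# ['gemini', '-m', 'gemini-2.5-pro', '--output-format', 'json',
#  '--include-directories', '../shared-lib', '--sandbox', '--yolo',
#  '-p', 'Analyze this codebase']

With the default "json" output format, `execute()` parses the single JSON object the CLI prints, caches its "stats" field for `get_last_stats()`, and returns the "response" field.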