PraisonAI 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonai/__init__.py +54 -0
- praisonai/__main__.py +15 -0
- praisonai/acp/__init__.py +54 -0
- praisonai/acp/config.py +159 -0
- praisonai/acp/server.py +587 -0
- praisonai/acp/session.py +219 -0
- praisonai/adapters/__init__.py +50 -0
- praisonai/adapters/readers.py +395 -0
- praisonai/adapters/rerankers.py +315 -0
- praisonai/adapters/retrievers.py +394 -0
- praisonai/adapters/vector_stores.py +409 -0
- praisonai/agent_scheduler.py +337 -0
- praisonai/agents_generator.py +903 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +1197 -0
- praisonai/capabilities/__init__.py +275 -0
- praisonai/capabilities/a2a.py +140 -0
- praisonai/capabilities/assistants.py +283 -0
- praisonai/capabilities/audio.py +320 -0
- praisonai/capabilities/batches.py +469 -0
- praisonai/capabilities/completions.py +336 -0
- praisonai/capabilities/container_files.py +155 -0
- praisonai/capabilities/containers.py +93 -0
- praisonai/capabilities/embeddings.py +158 -0
- praisonai/capabilities/files.py +467 -0
- praisonai/capabilities/fine_tuning.py +293 -0
- praisonai/capabilities/guardrails.py +182 -0
- praisonai/capabilities/images.py +330 -0
- praisonai/capabilities/mcp.py +190 -0
- praisonai/capabilities/messages.py +270 -0
- praisonai/capabilities/moderations.py +154 -0
- praisonai/capabilities/ocr.py +217 -0
- praisonai/capabilities/passthrough.py +204 -0
- praisonai/capabilities/rag.py +207 -0
- praisonai/capabilities/realtime.py +160 -0
- praisonai/capabilities/rerank.py +165 -0
- praisonai/capabilities/responses.py +266 -0
- praisonai/capabilities/search.py +109 -0
- praisonai/capabilities/skills.py +133 -0
- praisonai/capabilities/vector_store_files.py +334 -0
- praisonai/capabilities/vector_stores.py +304 -0
- praisonai/capabilities/videos.py +141 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/chat/__init__.py +106 -0
- praisonai/chat/app.py +125 -0
- praisonai/cli/__init__.py +26 -0
- praisonai/cli/app.py +213 -0
- praisonai/cli/commands/__init__.py +75 -0
- praisonai/cli/commands/acp.py +70 -0
- praisonai/cli/commands/completion.py +333 -0
- praisonai/cli/commands/config.py +166 -0
- praisonai/cli/commands/debug.py +142 -0
- praisonai/cli/commands/diag.py +55 -0
- praisonai/cli/commands/doctor.py +166 -0
- praisonai/cli/commands/environment.py +179 -0
- praisonai/cli/commands/lsp.py +112 -0
- praisonai/cli/commands/mcp.py +210 -0
- praisonai/cli/commands/profile.py +457 -0
- praisonai/cli/commands/run.py +228 -0
- praisonai/cli/commands/schedule.py +150 -0
- praisonai/cli/commands/serve.py +97 -0
- praisonai/cli/commands/session.py +212 -0
- praisonai/cli/commands/traces.py +145 -0
- praisonai/cli/commands/version.py +101 -0
- praisonai/cli/configuration/__init__.py +18 -0
- praisonai/cli/configuration/loader.py +353 -0
- praisonai/cli/configuration/paths.py +114 -0
- praisonai/cli/configuration/schema.py +164 -0
- praisonai/cli/features/__init__.py +268 -0
- praisonai/cli/features/acp.py +236 -0
- praisonai/cli/features/action_orchestrator.py +546 -0
- praisonai/cli/features/agent_scheduler.py +773 -0
- praisonai/cli/features/agent_tools.py +474 -0
- praisonai/cli/features/agents.py +375 -0
- praisonai/cli/features/at_mentions.py +471 -0
- praisonai/cli/features/auto_memory.py +182 -0
- praisonai/cli/features/autonomy_mode.py +490 -0
- praisonai/cli/features/background.py +356 -0
- praisonai/cli/features/base.py +168 -0
- praisonai/cli/features/capabilities.py +1326 -0
- praisonai/cli/features/checkpoints.py +338 -0
- praisonai/cli/features/code_intelligence.py +652 -0
- praisonai/cli/features/compaction.py +294 -0
- praisonai/cli/features/compare.py +534 -0
- praisonai/cli/features/cost_tracker.py +514 -0
- praisonai/cli/features/debug.py +810 -0
- praisonai/cli/features/deploy.py +517 -0
- praisonai/cli/features/diag.py +289 -0
- praisonai/cli/features/doctor/__init__.py +63 -0
- praisonai/cli/features/doctor/checks/__init__.py +24 -0
- praisonai/cli/features/doctor/checks/acp_checks.py +240 -0
- praisonai/cli/features/doctor/checks/config_checks.py +366 -0
- praisonai/cli/features/doctor/checks/db_checks.py +366 -0
- praisonai/cli/features/doctor/checks/env_checks.py +543 -0
- praisonai/cli/features/doctor/checks/lsp_checks.py +199 -0
- praisonai/cli/features/doctor/checks/mcp_checks.py +349 -0
- praisonai/cli/features/doctor/checks/memory_checks.py +268 -0
- praisonai/cli/features/doctor/checks/network_checks.py +251 -0
- praisonai/cli/features/doctor/checks/obs_checks.py +328 -0
- praisonai/cli/features/doctor/checks/performance_checks.py +235 -0
- praisonai/cli/features/doctor/checks/permissions_checks.py +259 -0
- praisonai/cli/features/doctor/checks/selftest_checks.py +322 -0
- praisonai/cli/features/doctor/checks/serve_checks.py +426 -0
- praisonai/cli/features/doctor/checks/skills_checks.py +231 -0
- praisonai/cli/features/doctor/checks/tools_checks.py +371 -0
- praisonai/cli/features/doctor/engine.py +266 -0
- praisonai/cli/features/doctor/formatters.py +310 -0
- praisonai/cli/features/doctor/handler.py +397 -0
- praisonai/cli/features/doctor/models.py +264 -0
- praisonai/cli/features/doctor/registry.py +239 -0
- praisonai/cli/features/endpoints.py +1019 -0
- praisonai/cli/features/eval.py +560 -0
- praisonai/cli/features/external_agents.py +231 -0
- praisonai/cli/features/fast_context.py +410 -0
- praisonai/cli/features/flow_display.py +566 -0
- praisonai/cli/features/git_integration.py +651 -0
- praisonai/cli/features/guardrail.py +171 -0
- praisonai/cli/features/handoff.py +185 -0
- praisonai/cli/features/hooks.py +583 -0
- praisonai/cli/features/image.py +384 -0
- praisonai/cli/features/interactive_runtime.py +585 -0
- praisonai/cli/features/interactive_tools.py +380 -0
- praisonai/cli/features/interactive_tui.py +603 -0
- praisonai/cli/features/jobs.py +632 -0
- praisonai/cli/features/knowledge.py +531 -0
- praisonai/cli/features/lite.py +244 -0
- praisonai/cli/features/lsp_cli.py +225 -0
- praisonai/cli/features/mcp.py +169 -0
- praisonai/cli/features/message_queue.py +587 -0
- praisonai/cli/features/metrics.py +211 -0
- praisonai/cli/features/n8n.py +673 -0
- praisonai/cli/features/observability.py +293 -0
- praisonai/cli/features/ollama.py +361 -0
- praisonai/cli/features/output_style.py +273 -0
- praisonai/cli/features/package.py +631 -0
- praisonai/cli/features/performance.py +308 -0
- praisonai/cli/features/persistence.py +636 -0
- praisonai/cli/features/profile.py +226 -0
- praisonai/cli/features/profiler/__init__.py +81 -0
- praisonai/cli/features/profiler/core.py +558 -0
- praisonai/cli/features/profiler/optimizations.py +652 -0
- praisonai/cli/features/profiler/suite.py +386 -0
- praisonai/cli/features/profiling.py +350 -0
- praisonai/cli/features/queue/__init__.py +73 -0
- praisonai/cli/features/queue/manager.py +395 -0
- praisonai/cli/features/queue/models.py +286 -0
- praisonai/cli/features/queue/persistence.py +564 -0
- praisonai/cli/features/queue/scheduler.py +484 -0
- praisonai/cli/features/queue/worker.py +372 -0
- praisonai/cli/features/recipe.py +1723 -0
- praisonai/cli/features/recipes.py +449 -0
- praisonai/cli/features/registry.py +229 -0
- praisonai/cli/features/repo_map.py +860 -0
- praisonai/cli/features/router.py +466 -0
- praisonai/cli/features/sandbox_executor.py +515 -0
- praisonai/cli/features/serve.py +829 -0
- praisonai/cli/features/session.py +222 -0
- praisonai/cli/features/skills.py +856 -0
- praisonai/cli/features/slash_commands.py +650 -0
- praisonai/cli/features/telemetry.py +179 -0
- praisonai/cli/features/templates.py +1384 -0
- praisonai/cli/features/thinking.py +305 -0
- praisonai/cli/features/todo.py +334 -0
- praisonai/cli/features/tools.py +680 -0
- praisonai/cli/features/tui/__init__.py +83 -0
- praisonai/cli/features/tui/app.py +580 -0
- praisonai/cli/features/tui/cli.py +566 -0
- praisonai/cli/features/tui/debug.py +511 -0
- praisonai/cli/features/tui/events.py +99 -0
- praisonai/cli/features/tui/mock_provider.py +328 -0
- praisonai/cli/features/tui/orchestrator.py +652 -0
- praisonai/cli/features/tui/screens/__init__.py +50 -0
- praisonai/cli/features/tui/screens/main.py +245 -0
- praisonai/cli/features/tui/screens/queue.py +174 -0
- praisonai/cli/features/tui/screens/session.py +124 -0
- praisonai/cli/features/tui/screens/settings.py +148 -0
- praisonai/cli/features/tui/widgets/__init__.py +56 -0
- praisonai/cli/features/tui/widgets/chat.py +261 -0
- praisonai/cli/features/tui/widgets/composer.py +224 -0
- praisonai/cli/features/tui/widgets/queue_panel.py +200 -0
- praisonai/cli/features/tui/widgets/status.py +167 -0
- praisonai/cli/features/tui/widgets/tool_panel.py +248 -0
- praisonai/cli/features/workflow.py +720 -0
- praisonai/cli/legacy.py +236 -0
- praisonai/cli/main.py +5559 -0
- praisonai/cli/schedule_cli.py +54 -0
- praisonai/cli/state/__init__.py +31 -0
- praisonai/cli/state/identifiers.py +161 -0
- praisonai/cli/state/sessions.py +313 -0
- praisonai/code/__init__.py +93 -0
- praisonai/code/agent_tools.py +344 -0
- praisonai/code/diff/__init__.py +21 -0
- praisonai/code/diff/diff_strategy.py +432 -0
- praisonai/code/tools/__init__.py +27 -0
- praisonai/code/tools/apply_diff.py +221 -0
- praisonai/code/tools/execute_command.py +275 -0
- praisonai/code/tools/list_files.py +274 -0
- praisonai/code/tools/read_file.py +206 -0
- praisonai/code/tools/search_replace.py +248 -0
- praisonai/code/tools/write_file.py +217 -0
- praisonai/code/utils/__init__.py +46 -0
- praisonai/code/utils/file_utils.py +307 -0
- praisonai/code/utils/ignore_utils.py +308 -0
- praisonai/code/utils/text_utils.py +276 -0
- praisonai/db/__init__.py +64 -0
- praisonai/db/adapter.py +531 -0
- praisonai/deploy/__init__.py +62 -0
- praisonai/deploy/api.py +231 -0
- praisonai/deploy/docker.py +454 -0
- praisonai/deploy/doctor.py +367 -0
- praisonai/deploy/main.py +327 -0
- praisonai/deploy/models.py +179 -0
- praisonai/deploy/providers/__init__.py +33 -0
- praisonai/deploy/providers/aws.py +331 -0
- praisonai/deploy/providers/azure.py +358 -0
- praisonai/deploy/providers/base.py +101 -0
- praisonai/deploy/providers/gcp.py +314 -0
- praisonai/deploy/schema.py +208 -0
- praisonai/deploy.py +185 -0
- praisonai/endpoints/__init__.py +53 -0
- praisonai/endpoints/a2u_server.py +410 -0
- praisonai/endpoints/discovery.py +165 -0
- praisonai/endpoints/providers/__init__.py +28 -0
- praisonai/endpoints/providers/a2a.py +253 -0
- praisonai/endpoints/providers/a2u.py +208 -0
- praisonai/endpoints/providers/agents_api.py +171 -0
- praisonai/endpoints/providers/base.py +231 -0
- praisonai/endpoints/providers/mcp.py +263 -0
- praisonai/endpoints/providers/recipe.py +206 -0
- praisonai/endpoints/providers/tools_mcp.py +150 -0
- praisonai/endpoints/registry.py +131 -0
- praisonai/endpoints/server.py +161 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +155 -0
- praisonai/integrations/__init__.py +56 -0
- praisonai/integrations/base.py +303 -0
- praisonai/integrations/claude_code.py +270 -0
- praisonai/integrations/codex_cli.py +255 -0
- praisonai/integrations/cursor_cli.py +195 -0
- praisonai/integrations/gemini_cli.py +222 -0
- praisonai/jobs/__init__.py +67 -0
- praisonai/jobs/executor.py +425 -0
- praisonai/jobs/models.py +230 -0
- praisonai/jobs/router.py +314 -0
- praisonai/jobs/server.py +186 -0
- praisonai/jobs/store.py +203 -0
- praisonai/llm/__init__.py +66 -0
- praisonai/llm/registry.py +382 -0
- praisonai/mcp_server/__init__.py +152 -0
- praisonai/mcp_server/adapters/__init__.py +74 -0
- praisonai/mcp_server/adapters/agents.py +128 -0
- praisonai/mcp_server/adapters/capabilities.py +168 -0
- praisonai/mcp_server/adapters/cli_tools.py +568 -0
- praisonai/mcp_server/adapters/extended_capabilities.py +462 -0
- praisonai/mcp_server/adapters/knowledge.py +93 -0
- praisonai/mcp_server/adapters/memory.py +104 -0
- praisonai/mcp_server/adapters/prompts.py +306 -0
- praisonai/mcp_server/adapters/resources.py +124 -0
- praisonai/mcp_server/adapters/tools_bridge.py +280 -0
- praisonai/mcp_server/auth/__init__.py +48 -0
- praisonai/mcp_server/auth/api_key.py +291 -0
- praisonai/mcp_server/auth/oauth.py +460 -0
- praisonai/mcp_server/auth/oidc.py +289 -0
- praisonai/mcp_server/auth/scopes.py +260 -0
- praisonai/mcp_server/cli.py +852 -0
- praisonai/mcp_server/elicitation.py +445 -0
- praisonai/mcp_server/icons.py +302 -0
- praisonai/mcp_server/recipe_adapter.py +573 -0
- praisonai/mcp_server/recipe_cli.py +824 -0
- praisonai/mcp_server/registry.py +703 -0
- praisonai/mcp_server/sampling.py +422 -0
- praisonai/mcp_server/server.py +490 -0
- praisonai/mcp_server/tasks.py +443 -0
- praisonai/mcp_server/transports/__init__.py +18 -0
- praisonai/mcp_server/transports/http_stream.py +376 -0
- praisonai/mcp_server/transports/stdio.py +132 -0
- praisonai/persistence/__init__.py +84 -0
- praisonai/persistence/config.py +238 -0
- praisonai/persistence/conversation/__init__.py +25 -0
- praisonai/persistence/conversation/async_mysql.py +427 -0
- praisonai/persistence/conversation/async_postgres.py +410 -0
- praisonai/persistence/conversation/async_sqlite.py +371 -0
- praisonai/persistence/conversation/base.py +151 -0
- praisonai/persistence/conversation/json_store.py +250 -0
- praisonai/persistence/conversation/mysql.py +387 -0
- praisonai/persistence/conversation/postgres.py +401 -0
- praisonai/persistence/conversation/singlestore.py +240 -0
- praisonai/persistence/conversation/sqlite.py +341 -0
- praisonai/persistence/conversation/supabase.py +203 -0
- praisonai/persistence/conversation/surrealdb.py +287 -0
- praisonai/persistence/factory.py +301 -0
- praisonai/persistence/hooks/__init__.py +18 -0
- praisonai/persistence/hooks/agent_hooks.py +297 -0
- praisonai/persistence/knowledge/__init__.py +26 -0
- praisonai/persistence/knowledge/base.py +144 -0
- praisonai/persistence/knowledge/cassandra.py +232 -0
- praisonai/persistence/knowledge/chroma.py +295 -0
- praisonai/persistence/knowledge/clickhouse.py +242 -0
- praisonai/persistence/knowledge/cosmosdb_vector.py +438 -0
- praisonai/persistence/knowledge/couchbase.py +286 -0
- praisonai/persistence/knowledge/lancedb.py +216 -0
- praisonai/persistence/knowledge/langchain_adapter.py +291 -0
- praisonai/persistence/knowledge/lightrag_adapter.py +212 -0
- praisonai/persistence/knowledge/llamaindex_adapter.py +256 -0
- praisonai/persistence/knowledge/milvus.py +277 -0
- praisonai/persistence/knowledge/mongodb_vector.py +306 -0
- praisonai/persistence/knowledge/pgvector.py +335 -0
- praisonai/persistence/knowledge/pinecone.py +253 -0
- praisonai/persistence/knowledge/qdrant.py +301 -0
- praisonai/persistence/knowledge/redis_vector.py +291 -0
- praisonai/persistence/knowledge/singlestore_vector.py +299 -0
- praisonai/persistence/knowledge/surrealdb_vector.py +309 -0
- praisonai/persistence/knowledge/upstash_vector.py +266 -0
- praisonai/persistence/knowledge/weaviate.py +223 -0
- praisonai/persistence/migrations/__init__.py +10 -0
- praisonai/persistence/migrations/manager.py +251 -0
- praisonai/persistence/orchestrator.py +406 -0
- praisonai/persistence/state/__init__.py +21 -0
- praisonai/persistence/state/async_mongodb.py +200 -0
- praisonai/persistence/state/base.py +107 -0
- praisonai/persistence/state/dynamodb.py +226 -0
- praisonai/persistence/state/firestore.py +175 -0
- praisonai/persistence/state/gcs.py +155 -0
- praisonai/persistence/state/memory.py +245 -0
- praisonai/persistence/state/mongodb.py +158 -0
- praisonai/persistence/state/redis.py +190 -0
- praisonai/persistence/state/upstash.py +144 -0
- praisonai/persistence/tests/__init__.py +3 -0
- praisonai/persistence/tests/test_all_backends.py +633 -0
- praisonai/profiler.py +1214 -0
- praisonai/recipe/__init__.py +134 -0
- praisonai/recipe/bridge.py +278 -0
- praisonai/recipe/core.py +893 -0
- praisonai/recipe/exceptions.py +54 -0
- praisonai/recipe/history.py +402 -0
- praisonai/recipe/models.py +266 -0
- praisonai/recipe/operations.py +440 -0
- praisonai/recipe/policy.py +422 -0
- praisonai/recipe/registry.py +849 -0
- praisonai/recipe/runtime.py +214 -0
- praisonai/recipe/security.py +711 -0
- praisonai/recipe/serve.py +859 -0
- praisonai/recipe/server.py +613 -0
- praisonai/scheduler/__init__.py +45 -0
- praisonai/scheduler/agent_scheduler.py +552 -0
- praisonai/scheduler/base.py +124 -0
- praisonai/scheduler/daemon_manager.py +225 -0
- praisonai/scheduler/state_manager.py +155 -0
- praisonai/scheduler/yaml_loader.py +193 -0
- praisonai/scheduler.py +194 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup.py +16 -0
- praisonai/templates/__init__.py +116 -0
- praisonai/templates/cache.py +364 -0
- praisonai/templates/dependency_checker.py +358 -0
- praisonai/templates/discovery.py +391 -0
- praisonai/templates/loader.py +564 -0
- praisonai/templates/registry.py +511 -0
- praisonai/templates/resolver.py +206 -0
- praisonai/templates/security.py +327 -0
- praisonai/templates/tool_override.py +498 -0
- praisonai/templates/tools_doctor.py +256 -0
- praisonai/test.py +105 -0
- praisonai/train.py +562 -0
- praisonai/train_vision.py +306 -0
- praisonai/ui/agents.py +824 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chainlit_compat.py +246 -0
- praisonai/ui/chat.py +532 -0
- praisonai/ui/code.py +717 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +284 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/database_config.py +56 -0
- praisonai/ui/db.py +294 -0
- praisonai/ui/realtime.py +488 -0
- praisonai/ui/realtimeclient/__init__.py +756 -0
- praisonai/ui/realtimeclient/tools.py +242 -0
- praisonai/ui/sql_alchemy.py +710 -0
- praisonai/upload_vision.py +140 -0
- praisonai/version.py +1 -0
- praisonai-3.0.0.dist-info/METADATA +3493 -0
- praisonai-3.0.0.dist-info/RECORD +393 -0
- praisonai-3.0.0.dist-info/WHEEL +5 -0
- praisonai-3.0.0.dist-info/entry_points.txt +4 -0
- praisonai-3.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,652 @@
|
|
|
1
|
+
"""
|
|
2
|
+
TUI Orchestrator for PraisonAI.
|
|
3
|
+
|
|
4
|
+
Provides unified event handling for both interactive TUI and headless simulation modes.
|
|
5
|
+
Inspired by gemini-cli's event-driven architecture and codex-cli's state management.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
import time
|
|
12
|
+
import uuid
|
|
13
|
+
from dataclasses import dataclass, field
|
|
14
|
+
from enum import Enum
|
|
15
|
+
from typing import Any, Callable, Dict, List, Optional, TextIO
|
|
16
|
+
import sys
|
|
17
|
+
|
|
18
|
+
from .events import TUIEvent, TUIEventType
|
|
19
|
+
from ..queue import QueueManager, QueueConfig, QueuedRun, RunState
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class OutputMode(str, Enum):
    """Output mode for headless simulation.

    Controls how the orchestrator mirrors events to its output stream
    when running without an interactive TUI.
    """
    # Human-readable event/snapshot rendering.
    PRETTY = "pretty"
    # One JSON record per event (machine-readable).
    JSONL = "jsonl"
    # Suppress all event output.
    SILENT = "silent"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class UIStateModel:
    """
    In-memory UI state model.

    Mirrors the state that would be displayed in the TUI, enabling
    headless simulation and snapshot generation.
    """
    # Session info
    session_id: str = ""
    workspace: str = ""

    # Model/config
    model: str = "gpt-4o-mini"

    # Chat history (bounded by max_messages)
    messages: List[Dict[str, Any]] = field(default_factory=list)
    max_messages: int = 1000

    # Current streaming state
    current_run_id: Optional[str] = None
    streaming_content: str = ""
    is_processing: bool = False

    # Queue state (dict snapshots of queued/running runs)
    queued_runs: List[Dict[str, Any]] = field(default_factory=list)
    running_runs: List[Dict[str, Any]] = field(default_factory=list)

    # Tool calls
    pending_tool_calls: List[Dict[str, Any]] = field(default_factory=list)
    recent_tool_calls: List[Dict[str, Any]] = field(default_factory=list)

    # Metrics
    total_tokens: int = 0
    total_cost: float = 0.0

    # Focus/screen state (for simulation)
    current_screen: str = "main"
    focused_widget: str = "composer"

    # Events log, bounded by max_events (for trace/replay)
    events: List[Dict[str, Any]] = field(default_factory=list)
    max_events: int = 10000

    def add_message(self, role: str, content: str, **kwargs) -> None:
        """Append a chat message, trimming history to ``max_messages``.

        Extra keyword arguments are merged into the message dict and may
        override the standard keys (role/content/timestamp), matching the
        original ``{..., **kwargs}`` precedence.
        """
        entry: Dict[str, Any] = {
            "role": role,
            "content": content,
            "timestamp": time.time(),
        }
        entry.update(kwargs)
        self.messages.append(entry)
        overflow = len(self.messages) - self.max_messages
        if overflow > 0:
            # Drop the oldest entries so the list stays bounded.
            self.messages = self.messages[overflow:]

    def add_event(self, event: TUIEvent) -> None:
        """Append *event* to the bounded event log."""
        self.events.append({
            "type": event.event_type.value,
            "timestamp": event.timestamp,
            "run_id": event.run_id,
            "session_id": event.session_id,
            "agent_name": event.agent_name,
            "data": event.data,
        })
        overflow = len(self.events) - self.max_events
        if overflow > 0:
            # Same bounding policy as messages: keep only the newest entries.
            self.events = self.events[overflow:]

    def to_snapshot(self) -> Dict[str, Any]:
        """Return a compact dict snapshot of the current UI state."""
        return {
            "session_id": self.session_id,
            "model": self.model,
            "current_screen": self.current_screen,
            "focused_widget": self.focused_widget,
            "is_processing": self.is_processing,
            "current_run_id": self.current_run_id,
            "streaming_content_length": len(self.streaming_content),
            "message_count": len(self.messages),
            # Slicing an empty list yields [], so no special-casing needed.
            "last_messages": self.messages[-5:],
            "queued_count": len(self.queued_runs),
            "running_count": len(self.running_runs),
            "pending_tool_calls": len(self.pending_tool_calls),
            "total_tokens": self.total_tokens,
            "total_cost": self.total_cost,
        }

    def render_snapshot_pretty(self) -> str:
        """Render a pretty text snapshot like the TUI would show."""
        rule = "─" * 60
        lines: List[str] = []

        # Status bar: session/model plus optional metric segments.
        segments = [
            f"◉ PraisonAI │ Session: {self.session_id[:8] if self.session_id else 'new'}",
            f" │ Model: {self.model}",
        ]
        if self.total_tokens > 0:
            segments.append(f" │ Tokens: {self.total_tokens:,}")
        if self.total_cost > 0:
            segments.append(f" │ ${self.total_cost:.4f}")
        if self.is_processing:
            segments.append(" │ ⟳ Processing...")
        lines.extend([rule, "".join(segments), rule])

        # Chat messages (last 5), each truncated to 100 chars.
        lines.append("\n[Chat History]")
        for msg in self.messages[-5:]:
            role = msg.get("role", "unknown")
            full_text = msg.get("content", "")
            shown = full_text[:100] + ("..." if len(full_text) > 100 else "")
            lines.append(f" {role.upper()}: {shown}")

        # Streaming buffer preview (tail of up to 200 chars).
        if self.streaming_content:
            total = len(self.streaming_content)
            lines.append(f"\n[Streaming] ({total} chars)")
            tail = self.streaming_content[-200:]
            if total > 200:
                tail = "..." + tail
            lines.append(f" {tail}")

        # Queue status
        lines.append(f"\n[Queue] Queued: {len(self.queued_runs)} │ Running: {len(self.running_runs)}")

        # Pending tool-call approvals (show at most 3).
        if self.pending_tool_calls:
            lines.append(f"\n[Pending Approvals] {len(self.pending_tool_calls)}")
            for tc in self.pending_tool_calls[:3]:
                lines.append(f" ⚠ {tc.get('tool_name', 'unknown')}")

        # Screen/focus
        lines.append(f"\n[UI] Screen: {self.current_screen} │ Focus: {self.focused_widget}")
        lines.append(rule)

        return "\n".join(lines)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
class TuiOrchestrator:
|
|
171
|
+
"""
|
|
172
|
+
Unified orchestrator for TUI and headless modes.
|
|
173
|
+
|
|
174
|
+
Subscribes to the event bus and maintains an in-memory UI state model.
|
|
175
|
+
Can drive Textual widgets (interactive) or output snapshots (headless).
|
|
176
|
+
"""
|
|
177
|
+
|
|
178
|
+
def __init__(
|
|
179
|
+
self,
|
|
180
|
+
queue_manager: Optional[QueueManager] = None,
|
|
181
|
+
queue_config: Optional[QueueConfig] = None,
|
|
182
|
+
output_mode: OutputMode = OutputMode.PRETTY,
|
|
183
|
+
output_stream: Optional[TextIO] = None,
|
|
184
|
+
jsonl_path: Optional[str] = None,
|
|
185
|
+
debug: bool = False,
|
|
186
|
+
):
|
|
187
|
+
self.queue_config = queue_config or QueueConfig()
|
|
188
|
+
self.queue_manager = queue_manager
|
|
189
|
+
self.output_mode = output_mode
|
|
190
|
+
self.output_stream = output_stream or sys.stdout
|
|
191
|
+
self.jsonl_path = jsonl_path
|
|
192
|
+
self.debug = debug
|
|
193
|
+
|
|
194
|
+
# State
|
|
195
|
+
self.state = UIStateModel()
|
|
196
|
+
self._event_callbacks: List[Callable[[TUIEvent], None]] = []
|
|
197
|
+
self._jsonl_file: Optional[TextIO] = None
|
|
198
|
+
self._running = False
|
|
199
|
+
|
|
200
|
+
# Trace ID for this orchestrator session
|
|
201
|
+
self.trace_id = str(uuid.uuid4())[:8]
|
|
202
|
+
|
|
203
|
+
async def start(self, session_id: Optional[str] = None, recover: bool = True) -> None:
|
|
204
|
+
"""Start the orchestrator."""
|
|
205
|
+
self.state.session_id = session_id or str(uuid.uuid4())[:8]
|
|
206
|
+
self._running = True
|
|
207
|
+
|
|
208
|
+
# Open JSONL file if specified
|
|
209
|
+
if self.jsonl_path:
|
|
210
|
+
self._jsonl_file = open(self.jsonl_path, "a")
|
|
211
|
+
|
|
212
|
+
# Initialize queue manager if not provided
|
|
213
|
+
if not self.queue_manager:
|
|
214
|
+
self.queue_manager = QueueManager(
|
|
215
|
+
config=self.queue_config,
|
|
216
|
+
on_output=self._handle_output,
|
|
217
|
+
on_complete=self._handle_complete,
|
|
218
|
+
on_error=self._handle_error,
|
|
219
|
+
)
|
|
220
|
+
await self.queue_manager.start(recover=recover)
|
|
221
|
+
self.queue_manager.set_session(self.state.session_id)
|
|
222
|
+
|
|
223
|
+
self._emit_event(TUIEvent(
|
|
224
|
+
event_type=TUIEventType.SESSION_STARTED,
|
|
225
|
+
session_id=self.state.session_id,
|
|
226
|
+
data={"trace_id": self.trace_id}
|
|
227
|
+
))
|
|
228
|
+
|
|
229
|
+
if self.debug:
|
|
230
|
+
self._log_debug(f"Orchestrator started: session={self.state.session_id}, trace={self.trace_id}")
|
|
231
|
+
|
|
232
|
+
async def stop(self) -> None:
|
|
233
|
+
"""Stop the orchestrator."""
|
|
234
|
+
self._running = False
|
|
235
|
+
|
|
236
|
+
if self.queue_manager:
|
|
237
|
+
await self.queue_manager.stop()
|
|
238
|
+
|
|
239
|
+
if self._jsonl_file:
|
|
240
|
+
self._jsonl_file.close()
|
|
241
|
+
self._jsonl_file = None
|
|
242
|
+
|
|
243
|
+
if self.debug:
|
|
244
|
+
self._log_debug("Orchestrator stopped")
|
|
245
|
+
|
|
246
|
+
    def add_event_callback(self, callback: Callable[[TUIEvent], None]) -> None:
        """Register a callback invoked for every emitted TUIEvent.

        Callbacks are called synchronously from ``_emit_event``; exceptions
        they raise are caught and logged there rather than propagated.
        """
        self._event_callbacks.append(callback)
|
|
249
|
+
|
|
250
|
+
def _emit_event(self, event: TUIEvent) -> None:
|
|
251
|
+
"""Emit an event to all listeners."""
|
|
252
|
+
# Add to state log
|
|
253
|
+
self.state.add_event(event)
|
|
254
|
+
|
|
255
|
+
# Write to JSONL if enabled
|
|
256
|
+
if self._jsonl_file:
|
|
257
|
+
self._write_jsonl(event)
|
|
258
|
+
|
|
259
|
+
# Call callbacks
|
|
260
|
+
for callback in self._event_callbacks:
|
|
261
|
+
try:
|
|
262
|
+
callback(event)
|
|
263
|
+
except Exception as e:
|
|
264
|
+
logger.error(f"Event callback error: {e}")
|
|
265
|
+
|
|
266
|
+
# Output based on mode
|
|
267
|
+
if self.output_mode == OutputMode.JSONL:
|
|
268
|
+
self._print_jsonl(event)
|
|
269
|
+
elif self.output_mode == OutputMode.PRETTY and self.debug:
|
|
270
|
+
self._print_event_pretty(event)
|
|
271
|
+
|
|
272
|
+
def _write_jsonl(self, event: TUIEvent) -> None:
|
|
273
|
+
"""Write event to JSONL file."""
|
|
274
|
+
if not self._jsonl_file:
|
|
275
|
+
return
|
|
276
|
+
|
|
277
|
+
record = {
|
|
278
|
+
"timestamp": event.timestamp,
|
|
279
|
+
"trace_id": self.trace_id,
|
|
280
|
+
"session_id": event.session_id or self.state.session_id,
|
|
281
|
+
"event_type": event.event_type.value,
|
|
282
|
+
"run_id": event.run_id,
|
|
283
|
+
"agent_name": event.agent_name,
|
|
284
|
+
"data": event.data,
|
|
285
|
+
}
|
|
286
|
+
self._jsonl_file.write(json.dumps(record, default=str) + "\n")
|
|
287
|
+
self._jsonl_file.flush()
|
|
288
|
+
|
|
289
|
+
def _print_jsonl(self, event: TUIEvent) -> None:
|
|
290
|
+
"""Print event as JSONL to output stream."""
|
|
291
|
+
record = {
|
|
292
|
+
"timestamp": event.timestamp,
|
|
293
|
+
"event_type": event.event_type.value,
|
|
294
|
+
"run_id": event.run_id,
|
|
295
|
+
"data": event.data,
|
|
296
|
+
}
|
|
297
|
+
print(json.dumps(record, default=str), file=self.output_stream)
|
|
298
|
+
|
|
299
|
+
def _print_event_pretty(self, event: TUIEvent) -> None:
|
|
300
|
+
"""Print event in pretty format."""
|
|
301
|
+
ts = time.strftime("%H:%M:%S", time.localtime(event.timestamp))
|
|
302
|
+
print(f"[{ts}] {event.event_type.value}", end="", file=self.output_stream)
|
|
303
|
+
if event.run_id:
|
|
304
|
+
print(f" run={event.run_id[:8]}", end="", file=self.output_stream)
|
|
305
|
+
if event.data:
|
|
306
|
+
data_preview = str(event.data)[:50]
|
|
307
|
+
print(f" {data_preview}", end="", file=self.output_stream)
|
|
308
|
+
print(file=self.output_stream)
|
|
309
|
+
|
|
310
|
+
def _log_debug(self, message: str) -> None:
|
|
311
|
+
"""Log debug message."""
|
|
312
|
+
if self.debug:
|
|
313
|
+
ts = time.strftime("%H:%M:%S")
|
|
314
|
+
print(f"[DEBUG {ts}] {message}", file=self.output_stream)
|
|
315
|
+
|
|
316
|
+
# Queue callbacks
|
|
317
|
+
|
|
318
|
+
async def _handle_output(self, run_id: str, chunk: str) -> None:
|
|
319
|
+
"""Handle streaming output."""
|
|
320
|
+
if run_id == self.state.current_run_id:
|
|
321
|
+
self.state.streaming_content += chunk
|
|
322
|
+
|
|
323
|
+
self._emit_event(TUIEvent.output_chunk(run_id, chunk))
|
|
324
|
+
|
|
325
|
+
async def _handle_complete(self, run_id: str, run: QueuedRun) -> None:
|
|
326
|
+
"""Handle run completion."""
|
|
327
|
+
if run_id == self.state.current_run_id:
|
|
328
|
+
# Add assistant message
|
|
329
|
+
self.state.add_message(
|
|
330
|
+
"assistant",
|
|
331
|
+
run.output_content or self.state.streaming_content,
|
|
332
|
+
run_id=run_id,
|
|
333
|
+
agent_name=run.agent_name,
|
|
334
|
+
)
|
|
335
|
+
|
|
336
|
+
# Update metrics
|
|
337
|
+
if run.metrics:
|
|
338
|
+
self.state.total_tokens += run.metrics.get("tokens", 0)
|
|
339
|
+
self.state.total_cost += run.metrics.get("cost", 0.0)
|
|
340
|
+
|
|
341
|
+
# Clear streaming state
|
|
342
|
+
self.state.current_run_id = None
|
|
343
|
+
self.state.streaming_content = ""
|
|
344
|
+
self.state.is_processing = False
|
|
345
|
+
|
|
346
|
+
# Update queue state
|
|
347
|
+
self._update_queue_state()
|
|
348
|
+
|
|
349
|
+
self._emit_event(TUIEvent.run_completed(
|
|
350
|
+
run_id,
|
|
351
|
+
run.output_content or "",
|
|
352
|
+
agent_name=run.agent_name,
|
|
353
|
+
))
|
|
354
|
+
|
|
355
|
+
async def _handle_error(self, run_id: str, error: Exception) -> None:
|
|
356
|
+
"""Handle run error."""
|
|
357
|
+
if run_id == self.state.current_run_id:
|
|
358
|
+
self.state.add_message(
|
|
359
|
+
"system",
|
|
360
|
+
f"Error: {error}",
|
|
361
|
+
run_id=run_id,
|
|
362
|
+
)
|
|
363
|
+
self.state.current_run_id = None
|
|
364
|
+
self.state.streaming_content = ""
|
|
365
|
+
self.state.is_processing = False
|
|
366
|
+
|
|
367
|
+
self._update_queue_state()
|
|
368
|
+
|
|
369
|
+
self._emit_event(TUIEvent.error(str(error), run_id=run_id))
|
|
370
|
+
|
|
371
|
+
def _update_queue_state(self) -> None:
|
|
372
|
+
"""Update queue state from manager."""
|
|
373
|
+
if not self.queue_manager:
|
|
374
|
+
return
|
|
375
|
+
|
|
376
|
+
runs = self.queue_manager.list_runs(limit=100)
|
|
377
|
+
self.state.queued_runs = [
|
|
378
|
+
r.to_dict() for r in runs if r.state == RunState.QUEUED
|
|
379
|
+
]
|
|
380
|
+
self.state.running_runs = [
|
|
381
|
+
r.to_dict() for r in runs if r.state == RunState.RUNNING
|
|
382
|
+
]
|
|
383
|
+
|
|
384
|
+
# Public API
|
|
385
|
+
|
|
386
|
+
async def submit_message(self, content: str, agent_name: str = "Assistant") -> str:
|
|
387
|
+
"""Submit a message for processing."""
|
|
388
|
+
# Add user message
|
|
389
|
+
self.state.add_message("user", content)
|
|
390
|
+
self.state.is_processing = True
|
|
391
|
+
|
|
392
|
+
self._emit_event(TUIEvent.message_submitted(content))
|
|
393
|
+
|
|
394
|
+
# Submit to queue
|
|
395
|
+
run_id = await self.queue_manager.submit(
|
|
396
|
+
input_content=content,
|
|
397
|
+
agent_name=agent_name,
|
|
398
|
+
config={"agent_config": {"name": agent_name, "model": self.state.model}}
|
|
399
|
+
)
|
|
400
|
+
|
|
401
|
+
self.state.current_run_id = run_id
|
|
402
|
+
self.state.streaming_content = ""
|
|
403
|
+
|
|
404
|
+
# Add placeholder for streaming
|
|
405
|
+
self.state.add_message(
|
|
406
|
+
"assistant",
|
|
407
|
+
"",
|
|
408
|
+
run_id=run_id,
|
|
409
|
+
agent_name=agent_name,
|
|
410
|
+
is_streaming=True,
|
|
411
|
+
)
|
|
412
|
+
|
|
413
|
+
self._update_queue_state()
|
|
414
|
+
|
|
415
|
+
return run_id
|
|
416
|
+
|
|
417
|
+
async def cancel_run(self, run_id: Optional[str] = None) -> bool:
|
|
418
|
+
"""Cancel a run."""
|
|
419
|
+
target_id = run_id or self.state.current_run_id
|
|
420
|
+
if not target_id:
|
|
421
|
+
return False
|
|
422
|
+
|
|
423
|
+
result = await self.queue_manager.cancel(target_id)
|
|
424
|
+
|
|
425
|
+
if target_id == self.state.current_run_id:
|
|
426
|
+
self.state.current_run_id = None
|
|
427
|
+
self.state.streaming_content = ""
|
|
428
|
+
self.state.is_processing = False
|
|
429
|
+
|
|
430
|
+
self._update_queue_state()
|
|
431
|
+
|
|
432
|
+
self._emit_event(TUIEvent(
|
|
433
|
+
event_type=TUIEventType.RUN_CANCELLED,
|
|
434
|
+
run_id=target_id,
|
|
435
|
+
))
|
|
436
|
+
|
|
437
|
+
return result
|
|
438
|
+
|
|
439
|
+
async def retry_run(self, run_id: str) -> Optional[str]:
|
|
440
|
+
"""Retry a failed run."""
|
|
441
|
+
new_id = await self.queue_manager.retry(run_id)
|
|
442
|
+
self._update_queue_state()
|
|
443
|
+
return new_id
|
|
444
|
+
|
|
445
|
+
def set_model(self, model: str) -> None:
|
|
446
|
+
"""Set the current model."""
|
|
447
|
+
self.state.model = model
|
|
448
|
+
self._emit_event(TUIEvent.status_update(f"Model set to {model}"))
|
|
449
|
+
|
|
450
|
+
def navigate_screen(self, screen: str) -> None:
|
|
451
|
+
"""Navigate to a screen (for simulation)."""
|
|
452
|
+
self.state.current_screen = screen
|
|
453
|
+
self._emit_event(TUIEvent(
|
|
454
|
+
event_type=TUIEventType.SCREEN_CHANGED,
|
|
455
|
+
data={"screen": screen}
|
|
456
|
+
))
|
|
457
|
+
|
|
458
|
+
def set_focus(self, widget: str) -> None:
|
|
459
|
+
"""Set focus to a widget (for simulation)."""
|
|
460
|
+
self.state.focused_widget = widget
|
|
461
|
+
self._emit_event(TUIEvent(
|
|
462
|
+
event_type=TUIEventType.FOCUS_CHANGED,
|
|
463
|
+
data={"widget": widget}
|
|
464
|
+
))
|
|
465
|
+
|
|
466
|
+
def get_snapshot(self) -> Dict[str, Any]:
|
|
467
|
+
"""Get current state snapshot."""
|
|
468
|
+
return self.state.to_snapshot()
|
|
469
|
+
|
|
470
|
+
def render_snapshot(self) -> str:
|
|
471
|
+
"""Render pretty snapshot."""
|
|
472
|
+
return self.state.render_snapshot_pretty()
|
|
473
|
+
|
|
474
|
+
async def wait_for_idle(self, timeout: float = 60.0) -> bool:
|
|
475
|
+
"""Wait until no runs are processing."""
|
|
476
|
+
start = time.time()
|
|
477
|
+
while time.time() - start < timeout:
|
|
478
|
+
if not self.state.is_processing and not self.state.running_runs:
|
|
479
|
+
return True
|
|
480
|
+
await asyncio.sleep(0.1)
|
|
481
|
+
return False
|
|
482
|
+
|
|
483
|
+
async def wait_for_run(self, run_id: str, timeout: float = 60.0) -> bool:
|
|
484
|
+
"""Wait for a specific run to complete."""
|
|
485
|
+
start = time.time()
|
|
486
|
+
while time.time() - start < timeout:
|
|
487
|
+
run = self.queue_manager.get_run(run_id)
|
|
488
|
+
if run and run.state.is_terminal():
|
|
489
|
+
return True
|
|
490
|
+
await asyncio.sleep(0.1)
|
|
491
|
+
return False
|
|
492
|
+
|
|
493
|
+
|
|
494
|
+
@dataclass
class SimulationStep:
    """A single step in a simulation script.

    Instances are built by ``SimulationRunner.run_script`` from the
    entries of the script's ``steps`` list and dispatched by
    ``SimulationRunner._run_step``.
    """
    # Verb to execute: "submit", "cancel", "retry", "navigate", "focus",
    # "wait", "approve", "deny". NOTE(review): _run_step also handles
    # "model", "snapshot" and "sleep", but has no branch for "approve" /
    # "deny" — those currently raise ValueError; confirm intent.
    action: str  # "submit", "cancel", "retry", "navigate", "focus", "wait", "approve", "deny"
    # Keyword arguments for the action, e.g. {"content": "..."} for submit.
    args: Dict[str, Any] = field(default_factory=dict)
    # Expected state-snapshot key/value pairs, checked only in assert mode.
    expected: Optional[Dict[str, Any]] = None  # For assertion mode
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
class SimulationRunner:
|
|
503
|
+
"""
|
|
504
|
+
Runs simulation scripts for headless TUI testing.
|
|
505
|
+
|
|
506
|
+
Script format (YAML):
|
|
507
|
+
```yaml
|
|
508
|
+
session_id: test-session
|
|
509
|
+
model: gpt-4o-mini
|
|
510
|
+
steps:
|
|
511
|
+
- action: submit
|
|
512
|
+
args:
|
|
513
|
+
content: "Hello, world!"
|
|
514
|
+
- action: wait
|
|
515
|
+
args:
|
|
516
|
+
condition: idle
|
|
517
|
+
timeout: 30
|
|
518
|
+
- action: navigate
|
|
519
|
+
args:
|
|
520
|
+
screen: queue
|
|
521
|
+
- action: cancel
|
|
522
|
+
args:
|
|
523
|
+
run_id: current
|
|
524
|
+
```
|
|
525
|
+
"""
|
|
526
|
+
|
|
527
|
+
def __init__(
|
|
528
|
+
self,
|
|
529
|
+
orchestrator: TuiOrchestrator,
|
|
530
|
+
assert_mode: bool = False,
|
|
531
|
+
):
|
|
532
|
+
self.orchestrator = orchestrator
|
|
533
|
+
self.assert_mode = assert_mode
|
|
534
|
+
self.assertions_passed = 0
|
|
535
|
+
self.assertions_failed = 0
|
|
536
|
+
self.errors: List[str] = []
|
|
537
|
+
|
|
538
|
+
async def run_script(self, script: Dict[str, Any]) -> bool:
|
|
539
|
+
"""Run a simulation script."""
|
|
540
|
+
# Initialize session
|
|
541
|
+
session_id = script.get("session_id")
|
|
542
|
+
await self.orchestrator.start(session_id=session_id)
|
|
543
|
+
|
|
544
|
+
if "model" in script:
|
|
545
|
+
self.orchestrator.set_model(script["model"])
|
|
546
|
+
|
|
547
|
+
# Run steps
|
|
548
|
+
steps = script.get("steps", [])
|
|
549
|
+
for i, step_data in enumerate(steps):
|
|
550
|
+
step = SimulationStep(
|
|
551
|
+
action=step_data.get("action", ""),
|
|
552
|
+
args=step_data.get("args", {}),
|
|
553
|
+
expected=step_data.get("expected"),
|
|
554
|
+
)
|
|
555
|
+
|
|
556
|
+
try:
|
|
557
|
+
await self._run_step(step, i)
|
|
558
|
+
except Exception as e:
|
|
559
|
+
self.errors.append(f"Step {i} ({step.action}): {e}")
|
|
560
|
+
if self.assert_mode:
|
|
561
|
+
break
|
|
562
|
+
|
|
563
|
+
await self.orchestrator.stop()
|
|
564
|
+
|
|
565
|
+
return len(self.errors) == 0
|
|
566
|
+
|
|
567
|
+
async def _run_step(self, step: SimulationStep, index: int) -> None:
|
|
568
|
+
"""Run a single simulation step."""
|
|
569
|
+
action = step.action.lower()
|
|
570
|
+
args = step.args
|
|
571
|
+
|
|
572
|
+
if action == "submit":
|
|
573
|
+
content = args.get("content", "")
|
|
574
|
+
agent = args.get("agent", "Assistant")
|
|
575
|
+
await self.orchestrator.submit_message(content, agent)
|
|
576
|
+
|
|
577
|
+
elif action == "cancel":
|
|
578
|
+
run_id = args.get("run_id")
|
|
579
|
+
if run_id == "current":
|
|
580
|
+
run_id = None
|
|
581
|
+
await self.orchestrator.cancel_run(run_id)
|
|
582
|
+
|
|
583
|
+
elif action == "retry":
|
|
584
|
+
run_id = args.get("run_id", "")
|
|
585
|
+
await self.orchestrator.retry_run(run_id)
|
|
586
|
+
|
|
587
|
+
elif action == "navigate":
|
|
588
|
+
screen = args.get("screen", "main")
|
|
589
|
+
self.orchestrator.navigate_screen(screen)
|
|
590
|
+
|
|
591
|
+
elif action == "focus":
|
|
592
|
+
widget = args.get("widget", "composer")
|
|
593
|
+
self.orchestrator.set_focus(widget)
|
|
594
|
+
|
|
595
|
+
elif action == "wait":
|
|
596
|
+
condition = args.get("condition", "idle")
|
|
597
|
+
timeout = args.get("timeout", 30.0)
|
|
598
|
+
|
|
599
|
+
if condition == "idle":
|
|
600
|
+
success = await self.orchestrator.wait_for_idle(timeout)
|
|
601
|
+
elif condition == "run":
|
|
602
|
+
run_id = args.get("run_id", "")
|
|
603
|
+
success = await self.orchestrator.wait_for_run(run_id, timeout)
|
|
604
|
+
else:
|
|
605
|
+
await asyncio.sleep(timeout)
|
|
606
|
+
success = True
|
|
607
|
+
|
|
608
|
+
if not success and self.assert_mode:
|
|
609
|
+
raise TimeoutError(f"Wait condition '{condition}' timed out")
|
|
610
|
+
|
|
611
|
+
elif action == "model":
|
|
612
|
+
model = args.get("model", "gpt-4o-mini")
|
|
613
|
+
self.orchestrator.set_model(model)
|
|
614
|
+
|
|
615
|
+
elif action == "snapshot":
|
|
616
|
+
snapshot = self.orchestrator.render_snapshot()
|
|
617
|
+
print(snapshot)
|
|
618
|
+
|
|
619
|
+
elif action == "sleep":
|
|
620
|
+
duration = args.get("seconds", 1.0)
|
|
621
|
+
await asyncio.sleep(duration)
|
|
622
|
+
|
|
623
|
+
else:
|
|
624
|
+
raise ValueError(f"Unknown action: {action}")
|
|
625
|
+
|
|
626
|
+
# Check assertions
|
|
627
|
+
if self.assert_mode and step.expected:
|
|
628
|
+
self._check_assertions(step.expected, index)
|
|
629
|
+
|
|
630
|
+
def _check_assertions(self, expected: Dict[str, Any], step_index: int) -> None:
|
|
631
|
+
"""Check assertions against current state."""
|
|
632
|
+
snapshot = self.orchestrator.get_snapshot()
|
|
633
|
+
|
|
634
|
+
for key, expected_value in expected.items():
|
|
635
|
+
actual_value = snapshot.get(key)
|
|
636
|
+
|
|
637
|
+
if actual_value != expected_value:
|
|
638
|
+
self.assertions_failed += 1
|
|
639
|
+
self.errors.append(
|
|
640
|
+
f"Step {step_index}: Expected {key}={expected_value}, got {actual_value}"
|
|
641
|
+
)
|
|
642
|
+
else:
|
|
643
|
+
self.assertions_passed += 1
|
|
644
|
+
|
|
645
|
+
def get_summary(self) -> Dict[str, Any]:
|
|
646
|
+
"""Get simulation summary."""
|
|
647
|
+
return {
|
|
648
|
+
"assertions_passed": self.assertions_passed,
|
|
649
|
+
"assertions_failed": self.assertions_failed,
|
|
650
|
+
"errors": self.errors,
|
|
651
|
+
"success": len(self.errors) == 0,
|
|
652
|
+
}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
"""
|
|
2
|
+
TUI Screens for PraisonAI.
|
|
3
|
+
|
|
4
|
+
Screen definitions for different views in the TUI application.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import TYPE_CHECKING
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from .main import MainScreen
|
|
11
|
+
from .queue import QueueScreen
|
|
12
|
+
from .settings import SettingsScreen
|
|
13
|
+
from .session import SessionScreen
|
|
14
|
+
|
|
15
|
+
# Cache of screen classes resolved so far, keyed by exported name.
_lazy_cache = {}

# Exported name -> submodule (relative to this package) that defines it.
_SCREEN_MODULES = {
    "MainScreen": "main",
    "QueueScreen": "queue",
    "SettingsScreen": "settings",
    "SessionScreen": "session",
}


def __getattr__(name: str):
    """Lazily import screen classes on first attribute access (PEP 562).

    Keeps importing this package cheap: the screen submodules are only
    imported when a screen class is actually requested. Resolved classes
    are memoized in ``_lazy_cache``.

    Raises:
        AttributeError: if *name* is not one of the exported screens.
    """
    if name in _lazy_cache:
        return _lazy_cache[name]

    module_name = _SCREEN_MODULES.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    import importlib

    screen_cls = getattr(importlib.import_module(f".{module_name}", __package__), name)
    _lazy_cache[name] = screen_cls
    return screen_cls
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# Names exported for static analysis and ``from ... import *``; the
# classes themselves are resolved lazily by ``__getattr__`` above.
__all__ = [
    "MainScreen",
    "QueueScreen",
    "SettingsScreen",
    "SessionScreen",
]
|