PraisonAI 3.0.0 (praisonai-3.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonai/__init__.py +54 -0
- praisonai/__main__.py +15 -0
- praisonai/acp/__init__.py +54 -0
- praisonai/acp/config.py +159 -0
- praisonai/acp/server.py +587 -0
- praisonai/acp/session.py +219 -0
- praisonai/adapters/__init__.py +50 -0
- praisonai/adapters/readers.py +395 -0
- praisonai/adapters/rerankers.py +315 -0
- praisonai/adapters/retrievers.py +394 -0
- praisonai/adapters/vector_stores.py +409 -0
- praisonai/agent_scheduler.py +337 -0
- praisonai/agents_generator.py +903 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +1197 -0
- praisonai/capabilities/__init__.py +275 -0
- praisonai/capabilities/a2a.py +140 -0
- praisonai/capabilities/assistants.py +283 -0
- praisonai/capabilities/audio.py +320 -0
- praisonai/capabilities/batches.py +469 -0
- praisonai/capabilities/completions.py +336 -0
- praisonai/capabilities/container_files.py +155 -0
- praisonai/capabilities/containers.py +93 -0
- praisonai/capabilities/embeddings.py +158 -0
- praisonai/capabilities/files.py +467 -0
- praisonai/capabilities/fine_tuning.py +293 -0
- praisonai/capabilities/guardrails.py +182 -0
- praisonai/capabilities/images.py +330 -0
- praisonai/capabilities/mcp.py +190 -0
- praisonai/capabilities/messages.py +270 -0
- praisonai/capabilities/moderations.py +154 -0
- praisonai/capabilities/ocr.py +217 -0
- praisonai/capabilities/passthrough.py +204 -0
- praisonai/capabilities/rag.py +207 -0
- praisonai/capabilities/realtime.py +160 -0
- praisonai/capabilities/rerank.py +165 -0
- praisonai/capabilities/responses.py +266 -0
- praisonai/capabilities/search.py +109 -0
- praisonai/capabilities/skills.py +133 -0
- praisonai/capabilities/vector_store_files.py +334 -0
- praisonai/capabilities/vector_stores.py +304 -0
- praisonai/capabilities/videos.py +141 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/chat/__init__.py +106 -0
- praisonai/chat/app.py +125 -0
- praisonai/cli/__init__.py +26 -0
- praisonai/cli/app.py +213 -0
- praisonai/cli/commands/__init__.py +75 -0
- praisonai/cli/commands/acp.py +70 -0
- praisonai/cli/commands/completion.py +333 -0
- praisonai/cli/commands/config.py +166 -0
- praisonai/cli/commands/debug.py +142 -0
- praisonai/cli/commands/diag.py +55 -0
- praisonai/cli/commands/doctor.py +166 -0
- praisonai/cli/commands/environment.py +179 -0
- praisonai/cli/commands/lsp.py +112 -0
- praisonai/cli/commands/mcp.py +210 -0
- praisonai/cli/commands/profile.py +457 -0
- praisonai/cli/commands/run.py +228 -0
- praisonai/cli/commands/schedule.py +150 -0
- praisonai/cli/commands/serve.py +97 -0
- praisonai/cli/commands/session.py +212 -0
- praisonai/cli/commands/traces.py +145 -0
- praisonai/cli/commands/version.py +101 -0
- praisonai/cli/configuration/__init__.py +18 -0
- praisonai/cli/configuration/loader.py +353 -0
- praisonai/cli/configuration/paths.py +114 -0
- praisonai/cli/configuration/schema.py +164 -0
- praisonai/cli/features/__init__.py +268 -0
- praisonai/cli/features/acp.py +236 -0
- praisonai/cli/features/action_orchestrator.py +546 -0
- praisonai/cli/features/agent_scheduler.py +773 -0
- praisonai/cli/features/agent_tools.py +474 -0
- praisonai/cli/features/agents.py +375 -0
- praisonai/cli/features/at_mentions.py +471 -0
- praisonai/cli/features/auto_memory.py +182 -0
- praisonai/cli/features/autonomy_mode.py +490 -0
- praisonai/cli/features/background.py +356 -0
- praisonai/cli/features/base.py +168 -0
- praisonai/cli/features/capabilities.py +1326 -0
- praisonai/cli/features/checkpoints.py +338 -0
- praisonai/cli/features/code_intelligence.py +652 -0
- praisonai/cli/features/compaction.py +294 -0
- praisonai/cli/features/compare.py +534 -0
- praisonai/cli/features/cost_tracker.py +514 -0
- praisonai/cli/features/debug.py +810 -0
- praisonai/cli/features/deploy.py +517 -0
- praisonai/cli/features/diag.py +289 -0
- praisonai/cli/features/doctor/__init__.py +63 -0
- praisonai/cli/features/doctor/checks/__init__.py +24 -0
- praisonai/cli/features/doctor/checks/acp_checks.py +240 -0
- praisonai/cli/features/doctor/checks/config_checks.py +366 -0
- praisonai/cli/features/doctor/checks/db_checks.py +366 -0
- praisonai/cli/features/doctor/checks/env_checks.py +543 -0
- praisonai/cli/features/doctor/checks/lsp_checks.py +199 -0
- praisonai/cli/features/doctor/checks/mcp_checks.py +349 -0
- praisonai/cli/features/doctor/checks/memory_checks.py +268 -0
- praisonai/cli/features/doctor/checks/network_checks.py +251 -0
- praisonai/cli/features/doctor/checks/obs_checks.py +328 -0
- praisonai/cli/features/doctor/checks/performance_checks.py +235 -0
- praisonai/cli/features/doctor/checks/permissions_checks.py +259 -0
- praisonai/cli/features/doctor/checks/selftest_checks.py +322 -0
- praisonai/cli/features/doctor/checks/serve_checks.py +426 -0
- praisonai/cli/features/doctor/checks/skills_checks.py +231 -0
- praisonai/cli/features/doctor/checks/tools_checks.py +371 -0
- praisonai/cli/features/doctor/engine.py +266 -0
- praisonai/cli/features/doctor/formatters.py +310 -0
- praisonai/cli/features/doctor/handler.py +397 -0
- praisonai/cli/features/doctor/models.py +264 -0
- praisonai/cli/features/doctor/registry.py +239 -0
- praisonai/cli/features/endpoints.py +1019 -0
- praisonai/cli/features/eval.py +560 -0
- praisonai/cli/features/external_agents.py +231 -0
- praisonai/cli/features/fast_context.py +410 -0
- praisonai/cli/features/flow_display.py +566 -0
- praisonai/cli/features/git_integration.py +651 -0
- praisonai/cli/features/guardrail.py +171 -0
- praisonai/cli/features/handoff.py +185 -0
- praisonai/cli/features/hooks.py +583 -0
- praisonai/cli/features/image.py +384 -0
- praisonai/cli/features/interactive_runtime.py +585 -0
- praisonai/cli/features/interactive_tools.py +380 -0
- praisonai/cli/features/interactive_tui.py +603 -0
- praisonai/cli/features/jobs.py +632 -0
- praisonai/cli/features/knowledge.py +531 -0
- praisonai/cli/features/lite.py +244 -0
- praisonai/cli/features/lsp_cli.py +225 -0
- praisonai/cli/features/mcp.py +169 -0
- praisonai/cli/features/message_queue.py +587 -0
- praisonai/cli/features/metrics.py +211 -0
- praisonai/cli/features/n8n.py +673 -0
- praisonai/cli/features/observability.py +293 -0
- praisonai/cli/features/ollama.py +361 -0
- praisonai/cli/features/output_style.py +273 -0
- praisonai/cli/features/package.py +631 -0
- praisonai/cli/features/performance.py +308 -0
- praisonai/cli/features/persistence.py +636 -0
- praisonai/cli/features/profile.py +226 -0
- praisonai/cli/features/profiler/__init__.py +81 -0
- praisonai/cli/features/profiler/core.py +558 -0
- praisonai/cli/features/profiler/optimizations.py +652 -0
- praisonai/cli/features/profiler/suite.py +386 -0
- praisonai/cli/features/profiling.py +350 -0
- praisonai/cli/features/queue/__init__.py +73 -0
- praisonai/cli/features/queue/manager.py +395 -0
- praisonai/cli/features/queue/models.py +286 -0
- praisonai/cli/features/queue/persistence.py +564 -0
- praisonai/cli/features/queue/scheduler.py +484 -0
- praisonai/cli/features/queue/worker.py +372 -0
- praisonai/cli/features/recipe.py +1723 -0
- praisonai/cli/features/recipes.py +449 -0
- praisonai/cli/features/registry.py +229 -0
- praisonai/cli/features/repo_map.py +860 -0
- praisonai/cli/features/router.py +466 -0
- praisonai/cli/features/sandbox_executor.py +515 -0
- praisonai/cli/features/serve.py +829 -0
- praisonai/cli/features/session.py +222 -0
- praisonai/cli/features/skills.py +856 -0
- praisonai/cli/features/slash_commands.py +650 -0
- praisonai/cli/features/telemetry.py +179 -0
- praisonai/cli/features/templates.py +1384 -0
- praisonai/cli/features/thinking.py +305 -0
- praisonai/cli/features/todo.py +334 -0
- praisonai/cli/features/tools.py +680 -0
- praisonai/cli/features/tui/__init__.py +83 -0
- praisonai/cli/features/tui/app.py +580 -0
- praisonai/cli/features/tui/cli.py +566 -0
- praisonai/cli/features/tui/debug.py +511 -0
- praisonai/cli/features/tui/events.py +99 -0
- praisonai/cli/features/tui/mock_provider.py +328 -0
- praisonai/cli/features/tui/orchestrator.py +652 -0
- praisonai/cli/features/tui/screens/__init__.py +50 -0
- praisonai/cli/features/tui/screens/main.py +245 -0
- praisonai/cli/features/tui/screens/queue.py +174 -0
- praisonai/cli/features/tui/screens/session.py +124 -0
- praisonai/cli/features/tui/screens/settings.py +148 -0
- praisonai/cli/features/tui/widgets/__init__.py +56 -0
- praisonai/cli/features/tui/widgets/chat.py +261 -0
- praisonai/cli/features/tui/widgets/composer.py +224 -0
- praisonai/cli/features/tui/widgets/queue_panel.py +200 -0
- praisonai/cli/features/tui/widgets/status.py +167 -0
- praisonai/cli/features/tui/widgets/tool_panel.py +248 -0
- praisonai/cli/features/workflow.py +720 -0
- praisonai/cli/legacy.py +236 -0
- praisonai/cli/main.py +5559 -0
- praisonai/cli/schedule_cli.py +54 -0
- praisonai/cli/state/__init__.py +31 -0
- praisonai/cli/state/identifiers.py +161 -0
- praisonai/cli/state/sessions.py +313 -0
- praisonai/code/__init__.py +93 -0
- praisonai/code/agent_tools.py +344 -0
- praisonai/code/diff/__init__.py +21 -0
- praisonai/code/diff/diff_strategy.py +432 -0
- praisonai/code/tools/__init__.py +27 -0
- praisonai/code/tools/apply_diff.py +221 -0
- praisonai/code/tools/execute_command.py +275 -0
- praisonai/code/tools/list_files.py +274 -0
- praisonai/code/tools/read_file.py +206 -0
- praisonai/code/tools/search_replace.py +248 -0
- praisonai/code/tools/write_file.py +217 -0
- praisonai/code/utils/__init__.py +46 -0
- praisonai/code/utils/file_utils.py +307 -0
- praisonai/code/utils/ignore_utils.py +308 -0
- praisonai/code/utils/text_utils.py +276 -0
- praisonai/db/__init__.py +64 -0
- praisonai/db/adapter.py +531 -0
- praisonai/deploy/__init__.py +62 -0
- praisonai/deploy/api.py +231 -0
- praisonai/deploy/docker.py +454 -0
- praisonai/deploy/doctor.py +367 -0
- praisonai/deploy/main.py +327 -0
- praisonai/deploy/models.py +179 -0
- praisonai/deploy/providers/__init__.py +33 -0
- praisonai/deploy/providers/aws.py +331 -0
- praisonai/deploy/providers/azure.py +358 -0
- praisonai/deploy/providers/base.py +101 -0
- praisonai/deploy/providers/gcp.py +314 -0
- praisonai/deploy/schema.py +208 -0
- praisonai/deploy.py +185 -0
- praisonai/endpoints/__init__.py +53 -0
- praisonai/endpoints/a2u_server.py +410 -0
- praisonai/endpoints/discovery.py +165 -0
- praisonai/endpoints/providers/__init__.py +28 -0
- praisonai/endpoints/providers/a2a.py +253 -0
- praisonai/endpoints/providers/a2u.py +208 -0
- praisonai/endpoints/providers/agents_api.py +171 -0
- praisonai/endpoints/providers/base.py +231 -0
- praisonai/endpoints/providers/mcp.py +263 -0
- praisonai/endpoints/providers/recipe.py +206 -0
- praisonai/endpoints/providers/tools_mcp.py +150 -0
- praisonai/endpoints/registry.py +131 -0
- praisonai/endpoints/server.py +161 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +155 -0
- praisonai/integrations/__init__.py +56 -0
- praisonai/integrations/base.py +303 -0
- praisonai/integrations/claude_code.py +270 -0
- praisonai/integrations/codex_cli.py +255 -0
- praisonai/integrations/cursor_cli.py +195 -0
- praisonai/integrations/gemini_cli.py +222 -0
- praisonai/jobs/__init__.py +67 -0
- praisonai/jobs/executor.py +425 -0
- praisonai/jobs/models.py +230 -0
- praisonai/jobs/router.py +314 -0
- praisonai/jobs/server.py +186 -0
- praisonai/jobs/store.py +203 -0
- praisonai/llm/__init__.py +66 -0
- praisonai/llm/registry.py +382 -0
- praisonai/mcp_server/__init__.py +152 -0
- praisonai/mcp_server/adapters/__init__.py +74 -0
- praisonai/mcp_server/adapters/agents.py +128 -0
- praisonai/mcp_server/adapters/capabilities.py +168 -0
- praisonai/mcp_server/adapters/cli_tools.py +568 -0
- praisonai/mcp_server/adapters/extended_capabilities.py +462 -0
- praisonai/mcp_server/adapters/knowledge.py +93 -0
- praisonai/mcp_server/adapters/memory.py +104 -0
- praisonai/mcp_server/adapters/prompts.py +306 -0
- praisonai/mcp_server/adapters/resources.py +124 -0
- praisonai/mcp_server/adapters/tools_bridge.py +280 -0
- praisonai/mcp_server/auth/__init__.py +48 -0
- praisonai/mcp_server/auth/api_key.py +291 -0
- praisonai/mcp_server/auth/oauth.py +460 -0
- praisonai/mcp_server/auth/oidc.py +289 -0
- praisonai/mcp_server/auth/scopes.py +260 -0
- praisonai/mcp_server/cli.py +852 -0
- praisonai/mcp_server/elicitation.py +445 -0
- praisonai/mcp_server/icons.py +302 -0
- praisonai/mcp_server/recipe_adapter.py +573 -0
- praisonai/mcp_server/recipe_cli.py +824 -0
- praisonai/mcp_server/registry.py +703 -0
- praisonai/mcp_server/sampling.py +422 -0
- praisonai/mcp_server/server.py +490 -0
- praisonai/mcp_server/tasks.py +443 -0
- praisonai/mcp_server/transports/__init__.py +18 -0
- praisonai/mcp_server/transports/http_stream.py +376 -0
- praisonai/mcp_server/transports/stdio.py +132 -0
- praisonai/persistence/__init__.py +84 -0
- praisonai/persistence/config.py +238 -0
- praisonai/persistence/conversation/__init__.py +25 -0
- praisonai/persistence/conversation/async_mysql.py +427 -0
- praisonai/persistence/conversation/async_postgres.py +410 -0
- praisonai/persistence/conversation/async_sqlite.py +371 -0
- praisonai/persistence/conversation/base.py +151 -0
- praisonai/persistence/conversation/json_store.py +250 -0
- praisonai/persistence/conversation/mysql.py +387 -0
- praisonai/persistence/conversation/postgres.py +401 -0
- praisonai/persistence/conversation/singlestore.py +240 -0
- praisonai/persistence/conversation/sqlite.py +341 -0
- praisonai/persistence/conversation/supabase.py +203 -0
- praisonai/persistence/conversation/surrealdb.py +287 -0
- praisonai/persistence/factory.py +301 -0
- praisonai/persistence/hooks/__init__.py +18 -0
- praisonai/persistence/hooks/agent_hooks.py +297 -0
- praisonai/persistence/knowledge/__init__.py +26 -0
- praisonai/persistence/knowledge/base.py +144 -0
- praisonai/persistence/knowledge/cassandra.py +232 -0
- praisonai/persistence/knowledge/chroma.py +295 -0
- praisonai/persistence/knowledge/clickhouse.py +242 -0
- praisonai/persistence/knowledge/cosmosdb_vector.py +438 -0
- praisonai/persistence/knowledge/couchbase.py +286 -0
- praisonai/persistence/knowledge/lancedb.py +216 -0
- praisonai/persistence/knowledge/langchain_adapter.py +291 -0
- praisonai/persistence/knowledge/lightrag_adapter.py +212 -0
- praisonai/persistence/knowledge/llamaindex_adapter.py +256 -0
- praisonai/persistence/knowledge/milvus.py +277 -0
- praisonai/persistence/knowledge/mongodb_vector.py +306 -0
- praisonai/persistence/knowledge/pgvector.py +335 -0
- praisonai/persistence/knowledge/pinecone.py +253 -0
- praisonai/persistence/knowledge/qdrant.py +301 -0
- praisonai/persistence/knowledge/redis_vector.py +291 -0
- praisonai/persistence/knowledge/singlestore_vector.py +299 -0
- praisonai/persistence/knowledge/surrealdb_vector.py +309 -0
- praisonai/persistence/knowledge/upstash_vector.py +266 -0
- praisonai/persistence/knowledge/weaviate.py +223 -0
- praisonai/persistence/migrations/__init__.py +10 -0
- praisonai/persistence/migrations/manager.py +251 -0
- praisonai/persistence/orchestrator.py +406 -0
- praisonai/persistence/state/__init__.py +21 -0
- praisonai/persistence/state/async_mongodb.py +200 -0
- praisonai/persistence/state/base.py +107 -0
- praisonai/persistence/state/dynamodb.py +226 -0
- praisonai/persistence/state/firestore.py +175 -0
- praisonai/persistence/state/gcs.py +155 -0
- praisonai/persistence/state/memory.py +245 -0
- praisonai/persistence/state/mongodb.py +158 -0
- praisonai/persistence/state/redis.py +190 -0
- praisonai/persistence/state/upstash.py +144 -0
- praisonai/persistence/tests/__init__.py +3 -0
- praisonai/persistence/tests/test_all_backends.py +633 -0
- praisonai/profiler.py +1214 -0
- praisonai/recipe/__init__.py +134 -0
- praisonai/recipe/bridge.py +278 -0
- praisonai/recipe/core.py +893 -0
- praisonai/recipe/exceptions.py +54 -0
- praisonai/recipe/history.py +402 -0
- praisonai/recipe/models.py +266 -0
- praisonai/recipe/operations.py +440 -0
- praisonai/recipe/policy.py +422 -0
- praisonai/recipe/registry.py +849 -0
- praisonai/recipe/runtime.py +214 -0
- praisonai/recipe/security.py +711 -0
- praisonai/recipe/serve.py +859 -0
- praisonai/recipe/server.py +613 -0
- praisonai/scheduler/__init__.py +45 -0
- praisonai/scheduler/agent_scheduler.py +552 -0
- praisonai/scheduler/base.py +124 -0
- praisonai/scheduler/daemon_manager.py +225 -0
- praisonai/scheduler/state_manager.py +155 -0
- praisonai/scheduler/yaml_loader.py +193 -0
- praisonai/scheduler.py +194 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup.py +16 -0
- praisonai/templates/__init__.py +116 -0
- praisonai/templates/cache.py +364 -0
- praisonai/templates/dependency_checker.py +358 -0
- praisonai/templates/discovery.py +391 -0
- praisonai/templates/loader.py +564 -0
- praisonai/templates/registry.py +511 -0
- praisonai/templates/resolver.py +206 -0
- praisonai/templates/security.py +327 -0
- praisonai/templates/tool_override.py +498 -0
- praisonai/templates/tools_doctor.py +256 -0
- praisonai/test.py +105 -0
- praisonai/train.py +562 -0
- praisonai/train_vision.py +306 -0
- praisonai/ui/agents.py +824 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chainlit_compat.py +246 -0
- praisonai/ui/chat.py +532 -0
- praisonai/ui/code.py +717 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +284 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/database_config.py +56 -0
- praisonai/ui/db.py +294 -0
- praisonai/ui/realtime.py +488 -0
- praisonai/ui/realtimeclient/__init__.py +756 -0
- praisonai/ui/realtimeclient/tools.py +242 -0
- praisonai/ui/sql_alchemy.py +710 -0
- praisonai/upload_vision.py +140 -0
- praisonai/version.py +1 -0
- praisonai-3.0.0.dist-info/METADATA +3493 -0
- praisonai-3.0.0.dist-info/RECORD +393 -0
- praisonai-3.0.0.dist-info/WHEEL +5 -0
- praisonai-3.0.0.dist-info/entry_points.txt +4 -0
- praisonai-3.0.0.dist-info/top_level.txt +1 -0
praisonai/ui/code.py
ADDED
@@ -0,0 +1,717 @@
# Standard library imports
import os
from datetime import datetime
import logging
import json
import io
import base64
import asyncio

# Third-party imports
from dotenv import load_dotenv
from PIL import Image
from context import ContextGatherer
from tavily import TavilyClient
from crawl4ai import AsyncWebCrawler
import subprocess

# Local application/library imports
import chainlit as cl
from chainlit.input_widget import TextInput, Switch
from chainlit.types import ThreadDict
import chainlit.data as cl_data
from db import DatabaseManager

# PraisonAI Agents imports
try:
    from praisonaiagents import Agent
    PRAISONAI_AGENTS_AVAILABLE = True
except ImportError:
    PRAISONAI_AGENTS_AVAILABLE = False
    # Fallback to litellm for backward compatibility
    from litellm import acompletion
    import litellm

# Load environment variables
load_dotenv()

# Set up logging
logger = logging.getLogger(__name__)
log_level = os.getenv("LOGLEVEL", "INFO").upper() or "INFO"
logger.handlers = []

# Set up logging to console
console_handler = logging.StreamHandler()
console_handler.setLevel(log_level)
console_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)

# Set the logging level for the logger
logger.setLevel(log_level)

# Configure litellm for backward compatibility (only if praisonaiagents not available)
if not PRAISONAI_AGENTS_AVAILABLE:
    import litellm
    litellm.set_verbose = False
    litellm.success_callback = []
    litellm._async_success_callback = []
    litellm.callbacks = []
    litellm.drop_params = True
    litellm.modify_params = True
    litellm.suppress_debug_messages = True

# Claude Code Tool Function
async def claude_code_tool(query: str) -> str:
    """
    Execute Claude Code CLI commands for file modifications and coding tasks.

    Args:
        query: The user's request that requires file modifications or coding assistance

    Returns:
        The output from Claude Code execution
    """
    try:
        # Check if the current working directory is a git repository
        repo_path = os.environ.get("PRAISONAI_CODE_REPO_PATH", ".")

        # Try to detect if git is available and if we're in a git repo
        git_available = False
        try:
            subprocess.run(["git", "status"], cwd=repo_path, capture_output=True, check=True)
            git_available = True
        except (subprocess.CalledProcessError, FileNotFoundError):
            git_available = False

        # Build Claude Code command
        claude_cmd = ["claude", "--dangerously-skip-permissions", "-p", query]

        # Check if it's a continuation (simple heuristic)
        user_session_context = cl.user_session.get("claude_code_context", False)
        if user_session_context:
            claude_cmd.insert(1, "--continue")

        # Execute Claude Code command
        result = subprocess.run(
            claude_cmd,
            cwd=repo_path,
            capture_output=True,
            text=True,
            timeout=300  # 5 minutes timeout
        )

        # Set context for future requests
        cl.user_session.set("claude_code_context", True)

        output = result.stdout
        if result.stderr:
            output += f"\n\nErrors:\n{result.stderr}"

        # If git is available and changes were made, try to create a branch and PR
        if git_available and result.returncode == 0:
            try:
                # Check for changes
                git_status = subprocess.run(
                    ["git", "status", "--porcelain"],
                    cwd=repo_path,
                    capture_output=True,
                    text=True,
                    check=True
                )

                if git_status.stdout.strip():
                    # Create a branch for the changes
                    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                    branch_name = f"claude-code-{timestamp}"

                    # Create and switch to new branch
                    subprocess.run(["git", "checkout", "-b", branch_name], cwd=repo_path, check=True)

                    # Add and commit changes
                    subprocess.run(["git", "add", "."], cwd=repo_path, check=True)
                    commit_message = f"Claude Code changes: {query[:50]}..."
                    subprocess.run(
                        ["git", "commit", "-m", commit_message],
                        cwd=repo_path,
                        check=True
                    )

                    # Push to remote (if configured)
                    try:
                        subprocess.run(
                            ["git", "push", "-u", "origin", branch_name],
                            cwd=repo_path,
                            check=True
                        )

                        # Generate PR URL (assuming GitHub)
                        remote_url = subprocess.run(
                            ["git", "config", "--get", "remote.origin.url"],
                            cwd=repo_path,
                            capture_output=True,
                            text=True
                        )

                        if remote_url.returncode == 0:
                            repo_url = remote_url.stdout.strip()
                            if repo_url.endswith(".git"):
                                repo_url = repo_url[:-4]
                            if "github.com" in repo_url:
                                pr_url = f"{repo_url}/compare/main...{branch_name}?quick_pull=1"
                                output += f"\n\n📋 **Pull Request Created:**\n{pr_url}"

                    except subprocess.CalledProcessError:
                        output += f"\n\n🌲 **Branch created:** {branch_name} (push manually if needed)"

            except subprocess.CalledProcessError as e:
                output += f"\n\nGit operations failed: {e}"

        return output

    except subprocess.TimeoutExpired:
        return "Claude Code execution timed out after 5 minutes."
    except subprocess.CalledProcessError as e:
        return f"Claude Code execution failed: {e}\nStdout: {e.stdout}\nStderr: {e.stderr}"
    except Exception as e:
        return f"Error executing Claude Code: {str(e)}"

CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")

if not CHAINLIT_AUTH_SECRET:
    os.environ["CHAINLIT_AUTH_SECRET"] = "p8BPhQChpg@J>jBz$wGxqLX2V>yTVgP*7Ky9H$aV:axW~ANNX-7_T:o@lnyCBu^U"
    CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")

now = datetime.now()
create_step_counter = 0

# Initialize database
db_manager = DatabaseManager()
db_manager.initialize()

deleted_thread_ids = []  # type: List[str]

def _build_completion_params(model_name, **override_params):
    """Build parameters for litellm completion calls with proper model handling"""
    params = {
        "model": model_name,
    }

    # Override with any provided parameters
    params.update(override_params)

    return params

def save_setting(key: str, value: str):
    """Saves a setting to the database.

    Args:
        key: The setting key.
        value: The setting value.
    """
    asyncio.run(db_manager.save_setting(key, value))

def load_setting(key: str) -> str:
    """Loads a setting from the database.

    Args:
        key: The setting key.

    Returns:
        The setting value, or None if the key is not found.
    """
    return asyncio.run(db_manager.load_setting(key))

cl_data._data_layer = db_manager

@cl.on_chat_start
async def start():
    model_name = load_setting("model_name")

    if (model_name):
        cl.user_session.set("model_name", model_name)
    else:
        # If no setting found, use default or environment variable
        model_name = os.getenv("MODEL_NAME", "gpt-5-nano")
        cl.user_session.set("model_name", model_name)
    logger.debug(f"Model name: {model_name}")

    # Load Claude Code setting (check CLI flag first, then database setting)
    claude_code_enabled = os.getenv("PRAISONAI_CLAUDECODE_ENABLED", "false").lower() == "true"
    if not claude_code_enabled:
        claude_code_enabled = (load_setting("claude_code_enabled") or "false").lower() == "true"

    settings = cl.ChatSettings(
        [
            TextInput(
                id="model_name",
                label="Enter the Model Name",
                placeholder="e.g., gpt-5-nano",
                initial=model_name
            ),
            Switch(
                id="claude_code_enabled",
                label="Enable Claude Code (file modifications & coding)",
                initial=claude_code_enabled
            )
        ]
    )
    cl.user_session.set("settings", settings)
    await settings.send()
    repo_path_to_use = os.environ.get("PRAISONAI_CODE_REPO_PATH", ".")
    gatherer = ContextGatherer(directory=repo_path_to_use)
    context, token_count, context_tree = gatherer.run()
    msg = cl.Message(content="""Token Count: {token_count},
                     Files include: \n```bash\n{context_tree}\n"""
                     .format(token_count=token_count, context_tree=context_tree))
    await msg.send()

@cl.on_settings_update
async def setup_agent(settings):
    logger.debug(settings)
    cl.user_session.set("settings", settings)
    model_name = settings["model_name"]
    claude_code_enabled = settings.get("claude_code_enabled", False)
    cl.user_session.set("model_name", model_name)
    cl.user_session.set("claude_code_enabled", claude_code_enabled)

    # Save in settings table
    save_setting("model_name", model_name)
    save_setting("claude_code_enabled", str(claude_code_enabled).lower())

    # Save in thread metadata
    thread_id = cl.user_session.get("thread_id")
    if thread_id:
        thread = await cl_data._data_layer.get_thread(thread_id)
        if thread:
            metadata = thread.get("metadata", {})
            if isinstance(metadata, str):
                try:
                    metadata = json.loads(metadata)
                except json.JSONDecodeError:
                    metadata = {}

            metadata["model_name"] = model_name
            metadata["claude_code_enabled"] = claude_code_enabled

            # Always store metadata as a dictionary
            await cl_data._data_layer.update_thread(thread_id, metadata=metadata)

            # Update the user session with the new metadata
            cl.user_session.set("metadata", metadata)

# Set Tavily API key
tavily_api_key = os.getenv("TAVILY_API_KEY")
tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None

# Function to call Tavily Search API and crawl the results
async def tavily_web_search(query):
    if not tavily_client:
        return json.dumps({
            "query": query,
            "error": "Tavily API key is not set. Web search is unavailable."
        })

    response = tavily_client.search(query)
    logger.debug(f"Tavily search response: {response}")

    # Create an instance of AsyncAsyncWebCrawler
    async with AsyncWebCrawler() as crawler:
        # Prepare the results
        results = []
        for result in response.get('results', []):
            url = result.get('url')
            if url:
                try:
                    # Run the crawler asynchronously on each URL
                    crawl_result = await crawler.arun(url=url)
                    results.append({
                        "content": result.get('content'),
                        "url": url,
                        "full_content": crawl_result.markdown
                    })
                except Exception as e:
                    logger.error(f"Error crawling {url}: {str(e)}")
                    results.append({
                        "content": result.get('content'),
                        "url": url,
                        "full_content": "Error: Unable to crawl this URL"
                    })

    return json.dumps({
        "query": query,
        "results": results
    })

# Define the tool for function calling
tools = [{
    "type": "function",
    "function": {
        "name": "tavily_web_search",
        "description": "Search the web using Tavily API and crawl the resulting URLs",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"}
            },
            "required": ["query"]
        }
    }
}] if tavily_api_key else []

@cl.on_message
async def main(message: cl.Message):
    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-5-nano"
    claude_code_enabled = cl.user_session.get("claude_code_enabled", False)
    message_history = cl.user_session.get("message_history", [])
    repo_path_to_use = os.environ.get("PRAISONAI_CODE_REPO_PATH", ".")
    gatherer = ContextGatherer(directory=repo_path_to_use)
    context, token_count, context_tree = gatherer.run()
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Check if an image was uploaded with this message
    image = None
    if message.elements and isinstance(message.elements[0], cl.Image):
        image_element = message.elements[0]
        try:
            # Open the image and keep it in memory
            image = Image.open(image_element.path)
            image.load()  # This ensures the file is fully loaded into memory
            cl.user_session.set("image", image)
        except Exception as e:
            logger.error(f"Error processing image: {str(e)}")
            await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
            return

    # Prepare user message
    user_message = f"""
    Answer the question and use tools if needed:\n{message.content}.\n\n
    Current Date and Time: {now}

    Context:
    {context}
    """

    if image:
        user_message = f"Image uploaded. {user_message}"

    message_history.append({"role": "user", "content": user_message})

    msg = cl.Message(content="")

    # Use PraisonAI Agents if available, otherwise fallback to litellm
    if PRAISONAI_AGENTS_AVAILABLE:
        await handle_with_praisonai_agents(message, user_message, model_name, claude_code_enabled, msg, image)
    else:
        await handle_with_litellm(user_message, model_name, message_history, msg, image)

async def handle_with_praisonai_agents(message, user_message, model_name, claude_code_enabled, msg, image):
    """Handle message using PraisonAI Agents framework with optional Claude Code tool"""
    try:
        # Prepare tools list
        available_tools = []

        # Add Tavily search tool if API key available
        if tavily_api_key:
            available_tools.append(tavily_web_search)

        # Add Claude Code tool if enabled
        if claude_code_enabled:
            available_tools.append(claude_code_tool)

        # Create agent instructions
        instructions = """You are a helpful AI assistant. Use the available tools when needed to provide comprehensive responses.

If Claude Code tool is available and the user's request involves:
- File modifications, code changes, or implementation tasks
- Creating, editing, or debugging code
- Project setup or development tasks
- Git operations or version control

Then use the Claude Code tool to handle those requests.

For informational questions, explanations, or general conversations, respond normally without using Claude Code."""

        # Create agent with streaming enabled
        agent = Agent(
            name="PraisonAI Assistant",
            instructions=instructions,
            llm=model_name,
            tools=available_tools if available_tools else None,
            stream=True
        )

        # Execute agent with streaming
        full_response = ""
        msg_sent = False

        try:
            # Use async chat for proper streaming
            result = await agent.achat(user_message)

            # Get the response text
            if hasattr(result, 'raw'):
                response_text = result.raw
            else:
                response_text = str(result)

            # Send message on first content
            if not msg_sent:
                await msg.send()
                msg_sent = True

            # Stream in word chunks for better UX (not char-by-char which is too slow)
            words = response_text.split(' ')
            for i, word in enumerate(words):
                token = word + (' ' if i < len(words) - 1 else '')
                await msg.stream_token(token)
                full_response += token

        except Exception as e:
            error_response = f"Error executing agent: {str(e)}"
            if not msg_sent:
                await msg.send()
            await msg.stream_token(error_response)
            full_response = error_response

        msg.content = full_response
        await msg.update()

    except Exception as e:
        error_msg = f"Failed to use PraisonAI Agents: {str(e)}"
        logger.error(error_msg)
        await msg.send()
        await msg.stream_token(error_msg)
        msg.content = error_msg
        await msg.update()

async def handle_with_litellm(user_message, model_name, message_history, msg, image):
    """Fallback handler using litellm for backward compatibility"""
    # Prepare the completion parameters using the helper function
    completion_params = _build_completion_params(
        model_name,
        messages=message_history,
        stream=True,
    )

    # If an image is uploaded, include it in the message
    if image:
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        completion_params["messages"][-1] = {
            "role": "user",
            "content": [
                {"type": "text", "text": user_message},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
            ]
        }
        # Use a vision-capable model when an image is present
        completion_params["model"] = "gpt-4-vision-preview"

    # Only add tools and tool_choice if Tavily API key is available and no image is uploaded
    if tavily_api_key:
        completion_params["tools"] = tools
        completion_params["tool_choice"] = "auto"

    response = await acompletion(**completion_params)
    logger.debug(f"LLM response: {response}")

    full_response = ""
    tool_calls = []
    current_tool_call = None
    msg_sent = False

    async for part in response:
        logger.debug(f"LLM part: {part}")
        if 'choices' in part and len(part['choices']) > 0:
            delta = part['choices'][0].get('delta', {})

            if 'content' in delta and delta['content'] is not None:
                token = delta['content']
                # Send message on first token
                if not msg_sent:
                    await msg.send()
                    msg_sent = True
                await msg.stream_token(token)
                full_response += token

            if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
                for tool_call in delta['tool_calls']:
                    if current_tool_call is None or tool_call.index != current_tool_call['index']:
                        if current_tool_call:
                            tool_calls.append(current_tool_call)
                        current_tool_call = {
                            'id': tool_call.id,
                            'type': tool_call.type,
                            'index': tool_call.index,
                            'function': {
                                'name': tool_call.function.name if tool_call.function else None,
                                'arguments': ''
                            }
                        }
                    if tool_call.function:
                        if tool_call.function.name:
                            current_tool_call['function']['name'] = tool_call.function.name
                        if tool_call.function.arguments:
                            current_tool_call['function']['arguments'] += tool_call.function.arguments

    if current_tool_call:
        tool_calls.append(current_tool_call)

    # Ensure message is sent even if no content (tool calls only)
    if not msg_sent:
        await msg.send()

    logger.debug(f"Full response: {full_response}")
    logger.debug(f"Tool calls: {tool_calls}")
    message_history.append({"role": "assistant", "content": full_response})
    logger.debug(f"Message history: {message_history}")
    cl.user_session.set("message_history", message_history)
    await msg.update()

    if tavily_api_key and tool_calls:
        available_functions = {
            "tavily_web_search": tavily_web_search,
        }
        messages = message_history + [{"role": "assistant", "content": None, "function_call": {
            "name": tool_calls[0]['function']['name'],
            "arguments": tool_calls[0]['function']['arguments']
        }}]

        for tool_call in tool_calls:
            function_name = tool_call['function']['name']
            if function_name in available_functions:
                function_to_call = available_functions[function_name]
                function_args = tool_call['function']['arguments']
                if function_args:
                    try:
                        function_args = json.loads(function_args)
                        # Call the function asynchronously
                        function_response = await function_to_call(
                            query=function_args.get("query"),
                        )
                        messages.append(
                            {
                                "role": "function",
                                "name": function_name,
                                "content": function_response,
                            }
                        )
                    except json.JSONDecodeError:
                        logger.error(f"Failed to parse function arguments: {function_args}")

        second_response = await acompletion(
            **_build_completion_params(
                model_name,
                stream=True,
                messages=messages,
            )
        )
        logger.debug(f"Second LLM response: {second_response}")

        # Handle the streaming response
        full_response = ""
        async for part in second_response:
            if 'choices' in part and len(part['choices']) > 0:
                delta = part['choices'][0].get('delta', {})
                if 'content' in delta and delta['content'] is not None:
                    token = delta['content']
                    await msg.stream_token(token)
                    full_response += token

        # Update the message content
        msg.content = full_response
        await msg.update()
    else:
        # If no tool calls or Tavily API key is not set, the full_response is already set
        msg.content = full_response
        await msg.update()

# Authentication configuration
expected_username = os.getenv("CHAINLIT_USERNAME", "admin")  # Default to "admin" if not found
expected_password = os.getenv("CHAINLIT_PASSWORD", "admin")  # Default to "admin" if not found

# Warn if using default credentials
if expected_username == "admin" and expected_password == "admin":
    logger.warning("⚠️ Using default admin credentials. Set CHAINLIT_USERNAME and CHAINLIT_PASSWORD environment variables for production.")

@cl.password_auth_callback
def auth_callback(input_username: str, input_password: str):
    if (input_username, input_password) == (expected_username, expected_password):
        return cl.User(
            identifier=input_username, metadata={"role": "ADMIN", "provider": "credentials"}
        )
    else:
        return None

async def send_count():
    await cl.Message(
        f"Create step counter: {create_step_counter}", disable_feedback=True
    ).send()

@cl.on_chat_resume
async def on_chat_resume(thread: ThreadDict):
    logger.info(f"Resuming chat: {thread['id']}")
    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-5-nano"
    # Load Claude Code setting (check CLI flag first, then database setting)
    claude_code_enabled = os.getenv("PRAISONAI_CLAUDECODE_ENABLED", "false").lower() == "true"
    if not claude_code_enabled:
        claude_code_enabled = (load_setting("claude_code_enabled") or "false").lower() == "true"
    logger.debug(f"Model name: {model_name}")
    settings = cl.ChatSettings(
        [
            TextInput(
                id="model_name",
                label="Enter the Model Name",
                placeholder="e.g., gpt-5-nano",
                initial=model_name
            ),
            Switch(
                id="claude_code_enabled",
                label="Enable Claude Code (file modifications & coding)",
                initial=claude_code_enabled
            )
        ]
    )
    await settings.send()
    cl.user_session.set("thread_id", thread["id"])

    # Ensure metadata is a dictionary
    metadata = thread.get("metadata", {})
    if isinstance(metadata, str):
        try:
            metadata = json.loads(metadata)
        except json.JSONDecodeError:
            metadata = {}

    cl.user_session.set("metadata", metadata)

    message_history = cl.user_session.get("message_history", [])
    steps = thread["steps"]

    for message in steps:
        msg_type = message.get("type")
        if msg_type == "user_message":
            message_history.append({"role": "user", "content": message.get("output", "")})
        elif msg_type == "assistant_message":
            message_history.append({"role": "assistant", "content": message.get("output", "")})
        elif msg_type == "run":
            # Handle 'run' type messages
            if message.get("isError"):
                message_history.append({"role": "system", "content": f"Error: {message.get('output', '')}"})
            else:
                # You might want to handle non-error 'run' messages differently
                pass
        else:
            logger.warning(f"Message without recognized type: {message}")

    cl.user_session.set("message_history", message_history)

    # Check if there's an image in the thread metadata
    image_data = metadata.get("image")
    if image_data:
        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
        cl.user_session.set("image", image)
        await cl.Message(content="Previous image loaded. You can continue asking questions about it, upload a new image, or just chat.").send()
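
For orientation, the module above is a Chainlit script: its behaviour is driven by the environment variables visible in the source (MODEL_NAME, TAVILY_API_KEY, CHAINLIT_USERNAME, CHAINLIT_PASSWORD, PRAISONAI_CODE_REPO_PATH, PRAISONAI_CLAUDECODE_ENABLED), and Chainlit scripts are normally served with `chainlit run`. The sketch below is illustrative only; the file path and the direct `chainlit run` invocation are assumptions, not necessarily the command PraisonAI's own CLI wrapper uses.

# Minimal launch sketch, assuming praisonai 3.0.0 and chainlit are installed and
# that code.py is served the standard Chainlit way (`chainlit run <script>`).
import os
import subprocess

# Environment variables read by the module above.
env = dict(os.environ)
env.setdefault("MODEL_NAME", "gpt-5-nano")        # model name the UI falls back to
env.setdefault("PRAISONAI_CODE_REPO_PATH", ".")   # directory ContextGatherer indexes
env.setdefault("CHAINLIT_USERNAME", "admin")      # override these in production
env.setdefault("CHAINLIT_PASSWORD", "admin")
# env["TAVILY_API_KEY"] = "..."                   # enables the web-search tool
# env["PRAISONAI_CLAUDECODE_ENABLED"] = "true"    # enables the Claude Code tool

# Path to the installed module; adjust to your environment's site-packages location.
app = "praisonai/ui/code.py"

subprocess.run(["chainlit", "run", app, "-w"], env=env, check=True)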