PraisonAI 3.0.0 (praisonai-3.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonai/__init__.py +54 -0
- praisonai/__main__.py +15 -0
- praisonai/acp/__init__.py +54 -0
- praisonai/acp/config.py +159 -0
- praisonai/acp/server.py +587 -0
- praisonai/acp/session.py +219 -0
- praisonai/adapters/__init__.py +50 -0
- praisonai/adapters/readers.py +395 -0
- praisonai/adapters/rerankers.py +315 -0
- praisonai/adapters/retrievers.py +394 -0
- praisonai/adapters/vector_stores.py +409 -0
- praisonai/agent_scheduler.py +337 -0
- praisonai/agents_generator.py +903 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +1197 -0
- praisonai/capabilities/__init__.py +275 -0
- praisonai/capabilities/a2a.py +140 -0
- praisonai/capabilities/assistants.py +283 -0
- praisonai/capabilities/audio.py +320 -0
- praisonai/capabilities/batches.py +469 -0
- praisonai/capabilities/completions.py +336 -0
- praisonai/capabilities/container_files.py +155 -0
- praisonai/capabilities/containers.py +93 -0
- praisonai/capabilities/embeddings.py +158 -0
- praisonai/capabilities/files.py +467 -0
- praisonai/capabilities/fine_tuning.py +293 -0
- praisonai/capabilities/guardrails.py +182 -0
- praisonai/capabilities/images.py +330 -0
- praisonai/capabilities/mcp.py +190 -0
- praisonai/capabilities/messages.py +270 -0
- praisonai/capabilities/moderations.py +154 -0
- praisonai/capabilities/ocr.py +217 -0
- praisonai/capabilities/passthrough.py +204 -0
- praisonai/capabilities/rag.py +207 -0
- praisonai/capabilities/realtime.py +160 -0
- praisonai/capabilities/rerank.py +165 -0
- praisonai/capabilities/responses.py +266 -0
- praisonai/capabilities/search.py +109 -0
- praisonai/capabilities/skills.py +133 -0
- praisonai/capabilities/vector_store_files.py +334 -0
- praisonai/capabilities/vector_stores.py +304 -0
- praisonai/capabilities/videos.py +141 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/chat/__init__.py +106 -0
- praisonai/chat/app.py +125 -0
- praisonai/cli/__init__.py +26 -0
- praisonai/cli/app.py +213 -0
- praisonai/cli/commands/__init__.py +75 -0
- praisonai/cli/commands/acp.py +70 -0
- praisonai/cli/commands/completion.py +333 -0
- praisonai/cli/commands/config.py +166 -0
- praisonai/cli/commands/debug.py +142 -0
- praisonai/cli/commands/diag.py +55 -0
- praisonai/cli/commands/doctor.py +166 -0
- praisonai/cli/commands/environment.py +179 -0
- praisonai/cli/commands/lsp.py +112 -0
- praisonai/cli/commands/mcp.py +210 -0
- praisonai/cli/commands/profile.py +457 -0
- praisonai/cli/commands/run.py +228 -0
- praisonai/cli/commands/schedule.py +150 -0
- praisonai/cli/commands/serve.py +97 -0
- praisonai/cli/commands/session.py +212 -0
- praisonai/cli/commands/traces.py +145 -0
- praisonai/cli/commands/version.py +101 -0
- praisonai/cli/configuration/__init__.py +18 -0
- praisonai/cli/configuration/loader.py +353 -0
- praisonai/cli/configuration/paths.py +114 -0
- praisonai/cli/configuration/schema.py +164 -0
- praisonai/cli/features/__init__.py +268 -0
- praisonai/cli/features/acp.py +236 -0
- praisonai/cli/features/action_orchestrator.py +546 -0
- praisonai/cli/features/agent_scheduler.py +773 -0
- praisonai/cli/features/agent_tools.py +474 -0
- praisonai/cli/features/agents.py +375 -0
- praisonai/cli/features/at_mentions.py +471 -0
- praisonai/cli/features/auto_memory.py +182 -0
- praisonai/cli/features/autonomy_mode.py +490 -0
- praisonai/cli/features/background.py +356 -0
- praisonai/cli/features/base.py +168 -0
- praisonai/cli/features/capabilities.py +1326 -0
- praisonai/cli/features/checkpoints.py +338 -0
- praisonai/cli/features/code_intelligence.py +652 -0
- praisonai/cli/features/compaction.py +294 -0
- praisonai/cli/features/compare.py +534 -0
- praisonai/cli/features/cost_tracker.py +514 -0
- praisonai/cli/features/debug.py +810 -0
- praisonai/cli/features/deploy.py +517 -0
- praisonai/cli/features/diag.py +289 -0
- praisonai/cli/features/doctor/__init__.py +63 -0
- praisonai/cli/features/doctor/checks/__init__.py +24 -0
- praisonai/cli/features/doctor/checks/acp_checks.py +240 -0
- praisonai/cli/features/doctor/checks/config_checks.py +366 -0
- praisonai/cli/features/doctor/checks/db_checks.py +366 -0
- praisonai/cli/features/doctor/checks/env_checks.py +543 -0
- praisonai/cli/features/doctor/checks/lsp_checks.py +199 -0
- praisonai/cli/features/doctor/checks/mcp_checks.py +349 -0
- praisonai/cli/features/doctor/checks/memory_checks.py +268 -0
- praisonai/cli/features/doctor/checks/network_checks.py +251 -0
- praisonai/cli/features/doctor/checks/obs_checks.py +328 -0
- praisonai/cli/features/doctor/checks/performance_checks.py +235 -0
- praisonai/cli/features/doctor/checks/permissions_checks.py +259 -0
- praisonai/cli/features/doctor/checks/selftest_checks.py +322 -0
- praisonai/cli/features/doctor/checks/serve_checks.py +426 -0
- praisonai/cli/features/doctor/checks/skills_checks.py +231 -0
- praisonai/cli/features/doctor/checks/tools_checks.py +371 -0
- praisonai/cli/features/doctor/engine.py +266 -0
- praisonai/cli/features/doctor/formatters.py +310 -0
- praisonai/cli/features/doctor/handler.py +397 -0
- praisonai/cli/features/doctor/models.py +264 -0
- praisonai/cli/features/doctor/registry.py +239 -0
- praisonai/cli/features/endpoints.py +1019 -0
- praisonai/cli/features/eval.py +560 -0
- praisonai/cli/features/external_agents.py +231 -0
- praisonai/cli/features/fast_context.py +410 -0
- praisonai/cli/features/flow_display.py +566 -0
- praisonai/cli/features/git_integration.py +651 -0
- praisonai/cli/features/guardrail.py +171 -0
- praisonai/cli/features/handoff.py +185 -0
- praisonai/cli/features/hooks.py +583 -0
- praisonai/cli/features/image.py +384 -0
- praisonai/cli/features/interactive_runtime.py +585 -0
- praisonai/cli/features/interactive_tools.py +380 -0
- praisonai/cli/features/interactive_tui.py +603 -0
- praisonai/cli/features/jobs.py +632 -0
- praisonai/cli/features/knowledge.py +531 -0
- praisonai/cli/features/lite.py +244 -0
- praisonai/cli/features/lsp_cli.py +225 -0
- praisonai/cli/features/mcp.py +169 -0
- praisonai/cli/features/message_queue.py +587 -0
- praisonai/cli/features/metrics.py +211 -0
- praisonai/cli/features/n8n.py +673 -0
- praisonai/cli/features/observability.py +293 -0
- praisonai/cli/features/ollama.py +361 -0
- praisonai/cli/features/output_style.py +273 -0
- praisonai/cli/features/package.py +631 -0
- praisonai/cli/features/performance.py +308 -0
- praisonai/cli/features/persistence.py +636 -0
- praisonai/cli/features/profile.py +226 -0
- praisonai/cli/features/profiler/__init__.py +81 -0
- praisonai/cli/features/profiler/core.py +558 -0
- praisonai/cli/features/profiler/optimizations.py +652 -0
- praisonai/cli/features/profiler/suite.py +386 -0
- praisonai/cli/features/profiling.py +350 -0
- praisonai/cli/features/queue/__init__.py +73 -0
- praisonai/cli/features/queue/manager.py +395 -0
- praisonai/cli/features/queue/models.py +286 -0
- praisonai/cli/features/queue/persistence.py +564 -0
- praisonai/cli/features/queue/scheduler.py +484 -0
- praisonai/cli/features/queue/worker.py +372 -0
- praisonai/cli/features/recipe.py +1723 -0
- praisonai/cli/features/recipes.py +449 -0
- praisonai/cli/features/registry.py +229 -0
- praisonai/cli/features/repo_map.py +860 -0
- praisonai/cli/features/router.py +466 -0
- praisonai/cli/features/sandbox_executor.py +515 -0
- praisonai/cli/features/serve.py +829 -0
- praisonai/cli/features/session.py +222 -0
- praisonai/cli/features/skills.py +856 -0
- praisonai/cli/features/slash_commands.py +650 -0
- praisonai/cli/features/telemetry.py +179 -0
- praisonai/cli/features/templates.py +1384 -0
- praisonai/cli/features/thinking.py +305 -0
- praisonai/cli/features/todo.py +334 -0
- praisonai/cli/features/tools.py +680 -0
- praisonai/cli/features/tui/__init__.py +83 -0
- praisonai/cli/features/tui/app.py +580 -0
- praisonai/cli/features/tui/cli.py +566 -0
- praisonai/cli/features/tui/debug.py +511 -0
- praisonai/cli/features/tui/events.py +99 -0
- praisonai/cli/features/tui/mock_provider.py +328 -0
- praisonai/cli/features/tui/orchestrator.py +652 -0
- praisonai/cli/features/tui/screens/__init__.py +50 -0
- praisonai/cli/features/tui/screens/main.py +245 -0
- praisonai/cli/features/tui/screens/queue.py +174 -0
- praisonai/cli/features/tui/screens/session.py +124 -0
- praisonai/cli/features/tui/screens/settings.py +148 -0
- praisonai/cli/features/tui/widgets/__init__.py +56 -0
- praisonai/cli/features/tui/widgets/chat.py +261 -0
- praisonai/cli/features/tui/widgets/composer.py +224 -0
- praisonai/cli/features/tui/widgets/queue_panel.py +200 -0
- praisonai/cli/features/tui/widgets/status.py +167 -0
- praisonai/cli/features/tui/widgets/tool_panel.py +248 -0
- praisonai/cli/features/workflow.py +720 -0
- praisonai/cli/legacy.py +236 -0
- praisonai/cli/main.py +5559 -0
- praisonai/cli/schedule_cli.py +54 -0
- praisonai/cli/state/__init__.py +31 -0
- praisonai/cli/state/identifiers.py +161 -0
- praisonai/cli/state/sessions.py +313 -0
- praisonai/code/__init__.py +93 -0
- praisonai/code/agent_tools.py +344 -0
- praisonai/code/diff/__init__.py +21 -0
- praisonai/code/diff/diff_strategy.py +432 -0
- praisonai/code/tools/__init__.py +27 -0
- praisonai/code/tools/apply_diff.py +221 -0
- praisonai/code/tools/execute_command.py +275 -0
- praisonai/code/tools/list_files.py +274 -0
- praisonai/code/tools/read_file.py +206 -0
- praisonai/code/tools/search_replace.py +248 -0
- praisonai/code/tools/write_file.py +217 -0
- praisonai/code/utils/__init__.py +46 -0
- praisonai/code/utils/file_utils.py +307 -0
- praisonai/code/utils/ignore_utils.py +308 -0
- praisonai/code/utils/text_utils.py +276 -0
- praisonai/db/__init__.py +64 -0
- praisonai/db/adapter.py +531 -0
- praisonai/deploy/__init__.py +62 -0
- praisonai/deploy/api.py +231 -0
- praisonai/deploy/docker.py +454 -0
- praisonai/deploy/doctor.py +367 -0
- praisonai/deploy/main.py +327 -0
- praisonai/deploy/models.py +179 -0
- praisonai/deploy/providers/__init__.py +33 -0
- praisonai/deploy/providers/aws.py +331 -0
- praisonai/deploy/providers/azure.py +358 -0
- praisonai/deploy/providers/base.py +101 -0
- praisonai/deploy/providers/gcp.py +314 -0
- praisonai/deploy/schema.py +208 -0
- praisonai/deploy.py +185 -0
- praisonai/endpoints/__init__.py +53 -0
- praisonai/endpoints/a2u_server.py +410 -0
- praisonai/endpoints/discovery.py +165 -0
- praisonai/endpoints/providers/__init__.py +28 -0
- praisonai/endpoints/providers/a2a.py +253 -0
- praisonai/endpoints/providers/a2u.py +208 -0
- praisonai/endpoints/providers/agents_api.py +171 -0
- praisonai/endpoints/providers/base.py +231 -0
- praisonai/endpoints/providers/mcp.py +263 -0
- praisonai/endpoints/providers/recipe.py +206 -0
- praisonai/endpoints/providers/tools_mcp.py +150 -0
- praisonai/endpoints/registry.py +131 -0
- praisonai/endpoints/server.py +161 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +155 -0
- praisonai/integrations/__init__.py +56 -0
- praisonai/integrations/base.py +303 -0
- praisonai/integrations/claude_code.py +270 -0
- praisonai/integrations/codex_cli.py +255 -0
- praisonai/integrations/cursor_cli.py +195 -0
- praisonai/integrations/gemini_cli.py +222 -0
- praisonai/jobs/__init__.py +67 -0
- praisonai/jobs/executor.py +425 -0
- praisonai/jobs/models.py +230 -0
- praisonai/jobs/router.py +314 -0
- praisonai/jobs/server.py +186 -0
- praisonai/jobs/store.py +203 -0
- praisonai/llm/__init__.py +66 -0
- praisonai/llm/registry.py +382 -0
- praisonai/mcp_server/__init__.py +152 -0
- praisonai/mcp_server/adapters/__init__.py +74 -0
- praisonai/mcp_server/adapters/agents.py +128 -0
- praisonai/mcp_server/adapters/capabilities.py +168 -0
- praisonai/mcp_server/adapters/cli_tools.py +568 -0
- praisonai/mcp_server/adapters/extended_capabilities.py +462 -0
- praisonai/mcp_server/adapters/knowledge.py +93 -0
- praisonai/mcp_server/adapters/memory.py +104 -0
- praisonai/mcp_server/adapters/prompts.py +306 -0
- praisonai/mcp_server/adapters/resources.py +124 -0
- praisonai/mcp_server/adapters/tools_bridge.py +280 -0
- praisonai/mcp_server/auth/__init__.py +48 -0
- praisonai/mcp_server/auth/api_key.py +291 -0
- praisonai/mcp_server/auth/oauth.py +460 -0
- praisonai/mcp_server/auth/oidc.py +289 -0
- praisonai/mcp_server/auth/scopes.py +260 -0
- praisonai/mcp_server/cli.py +852 -0
- praisonai/mcp_server/elicitation.py +445 -0
- praisonai/mcp_server/icons.py +302 -0
- praisonai/mcp_server/recipe_adapter.py +573 -0
- praisonai/mcp_server/recipe_cli.py +824 -0
- praisonai/mcp_server/registry.py +703 -0
- praisonai/mcp_server/sampling.py +422 -0
- praisonai/mcp_server/server.py +490 -0
- praisonai/mcp_server/tasks.py +443 -0
- praisonai/mcp_server/transports/__init__.py +18 -0
- praisonai/mcp_server/transports/http_stream.py +376 -0
- praisonai/mcp_server/transports/stdio.py +132 -0
- praisonai/persistence/__init__.py +84 -0
- praisonai/persistence/config.py +238 -0
- praisonai/persistence/conversation/__init__.py +25 -0
- praisonai/persistence/conversation/async_mysql.py +427 -0
- praisonai/persistence/conversation/async_postgres.py +410 -0
- praisonai/persistence/conversation/async_sqlite.py +371 -0
- praisonai/persistence/conversation/base.py +151 -0
- praisonai/persistence/conversation/json_store.py +250 -0
- praisonai/persistence/conversation/mysql.py +387 -0
- praisonai/persistence/conversation/postgres.py +401 -0
- praisonai/persistence/conversation/singlestore.py +240 -0
- praisonai/persistence/conversation/sqlite.py +341 -0
- praisonai/persistence/conversation/supabase.py +203 -0
- praisonai/persistence/conversation/surrealdb.py +287 -0
- praisonai/persistence/factory.py +301 -0
- praisonai/persistence/hooks/__init__.py +18 -0
- praisonai/persistence/hooks/agent_hooks.py +297 -0
- praisonai/persistence/knowledge/__init__.py +26 -0
- praisonai/persistence/knowledge/base.py +144 -0
- praisonai/persistence/knowledge/cassandra.py +232 -0
- praisonai/persistence/knowledge/chroma.py +295 -0
- praisonai/persistence/knowledge/clickhouse.py +242 -0
- praisonai/persistence/knowledge/cosmosdb_vector.py +438 -0
- praisonai/persistence/knowledge/couchbase.py +286 -0
- praisonai/persistence/knowledge/lancedb.py +216 -0
- praisonai/persistence/knowledge/langchain_adapter.py +291 -0
- praisonai/persistence/knowledge/lightrag_adapter.py +212 -0
- praisonai/persistence/knowledge/llamaindex_adapter.py +256 -0
- praisonai/persistence/knowledge/milvus.py +277 -0
- praisonai/persistence/knowledge/mongodb_vector.py +306 -0
- praisonai/persistence/knowledge/pgvector.py +335 -0
- praisonai/persistence/knowledge/pinecone.py +253 -0
- praisonai/persistence/knowledge/qdrant.py +301 -0
- praisonai/persistence/knowledge/redis_vector.py +291 -0
- praisonai/persistence/knowledge/singlestore_vector.py +299 -0
- praisonai/persistence/knowledge/surrealdb_vector.py +309 -0
- praisonai/persistence/knowledge/upstash_vector.py +266 -0
- praisonai/persistence/knowledge/weaviate.py +223 -0
- praisonai/persistence/migrations/__init__.py +10 -0
- praisonai/persistence/migrations/manager.py +251 -0
- praisonai/persistence/orchestrator.py +406 -0
- praisonai/persistence/state/__init__.py +21 -0
- praisonai/persistence/state/async_mongodb.py +200 -0
- praisonai/persistence/state/base.py +107 -0
- praisonai/persistence/state/dynamodb.py +226 -0
- praisonai/persistence/state/firestore.py +175 -0
- praisonai/persistence/state/gcs.py +155 -0
- praisonai/persistence/state/memory.py +245 -0
- praisonai/persistence/state/mongodb.py +158 -0
- praisonai/persistence/state/redis.py +190 -0
- praisonai/persistence/state/upstash.py +144 -0
- praisonai/persistence/tests/__init__.py +3 -0
- praisonai/persistence/tests/test_all_backends.py +633 -0
- praisonai/profiler.py +1214 -0
- praisonai/recipe/__init__.py +134 -0
- praisonai/recipe/bridge.py +278 -0
- praisonai/recipe/core.py +893 -0
- praisonai/recipe/exceptions.py +54 -0
- praisonai/recipe/history.py +402 -0
- praisonai/recipe/models.py +266 -0
- praisonai/recipe/operations.py +440 -0
- praisonai/recipe/policy.py +422 -0
- praisonai/recipe/registry.py +849 -0
- praisonai/recipe/runtime.py +214 -0
- praisonai/recipe/security.py +711 -0
- praisonai/recipe/serve.py +859 -0
- praisonai/recipe/server.py +613 -0
- praisonai/scheduler/__init__.py +45 -0
- praisonai/scheduler/agent_scheduler.py +552 -0
- praisonai/scheduler/base.py +124 -0
- praisonai/scheduler/daemon_manager.py +225 -0
- praisonai/scheduler/state_manager.py +155 -0
- praisonai/scheduler/yaml_loader.py +193 -0
- praisonai/scheduler.py +194 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup.py +16 -0
- praisonai/templates/__init__.py +116 -0
- praisonai/templates/cache.py +364 -0
- praisonai/templates/dependency_checker.py +358 -0
- praisonai/templates/discovery.py +391 -0
- praisonai/templates/loader.py +564 -0
- praisonai/templates/registry.py +511 -0
- praisonai/templates/resolver.py +206 -0
- praisonai/templates/security.py +327 -0
- praisonai/templates/tool_override.py +498 -0
- praisonai/templates/tools_doctor.py +256 -0
- praisonai/test.py +105 -0
- praisonai/train.py +562 -0
- praisonai/train_vision.py +306 -0
- praisonai/ui/agents.py +824 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chainlit_compat.py +246 -0
- praisonai/ui/chat.py +532 -0
- praisonai/ui/code.py +717 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +284 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/database_config.py +56 -0
- praisonai/ui/db.py +294 -0
- praisonai/ui/realtime.py +488 -0
- praisonai/ui/realtimeclient/__init__.py +756 -0
- praisonai/ui/realtimeclient/tools.py +242 -0
- praisonai/ui/sql_alchemy.py +710 -0
- praisonai/upload_vision.py +140 -0
- praisonai/version.py +1 -0
- praisonai-3.0.0.dist-info/METADATA +3493 -0
- praisonai-3.0.0.dist-info/RECORD +393 -0
- praisonai-3.0.0.dist-info/WHEEL +5 -0
- praisonai-3.0.0.dist-info/entry_points.txt +4 -0
- praisonai-3.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,306 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+This script finetunes a vision language model using Unsloth's fast training framework.
+It supports vision tasks by converting raw image-caption samples into a conversation format,
+adding vision-specific LoRA adapters, and training using TRL's SFTTrainer with UnslothVisionDataCollator.
+"""
+
+import os
+import sys
+import yaml
+import torch
+import shutil
+import subprocess
+import gc # For garbage collection
+
+from datasets import load_dataset, concatenate_datasets, Dataset
+from unsloth import FastVisionModel, is_bf16_supported
+from unsloth.trainer import UnslothVisionDataCollator
+from transformers import TrainingArguments
+from trl import SFTTrainer
+from tqdm import tqdm # Add progress bar
+
+
+class TrainVisionModel:
+    def __init__(self, config_path="config.yaml"):
+        self.load_config(config_path)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model = None
+        self.hf_tokenizer = None # The underlying tokenizer
+
+    def load_config(self, path):
+        with open(path, "r") as file:
+            self.config = yaml.safe_load(file)
+        print("DEBUG: Loaded config:", self.config)
+
+    def print_system_info(self):
+        print("DEBUG: PyTorch version:", torch.__version__)
+        print("DEBUG: CUDA version:", torch.version.cuda)
+        if torch.cuda.is_available():
+            print("DEBUG: CUDA Device Capability:", torch.cuda.get_device_capability())
+        else:
+            print("DEBUG: CUDA is not available")
+        print("DEBUG: Python Version:", sys.version)
+        print("DEBUG: Python Path:", sys.executable)
+
+    def check_gpu(self):
+        gpu_stats = torch.cuda.get_device_properties(0)
+        print(f"DEBUG: GPU = {gpu_stats.name}. Max memory = {round(gpu_stats.total_memory/(1024**3),3)} GB.")
+
+    def check_ram(self):
+        from psutil import virtual_memory
+        ram_gb = virtual_memory().total / 1e9
+        print(f"DEBUG: Your runtime has {ram_gb:.1f} gigabytes of available RAM")
+        if ram_gb < 20:
+            print("DEBUG: Not using a high-RAM runtime")
+        else:
+            print("DEBUG: You are using a high-RAM runtime!")
+
+    def prepare_model(self):
+        print("DEBUG: Preparing vision model and tokenizer...")
+        self.model, original_tokenizer = FastVisionModel.from_pretrained(
+            model_name=self.config["model_name"],
+            load_in_4bit=self.config["load_in_4bit"],
+            use_gradient_checkpointing="unsloth"
+        )
+        print("DEBUG: Vision model and original tokenizer loaded.")
+
+        # Use the full processor that supports image inputs.
+        self.hf_tokenizer = original_tokenizer
+
+        # Set pad token if needed
+        if not hasattr(self.hf_tokenizer, 'pad_token') or self.hf_tokenizer.pad_token is None:
+            if hasattr(self.hf_tokenizer, 'eos_token'):
+                self.hf_tokenizer.pad_token = self.hf_tokenizer.eos_token
+            elif hasattr(self.hf_tokenizer, 'bos_token'):
+                self.hf_tokenizer.pad_token = self.hf_tokenizer.bos_token
+
+        # Set max length
+        if hasattr(self.hf_tokenizer, 'model_max_length'):
+            self.hf_tokenizer.model_max_length = self.config.get("max_seq_length", 2048)
+
+        # Add vision-specific LoRA adapters
+        self.model = FastVisionModel.get_peft_model(
+            self.model,
+            finetune_vision_layers=self.config.get("finetune_vision_layers", False),
+            finetune_language_layers=self.config.get("finetune_language_layers", True),
+            finetune_attention_modules=self.config.get("finetune_attention_modules", True),
+            finetune_mlp_modules=self.config.get("finetune_mlp_modules", True),
+            r=16,
+            lora_alpha=16,
+            lora_dropout=0,
+            bias="none",
+            random_state=3407,
+            use_rslora=False,
+            loftq_config=None
+        )
+        print("DEBUG: Vision LoRA adapters added.")
+
+    def convert_sample(self, sample):
+
+        instruction = self.config.get(
+            "vision_instruction",
+            "You are an expert radiographer. Describe accurately what you see in this image."
+        )
+        conversation = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": instruction},
+                    {"type": "image", "image": sample["image"]}
+                ]
+            },
+            {
+                "role": "assistant",
+                "content": [
+                    {"type": "text", "text": sample["caption"]}
+                ]
+            },
+        ]
+
+        return {"messages": conversation}
+
+    def load_datasets(self):
+        all_converted = []
+        for dataset_info in self.config["dataset"]:
+            print("\nDEBUG: Loading vision dataset:", dataset_info)
+            ds = load_dataset(
+                dataset_info["name"],
+                split=dataset_info.get("split_type", "train")
+            )
+            print("DEBUG: Dataset size:", len(ds))
+            print("DEBUG: First raw sample:", ds[0])
+            print("DEBUG: Dataset features:", ds.features)
+
+            print("\nDEBUG: Converting dataset to vision conversation format...")
+            converted_ds = [self.convert_sample(sample) for sample in ds]
+
+            # Debug first converted sample
+            print("\nDEBUG: First converted sample structure:")
+            first = converted_ds[0]
+            print("DEBUG: Message keys:", first["messages"][0]["content"][1].keys())
+            print("DEBUG: Image type in converted:", type(first["messages"][0]["content"][1].get("image")))
+
+            all_converted.extend(converted_ds)
+
+        print("\nDEBUG: Combined vision dataset has", len(all_converted), "examples.")
+        return all_converted
+
+    def train_model(self):
+        print("DEBUG: Starting vision training...")
+        raw_dataset = self.load_datasets()
+
+        # Build training arguments using TrainingArguments
+        training_args = TrainingArguments(
+            per_device_train_batch_size=self.config.get("per_device_train_batch_size", 1),
+            gradient_accumulation_steps=self.config.get("gradient_accumulation_steps", 4),
+            warmup_steps=self.config.get("warmup_steps", 5),
+            max_steps=self.config.get("max_steps", 30),
+            learning_rate=self.config.get("learning_rate", 2e-4),
+            fp16=self.config.get("fp16", not is_bf16_supported()),
+            bf16=self.config.get("bf16", is_bf16_supported()),
+            logging_steps=self.config.get("logging_steps", 1),
+            optim=self.config.get("optim", "adamw_8bit"),
+            weight_decay=self.config.get("weight_decay", 0.01),
+            lr_scheduler_type=self.config.get("lr_scheduler_type", "linear"),
+            seed=self.config.get("seed", 3407),
+            output_dir=self.config.get("output_dir", "outputs"),
+            report_to="none" if not os.getenv("PRAISON_WANDB") else "wandb",
+            remove_unused_columns=False,
+            # Add memory optimization settings
+            gradient_checkpointing=True,
+            max_grad_norm=1.0,
+        )
+
+        trainer = SFTTrainer(
+            model=self.model,
+            tokenizer=self.hf_tokenizer,
+            data_collator=UnslothVisionDataCollator(self.model, self.hf_tokenizer),
+            train_dataset=raw_dataset,
+            args=training_args,
+            max_seq_length=self.config.get("max_seq_length", 2048),
+            dataset_text_field="", # Required for vision training
+            dataset_kwargs={"skip_prepare_dataset": True}, # Required for vision training
+            packing=False # Explicitly set packing to False
+        )
+        print("DEBUG: Beginning vision trainer.train() ...")
+        trainer.train()
+        print("DEBUG: Vision training complete. Saving model and tokenizer locally...")
+        self.model.save_pretrained("lora_vision_model")
+        self.hf_tokenizer.save_pretrained("lora_vision_model")
+        print("DEBUG: Saved vision model and tokenizer to 'lora_vision_model'.")
+
+    def vision_inference(self, instruction, image):
+        FastVisionModel.for_inference(self.model)
+        messages = [
+            {"role": "user", "content": [
+                {"type": "image"},
+                {"type": "text", "text": instruction}
+            ]}
+        ]
+        input_text = self.hf_tokenizer.apply_chat_template(messages, add_generation_prompt=True)
+        inputs = self.hf_tokenizer(
+            image,
+            input_text,
+            add_special_tokens=False,
+            return_tensors="pt"
+        ).to("cuda")
+        outputs = self.model.generate(
+            **inputs,
+            max_new_tokens=128,
+            use_cache=True,
+            temperature=1.5,
+            min_p=0.1
+        )
+        print("DEBUG: Vision inference output:", self.hf_tokenizer.batch_decode(outputs))
+
+    def save_model_merged(self):
+        if os.path.exists(self.config["hf_model_name"]):
+            shutil.rmtree(self.config["hf_model_name"])
+        self.model.push_to_hub_merged(
+            self.config["hf_model_name"],
+            self.hf_tokenizer,
+            save_method="merged_16bit",
+            token=os.getenv("HF_TOKEN")
+        )
+
+    def push_model_gguf(self):
+        self.model.push_to_hub_gguf(
+            self.config["hf_model_name"],
+            self.hf_tokenizer,
+            quantization_method=self.config.get("quantization_method", "q4_k_m"),
+            token=os.getenv("HF_TOKEN")
+        )
+
+    def save_model_gguf(self):
+        self.model.save_pretrained_gguf(
+            self.config["hf_model_name"],
+            self.hf_tokenizer,
+            quantization_method="q4_k_m"
+        )
+
+    def prepare_modelfile_content(self):
+        output_model = self.config["hf_model_name"]
+
+        template = '''{{- range $index, $_ := .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|>
+
+{{ .Content }}
+{{- if gt (len (slice $.Messages $index)) 1 }}<|eot_id|>
+{{- else if ne .Role "assistant" }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+
+{{ end }}
+{{- end }}'''
+
+        return f"""FROM {output_model}
+TEMPLATE {template}
+PARAMETER temperature 0.6
+PARAMETER top_p 0.9
+"""
+
+    def create_and_push_ollama_model(self):
+        modelfile_content = self.prepare_modelfile_content()
+        with open("Modelfile", "w") as file:
+            file.write(modelfile_content)
+        subprocess.run(["ollama", "serve"])
+        subprocess.run(["ollama", "create", f"{self.config['ollama_model']}:{self.config['model_parameters']}", "-f", "Modelfile"])
+        subprocess.run(["ollama", "push", f"{self.config['ollama_model']}:{self.config['model_parameters']}"])
+
+    def run(self):
+        self.print_system_info()
+        self.check_gpu()
+        self.check_ram()
+        if self.config.get("train", "true").lower() == "true":
+            self.prepare_model()
+            self.train_model()
+        if self.config.get("huggingface_save", "true").lower() == "true":
+            self.save_model_merged()
+        if self.config.get("huggingface_save_gguf", "true").lower() == "true":
+            self.push_model_gguf()
+        if self.config.get("ollama_save", "true").lower() == "true":
+            self.create_and_push_ollama_model()
+
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser(description="PraisonAI Vision Training Script")
+    parser.add_argument("command", choices=["train", "inference"], help="Command to execute")
+    parser.add_argument("--config", default="config.yaml", help="Path to configuration file")
+    args = parser.parse_args()
+
+    trainer_obj = TrainVisionModel(config_path=args.config)
+    if args.command == "train":
+        trainer_obj.run()
+    elif args.command == "inference":
+        # For inference, we load a sample image from the first dataset
+        instr = trainer_obj.config.get("vision_instruction", "You are an expert radiographer. Describe accurately what you see in this image.")
+        ds_info = trainer_obj.config["dataset"][0]
+        ds = load_dataset(ds_info["name"], split=ds_info.get("split_type", "train"))
+        sample_image = ds[0]["image"]
+        if trainer_obj.model is None or trainer_obj.hf_tokenizer is None:
+            trainer_obj.prepare_model()
+        trainer_obj.vision_inference(instr, sample_image)
+
+
+if __name__ == "__main__":
+    main()