praisonai-3.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonai/__init__.py +54 -0
- praisonai/__main__.py +15 -0
- praisonai/acp/__init__.py +54 -0
- praisonai/acp/config.py +159 -0
- praisonai/acp/server.py +587 -0
- praisonai/acp/session.py +219 -0
- praisonai/adapters/__init__.py +50 -0
- praisonai/adapters/readers.py +395 -0
- praisonai/adapters/rerankers.py +315 -0
- praisonai/adapters/retrievers.py +394 -0
- praisonai/adapters/vector_stores.py +409 -0
- praisonai/agent_scheduler.py +337 -0
- praisonai/agents_generator.py +903 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +1197 -0
- praisonai/capabilities/__init__.py +275 -0
- praisonai/capabilities/a2a.py +140 -0
- praisonai/capabilities/assistants.py +283 -0
- praisonai/capabilities/audio.py +320 -0
- praisonai/capabilities/batches.py +469 -0
- praisonai/capabilities/completions.py +336 -0
- praisonai/capabilities/container_files.py +155 -0
- praisonai/capabilities/containers.py +93 -0
- praisonai/capabilities/embeddings.py +158 -0
- praisonai/capabilities/files.py +467 -0
- praisonai/capabilities/fine_tuning.py +293 -0
- praisonai/capabilities/guardrails.py +182 -0
- praisonai/capabilities/images.py +330 -0
- praisonai/capabilities/mcp.py +190 -0
- praisonai/capabilities/messages.py +270 -0
- praisonai/capabilities/moderations.py +154 -0
- praisonai/capabilities/ocr.py +217 -0
- praisonai/capabilities/passthrough.py +204 -0
- praisonai/capabilities/rag.py +207 -0
- praisonai/capabilities/realtime.py +160 -0
- praisonai/capabilities/rerank.py +165 -0
- praisonai/capabilities/responses.py +266 -0
- praisonai/capabilities/search.py +109 -0
- praisonai/capabilities/skills.py +133 -0
- praisonai/capabilities/vector_store_files.py +334 -0
- praisonai/capabilities/vector_stores.py +304 -0
- praisonai/capabilities/videos.py +141 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/chat/__init__.py +106 -0
- praisonai/chat/app.py +125 -0
- praisonai/cli/__init__.py +26 -0
- praisonai/cli/app.py +213 -0
- praisonai/cli/commands/__init__.py +75 -0
- praisonai/cli/commands/acp.py +70 -0
- praisonai/cli/commands/completion.py +333 -0
- praisonai/cli/commands/config.py +166 -0
- praisonai/cli/commands/debug.py +142 -0
- praisonai/cli/commands/diag.py +55 -0
- praisonai/cli/commands/doctor.py +166 -0
- praisonai/cli/commands/environment.py +179 -0
- praisonai/cli/commands/lsp.py +112 -0
- praisonai/cli/commands/mcp.py +210 -0
- praisonai/cli/commands/profile.py +457 -0
- praisonai/cli/commands/run.py +228 -0
- praisonai/cli/commands/schedule.py +150 -0
- praisonai/cli/commands/serve.py +97 -0
- praisonai/cli/commands/session.py +212 -0
- praisonai/cli/commands/traces.py +145 -0
- praisonai/cli/commands/version.py +101 -0
- praisonai/cli/configuration/__init__.py +18 -0
- praisonai/cli/configuration/loader.py +353 -0
- praisonai/cli/configuration/paths.py +114 -0
- praisonai/cli/configuration/schema.py +164 -0
- praisonai/cli/features/__init__.py +268 -0
- praisonai/cli/features/acp.py +236 -0
- praisonai/cli/features/action_orchestrator.py +546 -0
- praisonai/cli/features/agent_scheduler.py +773 -0
- praisonai/cli/features/agent_tools.py +474 -0
- praisonai/cli/features/agents.py +375 -0
- praisonai/cli/features/at_mentions.py +471 -0
- praisonai/cli/features/auto_memory.py +182 -0
- praisonai/cli/features/autonomy_mode.py +490 -0
- praisonai/cli/features/background.py +356 -0
- praisonai/cli/features/base.py +168 -0
- praisonai/cli/features/capabilities.py +1326 -0
- praisonai/cli/features/checkpoints.py +338 -0
- praisonai/cli/features/code_intelligence.py +652 -0
- praisonai/cli/features/compaction.py +294 -0
- praisonai/cli/features/compare.py +534 -0
- praisonai/cli/features/cost_tracker.py +514 -0
- praisonai/cli/features/debug.py +810 -0
- praisonai/cli/features/deploy.py +517 -0
- praisonai/cli/features/diag.py +289 -0
- praisonai/cli/features/doctor/__init__.py +63 -0
- praisonai/cli/features/doctor/checks/__init__.py +24 -0
- praisonai/cli/features/doctor/checks/acp_checks.py +240 -0
- praisonai/cli/features/doctor/checks/config_checks.py +366 -0
- praisonai/cli/features/doctor/checks/db_checks.py +366 -0
- praisonai/cli/features/doctor/checks/env_checks.py +543 -0
- praisonai/cli/features/doctor/checks/lsp_checks.py +199 -0
- praisonai/cli/features/doctor/checks/mcp_checks.py +349 -0
- praisonai/cli/features/doctor/checks/memory_checks.py +268 -0
- praisonai/cli/features/doctor/checks/network_checks.py +251 -0
- praisonai/cli/features/doctor/checks/obs_checks.py +328 -0
- praisonai/cli/features/doctor/checks/performance_checks.py +235 -0
- praisonai/cli/features/doctor/checks/permissions_checks.py +259 -0
- praisonai/cli/features/doctor/checks/selftest_checks.py +322 -0
- praisonai/cli/features/doctor/checks/serve_checks.py +426 -0
- praisonai/cli/features/doctor/checks/skills_checks.py +231 -0
- praisonai/cli/features/doctor/checks/tools_checks.py +371 -0
- praisonai/cli/features/doctor/engine.py +266 -0
- praisonai/cli/features/doctor/formatters.py +310 -0
- praisonai/cli/features/doctor/handler.py +397 -0
- praisonai/cli/features/doctor/models.py +264 -0
- praisonai/cli/features/doctor/registry.py +239 -0
- praisonai/cli/features/endpoints.py +1019 -0
- praisonai/cli/features/eval.py +560 -0
- praisonai/cli/features/external_agents.py +231 -0
- praisonai/cli/features/fast_context.py +410 -0
- praisonai/cli/features/flow_display.py +566 -0
- praisonai/cli/features/git_integration.py +651 -0
- praisonai/cli/features/guardrail.py +171 -0
- praisonai/cli/features/handoff.py +185 -0
- praisonai/cli/features/hooks.py +583 -0
- praisonai/cli/features/image.py +384 -0
- praisonai/cli/features/interactive_runtime.py +585 -0
- praisonai/cli/features/interactive_tools.py +380 -0
- praisonai/cli/features/interactive_tui.py +603 -0
- praisonai/cli/features/jobs.py +632 -0
- praisonai/cli/features/knowledge.py +531 -0
- praisonai/cli/features/lite.py +244 -0
- praisonai/cli/features/lsp_cli.py +225 -0
- praisonai/cli/features/mcp.py +169 -0
- praisonai/cli/features/message_queue.py +587 -0
- praisonai/cli/features/metrics.py +211 -0
- praisonai/cli/features/n8n.py +673 -0
- praisonai/cli/features/observability.py +293 -0
- praisonai/cli/features/ollama.py +361 -0
- praisonai/cli/features/output_style.py +273 -0
- praisonai/cli/features/package.py +631 -0
- praisonai/cli/features/performance.py +308 -0
- praisonai/cli/features/persistence.py +636 -0
- praisonai/cli/features/profile.py +226 -0
- praisonai/cli/features/profiler/__init__.py +81 -0
- praisonai/cli/features/profiler/core.py +558 -0
- praisonai/cli/features/profiler/optimizations.py +652 -0
- praisonai/cli/features/profiler/suite.py +386 -0
- praisonai/cli/features/profiling.py +350 -0
- praisonai/cli/features/queue/__init__.py +73 -0
- praisonai/cli/features/queue/manager.py +395 -0
- praisonai/cli/features/queue/models.py +286 -0
- praisonai/cli/features/queue/persistence.py +564 -0
- praisonai/cli/features/queue/scheduler.py +484 -0
- praisonai/cli/features/queue/worker.py +372 -0
- praisonai/cli/features/recipe.py +1723 -0
- praisonai/cli/features/recipes.py +449 -0
- praisonai/cli/features/registry.py +229 -0
- praisonai/cli/features/repo_map.py +860 -0
- praisonai/cli/features/router.py +466 -0
- praisonai/cli/features/sandbox_executor.py +515 -0
- praisonai/cli/features/serve.py +829 -0
- praisonai/cli/features/session.py +222 -0
- praisonai/cli/features/skills.py +856 -0
- praisonai/cli/features/slash_commands.py +650 -0
- praisonai/cli/features/telemetry.py +179 -0
- praisonai/cli/features/templates.py +1384 -0
- praisonai/cli/features/thinking.py +305 -0
- praisonai/cli/features/todo.py +334 -0
- praisonai/cli/features/tools.py +680 -0
- praisonai/cli/features/tui/__init__.py +83 -0
- praisonai/cli/features/tui/app.py +580 -0
- praisonai/cli/features/tui/cli.py +566 -0
- praisonai/cli/features/tui/debug.py +511 -0
- praisonai/cli/features/tui/events.py +99 -0
- praisonai/cli/features/tui/mock_provider.py +328 -0
- praisonai/cli/features/tui/orchestrator.py +652 -0
- praisonai/cli/features/tui/screens/__init__.py +50 -0
- praisonai/cli/features/tui/screens/main.py +245 -0
- praisonai/cli/features/tui/screens/queue.py +174 -0
- praisonai/cli/features/tui/screens/session.py +124 -0
- praisonai/cli/features/tui/screens/settings.py +148 -0
- praisonai/cli/features/tui/widgets/__init__.py +56 -0
- praisonai/cli/features/tui/widgets/chat.py +261 -0
- praisonai/cli/features/tui/widgets/composer.py +224 -0
- praisonai/cli/features/tui/widgets/queue_panel.py +200 -0
- praisonai/cli/features/tui/widgets/status.py +167 -0
- praisonai/cli/features/tui/widgets/tool_panel.py +248 -0
- praisonai/cli/features/workflow.py +720 -0
- praisonai/cli/legacy.py +236 -0
- praisonai/cli/main.py +5559 -0
- praisonai/cli/schedule_cli.py +54 -0
- praisonai/cli/state/__init__.py +31 -0
- praisonai/cli/state/identifiers.py +161 -0
- praisonai/cli/state/sessions.py +313 -0
- praisonai/code/__init__.py +93 -0
- praisonai/code/agent_tools.py +344 -0
- praisonai/code/diff/__init__.py +21 -0
- praisonai/code/diff/diff_strategy.py +432 -0
- praisonai/code/tools/__init__.py +27 -0
- praisonai/code/tools/apply_diff.py +221 -0
- praisonai/code/tools/execute_command.py +275 -0
- praisonai/code/tools/list_files.py +274 -0
- praisonai/code/tools/read_file.py +206 -0
- praisonai/code/tools/search_replace.py +248 -0
- praisonai/code/tools/write_file.py +217 -0
- praisonai/code/utils/__init__.py +46 -0
- praisonai/code/utils/file_utils.py +307 -0
- praisonai/code/utils/ignore_utils.py +308 -0
- praisonai/code/utils/text_utils.py +276 -0
- praisonai/db/__init__.py +64 -0
- praisonai/db/adapter.py +531 -0
- praisonai/deploy/__init__.py +62 -0
- praisonai/deploy/api.py +231 -0
- praisonai/deploy/docker.py +454 -0
- praisonai/deploy/doctor.py +367 -0
- praisonai/deploy/main.py +327 -0
- praisonai/deploy/models.py +179 -0
- praisonai/deploy/providers/__init__.py +33 -0
- praisonai/deploy/providers/aws.py +331 -0
- praisonai/deploy/providers/azure.py +358 -0
- praisonai/deploy/providers/base.py +101 -0
- praisonai/deploy/providers/gcp.py +314 -0
- praisonai/deploy/schema.py +208 -0
- praisonai/deploy.py +185 -0
- praisonai/endpoints/__init__.py +53 -0
- praisonai/endpoints/a2u_server.py +410 -0
- praisonai/endpoints/discovery.py +165 -0
- praisonai/endpoints/providers/__init__.py +28 -0
- praisonai/endpoints/providers/a2a.py +253 -0
- praisonai/endpoints/providers/a2u.py +208 -0
- praisonai/endpoints/providers/agents_api.py +171 -0
- praisonai/endpoints/providers/base.py +231 -0
- praisonai/endpoints/providers/mcp.py +263 -0
- praisonai/endpoints/providers/recipe.py +206 -0
- praisonai/endpoints/providers/tools_mcp.py +150 -0
- praisonai/endpoints/registry.py +131 -0
- praisonai/endpoints/server.py +161 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +155 -0
- praisonai/integrations/__init__.py +56 -0
- praisonai/integrations/base.py +303 -0
- praisonai/integrations/claude_code.py +270 -0
- praisonai/integrations/codex_cli.py +255 -0
- praisonai/integrations/cursor_cli.py +195 -0
- praisonai/integrations/gemini_cli.py +222 -0
- praisonai/jobs/__init__.py +67 -0
- praisonai/jobs/executor.py +425 -0
- praisonai/jobs/models.py +230 -0
- praisonai/jobs/router.py +314 -0
- praisonai/jobs/server.py +186 -0
- praisonai/jobs/store.py +203 -0
- praisonai/llm/__init__.py +66 -0
- praisonai/llm/registry.py +382 -0
- praisonai/mcp_server/__init__.py +152 -0
- praisonai/mcp_server/adapters/__init__.py +74 -0
- praisonai/mcp_server/adapters/agents.py +128 -0
- praisonai/mcp_server/adapters/capabilities.py +168 -0
- praisonai/mcp_server/adapters/cli_tools.py +568 -0
- praisonai/mcp_server/adapters/extended_capabilities.py +462 -0
- praisonai/mcp_server/adapters/knowledge.py +93 -0
- praisonai/mcp_server/adapters/memory.py +104 -0
- praisonai/mcp_server/adapters/prompts.py +306 -0
- praisonai/mcp_server/adapters/resources.py +124 -0
- praisonai/mcp_server/adapters/tools_bridge.py +280 -0
- praisonai/mcp_server/auth/__init__.py +48 -0
- praisonai/mcp_server/auth/api_key.py +291 -0
- praisonai/mcp_server/auth/oauth.py +460 -0
- praisonai/mcp_server/auth/oidc.py +289 -0
- praisonai/mcp_server/auth/scopes.py +260 -0
- praisonai/mcp_server/cli.py +852 -0
- praisonai/mcp_server/elicitation.py +445 -0
- praisonai/mcp_server/icons.py +302 -0
- praisonai/mcp_server/recipe_adapter.py +573 -0
- praisonai/mcp_server/recipe_cli.py +824 -0
- praisonai/mcp_server/registry.py +703 -0
- praisonai/mcp_server/sampling.py +422 -0
- praisonai/mcp_server/server.py +490 -0
- praisonai/mcp_server/tasks.py +443 -0
- praisonai/mcp_server/transports/__init__.py +18 -0
- praisonai/mcp_server/transports/http_stream.py +376 -0
- praisonai/mcp_server/transports/stdio.py +132 -0
- praisonai/persistence/__init__.py +84 -0
- praisonai/persistence/config.py +238 -0
- praisonai/persistence/conversation/__init__.py +25 -0
- praisonai/persistence/conversation/async_mysql.py +427 -0
- praisonai/persistence/conversation/async_postgres.py +410 -0
- praisonai/persistence/conversation/async_sqlite.py +371 -0
- praisonai/persistence/conversation/base.py +151 -0
- praisonai/persistence/conversation/json_store.py +250 -0
- praisonai/persistence/conversation/mysql.py +387 -0
- praisonai/persistence/conversation/postgres.py +401 -0
- praisonai/persistence/conversation/singlestore.py +240 -0
- praisonai/persistence/conversation/sqlite.py +341 -0
- praisonai/persistence/conversation/supabase.py +203 -0
- praisonai/persistence/conversation/surrealdb.py +287 -0
- praisonai/persistence/factory.py +301 -0
- praisonai/persistence/hooks/__init__.py +18 -0
- praisonai/persistence/hooks/agent_hooks.py +297 -0
- praisonai/persistence/knowledge/__init__.py +26 -0
- praisonai/persistence/knowledge/base.py +144 -0
- praisonai/persistence/knowledge/cassandra.py +232 -0
- praisonai/persistence/knowledge/chroma.py +295 -0
- praisonai/persistence/knowledge/clickhouse.py +242 -0
- praisonai/persistence/knowledge/cosmosdb_vector.py +438 -0
- praisonai/persistence/knowledge/couchbase.py +286 -0
- praisonai/persistence/knowledge/lancedb.py +216 -0
- praisonai/persistence/knowledge/langchain_adapter.py +291 -0
- praisonai/persistence/knowledge/lightrag_adapter.py +212 -0
- praisonai/persistence/knowledge/llamaindex_adapter.py +256 -0
- praisonai/persistence/knowledge/milvus.py +277 -0
- praisonai/persistence/knowledge/mongodb_vector.py +306 -0
- praisonai/persistence/knowledge/pgvector.py +335 -0
- praisonai/persistence/knowledge/pinecone.py +253 -0
- praisonai/persistence/knowledge/qdrant.py +301 -0
- praisonai/persistence/knowledge/redis_vector.py +291 -0
- praisonai/persistence/knowledge/singlestore_vector.py +299 -0
- praisonai/persistence/knowledge/surrealdb_vector.py +309 -0
- praisonai/persistence/knowledge/upstash_vector.py +266 -0
- praisonai/persistence/knowledge/weaviate.py +223 -0
- praisonai/persistence/migrations/__init__.py +10 -0
- praisonai/persistence/migrations/manager.py +251 -0
- praisonai/persistence/orchestrator.py +406 -0
- praisonai/persistence/state/__init__.py +21 -0
- praisonai/persistence/state/async_mongodb.py +200 -0
- praisonai/persistence/state/base.py +107 -0
- praisonai/persistence/state/dynamodb.py +226 -0
- praisonai/persistence/state/firestore.py +175 -0
- praisonai/persistence/state/gcs.py +155 -0
- praisonai/persistence/state/memory.py +245 -0
- praisonai/persistence/state/mongodb.py +158 -0
- praisonai/persistence/state/redis.py +190 -0
- praisonai/persistence/state/upstash.py +144 -0
- praisonai/persistence/tests/__init__.py +3 -0
- praisonai/persistence/tests/test_all_backends.py +633 -0
- praisonai/profiler.py +1214 -0
- praisonai/recipe/__init__.py +134 -0
- praisonai/recipe/bridge.py +278 -0
- praisonai/recipe/core.py +893 -0
- praisonai/recipe/exceptions.py +54 -0
- praisonai/recipe/history.py +402 -0
- praisonai/recipe/models.py +266 -0
- praisonai/recipe/operations.py +440 -0
- praisonai/recipe/policy.py +422 -0
- praisonai/recipe/registry.py +849 -0
- praisonai/recipe/runtime.py +214 -0
- praisonai/recipe/security.py +711 -0
- praisonai/recipe/serve.py +859 -0
- praisonai/recipe/server.py +613 -0
- praisonai/scheduler/__init__.py +45 -0
- praisonai/scheduler/agent_scheduler.py +552 -0
- praisonai/scheduler/base.py +124 -0
- praisonai/scheduler/daemon_manager.py +225 -0
- praisonai/scheduler/state_manager.py +155 -0
- praisonai/scheduler/yaml_loader.py +193 -0
- praisonai/scheduler.py +194 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup.py +16 -0
- praisonai/templates/__init__.py +116 -0
- praisonai/templates/cache.py +364 -0
- praisonai/templates/dependency_checker.py +358 -0
- praisonai/templates/discovery.py +391 -0
- praisonai/templates/loader.py +564 -0
- praisonai/templates/registry.py +511 -0
- praisonai/templates/resolver.py +206 -0
- praisonai/templates/security.py +327 -0
- praisonai/templates/tool_override.py +498 -0
- praisonai/templates/tools_doctor.py +256 -0
- praisonai/test.py +105 -0
- praisonai/train.py +562 -0
- praisonai/train_vision.py +306 -0
- praisonai/ui/agents.py +824 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chainlit_compat.py +246 -0
- praisonai/ui/chat.py +532 -0
- praisonai/ui/code.py +717 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +284 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/database_config.py +56 -0
- praisonai/ui/db.py +294 -0
- praisonai/ui/realtime.py +488 -0
- praisonai/ui/realtimeclient/__init__.py +756 -0
- praisonai/ui/realtimeclient/tools.py +242 -0
- praisonai/ui/sql_alchemy.py +710 -0
- praisonai/upload_vision.py +140 -0
- praisonai/version.py +1 -0
- praisonai-3.0.0.dist-info/METADATA +3493 -0
- praisonai-3.0.0.dist-info/RECORD +393 -0
- praisonai-3.0.0.dist-info/WHEEL +5 -0
- praisonai-3.0.0.dist-info/entry_points.txt +4 -0
- praisonai-3.0.0.dist-info/top_level.txt +1 -0

praisonai/capabilities/passthrough.py
@@ -0,0 +1,204 @@
"""
Passthrough Capabilities Module

Provides generic API passthrough functionality for provider-specific endpoints.
"""

from dataclasses import dataclass, field
from typing import Optional, Any, Dict


@dataclass
class PassthroughResult:
    """Result from passthrough API call."""
    data: Any
    status_code: int = 200
    headers: Optional[Dict[str, str]] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


def passthrough(
    endpoint: str,
    method: str = "POST",
    model: Optional[str] = None,
    custom_llm_provider: Optional[str] = None,
    data: Optional[Dict[str, Any]] = None,
    json_data: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
    timeout: float = 600.0,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> PassthroughResult:
    """
    Make a passthrough API call to a provider endpoint.

    Args:
        endpoint: The endpoint path (e.g., "/v1/custom/endpoint")
        method: HTTP method ("GET", "POST", "PUT", "DELETE")
        model: Optional model name for routing
        custom_llm_provider: Provider name
        data: Form data
        json_data: JSON body data
        headers: Additional headers
        timeout: Request timeout in seconds
        api_key: Optional API key override
        api_base: Optional API base URL override
        metadata: Optional metadata for tracing

    Returns:
        PassthroughResult with response data

    Example:
        >>> result = passthrough(
        ...     "/v1/custom/endpoint",
        ...     method="POST",
        ...     json_data={"key": "value"},
        ...     custom_llm_provider="openai"
        ... )
        >>> print(result.data)
    """
    import litellm

    call_kwargs = {
        'endpoint': endpoint,
        'method': method,
        'timeout': timeout,
    }

    if model:
        call_kwargs['model'] = model
    if custom_llm_provider:
        call_kwargs['custom_llm_provider'] = custom_llm_provider
    if data:
        call_kwargs['data'] = data
    if json_data:
        call_kwargs['json'] = json_data
    if headers:
        call_kwargs['request_headers'] = headers
    if api_key:
        call_kwargs['api_key'] = api_key
    if api_base:
        call_kwargs['api_base'] = api_base

    call_kwargs.update(kwargs)

    if metadata:
        call_kwargs['metadata'] = metadata

    # Use passthrough route if available
    try:
        response = litellm.llm_passthrough_route(**call_kwargs)

        return PassthroughResult(
            data=response.json() if hasattr(response, 'json') else response,
            status_code=getattr(response, 'status_code', 200),
            headers=dict(response.headers) if hasattr(response, 'headers') else None,
            metadata=metadata or {},
        )
    except AttributeError:
        # Fallback to httpx if passthrough not available
        import httpx

        url = f"{api_base or 'https://api.openai.com'}{endpoint}"
        request_headers = headers or {}
        if api_key:
            request_headers['Authorization'] = f"Bearer {api_key}"

        with httpx.Client(timeout=timeout) as client:
            response = client.request(
                method=method,
                url=url,
                headers=request_headers,
                json=json_data,
                data=data,
            )

        return PassthroughResult(
            data=response.json() if response.headers.get('content-type', '').startswith('application/json') else response.text,
            status_code=response.status_code,
            headers=dict(response.headers),
            metadata=metadata or {},
        )


async def apassthrough(
    endpoint: str,
    method: str = "POST",
    model: Optional[str] = None,
    custom_llm_provider: Optional[str] = None,
    data: Optional[Dict[str, Any]] = None,
    json_data: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
    timeout: float = 600.0,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> PassthroughResult:
    """
    Async: Make a passthrough API call to a provider endpoint.

    See passthrough() for full documentation.
    """
    import litellm

    call_kwargs = {
        'endpoint': endpoint,
        'method': method,
        'timeout': timeout,
    }

    if model:
        call_kwargs['model'] = model
    if custom_llm_provider:
        call_kwargs['custom_llm_provider'] = custom_llm_provider
    if data:
        call_kwargs['data'] = data
    if json_data:
        call_kwargs['json'] = json_data
    if headers:
        call_kwargs['request_headers'] = headers
    if api_key:
        call_kwargs['api_key'] = api_key
    if api_base:
        call_kwargs['api_base'] = api_base

    call_kwargs.update(kwargs)

    if metadata:
        call_kwargs['metadata'] = metadata

    try:
        response = await litellm.allm_passthrough_route(**call_kwargs)

        return PassthroughResult(
            data=response.json() if hasattr(response, 'json') else response,
            status_code=getattr(response, 'status_code', 200),
            headers=dict(response.headers) if hasattr(response, 'headers') else None,
            metadata=metadata or {},
        )
    except AttributeError:
        import httpx

        url = f"{api_base or 'https://api.openai.com'}{endpoint}"
        request_headers = headers or {}
        if api_key:
            request_headers['Authorization'] = f"Bearer {api_key}"

        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.request(
                method=method,
                url=url,
                headers=request_headers,
                json=json_data,
                data=data,
            )

        return PassthroughResult(
            data=response.json() if response.headers.get('content-type', '').startswith('application/json') else response.text,
            status_code=response.status_code,
            headers=dict(response.headers),
            metadata=metadata or {},
        )
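
For orientation, a minimal usage sketch of the passthrough helper above. The import path praisonai.capabilities.passthrough is inferred from the file listing at the top of this diff, and the API key is a placeholder:

# Usage sketch (assumed import path); calls a provider endpoint directly and
# falls back to plain httpx if litellm's passthrough route is unavailable.
from praisonai.capabilities.passthrough import passthrough

result = passthrough(
    "/v1/models",                 # endpoint path forwarded to the provider
    method="GET",
    custom_llm_provider="openai",
    api_key="sk-...",             # placeholder credential
)
print(result.status_code)         # HTTP status of the upstream response
print(result.data)                # parsed JSON body, or raw text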

praisonai/capabilities/rag.py
@@ -0,0 +1,207 @@
"""
RAG (Retrieval-Augmented Generation) Capabilities Module

Provides RAG functionality for document retrieval and generation.
"""

from dataclasses import dataclass, field
from typing import Optional, Any, Dict, List


@dataclass
class RAGResult:
    """Result from RAG operations."""
    answer: str
    sources: Optional[List[Dict[str, Any]]] = None
    model: Optional[str] = None
    usage: Optional[Dict[str, int]] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


def rag_query(
    query: str,
    documents: Optional[List[str]] = None,
    vector_store_id: Optional[str] = None,
    model: str = "gpt-4o-mini",
    max_results: int = 5,
    timeout: float = 600.0,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> RAGResult:
    """
    Perform a RAG query.

    Args:
        query: Query string
        documents: Optional list of documents to search
        vector_store_id: Optional vector store ID to search
        model: Model to use for generation
        max_results: Maximum number of results to retrieve
        timeout: Request timeout in seconds
        api_key: Optional API key override
        api_base: Optional API base URL override
        metadata: Optional metadata for tracing

    Returns:
        RAGResult with answer and sources

    Example:
        >>> result = rag_query("What is AI?", documents=["AI is..."])
        >>> print(result.answer)
    """
    import litellm

    sources = []
    context = ""

    # If vector store provided, search it
    if vector_store_id:
        try:
            from .vector_stores import vector_store_search
            search_result = vector_store_search(
                vector_store_id=vector_store_id,
                query=query,
                max_num_results=max_results,
                api_key=api_key,
                api_base=api_base,
            )
            for r in search_result.results:
                if 'content' in r:
                    for c in r['content']:
                        text = c.get('text', '')
                        context += f"\n{text}"
                        sources.append({
                            'score': r.get('score', 0),
                            'text': text[:200],
                        })
        except Exception:
            pass

    # If documents provided, use them as context
    if documents:
        for i, doc in enumerate(documents[:max_results]):
            context += f"\n{doc}"
            sources.append({
                'index': i,
                'text': doc[:200],
            })

    # Generate answer
    messages = [
        {"role": "system", "content": "You are a helpful assistant. Answer the question based on the provided context. If the context doesn't contain relevant information, say so."},
        {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {query}"}
    ]

    response = litellm.completion(
        model=model,
        messages=messages,
        timeout=timeout,
        api_key=api_key,
        api_base=api_base,
        **kwargs
    )

    answer = response.choices[0].message.content if response.choices else ""

    usage = None
    if hasattr(response, 'usage') and response.usage:
        usage = {
            'prompt_tokens': getattr(response.usage, 'prompt_tokens', 0),
            'completion_tokens': getattr(response.usage, 'completion_tokens', 0),
            'total_tokens': getattr(response.usage, 'total_tokens', 0),
        }

    return RAGResult(
        answer=answer,
        sources=sources if sources else None,
        model=model,
        usage=usage,
        metadata=metadata or {},
    )


async def arag_query(
    query: str,
    documents: Optional[List[str]] = None,
    vector_store_id: Optional[str] = None,
    model: str = "gpt-4o-mini",
    max_results: int = 5,
    timeout: float = 600.0,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> RAGResult:
    """
    Async: Perform a RAG query.

    See rag_query() for full documentation.
    """
    import litellm

    sources = []
    context = ""

    if vector_store_id:
        try:
            from .vector_stores import avector_store_search
            search_result = await avector_store_search(
                vector_store_id=vector_store_id,
                query=query,
                max_num_results=max_results,
                api_key=api_key,
                api_base=api_base,
            )
            for r in search_result.results:
                if 'content' in r:
                    for c in r['content']:
                        text = c.get('text', '')
                        context += f"\n{text}"
                        sources.append({
                            'score': r.get('score', 0),
                            'text': text[:200],
                        })
        except Exception:
            pass

    if documents:
        for i, doc in enumerate(documents[:max_results]):
            context += f"\n{doc}"
            sources.append({
                'index': i,
                'text': doc[:200],
            })

    messages = [
        {"role": "system", "content": "You are a helpful assistant. Answer the question based on the provided context. If the context doesn't contain relevant information, say so."},
        {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {query}"}
    ]

    response = await litellm.acompletion(
        model=model,
        messages=messages,
        timeout=timeout,
        api_key=api_key,
        api_base=api_base,
        **kwargs
    )

    answer = response.choices[0].message.content if response.choices else ""

    usage = None
    if hasattr(response, 'usage') and response.usage:
        usage = {
            'prompt_tokens': getattr(response.usage, 'prompt_tokens', 0),
            'completion_tokens': getattr(response.usage, 'completion_tokens', 0),
            'total_tokens': getattr(response.usage, 'total_tokens', 0),
        }

    return RAGResult(
        answer=answer,
        sources=sources if sources else None,
        model=model,
        usage=usage,
        metadata=metadata or {},
    )
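
A minimal sketch of calling the RAG helper above with inline documents. The import path is inferred from the file listing; the model and document text are illustrative only:

# Usage sketch (assumed import path); inline documents become the context,
# and litellm.completion generates the grounded answer.
from praisonai.capabilities.rag import rag_query

result = rag_query(
    "What does the passthrough capability do?",
    documents=["The passthrough capability forwards raw requests to provider-specific endpoints."],
    model="gpt-4o-mini",          # any litellm-supported model string
)
print(result.answer)
print(result.sources)             # [{'index': 0, 'text': '...'}] or None
print(result.usage)               # token counts when the provider reports them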

praisonai/capabilities/realtime.py
@@ -0,0 +1,160 @@
"""
Realtime Capabilities Module

Provides realtime audio/video streaming functionality.
"""

from dataclasses import dataclass, field
from typing import Optional, Any, Dict, List, Callable


@dataclass
class RealtimeSession:
    """Realtime session information."""
    id: str
    status: str = "created"
    model: Optional[str] = None
    url: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class RealtimeEvent:
    """Realtime event."""
    type: str
    data: Optional[Any] = None
    session_id: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


def realtime_connect(
    model: str = "gpt-4o-realtime-preview",
    modalities: Optional[List[str]] = None,
    instructions: Optional[str] = None,
    voice: str = "alloy",
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> RealtimeSession:
    """
    Create a realtime session.

    Args:
        model: Model to use
        modalities: List of modalities (e.g., ["text", "audio"])
        instructions: System instructions
        voice: Voice for audio output
        api_key: Optional API key override
        api_base: Optional API base URL override
        metadata: Optional metadata for tracing

    Returns:
        RealtimeSession with connection info

    Example:
        >>> session = realtime_connect()
        >>> print(session.id)
    """
    import uuid
    import os

    session_id = f"realtime-{uuid.uuid4().hex[:12]}"

    # Build WebSocket URL
    base = api_base or os.environ.get("OPENAI_API_BASE", "wss://api.openai.com")
    if base.startswith("http"):
        base = base.replace("https://", "wss://").replace("http://", "ws://")

    url = f"{base.rstrip('/')}/v1/realtime?model={model}"

    return RealtimeSession(
        id=session_id,
        status="created",
        model=model,
        url=url,
        metadata={
            "modalities": modalities or ["text", "audio"],
            "instructions": instructions,
            "voice": voice,
            **(metadata or {}),
        },
    )


async def arealtime_connect(
    model: str = "gpt-4o-realtime-preview",
    modalities: Optional[List[str]] = None,
    instructions: Optional[str] = None,
    voice: str = "alloy",
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> RealtimeSession:
    """
    Async: Create a realtime session.

    See realtime_connect() for full documentation.
    """
    return realtime_connect(
        model=model,
        modalities=modalities,
        instructions=instructions,
        voice=voice,
        api_key=api_key,
        api_base=api_base,
        metadata=metadata,
        **kwargs
    )


def realtime_send(
    session_id: str,
    event_type: str,
    data: Optional[Any] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> RealtimeEvent:
    """
    Send an event to a realtime session.

    Args:
        session_id: Session ID
        event_type: Event type (e.g., "input_audio_buffer.append")
        data: Event data
        metadata: Optional metadata for tracing

    Returns:
        RealtimeEvent with send confirmation

    Note:
        This is a placeholder. Actual implementation requires WebSocket connection.
    """
    return RealtimeEvent(
        type=event_type,
        data=data,
        session_id=session_id,
        metadata=metadata or {},
    )


async def arealtime_send(
    session_id: str,
    event_type: str,
    data: Optional[Any] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs
) -> RealtimeEvent:
    """
    Async: Send an event to a realtime session.

    See realtime_send() for full documentation.
    """
    return realtime_send(
        session_id=session_id,
        event_type=event_type,
        data=data,
        metadata=metadata,
        **kwargs
    )
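
A minimal sketch of the realtime helpers above (import path inferred from the file listing). Note that realtime_send is documented as a placeholder in this release: it returns a RealtimeEvent describing what would be sent but does not open a WebSocket connection, so the sketch only builds the session descriptor and event object:

# Usage sketch (assumed import path); no network traffic is generated.
from praisonai.capabilities.realtime import realtime_connect, realtime_send

session = realtime_connect(model="gpt-4o-realtime-preview", voice="alloy")
# With OPENAI_API_BASE unset, session.url is
# wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview
print(session.id, session.url)

event = realtime_send(
    session.id,
    "input_audio_buffer.append",
    data={"audio": "<base64-encoded chunk>"},   # illustrative payload
)
print(event.type, event.session_id)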