create-tether-app 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. package/dist/index.d.ts +1 -0
  2. package/dist/index.js +729 -0
  3. package/package.json +59 -0
  4. package/template/.env.example +18 -0
  5. package/template/README.md.template +123 -0
  6. package/template/backend/app/__init__.py.template +5 -0
  7. package/template/backend/app/main.py +66 -0
  8. package/template/backend/app/routes/__init__.py +3 -0
  9. package/template/backend/app/routes/chat.py +151 -0
  10. package/template/backend/app/routes/health.py +28 -0
  11. package/template/backend/app/routes/models.py +126 -0
  12. package/template/backend/app/services/__init__.py +3 -0
  13. package/template/backend/app/services/llm.py +526 -0
  14. package/template/backend/pyproject.toml.template +34 -0
  15. package/template/backend/scripts/build.py +112 -0
  16. package/template/frontend/App.css +58 -0
  17. package/template/frontend/App.tsx +62 -0
  18. package/template/frontend/components/Chat.css +220 -0
  19. package/template/frontend/components/Chat.tsx +284 -0
  20. package/template/frontend/components/ChatMessage.css +206 -0
  21. package/template/frontend/components/ChatMessage.tsx +62 -0
  22. package/template/frontend/components/ModelStatus.css +62 -0
  23. package/template/frontend/components/ModelStatus.tsx +103 -0
  24. package/template/frontend/hooks/useApi.ts +334 -0
  25. package/template/frontend/index.css +92 -0
  26. package/template/frontend/main.tsx +10 -0
  27. package/template/frontend/vite-env.d.ts +1 -0
  28. package/template/index.html.template +13 -0
  29. package/template/package.json.template +33 -0
  30. package/template/postcss.config.js.template +6 -0
  31. package/template/public/tether.svg +15 -0
  32. package/template/src-tauri/.cargo/config.toml +66 -0
  33. package/template/src-tauri/Cargo.lock +4764 -0
  34. package/template/src-tauri/Cargo.toml +24 -0
  35. package/template/src-tauri/build.rs +3 -0
  36. package/template/src-tauri/capabilities/default.json +40 -0
  37. package/template/src-tauri/icons/128x128.png +0 -0
  38. package/template/src-tauri/icons/128x128@2x.png +0 -0
  39. package/template/src-tauri/icons/32x32.png +0 -0
  40. package/template/src-tauri/icons/icon.icns +0 -0
  41. package/template/src-tauri/icons/icon.ico +0 -0
  42. package/template/src-tauri/src/main.rs +65 -0
  43. package/template/src-tauri/src/sidecar.rs +110 -0
  44. package/template/src-tauri/tauri.conf.json.template +44 -0
  45. package/template/tailwind.config.js.template +19 -0
  46. package/template/tsconfig.json +21 -0
  47. package/template/tsconfig.node.json +11 -0
  48. package/template/vite.config.ts +27 -0
package/package.json ADDED
@@ -0,0 +1,59 @@
1
+ {
2
+ "name": "create-tether-app",
3
+ "version": "0.1.0",
4
+ "description": "CLI tool for scaffolding Tether AI/ML desktop applications",
5
+ "type": "module",
6
+ "bin": {
7
+ "create-tether-app": "./dist/index.js"
8
+ },
9
+ "files": [
10
+ "dist",
11
+ "template"
12
+ ],
13
+ "scripts": {
14
+ "build": "tsup src/index.ts --format esm --dts --clean",
15
+ "dev": "tsup src/index.ts --format esm --watch",
16
+ "typecheck": "tsc --noEmit",
17
+ "clean": "rm -rf dist template",
18
+ "prepublishOnly": "rm -rf template && cp -r ../../template ./template && pnpm build"
19
+ },
20
+ "dependencies": {
21
+ "chalk": "^5.3.0",
22
+ "commander": "^12.0.0",
23
+ "fs-extra": "^11.2.0",
24
+ "inquirer": "^9.2.0",
25
+ "ora": "^8.0.0",
26
+ "validate-npm-package-name": "^5.0.0"
27
+ },
28
+ "devDependencies": {
29
+ "@types/fs-extra": "^11.0.4",
30
+ "@types/inquirer": "^9.0.7",
31
+ "@types/node": "^20.11.0",
32
+ "@types/validate-npm-package-name": "^4.0.2",
33
+ "tsup": "^8.0.0",
34
+ "typescript": "^5.4.0"
35
+ },
36
+ "engines": {
37
+ "node": ">=18.0.0"
38
+ },
39
+ "keywords": [
40
+ "cli",
41
+ "tauri",
42
+ "react",
43
+ "python",
44
+ "ai",
45
+ "ml",
46
+ "scaffold",
47
+ "create"
48
+ ],
49
+ "license": "MIT",
50
+ "repository": {
51
+ "type": "git",
52
+ "url": "https://github.com/FabianUB/tether.git",
53
+ "directory": "packages/create-tether-app"
54
+ },
55
+ "homepage": "https://github.com/FabianUB/tether#readme",
56
+ "bugs": {
57
+ "url": "https://github.com/FabianUB/tether/issues"
58
+ }
59
+ }
@@ -0,0 +1,18 @@
1
+ # Python Backend Configuration
2
+ TETHER_HOST=127.0.0.1
3
+ TETHER_PORT=8000
4
+
5
+ # LLM Backend: ollama, local, openai, or mock
6
+ TETHER_LLM_BACKEND=local
7
+
8
+ # For local LLM (llama-cpp-python)
9
+ TETHER_MODEL_PATH=./models/your-model.gguf
10
+ TETHER_CONTEXT_LENGTH=4096
11
+
12
+ # For OpenAI API
13
+ OPENAI_API_KEY=sk-your-api-key
14
+ TETHER_OPENAI_MODEL=gpt-4o-mini
15
+
16
+ # Model parameters
17
+ TETHER_DEFAULT_TEMPERATURE=0.7
18
+ TETHER_DEFAULT_MAX_TOKENS=1024
@@ -0,0 +1,123 @@
1
+ # {{PROJECT_NAME}}
2
+
3
+ A desktop AI/ML application built with [Tether](https://github.com/FabianUB/tether).
4
+
5
+ ## Requirements
6
+
7
+ | Tool | Version | Installation |
8
+ |------|---------|--------------|
9
+ | Node.js | 18+ | [nodejs.org](https://nodejs.org/) |
10
+ | pnpm | 8+ | `npm install -g pnpm` |
11
+ | Python | 3.11+ | [python.org](https://www.python.org/) |
12
+ | uv | latest | [docs.astral.sh/uv](https://docs.astral.sh/uv/) |
13
+ | Rust | latest | [rustup.rs](https://rustup.rs/) |
14
+
15
+ For Ollama backend (default):
16
+ - [Ollama](https://ollama.com/) installed and running
17
+
18
+ ## Quick Start
19
+
20
+ ```bash
21
+ # Install dependencies
22
+ pnpm install
23
+
24
+ # Start development (frontend + backend)
25
+ pnpm dev:all
26
+ ```
27
+
28
+ Open http://localhost:5173 in your browser.
29
+
30
+ ## Commands
31
+
32
+ ### Development
33
+
34
+ | Command | Description | Port |
35
+ |---------|-------------|------|
36
+ | `pnpm dev` | Start frontend only (Vite) | 5173 |
37
+ | `pnpm dev:py` | Start Python backend only | 8000 |
38
+ | `pnpm dev:all` | Start both frontend and backend | 5173 + 8000 |
39
+ | `pnpm tauri:dev` | Start as desktop app (with hot reload) | — |
40
+
41
+ ### Building
42
+
43
+ | Command | Description |
44
+ |---------|-------------|
45
+ | `pnpm build` | Build frontend only (→ `dist/`) |
46
+ | `pnpm python:build` | Build Python sidecar (→ `binaries/`) |
47
+ | `pnpm tauri:build` | Build Tauri shell (requires frontend + sidecar) |
48
+ | `pnpm build:app` | **Build everything** (runs all three above in sequence) |
49
+
50
+ The final app will be in `src-tauri/target/release/bundle/`.
51
+
52
+ ## Configuration
53
+
54
+ Copy `.env.example` to `.env`:
55
+
56
+ ```bash
57
+ cp .env.example .env
58
+ ```
59
+
60
+ ### Ollama (Default)
61
+
62
+ 1. Install [Ollama](https://ollama.com/)
63
+ 2. Pull a model:
64
+ ```bash
65
+ ollama pull llama3.2
66
+ ```
67
+ 3. Start Ollama:
68
+ ```bash
69
+ ollama serve
70
+ ```
71
+
72
+ ### Local LLM (Embedded)
73
+
74
+ 1. Download a GGUF model from [Hugging Face](https://huggingface.co/models?library=gguf)
75
+ 2. Set in `.env`:
76
+ ```
77
+ TETHER_LLM_BACKEND=local
78
+ TETHER_MODEL_PATH=./models/your-model.gguf
79
+ ```
80
+
81
+ ### OpenAI API (Experimental)
82
+
83
+ > **Note:** OpenAI support is experimental and not thoroughly tested.
84
+
85
+ 1. Get an API key from [OpenAI](https://platform.openai.com/)
86
+ 2. Set in `.env`:
87
+ ```
88
+ TETHER_LLM_BACKEND=openai
89
+ OPENAI_API_KEY=sk-...
90
+ ```
91
+
92
+ ## Supported Models
93
+
94
+ | Backend | Models | Notes |
95
+ |---------|--------|-------|
96
+ | **Ollama** | llama3.2, gemma3, qwen3, deepseek-r1, mistral, etc. | See [ollama.com/library](https://ollama.com/library) |
97
+ | **Local LLM** | Any GGUF model | See [Hugging Face GGUF models](https://huggingface.co/models?library=gguf) |
98
+ | **OpenAI** | gpt-4o, gpt-4o-mini, gpt-4-turbo | Experimental |
99
+
100
+ ### Thinking Models
101
+
102
+ Models with reasoning capabilities (like `deepseek-r1`, `qwen3`) support "thinking mode" which shows the model's reasoning process. This is automatically enabled when available.
103
+
104
+ ## Project Structure
105
+
106
+ ```
107
+ {{PROJECT_NAME}}/
108
+ ├── frontend/ # React frontend
109
+ │ ├── components/ # React components
110
+ │ ├── hooks/ # Custom hooks
111
+ │ └── App.tsx # Main app component
112
+ ├── backend/ # Python backend
113
+ │ ├── app/ # FastAPI application
114
+ │ │ ├── routes/ # API endpoints
115
+ │ │ └── services/ # Business logic
116
+ │ └── scripts/ # Build scripts
117
+ └── src-tauri/ # Tauri (Rust) shell
118
+ └── src/ # Rust source code
119
+ ```
120
+
121
+ ## License
122
+
123
+ MIT License
@@ -0,0 +1,5 @@
1
+ """
2
+ {{PROJECT_NAME}} Python backend.
3
+ """
4
+
5
+ __version__ = "0.1.0"
@@ -0,0 +1,66 @@
1
+ """
2
+ FastAPI application entry point.
3
+ """
4
+
5
+ from contextlib import asynccontextmanager
6
+ from typing import AsyncIterator
7
+
8
+ from fastapi import FastAPI
9
+ from fastapi.middleware.cors import CORSMiddleware
10
+
11
+ from app.routes import health, chat, models
12
+ from app.services.llm import get_llm_service
13
+
14
+
15
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncIterator[None]:
    """Application lifespan manager.

    On startup, initializes the LLM service and stores it on
    ``app.state.llm_service`` so route handlers can reach it; on
    shutdown, releases the service's resources.
    """
    # Startup: Initialize LLM service
    llm_service = get_llm_service()
    await llm_service.initialize()
    app.state.llm_service = llm_service

    yield

    # Shutdown: Cleanup
    await llm_service.cleanup()
27
+
28
+
29
def create_app() -> FastAPI:
    """Build the FastAPI application: CORS middleware plus all routers."""
    application = FastAPI(
        title="Tether Backend",
        version="0.1.0",
        lifespan=lifespan,
    )

    # Accept cross-origin requests from any origin.
    application.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Mount every API route module.
    for route_module in (health, chat, models):
        application.include_router(route_module.router)

    return application
52
+
53
+
54
+ app = create_app()
55
+
56
+
57
+ if __name__ == "__main__":
58
+ import argparse
59
+ import uvicorn
60
+
61
+ parser = argparse.ArgumentParser(description="Tether Backend API Server")
62
+ parser.add_argument("--port", type=int, default=8000, help="Port to run the server on")
63
+ parser.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind to")
64
+ args = parser.parse_args()
65
+
66
+ uvicorn.run(app, host=args.host, port=args.port)
@@ -0,0 +1,3 @@
1
+ """
2
+ API routes.
3
+ """
@@ -0,0 +1,151 @@
1
+ """
2
+ Chat completion endpoints.
3
+ """
4
+
5
+ import re
6
+ from typing import Literal, Optional
7
+
8
+ from fastapi import APIRouter, HTTPException, Request
9
+ from pydantic import BaseModel, Field
10
+
11
+
12
class ChatMessage(BaseModel):
    """A single message in the conversation history."""

    role: Literal["user", "assistant", "system"]
    content: str
    images: Optional[list[str]] = Field(
        default=None, description="Base64-encoded images for vision models"
    )
    # Client-supplied timestamp; not read by the /chat handler.
    timestamp: Optional[int] = None
19
+
20
+
21
class ChatRequest(BaseModel):
    """Request body for POST /chat."""

    message: str = Field(..., description="The user's message")
    images: Optional[list[str]] = Field(
        default=None, description="Base64-encoded images for vision models"
    )
    history: Optional[list[ChatMessage]] = Field(
        default=None, description="Previous messages in the conversation"
    )
    model: Optional[str] = Field(default=None, description="Model to use")
    temperature: Optional[float] = Field(
        default=0.7, ge=0.0, le=2.0, description="Sampling temperature"
    )
    max_tokens: Optional[int] = Field(
        default=None, ge=1, description="Maximum tokens to generate"
    )
    think: Optional[bool] = Field(
        default=True, description="Enable thinking mode for reasoning models"
    )
39
+
40
+
41
class ChatResponse(BaseModel):
    """Response body for POST /chat."""

    response: str = Field(..., description="The assistant's response")
    thinking: Optional[str] = Field(
        default=None, description="Model's reasoning/thinking content (for thinking models)"
    )
    # NOTE(review): tokens_used is never populated by the /chat handler
    # in this module — confirm whether callers expect it.
    tokens_used: Optional[int] = Field(
        default=None, description="Number of tokens used"
    )
    model: Optional[str] = Field(default=None, description="Model used")
    finish_reason: Optional[Literal["stop", "length", "error"]] = Field(
        default="stop", description="Reason for completion"
    )
53
+
54
+
55
def parse_thinking_content(text: str) -> tuple[str, Optional[str]]:
    """
    Split a model response into visible text and reasoning content.

    Thinking models like Qwen3 wrap reasoning in <think>...</think> tags.

    Returns:
        Tuple of (response_without_thinking, thinking_content); the
        second element is None when no thinking tags are present.
    """
    # Tags are matched case-insensitively and may span multiple lines.
    pattern = re.compile(r"<think>(.*?)</think>", re.DOTALL | re.IGNORECASE)

    segments = pattern.findall(text)
    if not segments:
        return text, None

    # Visible response = original text with every tagged span removed.
    visible = pattern.sub("", text).strip()
    # Multiple thinking spans are joined into one blob.
    reasoning = "\n\n".join(segment.strip() for segment in segments)
    return visible, reasoning
79
+
80
+
81
+ router = APIRouter()
82
+
83
+
84
@router.post("/chat", response_model=ChatResponse)
async def chat(request: Request, body: ChatRequest) -> ChatResponse:
    """Generate a chat completion.

    Builds a messages list from the request history plus the current
    user message, forwards it to the configured LLM service, and returns
    the assistant's reply with any reasoning content separated out.

    Raises:
        HTTPException: 503 when no LLM service is ready; 500 on any
            failure during generation (original exception chained).
    """
    llm_service = getattr(request.app.state, "llm_service", None)

    if llm_service is None or not llm_service.is_ready():
        raise HTTPException(status_code=503, detail="LLM service not available")

    try:
        # Build messages list for chat API
        # Note: We don't include images from history because:
        # 1. Non-vision models can't handle them (Ollama converts to [img-0] placeholders)
        # 2. Vision models typically can't "remember" images from earlier turns anyway
        messages: list[dict] = []
        if body.history:
            for msg in body.history:
                # Strip images from history - only text content
                messages.append({"role": msg.role, "content": msg.content})

        # Add current message with optional images (only current turn gets images)
        current_msg: dict = {"role": "user", "content": body.message}
        if body.images:
            current_msg["images"] = body.images
        messages.append(current_msg)

        # Check if current message has images (vision models don't support thinking)
        has_images = bool(body.images)
        # Disable thinking for vision requests (not supported by Ollama)
        use_thinking = False if has_images else (body.think if body.think is not None else True)

        # Use chat API if available (supports thinking models), fallback to complete
        if hasattr(llm_service, "chat"):
            result = await llm_service.chat(
                messages,
                temperature=body.temperature or 0.7,
                max_tokens=body.max_tokens,
                think=use_thinking,
            )
            # chat() returns dict with 'content' and 'thinking'
            if isinstance(result, dict):
                response = result.get("content", "")
                thinking = result.get("thinking")
            else:
                # Fallback if chat returns string
                response, thinking = parse_thinking_content(result)
        else:
            # Fallback for services without chat method: flatten the
            # conversation into a single prompt.
            prompt = body.message
            if body.history:
                history_text = "\n".join(
                    f"{msg.role}: {msg.content}" for msg in body.history
                )
                prompt = f"{history_text}\nuser: {body.message}\nassistant:"
            raw_response = await llm_service.complete(
                prompt,
                temperature=body.temperature or 0.7,
                max_tokens=body.max_tokens,
            )
            response, thinking = parse_thinking_content(raw_response)

        return ChatResponse(
            response=response,
            thinking=thinking,
            model=llm_service.model_name,
            finish_reason="stop",
        )
    except Exception as e:
        # Chain the original exception so the traceback isn't lost.
        raise HTTPException(status_code=500, detail=str(e)) from e
@@ -0,0 +1,28 @@
1
+ """
2
+ Health check endpoint.
3
+ """
4
+
5
+ from fastapi import APIRouter, Request
6
+ from pydantic import BaseModel
7
+
8
+
9
class HealthResponse(BaseModel):
    """Response schema for GET /health."""

    status: str  # "healthy" whenever the server can respond
    model_loaded: bool  # result of the LLM service's is_ready(); False when no service
    version: str  # backend version string
13
+
14
+
15
+ router = APIRouter()
16
+
17
+
18
@router.get("/health", response_model=HealthResponse)
async def health(request: Request) -> HealthResponse:
    """Report service liveness and whether an LLM model is loaded."""
    service = getattr(request.app.state, "llm_service", None)
    loaded = service.is_ready() if service else False

    return HealthResponse(status="healthy", model_loaded=loaded, version="0.1.0")
@@ -0,0 +1,126 @@
1
+ """
2
+ Model discovery and switching endpoints.
3
+ """
4
+
5
+ from fastapi import APIRouter, HTTPException, Request
6
+ from pydantic import BaseModel, Field
7
+
8
+ from app.services.llm import discover_ollama, get_ollama_base_url
9
+
10
+
11
class ModelsResponse(BaseModel):
    """Response schema for GET /models."""

    available: bool  # whether the backend is reachable/ready
    current_model: str | None  # active model, or None when not ready
    models: list[str]  # discovered or configured model names
    backend: str  # service type (e.g. "ollama"); "none" when unconfigured
    error: str | None = None  # discovery error message, if any
17
+
18
+
19
class SwitchModelRequest(BaseModel):
    """Request body for POST /models/switch."""

    model: str = Field(..., description="Model name to switch to")
21
+
22
+
23
class SwitchModelResponse(BaseModel):
    """Response schema for POST /models/switch."""

    success: bool
    previous_model: str | None  # model active before the switch
    current_model: str  # model active after the switch
    message: str  # human-readable confirmation
28
+
29
+
30
+ router = APIRouter()
31
+
32
+
33
@router.get("/models", response_model=ModelsResponse)
async def list_models(request: Request) -> ModelsResponse:
    """
    List available models.

    For Ollama backend, discovers available models from the Ollama API.
    For other backends, returns the configured model.
    """
    service = getattr(request.app.state, "llm_service", None)

    # No service configured at all: report an explicit "none" backend.
    if not service:
        return ModelsResponse(
            available=False,
            current_model=None,
            models=[],
            backend="none",
            error="No LLM service configured",
        )

    backend = service.service_type

    if backend == "ollama":
        # Ask the Ollama server which models it currently has.
        discovery = await discover_ollama(get_ollama_base_url())
        return ModelsResponse(
            available=discovery.available,
            current_model=service.model_name if service.is_ready() else None,
            models=discovery.models,
            backend=backend,
            error=discovery.error,
        )

    # Non-Ollama backends expose exactly the configured model.
    ready = service.is_ready()
    return ModelsResponse(
        available=ready,
        current_model=service.model_name,
        models=[service.model_name] if ready else [],
        backend=backend,
    )
72
+
73
+
74
@router.post("/models/switch", response_model=SwitchModelResponse)
async def switch_model(request: Request, body: SwitchModelRequest) -> SwitchModelResponse:
    """
    Switch to a different model.

    For Ollama backend, switches to the specified model.
    Other backends may not support runtime model switching.

    Raises:
        HTTPException: 503 when the service (or Ollama) is unavailable,
            404 when the requested model is not installed, 400 when the
            backend does not support runtime switching.
    """
    llm_service = getattr(request.app.state, "llm_service", None)

    if not llm_service:
        raise HTTPException(status_code=503, detail="No LLM service configured")

    if not llm_service.is_ready():
        raise HTTPException(status_code=503, detail="LLM service not ready")

    backend = llm_service.service_type
    previous_model = llm_service.model_name

    # For Ollama, we can switch models at runtime
    if backend == "ollama":
        # Verify the model exists
        discovery = await discover_ollama(get_ollama_base_url())
        if not discovery.available:
            raise HTTPException(status_code=503, detail="Ollama not available")

        # Check if model is in available models (exact match or base name match,
        # e.g. "llama3.2" matches the installed tag "llama3.2:latest").
        model_found = any(
            body.model == m or body.model == m.split(":")[0]
            for m in discovery.models
        )

        if not model_found:
            raise HTTPException(
                status_code=404,
                detail=f"Model '{body.model}' not found. Available: {', '.join(discovery.models)}",
            )

        # Switch the model.
        # NOTE(review): writes the service's private `_model` attribute
        # directly — a public setter on the service would be cleaner;
        # confirm this is the intended extension point.
        llm_service._model = body.model

        return SwitchModelResponse(
            success=True,
            previous_model=previous_model,
            current_model=body.model,
            message=f"Switched from {previous_model} to {body.model}",
        )

    # Other backends don't support runtime switching
    raise HTTPException(
        status_code=400,
        detail=f"Backend '{backend}' does not support runtime model switching",
    )
@@ -0,0 +1,3 @@
1
+ """
2
+ Application services.
3
+ """