meshapi-code 0.2.1__tar.gz → 0.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,6 +4,7 @@ __pycache__/
4
4
  *.egg-info/
5
5
  .venv/
6
6
  venv/
7
+ .build-venv/
7
8
  dist/
8
9
  build/
9
10
  .pytest_cache/
@@ -0,0 +1,155 @@
1
+ Metadata-Version: 2.4
2
+ Name: meshapi-code
3
+ Version: 0.3.1
4
+ Summary: Terminal chat for Mesh API — OpenAI-compatible LLM gateway
5
+ Project-URL: Homepage, https://meshapi.ai
6
+ Project-URL: Documentation, https://docs.meshapi.ai
7
+ Project-URL: Repository, https://github.com/aifiesta/meshapi-code
8
+ Author: Mesh API
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Keywords: anthropic,chat,cli,gateway,llm,mesh,openai
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Environment :: Console
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: MIT License
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Programming Language :: Python :: 3.13
21
+ Classifier: Topic :: Software Development
22
+ Requires-Python: >=3.10
23
+ Requires-Dist: httpx>=0.27
24
+ Requires-Dist: prompt-toolkit>=3.0
25
+ Requires-Dist: rich>=13.7
26
+ Description-Content-Type: text/markdown
27
+
28
+ # meshapi-code
29
+
30
+ Terminal chat REPL for [Mesh API](https://meshapi.ai) — one OpenAI-compatible key, 300+ models. Streaming responses, live markdown, file/shell tool calls with approval, real-time cost.
31
+
32
+ ```
33
+ $ meshapi
34
+ ███╗ ███╗███████╗███████╗██╗ ██╗ ✦ meshapi 0.3.1
35
+ ████╗ ████║██╔════╝██╔════╝██║ ██║ cwd: ~/code/myproj
36
+ ██╔████╔██║█████╗ ███████╗███████║ model: anthropic/claude-sonnet-4.5
37
+ ██║╚██╔╝██║██╔══╝ ╚════██║██╔══██║ route: cheapest
38
+ ██║ ╚═╝ ██║███████╗███████║██║ ██║
39
+ ╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝
40
+ type /help for commands, /exit to quit
41
+
42
+ › add a healthcheck endpoint to server.py and run the tests
43
+ … streamed markdown reply …
44
+ ⚙ approve tool call? write_file: server.py (1240 chars) y/n › y
45
+ ⚙ approve tool call? run_bash: pytest -q y/n › y
46
+ anthropic/claude-sonnet-4.5 • 942→318 tok • $0.001234 • session $0.001234
47
+ mode: approve each model can request file/shell ops; you confirm each one shift+tab to cycle
48
+ ```
49
+
50
+ ## Install
51
+
52
+ ```bash
53
+ pipx install meshapi-code # recommended
54
+ uv tool install meshapi-code # if you use uv
55
+ pip install meshapi-code # plain pip
56
+ ```
57
+
58
+ PyPI package is `meshapi-code`; the command on your `$PATH` is `meshapi` (same split Claude Code uses: `@anthropic-ai/claude-code` → `claude`).
59
+
60
+ ```bash
61
+ export MESHAPI_API_KEY=rsk_your_key_here
62
+ meshapi
63
+ ```
64
+
65
+ Get a key at [meshapi.ai](https://meshapi.ai).
66
+
67
+ ## What it does
68
+
69
+ - **Streaming completions** with live markdown rendering (`rich`).
70
+ - **Real cost per turn** — Mesh returns `cost` in the SSE tail; we surface it after every reply and accumulate `session $…`.
71
+ - **Tool calling** — the model can read files, write files, and run shell commands in the launch directory. Off by default behind an approval prompt; toggle with one key.
72
+ - **Permission modes** — `approve each` (default), `bypass perms` (auto-execute, for trusted prompts), or `no access` (chat only). Cycle live with **Shift+Tab**.
73
+ - **Mid-session switching** — `/model openai/gpt-4o-mini`, `/route cheapest`, `/mode bypass`.
74
+ - **Smart routing** — `/route cheapest|fastest|balanced` hands model selection to Mesh's gateway, so you don't have to.
75
+ - **Persistent input history** — up-arrow recalls past prompts across sessions.
76
+ - **Config + env-var override** — `~/.meshapi/config.json`, `MESHAPI_API_KEY`.
77
+
78
+ ## Tool calling
79
+
80
+ When tools are enabled, the model can call:
81
+
82
+ | Tool | What it does |
83
+ |---|---|
84
+ | `read_file` | Read a file from the working directory (or absolute path). |
85
+ | `write_file` | Create or overwrite a file. Parent dirs are created. |
86
+ | `run_bash` | Run a shell command in the working directory. 60s timeout, 8000-char output cap. |
87
+
88
+ The launch CWD is baked into the system prompt, so relative paths the model produces resolve where you'd expect. Three permission modes, cycled live with Shift+Tab or set with `--mode` / `/mode`:
89
+
90
+ - **`ask`** (default) — every tool call requires a `y/n` confirmation. Safe.
91
+ - **`bypass`** — the model auto-executes. Fast, like Claude Code's `--dangerously-skip-permissions`. Use only when you trust the prompt.
92
+ - **`none`** — tools aren't sent to the model at all. Pure chat.
93
+
94
+ ```bash
95
+ meshapi --mode bypass # start in auto-execute mode
96
+ meshapi # default ask; press Shift+Tab to cycle
97
+ ```
98
+
99
+ ## Slash commands
100
+
101
+ | Command | What it does |
102
+ |---|---|
103
+ | `/help` | List commands |
104
+ | `/model <name>` | Switch model (e.g. `anthropic/claude-sonnet-4.5`, `openai/gpt-4o-mini`) |
105
+ | `/route <mode>` | `cheapest`, `fastest`, `balanced`, or `default` |
106
+ | `/mode <perm>` | `ask`, `bypass`, or `none` (Shift+Tab also cycles) |
107
+ | `/file <path>` | Inject a file into the conversation |
108
+ | `/system <text>` | Replace system prompt and reset chat |
109
+ | `/cost` | Show cumulative session spend |
110
+ | `/clear` | Reset conversation |
111
+ | `/exit` | Quit |
112
+
113
+ ## Config
114
+
115
+ `~/.meshapi/config.json`:
116
+
117
+ ```json
118
+ {
119
+ "base_url": "https://api.meshapi.ai/v1",
120
+ "model": "anthropic/claude-sonnet-4.5",
121
+ "system": "You are a helpful coding assistant. Be concise.",
122
+ "route": null
123
+ }
124
+ ```
125
+
126
+ The API key is read from `MESHAPI_API_KEY` (preferred) or stored in the same file. Input history lives at `~/.meshapi/history`.
127
+
128
+ ## About Mesh API
129
+
130
+ [Mesh API](https://meshapi.ai) is a unified LLM gateway: one API key, 300+ models from OpenAI, Anthropic, Google, Meta, Mistral, DeepSeek, Alibaba, and more. It's OpenAI-compatible — change the model name in your request, leave everything else alone.
131
+
132
+ - **Zero platform fees for 12 months.** You only pay for tokens.
133
+ - **Smart auto routing.** `route: cheapest|fastest|balanced` and the gateway picks for you.
134
+ - **Automatic failover.** If a provider goes down, your request routes to another. Your users won't know.
135
+ - **Highest rate limits.** Capacity is pooled across providers, so you hit ceilings later than going direct.
136
+ - **Zero data retention.** Prompts and completions pass through; we don't store them.
137
+ - **Multi-currency billing.** USD and INR (for India-based teams) at launch.
138
+ - **Ready-made workflows.** Pre-built prompt templates you can plug into any model.
139
+ - **Full observability.** Every request, token, cost, error, and model usage tracked in real time. Per-key spending limits and usage controls.
140
+
141
+ Built by the founders of [TagMango](https://tagmango.com) (YC W20) and [AI Fiesta](https://aifiesta.ai) (1M+ users across India). We got tired of managing five different provider dashboards ourselves, so we built this.
142
+
143
+ ## Why this CLI exists
144
+
145
+ Any generic OpenAI-compatible chat CLI talks to Mesh. `meshapi` adds three things a generic CLI can't: (1) the gateway-only `cost` field shown after every turn, (2) `/route` controls that drive Mesh's gateway-side model selection, and (3) tool calling that resolves paths against the directory you launched from.
146
+
147
+ ## Roadmap
148
+
149
+ - ✅ v0.3 — tool calling, ask/bypass/none permission modes, CWD-aware system prompt
150
+ - v0.4 — repo-aware mode, diff apply, `/cd` to change working dir mid-session
151
+ - v0.5 — `npm i -g meshapi-code` (Node port using `ink` + `chalk`), Homebrew tap, curl|sh installer at `meshapi.ai/install.sh`
152
+
153
+ ## License
154
+
155
+ MIT
@@ -0,0 +1,128 @@
1
+ # meshapi-code
2
+
3
+ Terminal chat REPL for [Mesh API](https://meshapi.ai) — one OpenAI-compatible key, 300+ models. Streaming responses, live markdown, file/shell tool calls with approval, real-time cost.
4
+
5
+ ```
6
+ $ meshapi
7
+ ███╗ ███╗███████╗███████╗██╗ ██╗ ✦ meshapi 0.3.1
8
+ ████╗ ████║██╔════╝██╔════╝██║ ██║ cwd: ~/code/myproj
9
+ ██╔████╔██║█████╗ ███████╗███████║ model: anthropic/claude-sonnet-4.5
10
+ ██║╚██╔╝██║██╔══╝ ╚════██║██╔══██║ route: cheapest
11
+ ██║ ╚═╝ ██║███████╗███████║██║ ██║
12
+ ╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝
13
+ type /help for commands, /exit to quit
14
+
15
+ › add a healthcheck endpoint to server.py and run the tests
16
+ … streamed markdown reply …
17
+ ⚙ approve tool call? write_file: server.py (1240 chars) y/n › y
18
+ ⚙ approve tool call? run_bash: pytest -q y/n › y
19
+ anthropic/claude-sonnet-4.5 • 942→318 tok • $0.001234 • session $0.001234
20
+ mode: approve each model can request file/shell ops; you confirm each one shift+tab to cycle
21
+ ```
22
+
23
+ ## Install
24
+
25
+ ```bash
26
+ pipx install meshapi-code # recommended
27
+ uv tool install meshapi-code # if you use uv
28
+ pip install meshapi-code # plain pip
29
+ ```
30
+
31
+ PyPI package is `meshapi-code`; the command on your `$PATH` is `meshapi` (same split Claude Code uses: `@anthropic-ai/claude-code` → `claude`).
32
+
33
+ ```bash
34
+ export MESHAPI_API_KEY=rsk_your_key_here
35
+ meshapi
36
+ ```
37
+
38
+ Get a key at [meshapi.ai](https://meshapi.ai).
39
+
40
+ ## What it does
41
+
42
+ - **Streaming completions** with live markdown rendering (`rich`).
43
+ - **Real cost per turn** — Mesh returns `cost` in the SSE tail; we surface it after every reply and accumulate `session $…`.
44
+ - **Tool calling** — the model can read files, write files, and run shell commands in the launch directory. Off by default behind an approval prompt; toggle with one key.
45
+ - **Permission modes** — `approve each` (default), `bypass perms` (auto-execute, for trusted prompts), or `no access` (chat only). Cycle live with **Shift+Tab**.
46
+ - **Mid-session switching** — `/model openai/gpt-4o-mini`, `/route cheapest`, `/mode bypass`.
47
+ - **Smart routing** — `/route cheapest|fastest|balanced` hands model selection to Mesh's gateway, so you don't have to.
48
+ - **Persistent input history** — up-arrow recalls past prompts across sessions.
49
+ - **Config + env-var override** — `~/.meshapi/config.json`, `MESHAPI_API_KEY`.
50
+
51
+ ## Tool calling
52
+
53
+ When tools are enabled, the model can call:
54
+
55
+ | Tool | What it does |
56
+ |---|---|
57
+ | `read_file` | Read a file from the working directory (or absolute path). |
58
+ | `write_file` | Create or overwrite a file. Parent dirs are created. |
59
+ | `run_bash` | Run a shell command in the working directory. 60s timeout, 8000-char output cap. |
60
+
61
+ The launch CWD is baked into the system prompt, so relative paths the model produces resolve where you'd expect. Three permission modes, cycled live with Shift+Tab or set with `--mode` / `/mode`:
62
+
63
+ - **`ask`** (default) — every tool call requires a `y/n` confirmation. Safe.
64
+ - **`bypass`** — the model auto-executes. Fast, like Claude Code's `--dangerously-skip-permissions`. Use only when you trust the prompt.
65
+ - **`none`** — tools aren't sent to the model at all. Pure chat.
66
+
67
+ ```bash
68
+ meshapi --mode bypass # start in auto-execute mode
69
+ meshapi # default ask; press Shift+Tab to cycle
70
+ ```
71
+
72
+ ## Slash commands
73
+
74
+ | Command | What it does |
75
+ |---|---|
76
+ | `/help` | List commands |
77
+ | `/model <name>` | Switch model (e.g. `anthropic/claude-sonnet-4.5`, `openai/gpt-4o-mini`) |
78
+ | `/route <mode>` | `cheapest`, `fastest`, `balanced`, or `default` |
79
+ | `/mode <perm>` | `ask`, `bypass`, or `none` (Shift+Tab also cycles) |
80
+ | `/file <path>` | Inject a file into the conversation |
81
+ | `/system <text>` | Replace system prompt and reset chat |
82
+ | `/cost` | Show cumulative session spend |
83
+ | `/clear` | Reset conversation |
84
+ | `/exit` | Quit |
85
+
86
+ ## Config
87
+
88
+ `~/.meshapi/config.json`:
89
+
90
+ ```json
91
+ {
92
+ "base_url": "https://api.meshapi.ai/v1",
93
+ "model": "anthropic/claude-sonnet-4.5",
94
+ "system": "You are a helpful coding assistant. Be concise.",
95
+ "route": null
96
+ }
97
+ ```
98
+
99
+ The API key is read from `MESHAPI_API_KEY` (preferred) or stored in the same file. Input history lives at `~/.meshapi/history`.
100
+
101
+ ## About Mesh API
102
+
103
+ [Mesh API](https://meshapi.ai) is a unified LLM gateway: one API key, 300+ models from OpenAI, Anthropic, Google, Meta, Mistral, DeepSeek, Alibaba, and more. It's OpenAI-compatible — change the model name in your request, leave everything else alone.
104
+
105
+ - **Zero platform fees for 12 months.** You only pay for tokens.
106
+ - **Smart auto routing.** `route: cheapest|fastest|balanced` and the gateway picks for you.
107
+ - **Automatic failover.** If a provider goes down, your request routes to another. Your users won't know.
108
+ - **Highest rate limits.** Capacity is pooled across providers, so you hit ceilings later than going direct.
109
+ - **Zero data retention.** Prompts and completions pass through; we don't store them.
110
+ - **Multi-currency billing.** USD and INR (for India-based teams) at launch.
111
+ - **Ready-made workflows.** Pre-built prompt templates you can plug into any model.
112
+ - **Full observability.** Every request, token, cost, error, and model usage tracked in real time. Per-key spending limits and usage controls.
113
+
114
+ Built by the founders of [TagMango](https://tagmango.com) (YC W20) and [AI Fiesta](https://aifiesta.ai) (1M+ users across India). We got tired of managing five different provider dashboards ourselves, so we built this.
115
+
116
+ ## Why this CLI exists
117
+
118
+ Any generic OpenAI-compatible chat CLI talks to Mesh. `meshapi` adds three things a generic CLI can't: (1) the gateway-only `cost` field shown after every turn, (2) `/route` controls that drive Mesh's gateway-side model selection, and (3) tool calling that resolves paths against the directory you launched from.
119
+
120
+ ## Roadmap
121
+
122
+ - ✅ v0.3 — tool calling, ask/bypass/none permission modes, CWD-aware system prompt
123
+ - v0.4 — repo-aware mode, diff apply, `/cd` to change working dir mid-session
124
+ - v0.5 — `npm i -g meshapi-code` (Node port using `ink` + `chalk`), Homebrew tap, curl|sh installer at `meshapi.ai/install.sh`
125
+
126
+ ## License
127
+
128
+ MIT
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "meshapi-code"
3
- version = "0.2.1"
3
+ version = "0.3.1"
4
4
  description = "Terminal chat for Mesh API — OpenAI-compatible LLM gateway"
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -0,0 +1 @@
1
+ __version__ = "0.3.1"
@@ -0,0 +1,254 @@
1
+ """meshapi — terminal chat REPL for Mesh API."""
2
+ import argparse
3
+ import json
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import httpx
8
+ from prompt_toolkit import PromptSession
9
+ from prompt_toolkit.formatted_text import FormattedText
10
+ from prompt_toolkit.history import FileHistory
11
+ from prompt_toolkit.key_binding import KeyBindings
12
+ from prompt_toolkit.styles import Style
13
+ from rich.text import Text
14
+
15
+ from . import __version__
16
+ from .client import stream_chat
17
+ from .commands import handle_command
18
+ from .config import CONFIG_FILE, HISTORY_FILE, load_config
19
+ from .permissions import HINTS, LABELS, Mode, from_str, next_mode
20
+ from .render import (
21
+ BRAND, BRAND_BG, BRAND_BG_FG, BRAND_DIM, console, fmt_usd, pretty_cwd, render_stream,
22
+ )
23
+ from .tools import TOOLS, build_system_prompt, execute as exec_tool, summarize_call
24
+
25
+ # ANSI Shadow figlet font
26
+ MESH_LOGO_LINES = [
27
+ "███╗ ███╗███████╗███████╗██╗ ██╗",
28
+ "████╗ ████║██╔════╝██╔════╝██║ ██║",
29
+ "██╔████╔██║█████╗ ███████╗███████║",
30
+ "██║╚██╔╝██║██╔══╝ ╚════██║██╔══██║",
31
+ "██║ ╚═╝ ██║███████╗███████║██║ ██║",
32
+ "╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝",
33
+ ]
34
+ LOGO_WIDTH = 35 # chars per line
35
+ LOGO_GUTTER = 3 # spaces between logo and info column
36
+
37
def parse_args(argv=None) -> argparse.Namespace:
    """Parse the CLI flags for one session and return the argparse namespace."""
    parser = argparse.ArgumentParser(
        prog="meshapi", description="Terminal chat for Mesh API"
    )
    parser.add_argument(
        "--version", action="version", version=f"meshapi {__version__}"
    )
    parser.add_argument(
        "--model",
        help="Override model for this session (e.g. openai/gpt-4o-mini)",
    )
    parser.add_argument(
        "--route",
        choices=["cheapest", "fastest", "balanced"],
        help="Routing mode",
    )
    # Valid values are derived from the Mode enum so CLI and enum stay in sync.
    parser.add_argument(
        "--mode",
        choices=[m.value for m in Mode],
        default="ask",
        help="Tool permission mode (default: ask). Cycle in-session with shift+tab.",
    )
    return parser.parse_args(argv)
49
+
50
+
51
def render_banner(cfg: dict) -> None:
    """Print the startup banner: figlet logo with an info column on the right."""
    # The info column begins on the third logo row; rows 0-1 show logo only.
    info_rows: dict = {
        2: Text.from_markup(f"[bold {BRAND}]✦ meshapi {__version__}[/bold {BRAND}]"),
        3: Text.from_markup(f"cwd: [{BRAND}]{pretty_cwd()}[/{BRAND}]"),
        4: Text.from_markup(f"model: [bold {BRAND}]{cfg['model']}[/bold {BRAND}]"),
        5: Text.from_markup(f"route: [{BRAND}]{cfg.get('route') or 'default'}[/{BRAND}]"),
    }
    console.print()
    for row, art in enumerate(MESH_LOGO_LINES):
        rendered = Text()
        rendered.append(art, style=BRAND)
        extra = info_rows.get(row)
        if extra is not None:
            # Pad the logo column to a fixed width so the info column aligns.
            fill = max(0, LOGO_WIDTH - len(art)) + LOGO_GUTTER
            rendered.append(" " * fill)
            rendered.append(extra)
        console.print(rendered)
    console.print()
    console.print("type /help for commands, /exit to quit", style=BRAND_DIM)
    console.print()
73
+
74
+
75
def confirm_tool_call(name: str, args: dict) -> bool:
    """Show the ASK-mode approval prompt for one tool call.

    Prints a one-line summary (plus a short content preview for writes and
    the command line for shell calls), then reads a y/n answer. Ctrl-C or
    EOF counts as a refusal.
    """
    console.print(
        f"[bold {BRAND}]⚙ approve tool call?[/bold {BRAND}] "
        f"[dim]{summarize_call(name, args)}[/dim]"
    )
    if name == "write_file":
        content = args.get("content") or ""
        ellipsis = "…" if len(content) > 300 else ""
        console.print(f"[dim]──[/dim]\n{content[:300]}{ellipsis}\n[dim]──[/dim]")
    elif name == "run_bash":
        console.print(f"[dim]$ {args.get('command')}[/dim]")
    try:
        answer = console.input("[bold]y[/bold] (yes) / [bold]n[/bold] (no) › ")
    except (KeyboardInterrupt, EOFError):
        return False
    return answer.strip().lower() in ("y", "yes")
89
+
90
+
91
def handle_tool_calls(tool_calls: list, mode: Mode, state: dict) -> None:
    """Execute a batch of model-requested tool calls and record the exchange.

    Appends one assistant message carrying the raw tool_calls, then — for
    every call — either executes it (BYPASS mode, or user approval in ASK
    mode) or records a denial. A `role: tool` result message is appended
    for every call so the follow-up completion sees an answer for each
    tool_call_id.
    """
    state["messages"].append({
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": tc["id"],
                "type": "function",
                "function": {"name": tc["name"], "arguments": tc["arguments"]},
            }
            for tc in tool_calls
        ],
    })
    for tc in tool_calls:
        # Arguments arrive as a JSON string stitched from stream deltas;
        # fall back to {} on malformed JSON rather than crashing the REPL.
        try:
            args = json.loads(tc["arguments"]) if tc["arguments"] else {}
        except json.JSONDecodeError:
            args = {}
        approved = mode == Mode.BYPASS or confirm_tool_call(tc["name"], args)
        if approved:
            console.print(f"[{BRAND_DIM}]⚙ {summarize_call(tc['name'], args)}[/{BRAND_DIM}]")
            result = exec_tool(tc["name"], args)
            # One-line preview for the user; the full result goes to the model.
            preview = result[:200].replace("\n", " ")
            tail = "…" if len(result) > 200 else ""
            console.print(f"[dim] → {preview}{tail}[/dim]")
        else:
            result = "User denied this tool call."
            # was an f-string with no placeholders — plain literal suffices
            console.print("[dim] → denied by user[/dim]")
        state["messages"].append({
            "role": "tool",
            "tool_call_id": tc["id"],
            "content": result,
        })
125
+
126
+
127
def main() -> None:
    """Entry point: parse flags, load config, and run the chat REPL.

    Each user turn streams a completion; if the model requests tool calls,
    they are executed (subject to the current permission mode) and the
    conversation loops until the model replies with plain text. On error,
    the whole failed turn is rolled back so the message history never
    carries a dangling tool_calls exchange.
    """
    args = parse_args()
    cfg = load_config()
    if args.model:
        cfg["model"] = args.model
    if args.route:
        cfg["route"] = args.route

    if not cfg["api_key"]:
        console.print(
            "[red]No API key found. Set MESHAPI_API_KEY env var or edit "
            f"{CONFIG_FILE}[/red]"
        )
        sys.exit(1)

    state = {
        "cfg": cfg,
        "messages": [{"role": "system", "content": build_system_prompt(cfg)}],
        "session_cost": 0.0,
        "mode": from_str(args.mode),
    }

    kb = KeyBindings()

    @kb.add("s-tab")  # Shift+Tab cycles ask → bypass → none
    def _(event):
        state["mode"] = next_mode(state["mode"])
        event.app.invalidate()

    def bottom_toolbar():
        # Color-code the mode: red = auto-execute (dangerous), yellow = chat-only.
        m = state["mode"]
        color = "ansired" if m == Mode.BYPASS else "ansiyellow" if m == Mode.NONE else "ansigreen"
        return FormattedText([
            ("", " mode: "),
            (f"bold {color}", LABELS[m]),
            ("", f" {HINTS[m]} "),
            ("ansibrightblack", "shift+tab to cycle"),
        ])

    session = PromptSession(
        history=FileHistory(str(HISTORY_FILE)),
        key_bindings=kb,
        bottom_toolbar=bottom_toolbar,
    )

    render_banner(cfg)

    while True:
        try:
            console.rule(
                title=f"[{BRAND_DIM}]{Path.cwd().name}[/{BRAND_DIM}]",
                align="right",
                style=BRAND_DIM,
                characters="─",
            )
            user_input = session.prompt(
                "› ",
                style=Style.from_dict({
                    "prompt": f"bold fg:{BRAND} bg:{BRAND_BG}",
                    "": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
                    "bottom-toolbar": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
                }),
            )
            console.rule(style=BRAND_DIM, characters="─")
        except (KeyboardInterrupt, EOFError):
            console.print("\n[dim]bye[/dim]")
            break

        if not user_input.strip():
            continue
        if user_input.startswith("/"):
            if not handle_command(user_input, state):
                break
            continue

        # Remember where this turn starts so a failed turn can be rolled back
        # cleanly even after tool-call messages have been appended.
        turn_start = len(state["messages"])
        state["messages"].append({"role": "user", "content": user_input})
        console.print()

        # Tool-calling loop: keep streaming until model returns text without tool_calls.
        agg_cost = 0.0
        last_model = state["cfg"]["model"]
        last_usage: dict = {}
        last_elapsed = 0.0
        try:
            for _hop in range(8):  # safety cap
                tools_arg = TOOLS if state["mode"] != Mode.NONE else None
                reply, meta = render_stream(
                    stream_chat(state["messages"], state["cfg"], tools=tools_arg)
                )
                cost = meta.get("cost")
                if cost is not None:
                    try:
                        agg_cost += float(cost)
                    except (TypeError, ValueError):
                        pass
                last_model = meta.get("model") or last_model
                last_usage = meta.get("usage") or last_usage
                last_elapsed += meta.get("elapsed", 0.0)

                tool_calls = meta.get("tool_calls") or []
                if not tool_calls:
                    state["messages"].append({"role": "assistant", "content": reply})
                    break

                # Model called tools — execute and loop.
                handle_tool_calls(tool_calls, state["mode"], state)

            state["session_cost"] += agg_cost
            prompt_t = last_usage.get("prompt_tokens", "?")
            completion_t = last_usage.get("completion_tokens", "?")
            cost_str = fmt_usd(agg_cost) if agg_cost else "—"
            console.rule(style=BRAND_DIM, characters="─")
            console.print(
                f"[dim]{last_model} • {prompt_t}→{completion_t} tok • {cost_str} • "
                f"session {fmt_usd(state['session_cost'])} • {last_elapsed:.1f}s[/dim]"
            )
        except httpx.HTTPStatusError as e:
            console.rule(style="dim red", characters="─")
            console.print(f"[red]API error {e.response.status_code}: {e.response.text}[/red]")
            # Roll back the entire failed turn (user message + any partial tool
            # exchange). A bare pop() here could strand an assistant message
            # whose tool_calls have no matching tool results, which providers
            # reject on the next request.
            del state["messages"][turn_start:]
        except Exception as e:
            console.rule(style="dim red", characters="─")
            console.print(f"[red]Error: {e}[/red]")
            del state["messages"][turn_start:]
251
+
252
+
253
+ if __name__ == "__main__":
254
+ main()
@@ -0,0 +1,87 @@
1
+ """Streaming OpenAI-compatible HTTP client for Mesh API."""
2
+ import json
3
+ from typing import Iterable, Optional
4
+
5
+ import httpx
6
+
7
+
8
def stream_chat(
    messages: list,
    cfg: dict,
    tools: Optional[list] = None,
) -> Iterable:
    """Yield content deltas, then a final dict with usage/cost/model/tool_calls.

    Mesh API speaks the OpenAI SSE dialect:
    - the gateway-only `cost` field arrives in the final chunk next to `usage`;
    - tool calls stream in as positional deltas that are stitched back
      together here and surfaced via the meta dict's `tool_calls` field.
    """
    endpoint = f"{cfg['base_url']}/chat/completions"
    headers = {
        "Authorization": f"Bearer {cfg['api_key']}",
        "Content-Type": "application/json",
    }
    body: dict = {
        "model": cfg["model"],
        "messages": messages,
        "stream": True,
    }
    if cfg.get("route"):
        body["route"] = cfg["route"]
    if tools:
        body["tools"] = tools
        body["tool_choice"] = "auto"

    meta: dict = {}
    model_name: str = ""
    calls: dict = {}  # SSE delta index -> accumulated {id, name, arguments}

    with httpx.stream("POST", endpoint, json=body, headers=headers, timeout=120) as resp:
        resp.raise_for_status()
        for raw in resp.iter_lines():
            if not raw or not raw.startswith("data: "):
                continue
            payload = raw[6:]
            if payload.strip() == "[DONE]":
                break
            try:
                chunk = json.loads(payload)
            except json.JSONDecodeError:
                continue

            # Only a truthy model name overwrites the remembered one.
            model_name = chunk.get("model") or model_name

            for choice in (chunk.get("choices") or [])[:1]:
                delta = choice.get("delta", {})

                text = delta.get("content")
                if text:
                    yield text

                for piece in delta.get("tool_calls") or []:
                    slot = calls.setdefault(
                        piece.get("index", 0),
                        {"id": "", "name": "", "arguments": ""},
                    )
                    if piece.get("id"):
                        slot["id"] = piece["id"]
                    func = piece.get("function") or {}
                    if func.get("name"):
                        slot["name"] = func["name"]
                    if func.get("arguments"):
                        slot["arguments"] += func["arguments"]

            usage = chunk.get("usage")
            cost = chunk.get("cost")
            if usage or cost:
                meta = {"usage": usage, "cost": cost}

    if model_name:
        meta["model"] = model_name
    if calls:
        meta["tool_calls"] = [calls[i] for i in sorted(calls)]
    if meta:
        yield meta
@@ -4,7 +4,9 @@ from pathlib import Path
4
4
  from rich.panel import Panel
5
5
 
6
6
  from .config import save_config
7
+ from .permissions import LABELS, Mode, from_str
7
8
  from .render import console, fmt_usd
9
+ from .tools import build_system_prompt
8
10
 
9
11
  ROUTES = {"cheapest", "fastest", "balanced"}
10
12
 
@@ -19,7 +21,7 @@ def handle_command(cmd: str, state: dict) -> bool:
19
21
  return False
20
22
 
21
23
  if name == "/clear":
22
- state["messages"] = [{"role": "system", "content": state["cfg"]["system"]}]
24
+ state["messages"] = [{"role": "system", "content": build_system_prompt(state["cfg"])}]
23
25
  state["session_cost"] = 0.0
24
26
  console.print("[dim]Conversation cleared.[/dim]")
25
27
 
@@ -56,7 +58,7 @@ def handle_command(cmd: str, state: dict) -> bool:
56
58
  elif name == "/system":
57
59
  if arg:
58
60
  state["cfg"]["system"] = arg
59
- state["messages"] = [{"role": "system", "content": arg}]
61
+ state["messages"] = [{"role": "system", "content": build_system_prompt(state["cfg"])}]
60
62
  console.print("[dim]System prompt updated and conversation reset.[/dim]")
61
63
  else:
62
64
  console.print(f"[dim]{state['cfg']['system']}[/dim]")
@@ -64,12 +66,24 @@ def handle_command(cmd: str, state: dict) -> bool:
64
66
  elif name == "/cost":
65
67
  console.print(f"[dim]Session spend: {fmt_usd(state.get('session_cost', 0))}[/dim]")
66
68
 
69
+ elif name == "/mode":
70
+ if not arg:
71
+ cur = state.get("mode", Mode.ASK)
72
+ console.print(f"[dim]Current mode: {LABELS[cur]} ({cur.value})[/dim]")
73
+ else:
74
+ try:
75
+ state["mode"] = from_str(arg)
76
+ console.print(f"[dim]Mode set to {LABELS[state['mode']]}[/dim]")
77
+ except ValueError as e:
78
+ console.print(f"[red]{e}[/red]")
79
+
67
80
  elif name == "/help":
68
81
  console.print(Panel.fit(
69
82
  "/exit end session\n"
70
83
  "/clear reset conversation\n"
71
84
  "/model <name> switch model (e.g. anthropic/claude-sonnet-4.5)\n"
72
85
  "/route <mode> cheapest|fastest|balanced|default\n"
86
+ "/mode <perm> ask|bypass|none (or shift+tab to cycle)\n"
73
87
  "/file <path> add file to context\n"
74
88
  "/system <txt> set system prompt\n"
75
89
  "/cost show session spend\n"
@@ -0,0 +1,35 @@
1
+ """Permission modes for tool calls — cycle with Shift+Tab."""
2
+ from enum import Enum
3
+
4
+
5
class Mode(Enum):
    """Tool-permission level for a session."""
    ASK = "ask"        # confirm every tool call (default — safest)
    BYPASS = "bypass"  # auto-execute tool calls without asking
    NONE = "none"      # never advertise tools to the model (chat only)


# Shift+Tab walks this cycle, in order.
ORDER = [Mode.ASK, Mode.BYPASS, Mode.NONE]

LABELS = {
    Mode.ASK: "approve each",
    Mode.BYPASS: "bypass perms",
    Mode.NONE: "no access",
}

HINTS = {
    Mode.ASK: "model can request file/shell ops; you confirm each one",
    Mode.BYPASS: "model executes file/shell ops automatically — be careful",
    Mode.NONE: "chat only — model has no filesystem or shell access",
}


def next_mode(m: Mode) -> Mode:
    """Return the mode after *m* in the Shift+Tab cycle (wraps around)."""
    here = ORDER.index(m)
    return ORDER[(here + 1) % len(ORDER)]


def from_str(s: str) -> Mode:
    """Parse a user-supplied mode name, case- and whitespace-insensitively.

    Raises ValueError for anything that is not a Mode value.
    """
    wanted = s.strip().lower()
    for candidate in Mode:
        if candidate.value == wanted:
            return candidate
    raise ValueError(f"unknown mode: {wanted} (try {', '.join(m.value for m in Mode)})")
@@ -0,0 +1,129 @@
1
+ """Tool definitions sent to the model + local executors."""
2
+ import subprocess
3
+ from pathlib import Path
4
+
5
+
6
def build_system_prompt(cfg: dict) -> str:
    """Return the user's base system prompt extended with cwd + tool guidance.

    The guidance text deliberately never names the tools: spelling out
    read_file/write_file/run_bash in prose makes Anthropic models fall back
    to XML tool-use mode and emit `<function_calls>` as plain text.
    """
    guidance = (
        "Resolve any relative path the user gives against this working "
        "directory. When you create or edit files without an explicit "
        "absolute path, place them inside this working directory. Use "
        "the available tools to inspect and modify the filesystem and "
        "run shell commands — do not ask the user to run commands."
    )
    base = cfg.get("system") or ""
    return f"{base}\n\nWorking directory: {Path.cwd()}\n{guidance}"
24
+
25
+ # OpenAI-compatible tool spec — Mesh API forwards these to the underlying provider.
26
+ TOOLS = [
27
+ {
28
+ "type": "function",
29
+ "function": {
30
+ "name": "read_file",
31
+ "description": "Read a file from the user's filesystem and return its contents.",
32
+ "parameters": {
33
+ "type": "object",
34
+ "properties": {
35
+ "path": {
36
+ "type": "string",
37
+ "description": "Path to the file (absolute, or relative to the cwd)",
38
+ }
39
+ },
40
+ "required": ["path"],
41
+ },
42
+ },
43
+ },
44
+ {
45
+ "type": "function",
46
+ "function": {
47
+ "name": "write_file",
48
+ "description": "Create or overwrite a file with the given content. Parent directories are created if missing.",
49
+ "parameters": {
50
+ "type": "object",
51
+ "properties": {
52
+ "path": {"type": "string", "description": "File path to write"},
53
+ "content": {"type": "string", "description": "Full file contents"},
54
+ },
55
+ "required": ["path", "content"],
56
+ },
57
+ },
58
+ },
59
+ {
60
+ "type": "function",
61
+ "function": {
62
+ "name": "run_bash",
63
+ "description": "Run a shell command (zsh/bash) and return combined stdout+stderr plus exit code. Times out at 60s.",
64
+ "parameters": {
65
+ "type": "object",
66
+ "properties": {
67
+ "command": {"type": "string", "description": "The shell command to run"}
68
+ },
69
+ "required": ["command"],
70
+ },
71
+ },
72
+ },
73
+ ]
74
+
75
# Hard cap on tool output returned to the model (protects the context window).
OUTPUT_LIMIT = 8000


def execute(name: str, arguments: dict) -> str:
    """Run one tool locally and return a string result for the model.

    Failures are reported as "Error: ..." strings rather than raised, so a
    bad tool call never kills the REPL — the model sees the error text and
    can retry.
    """
    if name == "read_file":
        try:
            return Path(arguments["path"]).expanduser().read_text()
        except Exception as e:
            return f"Error: {e}"

    if name == "write_file":
        try:
            target = Path(arguments["path"]).expanduser()
            # Create missing parent directories so relative paths "just work".
            target.parent.mkdir(parents=True, exist_ok=True)
            body = arguments["content"]
            target.write_text(body)
            return f"OK — wrote {len(body)} chars to {target}"
        except Exception as e:
            return f"Error: {e}"

    if name == "run_bash":
        try:
            proc = subprocess.run(
                arguments["command"],
                shell=True,
                capture_output=True,
                text=True,
                timeout=60,
                cwd=str(Path.cwd()),
            )
            combined = (proc.stdout or "") + (proc.stderr or "")
            suffix = "...[truncated]" if len(combined) > OUTPUT_LIMIT else ""
            return f"{combined[:OUTPUT_LIMIT]}{suffix}\n[exit {proc.returncode}]"
        except subprocess.TimeoutExpired:
            return "Error: command timed out after 60s"
        except Exception as e:
            return f"Error: {e}"

    return f"Error: unknown tool `{name}`"
115
+
116
+
117
def summarize_call(name: str, arguments: dict) -> str:
    """Build the one-line description shown in approval prompts and logs."""
    if name == "read_file":
        return f"read_file: {arguments.get('path')}"
    if name == "write_file":
        size = len(arguments.get("content", ""))
        return f"write_file: {arguments.get('path')} ({size} chars)"
    if name == "run_bash":
        command = arguments.get("command", "")
        # Truncate very long command lines so the prompt stays one line.
        shown = command if len(command) <= 200 else command[:200] + "…"
        return f"run_bash: {shown}"
    # Unknown tool: fall back to a call-style rendering of the raw arguments.
    return f"{name}({arguments})"
@@ -1,112 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: meshapi-code
3
- Version: 0.2.1
4
- Summary: Terminal chat for Mesh API — OpenAI-compatible LLM gateway
5
- Project-URL: Homepage, https://meshapi.ai
6
- Project-URL: Documentation, https://docs.meshapi.ai
7
- Project-URL: Repository, https://github.com/aifiesta/meshapi-code
8
- Author: Mesh API
9
- License: MIT
10
- License-File: LICENSE
11
- Keywords: anthropic,chat,cli,gateway,llm,mesh,openai
12
- Classifier: Development Status :: 3 - Alpha
13
- Classifier: Environment :: Console
14
- Classifier: Intended Audience :: Developers
15
- Classifier: License :: OSI Approved :: MIT License
16
- Classifier: Programming Language :: Python :: 3
17
- Classifier: Programming Language :: Python :: 3.10
18
- Classifier: Programming Language :: Python :: 3.11
19
- Classifier: Programming Language :: Python :: 3.12
20
- Classifier: Programming Language :: Python :: 3.13
21
- Classifier: Topic :: Software Development
22
- Requires-Python: >=3.10
23
- Requires-Dist: httpx>=0.27
24
- Requires-Dist: prompt-toolkit>=3.0
25
- Requires-Dist: rich>=13.7
26
- Description-Content-Type: text/markdown
27
-
28
- # meshapi-code
29
-
30
- Terminal chat for [Mesh API](https://meshapi.ai) — the OpenAI-compatible LLM gateway. Streaming responses, live markdown, slash commands, real-time cost.
31
-
32
- ```
33
- $ meshapi
34
- ╭───────────────────────────────╮
35
- │ meshapi 0.1.0 │
36
- │ model: anthropic/claude-… │
37
- │ route: default │
38
- ╰───────────────────────────────╯
39
- you > how do I parse SSE in python
40
- … streamed markdown reply …
41
- 142 → 318 tok • $0.001234 • session $0.001234
42
- ```
43
-
44
- ## Install
45
-
46
- ```bash
47
- pipx install meshapi-code # recommended
48
- uv tool install meshapi-code # if you use uv
49
- pip install meshapi-code # plain pip
50
- ```
51
-
52
- The PyPI package is `meshapi-code`; the command on your `$PATH` is `meshapi`.
53
-
54
- Then:
55
-
56
- ```bash
57
- export MESHAPI_API_KEY=rsk_your_key_here
58
- meshapi
59
- ```
60
-
61
- Get a key at [meshapi.ai](https://meshapi.ai).
62
-
63
- ## What it does
64
-
65
- - **Streaming completions** with live markdown rendering (`rich`)
66
- - **Real cost per turn** — Mesh API returns `cost` in the SSE tail; we show it
67
- - **Slash commands** — `/model`, `/route`, `/file`, `/system`, `/cost`, `/clear`
68
- - **Mid-session model switching** — `/model openai/gpt-4o-mini`
69
- - **Smart routing** — `/route cheapest` lets the gateway pick (Mesh-specific)
70
- - **Persistent input history** — up-arrow recalls past prompts
71
- - **Config + env-var override** — `~/.meshapi/config.json`, `MESHAPI_API_KEY`
72
-
73
- ## Slash commands
74
-
75
- | Command | What it does |
76
- |---|---|
77
- | `/help` | List commands |
78
- | `/model <name>` | Switch model (e.g. `anthropic/claude-sonnet-4.5`) |
79
- | `/route <mode>` | `cheapest`, `fastest`, `balanced`, or `default` |
80
- | `/file <path>` | Inject a file into the conversation |
81
- | `/system <text>` | Replace system prompt and reset chat |
82
- | `/cost` | Show cumulative session spend |
83
- | `/clear` | Reset conversation |
84
- | `/exit` | Quit |
85
-
86
- ## Config
87
-
88
- `~/.meshapi/config.json`:
89
-
90
- ```json
91
- {
92
- "base_url": "https://api.meshapi.ai/v1",
93
- "model": "anthropic/claude-sonnet-4.5",
94
- "system": "You are a helpful coding assistant. Be concise.",
95
- "route": null
96
- }
97
- ```
98
-
99
- The API key is read from `MESHAPI_API_KEY` (preferred) or stored in the same file.
100
-
101
- ## Why it exists
102
-
103
- Mesh API is OpenAI-compatible, so any generic chat CLI works against it. `meshapi` adds two things a generic CLI can't: (1) the gateway-only `cost` field shown after every turn, and (2) routing controls (`/route cheapest`) that hit Mesh's gateway-side model selection.
104
-
105
- ## Roadmap
106
-
107
- - v0.2 — tool calling, repo-aware mode, diff apply, `npm i -g meshapi-code`
108
- - v0.3 — Homebrew tap, curl|sh installer at `meshapi.ai/install.sh`
109
-
110
- ## License
111
-
112
- MIT
@@ -1,85 +0,0 @@
1
- # meshapi-code
2
-
3
- Terminal chat for [Mesh API](https://meshapi.ai) — the OpenAI-compatible LLM gateway. Streaming responses, live markdown, slash commands, real-time cost.
4
-
5
- ```
6
- $ meshapi
7
- ╭───────────────────────────────╮
8
- │ meshapi 0.1.0 │
9
- │ model: anthropic/claude-… │
10
- │ route: default │
11
- ╰───────────────────────────────╯
12
- you > how do I parse SSE in python
13
- … streamed markdown reply …
14
- 142 → 318 tok • $0.001234 • session $0.001234
15
- ```
16
-
17
- ## Install
18
-
19
- ```bash
20
- pipx install meshapi-code # recommended
21
- uv tool install meshapi-code # if you use uv
22
- pip install meshapi-code # plain pip
23
- ```
24
-
25
- The PyPI package is `meshapi-code`; the command on your `$PATH` is `meshapi`.
26
-
27
- Then:
28
-
29
- ```bash
30
- export MESHAPI_API_KEY=rsk_your_key_here
31
- meshapi
32
- ```
33
-
34
- Get a key at [meshapi.ai](https://meshapi.ai).
35
-
36
- ## What it does
37
-
38
- - **Streaming completions** with live markdown rendering (`rich`)
39
- - **Real cost per turn** — Mesh API returns `cost` in the SSE tail; we show it
40
- - **Slash commands** — `/model`, `/route`, `/file`, `/system`, `/cost`, `/clear`
41
- - **Mid-session model switching** — `/model openai/gpt-4o-mini`
42
- - **Smart routing** — `/route cheapest` lets the gateway pick (Mesh-specific)
43
- - **Persistent input history** — up-arrow recalls past prompts
44
- - **Config + env-var override** — `~/.meshapi/config.json`, `MESHAPI_API_KEY`
45
-
46
- ## Slash commands
47
-
48
- | Command | What it does |
49
- |---|---|
50
- | `/help` | List commands |
51
- | `/model <name>` | Switch model (e.g. `anthropic/claude-sonnet-4.5`) |
52
- | `/route <mode>` | `cheapest`, `fastest`, `balanced`, or `default` |
53
- | `/file <path>` | Inject a file into the conversation |
54
- | `/system <text>` | Replace system prompt and reset chat |
55
- | `/cost` | Show cumulative session spend |
56
- | `/clear` | Reset conversation |
57
- | `/exit` | Quit |
58
-
59
- ## Config
60
-
61
- `~/.meshapi/config.json`:
62
-
63
- ```json
64
- {
65
- "base_url": "https://api.meshapi.ai/v1",
66
- "model": "anthropic/claude-sonnet-4.5",
67
- "system": "You are a helpful coding assistant. Be concise.",
68
- "route": null
69
- }
70
- ```
71
-
72
- The API key is read from `MESHAPI_API_KEY` (preferred) or stored in the same file.
73
-
74
- ## Why it exists
75
-
76
- Mesh API is OpenAI-compatible, so any generic chat CLI works against it. `meshapi` adds two things a generic CLI can't: (1) the gateway-only `cost` field shown after every turn, and (2) routing controls (`/route cheapest`) that hit Mesh's gateway-side model selection.
77
-
78
- ## Roadmap
79
-
80
- - v0.2 — tool calling, repo-aware mode, diff apply, `npm i -g meshapi-code`
81
- - v0.3 — Homebrew tap, curl|sh installer at `meshapi.ai/install.sh`
82
-
83
- ## License
84
-
85
- MIT
@@ -1 +0,0 @@
1
- __version__ = "0.2.1"
@@ -1,146 +0,0 @@
1
- """meshapi — terminal chat REPL for Mesh API."""
2
- import argparse
3
- import sys
4
- from pathlib import Path
5
-
6
- import httpx
7
- from prompt_toolkit import PromptSession
8
- from prompt_toolkit.history import FileHistory
9
- from prompt_toolkit.styles import Style
10
- from rich.text import Text
11
-
12
- from . import __version__
13
- from .client import stream_chat
14
- from .commands import handle_command
15
- from .config import CONFIG_FILE, HISTORY_FILE, load_config
16
- from .render import BRAND, BRAND_BG, BRAND_BG_FG, BRAND_DIM, console, fmt_usd, pretty_cwd, render_stream
17
-
18
- # ANSI Shadow figlet font
19
- MESH_LOGO_LINES = [
20
- "███╗ ███╗███████╗███████╗██╗ ██╗",
21
- "████╗ ████║██╔════╝██╔════╝██║ ██║",
22
- "██╔████╔██║█████╗ ███████╗███████║",
23
- "██║╚██╔╝██║██╔══╝ ╚════██║██╔══██║",
24
- "██║ ╚═╝ ██║███████╗███████║██║ ██║",
25
- "╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝",
26
- ]
27
- LOGO_WIDTH = 35 # chars per line
28
- LOGO_GUTTER = 3 # spaces between logo and info column
29
-
30
-
31
def parse_args(argv=None) -> argparse.Namespace:
    """Parse command-line options for a single meshapi session."""
    parser = argparse.ArgumentParser(prog="meshapi", description="Terminal chat for Mesh API")
    parser.add_argument("--version", action="version", version=f"meshapi {__version__}")
    parser.add_argument("--model", help="Override model for this session (e.g. openai/gpt-4o-mini)")
    parser.add_argument("--route", choices=["cheapest", "fastest", "balanced"], help="Routing mode")
    return parser.parse_args(argv)
37
-
38
-
39
def main() -> None:
    """Entry point: print the banner, then run the interactive chat REPL.

    Loads config, applies CLI overrides, and loops reading user input until
    /exit or Ctrl-C/Ctrl-D.  Per-turn cost from the gateway is accumulated in
    ``state["session_cost"]``.
    """
    args = parse_args()
    cfg = load_config()
    # CLI flags take precedence over the config file for this session only.
    if args.model:
        cfg["model"] = args.model
    if args.route:
        cfg["route"] = args.route

    if not cfg["api_key"]:
        console.print(
            "[red]No API key found. Set MESHAPI_API_KEY env var or edit "
            f"{CONFIG_FILE}[/red]"
        )
        sys.exit(1)

    # Mutable session state shared with handle_command(); messages always
    # starts with the system prompt.
    state = {
        "cfg": cfg,
        "messages": [{"role": "system", "content": cfg["system"]}],
        "session_cost": 0.0,
    }

    # Persistent up-arrow history across runs.
    session = PromptSession(history=FileHistory(str(HISTORY_FILE)))

    # Info column printed to the right of the figlet logo, one entry per logo
    # row; None rows get logo only.
    info_per_line: list = [
        None,
        None,
        Text.from_markup(f"[bold {BRAND}]✦ meshapi {__version__}[/bold {BRAND}]"),
        Text.from_markup(f"cwd: [{BRAND}]{pretty_cwd()}[/{BRAND}]"),
        Text.from_markup(f"model: [bold {BRAND}]{cfg['model']}[/bold {BRAND}]"),
        Text.from_markup(f"route: [{BRAND}]{cfg.get('route') or 'default'}[/{BRAND}]"),
    ]

    console.print()  # top gap so banner doesn't crowd the shell prompt
    for i, logo_line in enumerate(MESH_LOGO_LINES):
        line = Text()
        line.append(logo_line, style=BRAND)
        info = info_per_line[i] if i < len(info_per_line) else None
        if info is not None:
            # Pad so the info column lines up even if a logo row is short.
            pad = max(0, LOGO_WIDTH - len(logo_line))
            line.append(" " * (pad + LOGO_GUTTER))
            line.append(info)
        console.print(line)
    console.print()
    console.print("type /help for commands, /exit to quit", style=BRAND_DIM)
    console.print()  # bottom gap before the first prompt rule

    while True:
        try:
            # Horizontal rule with the cwd name marks the start of a turn.
            console.rule(
                title=f"[{BRAND_DIM}]{Path.cwd().name}[/{BRAND_DIM}]",
                align="right",
                style=BRAND_DIM,
                characters="─",
            )
            user_input = session.prompt(
                "› ",
                style=Style.from_dict({
                    "prompt": f"bold fg:{BRAND} bg:{BRAND_BG}",
                    "": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
                }),
            )
            console.rule(style=BRAND_DIM, characters="─")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D at the prompt exits the whole REPL.
            console.print("\n[dim]bye[/dim]")
            break

        if not user_input.strip():
            continue
        if user_input.startswith("/"):
            # handle_command returns falsy to request exit (e.g. /exit).
            if not handle_command(user_input, state):
                break
            continue

        state["messages"].append({"role": "user", "content": user_input})
        console.print()
        try:
            # Stream the reply; meta carries usage/cost/model from the final
            # SSE chunk (gateway-specific `cost` field included).
            reply, meta = render_stream(stream_chat(state["messages"], state["cfg"]))
            state["messages"].append({"role": "assistant", "content": reply})

            cost = meta.get("cost")
            if cost is not None:
                try:
                    # Gateway may send cost as a string; ignore unparseable values.
                    state["session_cost"] += float(cost)
                except (TypeError, ValueError):
                    pass
            usage = meta.get("usage") or {}
            model = meta.get("model") or state["cfg"]["model"]
            elapsed = meta.get("elapsed", 0.0)
            prompt_t = usage.get("prompt_tokens", "?")
            completion_t = usage.get("completion_tokens", "?")
            cost_str = fmt_usd(cost) if cost is not None else "—"
            console.rule(style=BRAND_DIM, characters="─")
            # Per-turn footer: model, token counts, turn cost, running total, latency.
            console.print(
                f"[dim]{model} • {prompt_t}→{completion_t} tok • {cost_str} • "
                f"session {fmt_usd(state['session_cost'])} • {elapsed:.1f}s[/dim]"
            )
        except httpx.HTTPStatusError as e:
            console.rule(style="dim red", characters="─")
            console.print(f"[red]API error {e.response.status_code}: {e.response.text}[/red]")
            # Drop the failed user message so a retry doesn't duplicate it.
            state["messages"].pop()
        except Exception as e:
            console.rule(style="dim red", characters="─")
            console.print(f"[red]Error: {e}[/red]")
            state["messages"].pop()
143
-
144
-
145
# Allow direct execution (`python -m` / script invocation).
if __name__ == "__main__":
    main()
@@ -1,58 +0,0 @@
1
- """Streaming OpenAI-compatible HTTP client for Mesh API."""
2
- import json
3
- from typing import Iterable
4
-
5
- import httpx
6
-
7
-
8
- def stream_chat(messages: list, cfg: dict) -> Iterable:
9
- """Yield content deltas, then a final {'usage':..., 'cost':...} dict.
10
-
11
- Mesh API is OpenAI-compatible but adds `cost` to the final SSE chunk.
12
- """
13
- url = f"{cfg['base_url']}/chat/completions"
14
- headers = {
15
- "Authorization": f"Bearer {cfg['api_key']}",
16
- "Content-Type": "application/json",
17
- }
18
- payload: dict = {
19
- "model": cfg["model"],
20
- "messages": messages,
21
- "stream": True,
22
- }
23
- if cfg.get("route"):
24
- payload["route"] = cfg["route"]
25
-
26
- last_meta: dict = {}
27
- last_model: str = ""
28
- with httpx.stream("POST", url, json=payload, headers=headers, timeout=120) as r:
29
- r.raise_for_status()
30
- for line in r.iter_lines():
31
- if not line or not line.startswith("data: "):
32
- continue
33
- data = line[6:]
34
- if data.strip() == "[DONE]":
35
- break
36
- try:
37
- obj = json.loads(data)
38
- except json.JSONDecodeError:
39
- continue
40
-
41
- if obj.get("model"):
42
- last_model = obj["model"]
43
-
44
- choices = obj.get("choices") or []
45
- if choices:
46
- delta = choices[0].get("delta", {}).get("content")
47
- if delta:
48
- yield delta
49
-
50
- usage = obj.get("usage")
51
- cost = obj.get("cost")
52
- if usage or cost:
53
- last_meta = {"usage": usage, "cost": cost}
54
-
55
- if last_model:
56
- last_meta["model"] = last_model
57
- if last_meta:
58
- yield last_meta
File without changes
File without changes