meshapi-code 0.2.1__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,6 +4,7 @@ __pycache__/
4
4
  *.egg-info/
5
5
  .venv/
6
6
  venv/
7
+ .build-venv/
7
8
  dist/
8
9
  build/
9
10
  .pytest_cache/
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: meshapi-code
3
- Version: 0.2.1
3
+ Version: 0.3.0
4
4
  Summary: Terminal chat for Mesh API — OpenAI-compatible LLM gateway
5
5
  Project-URL: Homepage, https://meshapi.ai
6
6
  Project-URL: Documentation, https://docs.meshapi.ai
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "meshapi-code"
3
- version = "0.2.1"
3
+ version = "0.3.0"
4
4
  description = "Terminal chat for Mesh API — OpenAI-compatible LLM gateway"
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -0,0 +1 @@
1
+ __version__ = "0.3.0"
@@ -0,0 +1,254 @@
1
+ """meshapi — terminal chat REPL for Mesh API."""
2
+ import argparse
3
+ import json
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import httpx
8
+ from prompt_toolkit import PromptSession
9
+ from prompt_toolkit.formatted_text import FormattedText
10
+ from prompt_toolkit.history import FileHistory
11
+ from prompt_toolkit.key_binding import KeyBindings
12
+ from prompt_toolkit.styles import Style
13
+ from rich.text import Text
14
+
15
+ from . import __version__
16
+ from .client import stream_chat
17
+ from .commands import handle_command
18
+ from .config import CONFIG_FILE, HISTORY_FILE, load_config
19
+ from .permissions import HINTS, LABELS, Mode, from_str, next_mode
20
+ from .render import (
21
+ BRAND, BRAND_BG, BRAND_BG_FG, BRAND_DIM, console, fmt_usd, pretty_cwd, render_stream,
22
+ )
23
+ from .tools import TOOLS, build_system_prompt, execute as exec_tool, summarize_call
24
+
25
+ # ANSI Shadow figlet font
26
+ MESH_LOGO_LINES = [
27
+ "███╗ ███╗███████╗███████╗██╗ ██╗",
28
+ "████╗ ████║██╔════╝██╔════╝██║ ██║",
29
+ "██╔████╔██║█████╗ ███████╗███████║",
30
+ "██║╚██╔╝██║██╔══╝ ╚════██║██╔══██║",
31
+ "██║ ╚═╝ ██║███████╗███████║██║ ██║",
32
+ "╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝",
33
+ ]
34
+ LOGO_WIDTH = 35 # chars per line
35
+ LOGO_GUTTER = 3 # spaces between logo and info column
36
+
37
def parse_args(argv=None) -> argparse.Namespace:
    """Build and evaluate the meshapi command-line parser.

    Args:
        argv: Argument list to parse; ``None`` means ``sys.argv[1:]``.

    Returns:
        Parsed namespace with ``model``, ``route`` and ``mode`` attributes.
    """
    parser = argparse.ArgumentParser(
        prog="meshapi", description="Terminal chat for Mesh API"
    )
    parser.add_argument(
        "--version", action="version", version=f"meshapi {__version__}"
    )
    parser.add_argument(
        "--model", help="Override model for this session (e.g. openai/gpt-4o-mini)"
    )
    parser.add_argument(
        "--route", choices=["cheapest", "fastest", "balanced"], help="Routing mode"
    )
    parser.add_argument(
        "--mode",
        choices=[m.value for m in Mode],
        default="ask",
        help="Tool permission mode (default: ask). Cycle in-session with shift+tab.",
    )
    return parser.parse_args(argv)
49
+
50
+
51
def render_banner(cfg: dict) -> None:
    """Print the startup logo with a right-hand info column, then usage hints.

    Args:
        cfg: Loaded config; reads ``model`` and (optionally) ``route``.
    """
    # Info rows are paired with logo rows by index; None rows carry no info.
    info_rows: list = [
        None,
        None,
        Text.from_markup(f"[bold {BRAND}]✦ meshapi {__version__}[/bold {BRAND}]"),
        Text.from_markup(f"cwd: [{BRAND}]{pretty_cwd()}[/{BRAND}]"),
        Text.from_markup(f"model: [bold {BRAND}]{cfg['model']}[/bold {BRAND}]"),
        Text.from_markup(f"route: [{BRAND}]{cfg.get('route') or 'default'}[/{BRAND}]"),
    ]
    console.print()
    for row, logo_line in enumerate(MESH_LOGO_LINES):
        rendered = Text()
        rendered.append(logo_line, style=BRAND)
        extra = info_rows[row] if row < len(info_rows) else None
        if extra is not None:
            # Pad so the info column stays aligned even if a logo row is short.
            gap = max(0, LOGO_WIDTH - len(logo_line)) + LOGO_GUTTER
            rendered.append(" " * gap)
            rendered.append(extra)
        console.print(rendered)
    console.print()
    console.print("type /help for commands, /exit to quit", style=BRAND_DIM)
    console.print()
73
+
74
+
75
def confirm_tool_call(name: str, args: dict) -> bool:
    """ASK-mode prompt for a single tool call. Returns True if approved."""
    console.print(
        f"[bold {BRAND}]⚙ approve tool call?[/bold {BRAND}] "
        f"[dim]{summarize_call(name, args)}[/dim]"
    )
    # Show extra context for the riskier tools before asking.
    if name == "write_file":
        content = args.get("content") or ""
        preview = content[:300]
        suffix = "…" if len(content) > 300 else ""
        console.print(f"[dim]──[/dim]\n{preview}{suffix}\n[dim]──[/dim]")
    elif name == "run_bash":
        console.print(f"[dim]$ {args.get('command')}[/dim]")
    try:
        answer = console.input("[bold]y[/bold] (yes) / [bold]n[/bold] (no) › ")
    except (KeyboardInterrupt, EOFError):
        # Ctrl-C / Ctrl-D on the confirmation prompt counts as a denial.
        return False
    return answer.strip().lower() in ("y", "yes")
89
+
90
+
91
def handle_tool_calls(tool_calls: list, mode: Mode, state: dict) -> None:
    """Append assistant tool_calls message + tool result messages to state."""
    # Echo the model's tool request back into history in OpenAI format.
    assistant_calls = [
        {
            "id": call["id"],
            "type": "function",
            "function": {"name": call["name"], "arguments": call["arguments"]},
        }
        for call in tool_calls
    ]
    state["messages"].append(
        {"role": "assistant", "content": None, "tool_calls": assistant_calls}
    )
    for call in tool_calls:
        raw = call["arguments"]
        try:
            args = json.loads(raw) if raw else {}
        except json.JSONDecodeError:
            # Malformed arguments from the model — run the tool with none.
            args = {}
        if mode == Mode.BYPASS or confirm_tool_call(call["name"], args):
            console.print(f"[{BRAND_DIM}]⚙ {summarize_call(call['name'], args)}[/{BRAND_DIM}]")
            result = exec_tool(call["name"], args)
            head = result[:200].replace("\n", " ")
            more = "…" if len(result) > 200 else ""
            console.print(f"[dim] → {head}{more}[/dim]")
        else:
            result = "User denied this tool call."
            console.print("[dim] → denied by user[/dim]")
        # Each tool result must reference the originating call id.
        state["messages"].append(
            {"role": "tool", "tool_call_id": call["id"], "content": result}
        )
125
+
126
+
127
def main() -> None:
    """Entry point: wire up config, key bindings, and run the REPL loop."""
    args = parse_args()
    cfg = load_config()
    # CLI flags override persisted config for this session only.
    if args.model:
        cfg["model"] = args.model
    if args.route:
        cfg["route"] = args.route

    if not cfg["api_key"]:
        console.print(
            "[red]No API key found. Set MESHAPI_API_KEY env var or edit "
            f"{CONFIG_FILE}[/red]"
        )
        sys.exit(1)

    # Mutable session state shared with command handlers and key bindings.
    state = {
        "cfg": cfg,
        "messages": [{"role": "system", "content": build_system_prompt(cfg)}],
        "session_cost": 0.0,
        "mode": from_str(args.mode),
    }

    kb = KeyBindings()

    @kb.add("s-tab")  # Shift+Tab
    def _(event):
        # Cycle tool-permission mode; invalidate() forces a toolbar redraw.
        state["mode"] = next_mode(state["mode"])
        event.app.invalidate()

    def bottom_toolbar():
        # Color the mode label by risk: red = bypass, yellow = none, green = ask.
        m = state["mode"]
        color = "ansired" if m == Mode.BYPASS else "ansiyellow" if m == Mode.NONE else "ansigreen"
        return FormattedText([
            ("", " mode: "),
            (f"bold {color}", LABELS[m]),
            ("", f" {HINTS[m]} "),
            ("ansibrightblack", "shift+tab to cycle"),
        ])

    session = PromptSession(
        history=FileHistory(str(HISTORY_FILE)),
        key_bindings=kb,
        bottom_toolbar=bottom_toolbar,
    )

    render_banner(cfg)

    while True:
        try:
            console.rule(
                title=f"[{BRAND_DIM}]{Path.cwd().name}[/{BRAND_DIM}]",
                align="right",
                style=BRAND_DIM,
                characters="─",
            )
            user_input = session.prompt(
                "› ",
                style=Style.from_dict({
                    "prompt": f"bold fg:{BRAND} bg:{BRAND_BG}",
                    "": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
                    "bottom-toolbar": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
                }),
            )
            console.rule(style=BRAND_DIM, characters="─")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D at the prompt ends the session cleanly.
            console.print("\n[dim]bye[/dim]")
            break

        if not user_input.strip():
            continue
        if user_input.startswith("/"):
            # Slash commands; handle_command returns False to end the session.
            if not handle_command(user_input, state):
                break
            continue

        state["messages"].append({"role": "user", "content": user_input})
        console.print()

        # Tool-calling loop: keep streaming until model returns text without tool_calls.
        agg_cost = 0.0
        last_model = state["cfg"]["model"]
        last_usage: dict = {}
        last_elapsed = 0.0
        try:
            for _hop in range(8):  # safety cap
                tools_arg = TOOLS if state["mode"] != Mode.NONE else None
                reply, meta = render_stream(
                    stream_chat(state["messages"], state["cfg"], tools=tools_arg)
                )
                cost = meta.get("cost")
                if cost is not None:
                    try:
                        agg_cost += float(cost)
                    except (TypeError, ValueError):
                        pass  # provider sent a non-numeric cost; ignore it
                last_model = meta.get("model") or last_model
                last_usage = meta.get("usage") or last_usage
                last_elapsed += meta.get("elapsed", 0.0)

                tool_calls = meta.get("tool_calls") or []
                if not tool_calls:
                    state["messages"].append({"role": "assistant", "content": reply})
                    break

                # Model called tools — execute and loop.
                handle_tool_calls(tool_calls, state["mode"], state)

            state["session_cost"] += agg_cost
            prompt_t = last_usage.get("prompt_tokens", "?")
            completion_t = last_usage.get("completion_tokens", "?")
            cost_str = fmt_usd(agg_cost) if agg_cost else "—"
            console.rule(style=BRAND_DIM, characters="─")
            console.print(
                f"[dim]{last_model} • {prompt_t}→{completion_t} tok • {cost_str} • "
                f"session {fmt_usd(state['session_cost'])} • {last_elapsed:.1f}s[/dim]"
            )
        except httpx.HTTPStatusError as e:
            console.rule(style="dim red", characters="─")
            console.print(f"[red]API error {e.response.status_code}: {e.response.text}[/red]")
            # NOTE(review): pops only the last message — after one or more tool
            # hops this can strand assistant/tool messages in history; confirm
            # this is the intended recovery behavior.
            state["messages"].pop()
        except Exception as e:
            console.rule(style="dim red", characters="─")
            console.print(f"[red]Error: {e}[/red]")
            state["messages"].pop()


if __name__ == "__main__":
    main()
@@ -0,0 +1,87 @@
1
+ """Streaming OpenAI-compatible HTTP client for Mesh API."""
2
+ import json
3
+ from typing import Iterable, Optional
4
+
5
+ import httpx
6
+
7
+
8
def stream_chat(
    messages: list,
    cfg: dict,
    tools: Optional[list] = None,
) -> Iterable:
    """Yield content deltas, then a final dict with usage/cost/model/tool_calls.

    Mesh API is OpenAI-compatible:
      - `cost` arrives in the final SSE chunk alongside `usage`.
      - `tool_calls` arrive as deltas indexed by position; we accumulate them
        and surface as the meta dict's `tool_calls` field.

    Args:
        messages: Chat history in OpenAI message format.
        cfg: Config dict; reads `base_url`, `api_key`, `model`, optional `route`.
        tools: Optional OpenAI tool specs; when given, `tool_choice` is "auto".
    """
    url = f"{cfg['base_url']}/chat/completions"
    headers = {
        "Authorization": f"Bearer {cfg['api_key']}",
        "Content-Type": "application/json",
    }
    payload: dict = {
        "model": cfg["model"],
        "messages": messages,
        "stream": True,
    }
    if cfg.get("route"):
        payload["route"] = cfg["route"]
    if tools:
        payload["tools"] = tools
        payload["tool_choice"] = "auto"

    last_meta: dict = {}
    last_model: str = ""
    tool_calls_accum: dict = {}  # index -> {id, name, arguments}

    with httpx.stream("POST", url, json=payload, headers=headers, timeout=120) as r:
        r.raise_for_status()
        for line in r.iter_lines():
            # SSE framing: only `data: ...` lines carry chunks.
            if not line or not line.startswith("data: "):
                continue
            data = line[6:]
            if data.strip() == "[DONE]":
                break
            try:
                obj = json.loads(data)
            except json.JSONDecodeError:
                continue  # skip malformed / partial chunks

            if obj.get("model"):
                last_model = obj["model"]

            choices = obj.get("choices") or []
            if choices:
                delta = choices[0].get("delta", {})

                content = delta.get("content")
                if content:
                    yield content

                # Tool-call deltas: id/name arrive once per call, the JSON
                # `arguments` string streams in pieces and is concatenated.
                for tc in delta.get("tool_calls") or []:
                    idx = tc.get("index", 0)
                    bucket = tool_calls_accum.setdefault(
                        idx, {"id": "", "name": "", "arguments": ""}
                    )
                    if tc.get("id"):
                        bucket["id"] = tc["id"]
                    fn = tc.get("function") or {}
                    if fn.get("name"):
                        bucket["name"] = fn["name"]
                    if fn.get("arguments"):
                        bucket["arguments"] += fn["arguments"]

            usage = obj.get("usage")
            cost = obj.get("cost")
            if usage or cost:
                last_meta = {"usage": usage, "cost": cost}

    if last_model:
        last_meta["model"] = last_model
    if tool_calls_accum:
        # Preserve the positional order of the calls.
        last_meta["tool_calls"] = [tool_calls_accum[i] for i in sorted(tool_calls_accum)]
    if last_meta:
        yield last_meta
@@ -4,7 +4,9 @@ from pathlib import Path
4
4
  from rich.panel import Panel
5
5
 
6
6
  from .config import save_config
7
+ from .permissions import LABELS, Mode, from_str
7
8
  from .render import console, fmt_usd
9
+ from .tools import build_system_prompt
8
10
 
9
11
  ROUTES = {"cheapest", "fastest", "balanced"}
10
12
 
@@ -19,7 +21,7 @@ def handle_command(cmd: str, state: dict) -> bool:
19
21
  return False
20
22
 
21
23
  if name == "/clear":
22
- state["messages"] = [{"role": "system", "content": state["cfg"]["system"]}]
24
+ state["messages"] = [{"role": "system", "content": build_system_prompt(state["cfg"])}]
23
25
  state["session_cost"] = 0.0
24
26
  console.print("[dim]Conversation cleared.[/dim]")
25
27
 
@@ -56,7 +58,7 @@ def handle_command(cmd: str, state: dict) -> bool:
56
58
  elif name == "/system":
57
59
  if arg:
58
60
  state["cfg"]["system"] = arg
59
- state["messages"] = [{"role": "system", "content": arg}]
61
+ state["messages"] = [{"role": "system", "content": build_system_prompt(state["cfg"])}]
60
62
  console.print("[dim]System prompt updated and conversation reset.[/dim]")
61
63
  else:
62
64
  console.print(f"[dim]{state['cfg']['system']}[/dim]")
@@ -64,12 +66,24 @@ def handle_command(cmd: str, state: dict) -> bool:
64
66
  elif name == "/cost":
65
67
  console.print(f"[dim]Session spend: {fmt_usd(state.get('session_cost', 0))}[/dim]")
66
68
 
69
+ elif name == "/mode":
70
+ if not arg:
71
+ cur = state.get("mode", Mode.ASK)
72
+ console.print(f"[dim]Current mode: {LABELS[cur]} ({cur.value})[/dim]")
73
+ else:
74
+ try:
75
+ state["mode"] = from_str(arg)
76
+ console.print(f"[dim]Mode set to {LABELS[state['mode']]}[/dim]")
77
+ except ValueError as e:
78
+ console.print(f"[red]{e}[/red]")
79
+
67
80
  elif name == "/help":
68
81
  console.print(Panel.fit(
69
82
  "/exit end session\n"
70
83
  "/clear reset conversation\n"
71
84
  "/model <name> switch model (e.g. anthropic/claude-sonnet-4.5)\n"
72
85
  "/route <mode> cheapest|fastest|balanced|default\n"
86
+ "/mode <perm> ask|bypass|none (or shift+tab to cycle)\n"
73
87
  "/file <path> add file to context\n"
74
88
  "/system <txt> set system prompt\n"
75
89
  "/cost show session spend\n"
@@ -0,0 +1,35 @@
1
+ """Permission modes for tool calls — cycle with Shift+Tab."""
2
+ from enum import Enum
3
+
4
+
5
class Mode(Enum):
    """Tool-permission level for the session; cycled in the UI with Shift+Tab."""

    ASK = "ask"  # prompt for each tool call (default — safest)
    BYPASS = "bypass"  # auto-execute without asking (fast — like `--yolo`)
    NONE = "none"  # don't expose tools to the model at all (read-only chat)
9
+
10
+
11
# Cycle order used by next_mode() when the user presses Shift+Tab.
ORDER = [Mode.ASK, Mode.BYPASS, Mode.NONE]

# Short labels shown for each mode in the bottom toolbar.
LABELS = {
    Mode.ASK: "approve each",
    Mode.BYPASS: "bypass perms",
    Mode.NONE: "no access",
}

# One-line explanations displayed alongside the label.
HINTS = {
    Mode.ASK: "model can request file/shell ops; you confirm each one",
    Mode.BYPASS: "model executes file/shell ops automatically — be careful",
    Mode.NONE: "chat only — model has no filesystem or shell access",
}
24
+
25
+
26
def next_mode(m: Mode) -> Mode:
    """Return the mode that follows *m* in the Shift+Tab cycle (wraps around)."""
    position = ORDER.index(m)
    return ORDER[(position + 1) % len(ORDER)]
28
+
29
+
30
def from_str(s: str) -> Mode:
    """Parse a mode name (case-insensitive, surrounding whitespace ignored).

    Raises:
        ValueError: if *s* names no known mode; the message lists valid ones.
    """
    s = s.strip().lower()
    matches = [m for m in Mode if m.value == s]
    if matches:
        return matches[0]
    raise ValueError(f"unknown mode: {s} (try {', '.join(m.value for m in Mode)})")
@@ -0,0 +1,129 @@
1
+ """Tool definitions sent to the model + local executors."""
2
+ import subprocess
3
+ from pathlib import Path
4
+
5
+
6
def build_system_prompt(cfg: dict) -> str:
    """Append working-dir + tool guidance to the user's base system prompt.

    Naming tools in prose (read_file/write_file/run_bash) makes Anthropic
    models drop into XML tool-use mode and emit `<function_calls>` as
    text — keep this section deliberately tool-name-free.
    """
    guidance = (
        "Resolve any relative path the user gives against this working "
        "directory. When you create or edit files without an explicit "
        "absolute path, place them inside this working directory. Use "
        "the available tools to inspect and modify the filesystem and "
        "run shell commands — do not ask the user to run commands."
    )
    base = cfg.get("system") or ""
    return f"{base}\n\nWorking directory: {Path.cwd()}\n{guidance}"
24
+
25
# OpenAI-compatible tool spec — Mesh API forwards these to the underlying provider.
# Each entry's `name` must match a branch in execute() below.
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "read_file",
            "description": "Read a file from the user's filesystem and return its contents.",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "Path to the file (absolute, or relative to the cwd)",
                    }
                },
                "required": ["path"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "write_file",
            "description": "Create or overwrite a file with the given content. Parent directories are created if missing.",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {"type": "string", "description": "File path to write"},
                    "content": {"type": "string", "description": "Full file contents"},
                },
                "required": ["path", "content"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "run_bash",
            "description": "Run a shell command (zsh/bash) and return combined stdout+stderr plus exit code. Times out at 60s.",
            "parameters": {
                "type": "object",
                "properties": {
                    "command": {"type": "string", "description": "The shell command to run"}
                },
                "required": ["command"],
            },
        },
    },
]

# Cap (in characters) on tool output returned to the model; execute() truncates past this.
OUTPUT_LIMIT = 8000
76
+
77
+
78
def execute(name: str, arguments: dict) -> str:
    """Run a tool locally and return a string result for the model.

    Errors are reported as "Error: ..." strings rather than raised, so the
    model always receives a tool result it can react to.
    """
    if name == "read_file":
        try:
            target = Path(arguments["path"]).expanduser()
            return target.read_text()
        except Exception as e:
            return f"Error: {e}"

    if name == "write_file":
        try:
            target = Path(arguments["path"]).expanduser()
            target.parent.mkdir(parents=True, exist_ok=True)
            body = arguments["content"]
            target.write_text(body)
            return f"OK — wrote {len(body)} chars to {target}"
        except Exception as e:
            return f"Error: {e}"

    if name == "run_bash":
        try:
            proc = subprocess.run(
                arguments["command"],
                shell=True,
                capture_output=True,
                text=True,
                timeout=60,
                cwd=str(Path.cwd()),
            )
            combined = (proc.stdout or "") + (proc.stderr or "")
            suffix = "...[truncated]" if len(combined) > OUTPUT_LIMIT else ""
            return f"{combined[:OUTPUT_LIMIT]}{suffix}\n[exit {proc.returncode}]"
        except subprocess.TimeoutExpired:
            return "Error: command timed out after 60s"
        except Exception as e:
            return f"Error: {e}"

    return f"Error: unknown tool `{name}`"
115
+
116
+
117
def summarize_call(name: str, arguments: dict) -> str:
    """One-line summary used in the approval prompt and progress log."""
    path = arguments.get("path")
    if name == "read_file":
        return f"read_file: {path}"
    if name == "write_file":
        size = len(arguments.get("content", ""))
        return f"write_file: {path} ({size} chars)"
    if name == "run_bash":
        command = arguments.get("command", "")
        # Keep the log line readable for very long commands.
        if len(command) > 200:
            command = command[:200] + "…"
        return f"run_bash: {command}"
    # Unknown tool: fall back to a generic repr of the call.
    return f"{name}({arguments})"
@@ -1 +0,0 @@
1
- __version__ = "0.2.1"
@@ -1,146 +0,0 @@
1
- """meshapi — terminal chat REPL for Mesh API."""
2
- import argparse
3
- import sys
4
- from pathlib import Path
5
-
6
- import httpx
7
- from prompt_toolkit import PromptSession
8
- from prompt_toolkit.history import FileHistory
9
- from prompt_toolkit.styles import Style
10
- from rich.text import Text
11
-
12
- from . import __version__
13
- from .client import stream_chat
14
- from .commands import handle_command
15
- from .config import CONFIG_FILE, HISTORY_FILE, load_config
16
- from .render import BRAND, BRAND_BG, BRAND_BG_FG, BRAND_DIM, console, fmt_usd, pretty_cwd, render_stream
17
-
18
- # ANSI Shadow figlet font
19
- MESH_LOGO_LINES = [
20
- "███╗ ███╗███████╗███████╗██╗ ██╗",
21
- "████╗ ████║██╔════╝██╔════╝██║ ██║",
22
- "██╔████╔██║█████╗ ███████╗███████║",
23
- "██║╚██╔╝██║██╔══╝ ╚════██║██╔══██║",
24
- "██║ ╚═╝ ██║███████╗███████║██║ ██║",
25
- "╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝",
26
- ]
27
- LOGO_WIDTH = 35 # chars per line
28
- LOGO_GUTTER = 3 # spaces between logo and info column
29
-
30
-
31
- def parse_args(argv=None) -> argparse.Namespace:
32
- p = argparse.ArgumentParser(prog="meshapi", description="Terminal chat for Mesh API")
33
- p.add_argument("--version", action="version", version=f"meshapi {__version__}")
34
- p.add_argument("--model", help="Override model for this session (e.g. openai/gpt-4o-mini)")
35
- p.add_argument("--route", choices=["cheapest", "fastest", "balanced"], help="Routing mode")
36
- return p.parse_args(argv)
37
-
38
-
39
- def main() -> None:
40
- args = parse_args()
41
- cfg = load_config()
42
- if args.model:
43
- cfg["model"] = args.model
44
- if args.route:
45
- cfg["route"] = args.route
46
-
47
- if not cfg["api_key"]:
48
- console.print(
49
- "[red]No API key found. Set MESHAPI_API_KEY env var or edit "
50
- f"{CONFIG_FILE}[/red]"
51
- )
52
- sys.exit(1)
53
-
54
- state = {
55
- "cfg": cfg,
56
- "messages": [{"role": "system", "content": cfg["system"]}],
57
- "session_cost": 0.0,
58
- }
59
-
60
- session = PromptSession(history=FileHistory(str(HISTORY_FILE)))
61
-
62
- info_per_line: list = [
63
- None,
64
- None,
65
- Text.from_markup(f"[bold {BRAND}]✦ meshapi {__version__}[/bold {BRAND}]"),
66
- Text.from_markup(f"cwd: [{BRAND}]{pretty_cwd()}[/{BRAND}]"),
67
- Text.from_markup(f"model: [bold {BRAND}]{cfg['model']}[/bold {BRAND}]"),
68
- Text.from_markup(f"route: [{BRAND}]{cfg.get('route') or 'default'}[/{BRAND}]"),
69
- ]
70
-
71
- console.print() # top gap so banner doesn't crowd the shell prompt
72
- for i, logo_line in enumerate(MESH_LOGO_LINES):
73
- line = Text()
74
- line.append(logo_line, style=BRAND)
75
- info = info_per_line[i] if i < len(info_per_line) else None
76
- if info is not None:
77
- pad = max(0, LOGO_WIDTH - len(logo_line))
78
- line.append(" " * (pad + LOGO_GUTTER))
79
- line.append(info)
80
- console.print(line)
81
- console.print()
82
- console.print("type /help for commands, /exit to quit", style=BRAND_DIM)
83
- console.print() # bottom gap before the first prompt rule
84
-
85
- while True:
86
- try:
87
- console.rule(
88
- title=f"[{BRAND_DIM}]{Path.cwd().name}[/{BRAND_DIM}]",
89
- align="right",
90
- style=BRAND_DIM,
91
- characters="─",
92
- )
93
- user_input = session.prompt(
94
- "› ",
95
- style=Style.from_dict({
96
- "prompt": f"bold fg:{BRAND} bg:{BRAND_BG}",
97
- "": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
98
- }),
99
- )
100
- console.rule(style=BRAND_DIM, characters="─")
101
- except (KeyboardInterrupt, EOFError):
102
- console.print("\n[dim]bye[/dim]")
103
- break
104
-
105
- if not user_input.strip():
106
- continue
107
- if user_input.startswith("/"):
108
- if not handle_command(user_input, state):
109
- break
110
- continue
111
-
112
- state["messages"].append({"role": "user", "content": user_input})
113
- console.print()
114
- try:
115
- reply, meta = render_stream(stream_chat(state["messages"], state["cfg"]))
116
- state["messages"].append({"role": "assistant", "content": reply})
117
-
118
- cost = meta.get("cost")
119
- if cost is not None:
120
- try:
121
- state["session_cost"] += float(cost)
122
- except (TypeError, ValueError):
123
- pass
124
- usage = meta.get("usage") or {}
125
- model = meta.get("model") or state["cfg"]["model"]
126
- elapsed = meta.get("elapsed", 0.0)
127
- prompt_t = usage.get("prompt_tokens", "?")
128
- completion_t = usage.get("completion_tokens", "?")
129
- cost_str = fmt_usd(cost) if cost is not None else "—"
130
- console.rule(style=BRAND_DIM, characters="─")
131
- console.print(
132
- f"[dim]{model} • {prompt_t}→{completion_t} tok • {cost_str} • "
133
- f"session {fmt_usd(state['session_cost'])} • {elapsed:.1f}s[/dim]"
134
- )
135
- except httpx.HTTPStatusError as e:
136
- console.rule(style="dim red", characters="─")
137
- console.print(f"[red]API error {e.response.status_code}: {e.response.text}[/red]")
138
- state["messages"].pop()
139
- except Exception as e:
140
- console.rule(style="dim red", characters="─")
141
- console.print(f"[red]Error: {e}[/red]")
142
- state["messages"].pop()
143
-
144
-
145
- if __name__ == "__main__":
146
- main()
@@ -1,58 +0,0 @@
1
- """Streaming OpenAI-compatible HTTP client for Mesh API."""
2
- import json
3
- from typing import Iterable
4
-
5
- import httpx
6
-
7
-
8
- def stream_chat(messages: list, cfg: dict) -> Iterable:
9
- """Yield content deltas, then a final {'usage':..., 'cost':...} dict.
10
-
11
- Mesh API is OpenAI-compatible but adds `cost` to the final SSE chunk.
12
- """
13
- url = f"{cfg['base_url']}/chat/completions"
14
- headers = {
15
- "Authorization": f"Bearer {cfg['api_key']}",
16
- "Content-Type": "application/json",
17
- }
18
- payload: dict = {
19
- "model": cfg["model"],
20
- "messages": messages,
21
- "stream": True,
22
- }
23
- if cfg.get("route"):
24
- payload["route"] = cfg["route"]
25
-
26
- last_meta: dict = {}
27
- last_model: str = ""
28
- with httpx.stream("POST", url, json=payload, headers=headers, timeout=120) as r:
29
- r.raise_for_status()
30
- for line in r.iter_lines():
31
- if not line or not line.startswith("data: "):
32
- continue
33
- data = line[6:]
34
- if data.strip() == "[DONE]":
35
- break
36
- try:
37
- obj = json.loads(data)
38
- except json.JSONDecodeError:
39
- continue
40
-
41
- if obj.get("model"):
42
- last_model = obj["model"]
43
-
44
- choices = obj.get("choices") or []
45
- if choices:
46
- delta = choices[0].get("delta", {}).get("content")
47
- if delta:
48
- yield delta
49
-
50
- usage = obj.get("usage")
51
- cost = obj.get("cost")
52
- if usage or cost:
53
- last_meta = {"usage": usage, "cost": cost}
54
-
55
- if last_model:
56
- last_meta["model"] = last_model
57
- if last_meta:
58
- yield last_meta
File without changes
File without changes
File without changes