meshapi-code 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
meshapi/__init__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.2.0"
1
+ __version__ = "0.3.0"
meshapi/cli.py CHANGED
@@ -1,29 +1,129 @@
1
1
  """meshapi — terminal chat REPL for Mesh API."""
2
2
  import argparse
3
+ import json
3
4
  import sys
4
5
  from pathlib import Path
5
6
 
6
7
  import httpx
7
8
  from prompt_toolkit import PromptSession
9
+ from prompt_toolkit.formatted_text import FormattedText
8
10
  from prompt_toolkit.history import FileHistory
11
+ from prompt_toolkit.key_binding import KeyBindings
9
12
  from prompt_toolkit.styles import Style
10
- from rich.panel import Panel
13
+ from rich.text import Text
11
14
 
12
15
  from . import __version__
13
16
  from .client import stream_chat
14
17
  from .commands import handle_command
15
18
  from .config import CONFIG_FILE, HISTORY_FILE, load_config
16
- from .render import BRAND, BRAND_BG, BRAND_DIM, console, fmt_usd, pretty_cwd, render_stream
19
+ from .permissions import HINTS, LABELS, Mode, from_str, next_mode
20
+ from .render import (
21
+ BRAND, BRAND_BG, BRAND_BG_FG, BRAND_DIM, console, fmt_usd, pretty_cwd, render_stream,
22
+ )
23
+ from .tools import TOOLS, build_system_prompt, execute as exec_tool, summarize_call
17
24
 
25
+ # ANSI Shadow figlet font
26
+ MESH_LOGO_LINES = [
27
+ "███╗ ███╗███████╗███████╗██╗ ██╗",
28
+ "████╗ ████║██╔════╝██╔════╝██║ ██║",
29
+ "██╔████╔██║█████╗ ███████╗███████║",
30
+ "██║╚██╔╝██║██╔══╝ ╚════██║██╔══██║",
31
+ "██║ ╚═╝ ██║███████╗███████║██║ ██║",
32
+ "╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝",
33
+ ]
34
+ LOGO_WIDTH = 35 # chars per line
35
+ LOGO_GUTTER = 3 # spaces between logo and info column
18
36
 
19
37
  def parse_args(argv=None) -> argparse.Namespace:
20
38
  p = argparse.ArgumentParser(prog="meshapi", description="Terminal chat for Mesh API")
21
39
  p.add_argument("--version", action="version", version=f"meshapi {__version__}")
22
40
  p.add_argument("--model", help="Override model for this session (e.g. openai/gpt-4o-mini)")
23
41
  p.add_argument("--route", choices=["cheapest", "fastest", "balanced"], help="Routing mode")
42
+ p.add_argument(
43
+ "--mode",
44
+ choices=[m.value for m in Mode],
45
+ default="ask",
46
+ help="Tool permission mode (default: ask). Cycle in-session with shift+tab.",
47
+ )
24
48
  return p.parse_args(argv)
25
49
 
26
50
 
51
+ def render_banner(cfg: dict) -> None:
52
+ info_per_line: list = [
53
+ None,
54
+ None,
55
+ Text.from_markup(f"[bold {BRAND}]✦ meshapi {__version__}[/bold {BRAND}]"),
56
+ Text.from_markup(f"cwd: [{BRAND}]{pretty_cwd()}[/{BRAND}]"),
57
+ Text.from_markup(f"model: [bold {BRAND}]{cfg['model']}[/bold {BRAND}]"),
58
+ Text.from_markup(f"route: [{BRAND}]{cfg.get('route') or 'default'}[/{BRAND}]"),
59
+ ]
60
+ console.print()
61
+ for i, logo_line in enumerate(MESH_LOGO_LINES):
62
+ line = Text()
63
+ line.append(logo_line, style=BRAND)
64
+ info = info_per_line[i] if i < len(info_per_line) else None
65
+ if info is not None:
66
+ pad = max(0, LOGO_WIDTH - len(logo_line))
67
+ line.append(" " * (pad + LOGO_GUTTER))
68
+ line.append(info)
69
+ console.print(line)
70
+ console.print()
71
+ console.print("type /help for commands, /exit to quit", style=BRAND_DIM)
72
+ console.print()
73
+
74
+
75
+ def confirm_tool_call(name: str, args: dict) -> bool:
76
+ """ASK-mode prompt for a single tool call. Returns True if approved."""
77
+ summary = summarize_call(name, args)
78
+ console.print(f"[bold {BRAND}]⚙ approve tool call?[/bold {BRAND}] [dim]{summary}[/dim]")
79
+ if name == "write_file":
80
+ preview = (args.get("content") or "")[:300]
81
+ console.print(f"[dim]──[/dim]\n{preview}{'…' if len(args.get('content') or '') > 300 else ''}\n[dim]──[/dim]")
82
+ elif name == "run_bash":
83
+ console.print(f"[dim]$ {args.get('command')}[/dim]")
84
+ try:
85
+ ans = console.input("[bold]y[/bold] (yes) / [bold]n[/bold] (no) › ").strip().lower()
86
+ except (KeyboardInterrupt, EOFError):
87
+ return False
88
+ return ans in ("y", "yes")
89
+
90
+
91
+ def handle_tool_calls(tool_calls: list, mode: Mode, state: dict) -> None:
92
+ """Append assistant tool_calls message + tool result messages to state."""
93
+ state["messages"].append({
94
+ "role": "assistant",
95
+ "content": None,
96
+ "tool_calls": [
97
+ {
98
+ "id": tc["id"],
99
+ "type": "function",
100
+ "function": {"name": tc["name"], "arguments": tc["arguments"]},
101
+ }
102
+ for tc in tool_calls
103
+ ],
104
+ })
105
+ for tc in tool_calls:
106
+ try:
107
+ args = json.loads(tc["arguments"]) if tc["arguments"] else {}
108
+ except json.JSONDecodeError:
109
+ args = {}
110
+ approved = mode == Mode.BYPASS or confirm_tool_call(tc["name"], args)
111
+ if approved:
112
+ console.print(f"[{BRAND_DIM}]⚙ {summarize_call(tc['name'], args)}[/{BRAND_DIM}]")
113
+ result = exec_tool(tc["name"], args)
114
+ preview = result[:200].replace("\n", " ")
115
+ tail = "…" if len(result) > 200 else ""
116
+ console.print(f"[dim] → {preview}{tail}[/dim]")
117
+ else:
118
+ result = "User denied this tool call."
119
+ console.print(f"[dim] → denied by user[/dim]")
120
+ state["messages"].append({
121
+ "role": "tool",
122
+ "tool_call_id": tc["id"],
123
+ "content": result,
124
+ })
125
+
126
+
27
127
  def main() -> None:
28
128
  args = parse_args()
29
129
  cfg = load_config()
@@ -41,19 +141,35 @@ def main() -> None:
41
141
 
42
142
  state = {
43
143
  "cfg": cfg,
44
- "messages": [{"role": "system", "content": cfg["system"]}],
144
+ "messages": [{"role": "system", "content": build_system_prompt(cfg)}],
45
145
  "session_cost": 0.0,
146
+ "mode": from_str(args.mode),
46
147
  }
47
148
 
48
- session = PromptSession(history=FileHistory(str(HISTORY_FILE)))
49
- console.print(Panel.fit(
50
- f"meshapi {__version__}\n"
51
- f"cwd: [{BRAND}]{pretty_cwd()}[/{BRAND}]\n"
52
- f"model: [bold {BRAND}]{cfg['model']}[/bold {BRAND}]\n"
53
- f"route: [{BRAND}]{cfg.get('route') or 'default'}[/{BRAND}]\n"
54
- "type /help for commands, /exit to quit",
55
- border_style=BRAND,
56
- ))
149
+ kb = KeyBindings()
150
+
151
+ @kb.add("s-tab") # Shift+Tab
152
+ def _(event):
153
+ state["mode"] = next_mode(state["mode"])
154
+ event.app.invalidate()
155
+
156
+ def bottom_toolbar():
157
+ m = state["mode"]
158
+ color = "ansired" if m == Mode.BYPASS else "ansiyellow" if m == Mode.NONE else "ansigreen"
159
+ return FormattedText([
160
+ ("", " mode: "),
161
+ (f"bold {color}", LABELS[m]),
162
+ ("", f" {HINTS[m]} "),
163
+ ("ansibrightblack", "shift+tab to cycle"),
164
+ ])
165
+
166
+ session = PromptSession(
167
+ history=FileHistory(str(HISTORY_FILE)),
168
+ key_bindings=kb,
169
+ bottom_toolbar=bottom_toolbar,
170
+ )
171
+
172
+ render_banner(cfg)
57
173
 
58
174
  while True:
59
175
  try:
@@ -67,7 +183,8 @@ def main() -> None:
67
183
  "› ",
68
184
  style=Style.from_dict({
69
185
  "prompt": f"bold fg:{BRAND} bg:{BRAND_BG}",
70
- "": f"bg:{BRAND_BG}",
186
+ "": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
187
+ "bottom-toolbar": f"fg:{BRAND_BG_FG} bg:{BRAND_BG}",
71
188
  }),
72
189
  )
73
190
  console.rule(style=BRAND_DIM, characters="─")
@@ -84,26 +201,44 @@ def main() -> None:
84
201
 
85
202
  state["messages"].append({"role": "user", "content": user_input})
86
203
  console.print()
204
+
205
+ # Tool-calling loop: keep streaming until model returns text without tool_calls.
206
+ agg_cost = 0.0
207
+ last_model = state["cfg"]["model"]
208
+ last_usage: dict = {}
209
+ last_elapsed = 0.0
87
210
  try:
88
- reply, meta = render_stream(stream_chat(state["messages"], state["cfg"]))
89
- state["messages"].append({"role": "assistant", "content": reply})
90
-
91
- cost = meta.get("cost")
92
- if cost is not None:
93
- try:
94
- state["session_cost"] += float(cost)
95
- except (TypeError, ValueError):
96
- pass
97
- usage = meta.get("usage") or {}
98
- model = meta.get("model") or state["cfg"]["model"]
99
- elapsed = meta.get("elapsed", 0.0)
100
- prompt_t = usage.get("prompt_tokens", "?")
101
- completion_t = usage.get("completion_tokens", "?")
102
- cost_str = fmt_usd(cost) if cost is not None else "—"
211
+ for _hop in range(8): # safety cap
212
+ tools_arg = TOOLS if state["mode"] != Mode.NONE else None
213
+ reply, meta = render_stream(
214
+ stream_chat(state["messages"], state["cfg"], tools=tools_arg)
215
+ )
216
+ cost = meta.get("cost")
217
+ if cost is not None:
218
+ try:
219
+ agg_cost += float(cost)
220
+ except (TypeError, ValueError):
221
+ pass
222
+ last_model = meta.get("model") or last_model
223
+ last_usage = meta.get("usage") or last_usage
224
+ last_elapsed += meta.get("elapsed", 0.0)
225
+
226
+ tool_calls = meta.get("tool_calls") or []
227
+ if not tool_calls:
228
+ state["messages"].append({"role": "assistant", "content": reply})
229
+ break
230
+
231
+ # Model called tools — execute and loop.
232
+ handle_tool_calls(tool_calls, state["mode"], state)
233
+
234
+ state["session_cost"] += agg_cost
235
+ prompt_t = last_usage.get("prompt_tokens", "?")
236
+ completion_t = last_usage.get("completion_tokens", "?")
237
+ cost_str = fmt_usd(agg_cost) if agg_cost else "—"
103
238
  console.rule(style=BRAND_DIM, characters="─")
104
239
  console.print(
105
- f"[dim]{model} • {prompt_t}→{completion_t} tok • {cost_str} • "
106
- f"session {fmt_usd(state['session_cost'])} • {elapsed:.1f}s[/dim]"
240
+ f"[dim]{last_model} • {prompt_t}→{completion_t} tok • {cost_str} • "
241
+ f"session {fmt_usd(state['session_cost'])} • {last_elapsed:.1f}s[/dim]"
107
242
  )
108
243
  except httpx.HTTPStatusError as e:
109
244
  console.rule(style="dim red", characters="─")
meshapi/client.py CHANGED
@@ -1,14 +1,21 @@
1
1
  """Streaming OpenAI-compatible HTTP client for Mesh API."""
2
2
  import json
3
- from typing import Iterable
3
+ from typing import Iterable, Optional
4
4
 
5
5
  import httpx
6
6
 
7
7
 
8
- def stream_chat(messages: list, cfg: dict) -> Iterable:
9
- """Yield content deltas, then a final {'usage':..., 'cost':...} dict.
8
+ def stream_chat(
9
+ messages: list,
10
+ cfg: dict,
11
+ tools: Optional[list] = None,
12
+ ) -> Iterable:
13
+ """Yield content deltas, then a final dict with usage/cost/model/tool_calls.
10
14
 
11
- Mesh API is OpenAI-compatible but adds `cost` to the final SSE chunk.
15
+ Mesh API is OpenAI-compatible:
16
+ - `cost` arrives in the final SSE chunk alongside `usage`.
17
+ - `tool_calls` arrive as deltas indexed by position; we accumulate them
18
+ and surface as the meta dict's `tool_calls` field.
12
19
  """
13
20
  url = f"{cfg['base_url']}/chat/completions"
14
21
  headers = {
@@ -22,9 +29,14 @@ def stream_chat(messages: list, cfg: dict) -> Iterable:
22
29
  }
23
30
  if cfg.get("route"):
24
31
  payload["route"] = cfg["route"]
32
+ if tools:
33
+ payload["tools"] = tools
34
+ payload["tool_choice"] = "auto"
25
35
 
26
36
  last_meta: dict = {}
27
37
  last_model: str = ""
38
+ tool_calls_accum: dict = {} # index -> {id, name, arguments}
39
+
28
40
  with httpx.stream("POST", url, json=payload, headers=headers, timeout=120) as r:
29
41
  r.raise_for_status()
30
42
  for line in r.iter_lines():
@@ -43,9 +55,24 @@ def stream_chat(messages: list, cfg: dict) -> Iterable:
43
55
 
44
56
  choices = obj.get("choices") or []
45
57
  if choices:
46
- delta = choices[0].get("delta", {}).get("content")
47
- if delta:
48
- yield delta
58
+ delta = choices[0].get("delta", {})
59
+
60
+ content = delta.get("content")
61
+ if content:
62
+ yield content
63
+
64
+ for tc in delta.get("tool_calls") or []:
65
+ idx = tc.get("index", 0)
66
+ bucket = tool_calls_accum.setdefault(
67
+ idx, {"id": "", "name": "", "arguments": ""}
68
+ )
69
+ if tc.get("id"):
70
+ bucket["id"] = tc["id"]
71
+ fn = tc.get("function") or {}
72
+ if fn.get("name"):
73
+ bucket["name"] = fn["name"]
74
+ if fn.get("arguments"):
75
+ bucket["arguments"] += fn["arguments"]
49
76
 
50
77
  usage = obj.get("usage")
51
78
  cost = obj.get("cost")
@@ -54,5 +81,7 @@ def stream_chat(messages: list, cfg: dict) -> Iterable:
54
81
 
55
82
  if last_model:
56
83
  last_meta["model"] = last_model
84
+ if tool_calls_accum:
85
+ last_meta["tool_calls"] = [tool_calls_accum[i] for i in sorted(tool_calls_accum)]
57
86
  if last_meta:
58
87
  yield last_meta
meshapi/commands.py CHANGED
@@ -4,7 +4,9 @@ from pathlib import Path
4
4
  from rich.panel import Panel
5
5
 
6
6
  from .config import save_config
7
+ from .permissions import LABELS, Mode, from_str
7
8
  from .render import console, fmt_usd
9
+ from .tools import build_system_prompt
8
10
 
9
11
  ROUTES = {"cheapest", "fastest", "balanced"}
10
12
 
@@ -19,7 +21,7 @@ def handle_command(cmd: str, state: dict) -> bool:
19
21
  return False
20
22
 
21
23
  if name == "/clear":
22
- state["messages"] = [{"role": "system", "content": state["cfg"]["system"]}]
24
+ state["messages"] = [{"role": "system", "content": build_system_prompt(state["cfg"])}]
23
25
  state["session_cost"] = 0.0
24
26
  console.print("[dim]Conversation cleared.[/dim]")
25
27
 
@@ -56,7 +58,7 @@ def handle_command(cmd: str, state: dict) -> bool:
56
58
  elif name == "/system":
57
59
  if arg:
58
60
  state["cfg"]["system"] = arg
59
- state["messages"] = [{"role": "system", "content": arg}]
61
+ state["messages"] = [{"role": "system", "content": build_system_prompt(state["cfg"])}]
60
62
  console.print("[dim]System prompt updated and conversation reset.[/dim]")
61
63
  else:
62
64
  console.print(f"[dim]{state['cfg']['system']}[/dim]")
@@ -64,12 +66,24 @@ def handle_command(cmd: str, state: dict) -> bool:
64
66
  elif name == "/cost":
65
67
  console.print(f"[dim]Session spend: {fmt_usd(state.get('session_cost', 0))}[/dim]")
66
68
 
69
+ elif name == "/mode":
70
+ if not arg:
71
+ cur = state.get("mode", Mode.ASK)
72
+ console.print(f"[dim]Current mode: {LABELS[cur]} ({cur.value})[/dim]")
73
+ else:
74
+ try:
75
+ state["mode"] = from_str(arg)
76
+ console.print(f"[dim]Mode set to {LABELS[state['mode']]}[/dim]")
77
+ except ValueError as e:
78
+ console.print(f"[red]{e}[/red]")
79
+
67
80
  elif name == "/help":
68
81
  console.print(Panel.fit(
69
82
  "/exit end session\n"
70
83
  "/clear reset conversation\n"
71
84
  "/model <name> switch model (e.g. anthropic/claude-sonnet-4.5)\n"
72
85
  "/route <mode> cheapest|fastest|balanced|default\n"
86
+ "/mode <perm> ask|bypass|none (or shift+tab to cycle)\n"
73
87
  "/file <path> add file to context\n"
74
88
  "/system <txt> set system prompt\n"
75
89
  "/cost show session spend\n"
meshapi/permissions.py ADDED
@@ -0,0 +1,35 @@
1
+ """Permission modes for tool calls — cycle with Shift+Tab."""
2
+ from enum import Enum
3
+
4
+
5
+ class Mode(Enum):
6
+ ASK = "ask" # prompt for each tool call (default — safest)
7
+ BYPASS = "bypass" # auto-execute without asking (fast — like `--yolo`)
8
+ NONE = "none" # don't expose tools to the model at all (read-only chat)
9
+
10
+
11
+ ORDER = [Mode.ASK, Mode.BYPASS, Mode.NONE]
12
+
13
+ LABELS = {
14
+ Mode.ASK: "approve each",
15
+ Mode.BYPASS: "bypass perms",
16
+ Mode.NONE: "no access",
17
+ }
18
+
19
+ HINTS = {
20
+ Mode.ASK: "model can request file/shell ops; you confirm each one",
21
+ Mode.BYPASS: "model executes file/shell ops automatically — be careful",
22
+ Mode.NONE: "chat only — model has no filesystem or shell access",
23
+ }
24
+
25
+
26
+ def next_mode(m: Mode) -> Mode:
27
+ return ORDER[(ORDER.index(m) + 1) % len(ORDER)]
28
+
29
+
30
+ def from_str(s: str) -> Mode:
31
+ s = s.strip().lower()
32
+ for m in Mode:
33
+ if m.value == s:
34
+ return m
35
+ raise ValueError(f"unknown mode: {s} (try {', '.join(m.value for m in Mode)})")
meshapi/render.py CHANGED
@@ -29,13 +29,16 @@ def _detect_theme() -> str:
29
29
 
30
30
 
31
31
  # Brand palette — Mesh API purple, theme-adaptive
32
- BRAND = "#6f5af5" # foreground brand, same on both themes
33
32
  if _detect_theme() == "dark":
34
- BRAND_DIM = "#9d92e8" # lighter dim — visible on dark bg
35
- BRAND_BG = "#2d2454" # darker, brand-tinted highlight against ~#000-#1e1e1e
33
+ BRAND = "#8b78f7" # bumped lighter on dark — official #6f5af5 reads dim on dark wine/black
34
+ BRAND_DIM = "#aea3f0" # lighter dim clearly visible on dark backgrounds
35
+ BRAND_BG = "#372d73" # mid-dark purple — clearly visible without being loud
36
+ BRAND_BG_FG = "#f5f0ff" # near-white with slight purple tint for input text
36
37
  else:
38
+ BRAND = "#6f5af5" # official brand color — strong contrast on white
37
39
  BRAND_DIM = "#5a4ec4" # darker dim — visible on light bg
38
40
  BRAND_BG = "#ebe4fc" # pale lavender highlight against white
41
+ BRAND_BG_FG = "#2c2540" # near-black with purple tint for input text on light theme
39
42
 
40
43
 
41
44
  def fmt_usd(value) -> str:
meshapi/tools.py ADDED
@@ -0,0 +1,129 @@
1
+ """Tool definitions sent to the model + local executors."""
2
+ import subprocess
3
+ from pathlib import Path
4
+
5
+
6
+ def build_system_prompt(cfg: dict) -> str:
7
+ """Append working-dir + tool guidance to the user's base system prompt.
8
+
9
+ Naming tools in prose (read_file/write_file/run_bash) makes Anthropic
10
+ models drop into XML tool-use mode and emit `<function_calls>` as
11
+ text — keep this section deliberately tool-name-free.
12
+ """
13
+ base = cfg.get("system") or ""
14
+ cwd = str(Path.cwd())
15
+ return (
16
+ f"{base}\n\n"
17
+ f"Working directory: {cwd}\n"
18
+ "Resolve any relative path the user gives against this working "
19
+ "directory. When you create or edit files without an explicit "
20
+ "absolute path, place them inside this working directory. Use "
21
+ "the available tools to inspect and modify the filesystem and "
22
+ "run shell commands — do not ask the user to run commands."
23
+ )
24
+
25
+ # OpenAI-compatible tool spec — Mesh API forwards these to the underlying provider.
26
+ TOOLS = [
27
+ {
28
+ "type": "function",
29
+ "function": {
30
+ "name": "read_file",
31
+ "description": "Read a file from the user's filesystem and return its contents.",
32
+ "parameters": {
33
+ "type": "object",
34
+ "properties": {
35
+ "path": {
36
+ "type": "string",
37
+ "description": "Path to the file (absolute, or relative to the cwd)",
38
+ }
39
+ },
40
+ "required": ["path"],
41
+ },
42
+ },
43
+ },
44
+ {
45
+ "type": "function",
46
+ "function": {
47
+ "name": "write_file",
48
+ "description": "Create or overwrite a file with the given content. Parent directories are created if missing.",
49
+ "parameters": {
50
+ "type": "object",
51
+ "properties": {
52
+ "path": {"type": "string", "description": "File path to write"},
53
+ "content": {"type": "string", "description": "Full file contents"},
54
+ },
55
+ "required": ["path", "content"],
56
+ },
57
+ },
58
+ },
59
+ {
60
+ "type": "function",
61
+ "function": {
62
+ "name": "run_bash",
63
+ "description": "Run a shell command (zsh/bash) and return combined stdout+stderr plus exit code. Times out at 60s.",
64
+ "parameters": {
65
+ "type": "object",
66
+ "properties": {
67
+ "command": {"type": "string", "description": "The shell command to run"}
68
+ },
69
+ "required": ["command"],
70
+ },
71
+ },
72
+ },
73
+ ]
74
+
75
+ OUTPUT_LIMIT = 8000
76
+
77
+
78
+ def execute(name: str, arguments: dict) -> str:
79
+ """Run a tool locally and return a string result for the model."""
80
+ if name == "read_file":
81
+ try:
82
+ return Path(arguments["path"]).expanduser().read_text()
83
+ except Exception as e:
84
+ return f"Error: {e}"
85
+
86
+ if name == "write_file":
87
+ try:
88
+ p = Path(arguments["path"]).expanduser()
89
+ p.parent.mkdir(parents=True, exist_ok=True)
90
+ content = arguments["content"]
91
+ p.write_text(content)
92
+ return f"OK — wrote {len(content)} chars to {p}"
93
+ except Exception as e:
94
+ return f"Error: {e}"
95
+
96
+ if name == "run_bash":
97
+ try:
98
+ r = subprocess.run(
99
+ arguments["command"],
100
+ shell=True,
101
+ capture_output=True,
102
+ text=True,
103
+ timeout=60,
104
+ cwd=str(Path.cwd()),
105
+ )
106
+ out = (r.stdout or "") + (r.stderr or "")
107
+ tail = "...[truncated]" if len(out) > OUTPUT_LIMIT else ""
108
+ return f"{out[:OUTPUT_LIMIT]}{tail}\n[exit {r.returncode}]"
109
+ except subprocess.TimeoutExpired:
110
+ return "Error: command timed out after 60s"
111
+ except Exception as e:
112
+ return f"Error: {e}"
113
+
114
+ return f"Error: unknown tool `{name}`"
115
+
116
+
117
+ def summarize_call(name: str, arguments: dict) -> str:
118
+ """One-line summary used in the approval prompt and progress log."""
119
+ if name == "read_file":
120
+ return f"read_file: {arguments.get('path')}"
121
+ if name == "write_file":
122
+ n = len(arguments.get("content", ""))
123
+ return f"write_file: {arguments.get('path')} ({n} chars)"
124
+ if name == "run_bash":
125
+ cmd = arguments.get("command", "")
126
+ if len(cmd) > 200:
127
+ cmd = cmd[:200] + "…"
128
+ return f"run_bash: {cmd}"
129
+ return f"{name}({arguments})"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: meshapi-code
3
- Version: 0.2.0
3
+ Version: 0.3.0
4
4
  Summary: Terminal chat for Mesh API — OpenAI-compatible LLM gateway
5
5
  Project-URL: Homepage, https://meshapi.ai
6
6
  Project-URL: Documentation, https://docs.meshapi.ai
@@ -0,0 +1,14 @@
1
+ meshapi/__init__.py,sha256=VrXpHDu3erkzwl_WXrqINBm9xWkcyUy53IQOj042dOs,22
2
+ meshapi/__main__.py,sha256=MSmt_5Xg84uHqzTN38JwgseJK8rsJn_11A8WD99VtEo,61
3
+ meshapi/cli.py,sha256=d1-WSXTUiwKUbFhn6sOzUe_Gb9I8CxbsMdFXVX_k_YU,9777
4
+ meshapi/client.py,sha256=-kkPKMsl98wX_N0X6Rr8GNygJH4HSlrWloiWlhQuwgU,2824
5
+ meshapi/commands.py,sha256=fc_qnTa_oZgn5CRQ7GrpUm1Zclmat4McLdEV5YX5Wvg,3662
6
+ meshapi/config.py,sha256=DcqOjZtAufuSDg2vDs0lsBynRjThL_8znLOl_Y2_PN4,1159
7
+ meshapi/permissions.py,sha256=NOZy7DyP05HyuIoYYO3-YT_U0wnU4wg_syfxDqh3Oz8,1016
8
+ meshapi/render.py,sha256=z7ggnk8BqwcGK3tJm5PunEnROiVQmDWAD4hvJeg9gF4,4251
9
+ meshapi/tools.py,sha256=BIy_kJrIuY0RXDfTSOo8MrcjuIKR7LR8PYl36JONY-g,4578
10
+ meshapi_code-0.3.0.dist-info/METADATA,sha256=o97mwuZc8kd0v5NkYfZ-pZpafUwmFoxwHX1HmmsX8Wg,3765
11
+ meshapi_code-0.3.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
12
+ meshapi_code-0.3.0.dist-info/entry_points.txt,sha256=ZCXZ_SgrhWIQEHSjAXz0pUlyGbIQKZ68vp_Cg1Y0rME,45
13
+ meshapi_code-0.3.0.dist-info/licenses/LICENSE,sha256=oALrQSPnF5cbhoBcSv6uDDsPUGl7q5Y2AL-dJ--wzt8,1065
14
+ meshapi_code-0.3.0.dist-info/RECORD,,
@@ -1,12 +0,0 @@
1
- meshapi/__init__.py,sha256=Zn1KFblwuFHiDRdRAiRnDBRkbPttWh44jKa5zG2ov0E,22
2
- meshapi/__main__.py,sha256=MSmt_5Xg84uHqzTN38JwgseJK8rsJn_11A8WD99VtEo,61
3
- meshapi/cli.py,sha256=v4whFCFSP-4uIACcWCwOOSyB3j6_dPlykYLaNa5-29s,4298
4
- meshapi/client.py,sha256=ZJ9ecsMjE8T0UlksacNwoIEKznsOa4W2bbnSMjIGI6E,1708
5
- meshapi/commands.py,sha256=0ZiIh9n94f6ix-hH3u4v3gW7N8sLSWsPv9sdDPEUbg0,3040
6
- meshapi/config.py,sha256=DcqOjZtAufuSDg2vDs0lsBynRjThL_8znLOl_Y2_PN4,1159
7
- meshapi/render.py,sha256=V3R56VBYqpQYq_BYEaoWB3IPSXnxlRHEJAuNvuK4rog,3940
8
- meshapi_code-0.2.0.dist-info/METADATA,sha256=lk8bRW8ceI9YQy4NjzCibEt0OsJ9slXNKqPxApnokMI,3765
9
- meshapi_code-0.2.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
10
- meshapi_code-0.2.0.dist-info/entry_points.txt,sha256=ZCXZ_SgrhWIQEHSjAXz0pUlyGbIQKZ68vp_Cg1Y0rME,45
11
- meshapi_code-0.2.0.dist-info/licenses/LICENSE,sha256=oALrQSPnF5cbhoBcSv6uDDsPUGl7q5Y2AL-dJ--wzt8,1065
12
- meshapi_code-0.2.0.dist-info/RECORD,,