gemcode 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gemcode/__init__.py +3 -0
- gemcode/__main__.py +3 -0
- gemcode/agent.py +146 -0
- gemcode/audit.py +16 -0
- gemcode/callbacks.py +473 -0
- gemcode/capability_routing.py +137 -0
- gemcode/cli.py +658 -0
- gemcode/compaction.py +35 -0
- gemcode/computer_use/__init__.py +0 -0
- gemcode/computer_use/browser_computer.py +275 -0
- gemcode/config.py +247 -0
- gemcode/interactions.py +15 -0
- gemcode/invoke.py +151 -0
- gemcode/kairos_daemon.py +221 -0
- gemcode/limits.py +83 -0
- gemcode/live_audio_engine.py +124 -0
- gemcode/mcp_loader.py +57 -0
- gemcode/memory/__init__.py +0 -0
- gemcode/memory/embedding_memory_service.py +292 -0
- gemcode/memory/file_memory_service.py +176 -0
- gemcode/modality_tools.py +216 -0
- gemcode/model_routing.py +179 -0
- gemcode/paths.py +29 -0
- gemcode/permissions.py +5 -0
- gemcode/plugins/__init__.py +0 -0
- gemcode/plugins/terminal_hooks_plugin.py +168 -0
- gemcode/plugins/tool_recovery_plugin.py +135 -0
- gemcode/prompt_suggestions.py +80 -0
- gemcode/query/__init__.py +36 -0
- gemcode/query/config.py +35 -0
- gemcode/query/deps.py +20 -0
- gemcode/query/engine.py +55 -0
- gemcode/query/stop_hooks.py +63 -0
- gemcode/query/token_budget.py +109 -0
- gemcode/query/transitions.py +41 -0
- gemcode/session_runtime.py +81 -0
- gemcode/thinking.py +136 -0
- gemcode/tool_prompt_manifest.py +118 -0
- gemcode/tool_registry.py +50 -0
- gemcode/tools/__init__.py +25 -0
- gemcode/tools/edit.py +53 -0
- gemcode/tools/filesystem.py +73 -0
- gemcode/tools/search.py +85 -0
- gemcode/tools/shell.py +73 -0
- gemcode/tools_inspector.py +132 -0
- gemcode/trust.py +54 -0
- gemcode/tui/app.py +697 -0
- gemcode/tui/scrollback.py +312 -0
- gemcode/vertex.py +22 -0
- gemcode/web/__init__.py +2 -0
- gemcode/web/claude_sse_adapter.py +282 -0
- gemcode/web/terminal_repl.py +147 -0
- gemcode-0.2.2.dist-info/METADATA +440 -0
- gemcode-0.2.2.dist-info/RECORD +58 -0
- gemcode-0.2.2.dist-info/WHEEL +5 -0
- gemcode-0.2.2.dist-info/entry_points.txt +2 -0
- gemcode-0.2.2.dist-info/licenses/LICENSE +151 -0
- gemcode-0.2.2.dist-info/top_level.txt +1 -0
gemcode/kairos_daemon.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import copy
|
|
5
|
+
import sys
|
|
6
|
+
import uuid
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from typing import Awaitable, Callable
|
|
9
|
+
|
|
10
|
+
from google.adk.runners import Runner
|
|
11
|
+
|
|
12
|
+
from gemcode.config import GemCodeConfig
|
|
13
|
+
from gemcode.capability_routing import apply_capability_routing
|
|
14
|
+
from gemcode.model_routing import pick_effective_model
|
|
15
|
+
from gemcode.invoke import run_turn
|
|
16
|
+
from gemcode.session_runtime import create_runner
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _events_to_text(events) -> str:
|
|
20
|
+
parts: list[str] = []
|
|
21
|
+
for event in events:
|
|
22
|
+
if not getattr(event, "content", None) or not getattr(
|
|
23
|
+
event.content, "parts", None
|
|
24
|
+
):
|
|
25
|
+
continue
|
|
26
|
+
for part in event.content.parts:
|
|
27
|
+
if getattr(part, "text", None) and getattr(event, "author", None) != "user":
|
|
28
|
+
parts.append(part.text)
|
|
29
|
+
return "".join(parts)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass(frozen=True)
class KairosJob:
    """Immutable description of one scheduled unit of work.

    Instances are placed on the daemon's priority queue and handed to a job
    runner; frozen so they can be shared safely across concurrent tasks.
    """

    # Unique identifier ("job_<hex>") assigned at enqueue time.
    job_id: str
    # Prompt text the job will run as a model turn.
    prompt: str
    # Higher values run earlier (the queue stores the negated value).
    priority: int
    # ADK session the job's turn executes in.
    session_id: str
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class KairosDaemon:
    """Kairos-like proactive scheduler (stdin -> priority queue -> job runners).

    Jobs are enqueued (from stdin or programmatically), dequeued in priority
    order, and executed concurrently up to `concurrency` at a time.
    """

    def __init__(
        self,
        *,
        cfg: GemCodeConfig,
        concurrency: int = 2,
        default_priority: int = 0,
        user_id: str = "local",
        job_runner: Callable[[KairosJob], Awaitable[None]] | None = None,
    ) -> None:
        """Create a daemon.

        Args:
            cfg: base GemCode config; deep-copied per job so per-job routing
                never mutates state shared across jobs.
            concurrency: maximum jobs running at once (clamped to >= 1).
            default_priority: priority used when a job doesn't specify one.
            user_id: ADK user id used for all job turns.
            job_runner: optional override for job execution; defaults to
                running a full model turn via `run_turn`.
        """
        self.cfg = cfg
        self.concurrency = max(1, int(concurrency))
        self.default_priority = int(default_priority)
        self.user_id = user_id

        # Queue items are (sort_key, seq, KairosJob); `seq` breaks priority
        # ties in FIFO order.
        self._queue: asyncio.PriorityQueue[
            tuple[int, int, KairosJob]
        ] = asyncio.PriorityQueue()
        self._seq = 0
        self._sem = asyncio.Semaphore(self.concurrency)
        self._stop_event = asyncio.Event()
        # Strong references to in-flight job tasks: the event loop keeps only
        # weak references to tasks, so a fire-and-forget task could otherwise
        # be garbage-collected mid-run (see asyncio.create_task docs).
        self._tasks: set[asyncio.Task] = set()

        self._job_runner = job_runner or self._default_job_runner

    def enqueue_prompt(
        self,
        *,
        prompt: str,
        priority: int | None = None,
        session_id: str,
    ) -> str:
        """Enqueue a new job into the priority queue and return job_id."""
        job_id = f"job_{uuid.uuid4().hex[:10]}"
        pr = self.default_priority if priority is None else int(priority)
        self._seq += 1
        job = KairosJob(
            job_id=job_id,
            prompt=prompt,
            priority=pr,
            session_id=session_id,
        )
        # Higher priority should run first => use negative sort key.
        self._queue.put_nowait((-pr, self._seq, job))
        return job_id

    async def _default_job_runner(self, job: KairosJob) -> None:
        """Run one job as a full model turn and print any text output."""
        runner: Runner | None = None
        try:
            # Route model/capabilities based on this job's prompt, without
            # mutating the daemon's base config shared across jobs.
            job_cfg = copy.deepcopy(self.cfg)
            apply_capability_routing(job_cfg, job.prompt, context="prompt")
            job_cfg.model = pick_effective_model(job_cfg, job.prompt)

            # For the initial MVP, we inject Kairos tools via
            # `_build_extra_tools_for_job()`; this keeps scheduling logic
            # independent from tool declarations.
            extra_tools = self._build_extra_tools_for_job(job)
            runner = create_runner(job_cfg, extra_tools=extra_tools or None)
            events = await run_turn(
                runner,
                user_id=self.user_id,
                session_id=job.session_id,
                prompt=job.prompt,
                max_llm_calls=job_cfg.max_llm_calls,
                cfg=job_cfg,
            )
            text = _events_to_text(events).strip()
            if text:
                print(f"\n[kairos {job.job_id}] {text}\n", flush=True)
            else:
                print(f"\n[kairos {job.job_id}] (no text output)\n", flush=True)
        finally:
            # Release runner resources even when the turn raised.
            if runner is not None:
                await runner.close()

    def _build_extra_tools_for_job(self, job: KairosJob) -> list | None:
        """Inject per-job tools for the model to call."""

        async def kairos_sleep_ms(duration_ms: int) -> dict:
            """Pause this job for `duration_ms` (does not block other jobs)."""
            duration_ms = max(0, int(duration_ms))
            await asyncio.sleep(duration_ms / 1000.0)
            return {"slept_ms": duration_ms}

        def kairos_enqueue_prompt(
            prompt: str,
            priority: int = 0,
            session_id: str | None = None,
        ) -> dict:
            """Enqueue a new Kairos job from the model.

            If `session_id` is not provided, it defaults to the current job's
            session_id.
            """
            sid = job.session_id if session_id is None else str(session_id)
            enqueued_id = self.enqueue_prompt(
                prompt=prompt,
                priority=priority,
                session_id=sid,
            )
            return {"enqueued_job_id": enqueued_id}

        return [kairos_sleep_ms, kairos_enqueue_prompt]

    async def _run_job_with_semaphore(self, job: KairosJob) -> None:
        """Run a job while holding a concurrency permit (acquire + release)."""
        async with self._sem:
            await self._job_runner(job)

    async def _run_job_and_release(self, job: KairosJob) -> None:
        """Run a job, releasing a permit the scheduler already acquired."""
        try:
            await self._job_runner(job)
        finally:
            self._sem.release()

    async def drain(self, *, max_jobs: int | None = None) -> int:
        """Process jobs already queued, useful for unit tests."""
        processed = 0
        while not self._queue.empty() and (max_jobs is None or processed < max_jobs):
            _, _, job = await self._queue.get()
            await self._run_job_with_semaphore(job)
            processed += 1
        return processed

    async def _stdin_loop(self, *, session_id: str) -> None:
        """Read stdin lines and enqueue each as a new job."""
        # Use a background thread so the asyncio loop stays responsive.
        prompt_prefix = "kairos> "
        while not self._stop_event.is_set():
            try:
                # Print prompt only in interactive terminals.
                if sys.stdin.isatty():
                    print(prompt_prefix, end="", flush=True)
                line = await asyncio.to_thread(sys.stdin.readline)
            except Exception:
                break
            if not line:
                # EOF.
                break

            s = line.strip()
            if not s:
                continue
            if s.lower() in ("quit", "exit", "q"):
                self._stop_event.set()
                break

            # MVP: one line => one job at default priority.
            self.enqueue_prompt(
                prompt=s,
                priority=self.default_priority,
                session_id=session_id,
            )

    async def run_forever(self, *, session_id: str) -> None:
        """Start the scheduler and keep running until stdin EOF/quit."""

        scheduler_task = asyncio.create_task(self._scheduler_loop())
        stdin_task = asyncio.create_task(self._stdin_loop(session_id=session_id))

        # Wait for either scheduler to stop (shouldn't happen) or stdin loop to end.
        done, pending = await asyncio.wait(
            {scheduler_task, stdin_task},
            return_when=asyncio.FIRST_COMPLETED,
        )
        for p in pending:
            p.cancel()
        # Actually await the cancellation so the cancelled task(s) unwind
        # before we return (a bare cancel() only schedules it).
        await asyncio.gather(*pending, return_exceptions=True)

    async def _scheduler_loop(self) -> None:
        """Continuously dequeue jobs by priority and run them."""
        while not self._stop_event.is_set():
            # Don't dequeue from the priority queue unless we can start work
            # immediately. This preserves priority ordering for "next starts".
            try:
                await self._sem.acquire()
            except asyncio.CancelledError:
                break
            try:
                _, _, job = await self._queue.get()
            except asyncio.CancelledError:
                # Cancelled between acquire and get: return the permit so the
                # concurrency budget isn't permanently reduced.
                self._sem.release()
                break
            task = asyncio.create_task(self._run_job_and_release(job))
            # Keep a strong reference until the task completes.
            self._tasks.add(task)
            task.add_done_callback(self._tasks.discard)
|
|
221
|
+
|
gemcode/limits.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pre-model limits (cf. Claude `calculateTokenWarningState` / blocking limit checks).
|
|
3
|
+
|
|
4
|
+
Uses session state updated in `callbacks.make_after_model_callback`.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from gemcode.config import GemCodeConfig
|
|
10
|
+
|
|
11
|
+
# Session-state keys shared with `callbacks.make_after_model_callback`.
# Cumulative token count for the whole session (read as an int here).
SESSION_TOTAL_TOKENS_KEY = "gemcode:session_total_tokens"
# Flag set when the per-turn token budget has been exhausted.
TOKEN_BUDGET_STOP_KEY = "gemcode:bt_token_budget_stop"
# First terminal-stop reason recorded for the session (never overwritten).
TERMINAL_REASON_KEY = "gemcode:terminal_reason"
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def make_before_model_limits_callback(cfg: GemCodeConfig):
    """Build a before-model callback enforcing the session token ceiling.

    Returns None when `cfg.max_session_tokens` is unset (no limit configured);
    otherwise returns an async callback that short-circuits the next LLM call
    with a synthetic model response once the cumulative session token count
    reaches the ceiling.
    """
    if cfg.max_session_tokens is None:
        return None

    async def before_model(callback_context, llm_request):
        st = callback_context.state
        total = int(st.get(SESSION_TOTAL_TOKENS_KEY, 0) or 0)
        if total < cfg.max_session_tokens:
            # Under the ceiling: let the model call proceed.
            return None

        # Imported lazily so this module stays importable without ADK loaded.
        from google.adk.models.llm_response import LlmResponse
        from google.genai import types

        # Record a terminal reason for stopHooks-like taxonomy. Use the same
        # `st` handle throughout (the original aliased state twice).
        if not st.get(TERMINAL_REASON_KEY):
            st[TERMINAL_REASON_KEY] = "session_token_limit"

        return LlmResponse(
            content=types.Content(
                role="model",
                parts=[
                    types.Part(
                        text=(
                            f"GemCode: session token ceiling ({cfg.max_session_tokens}) reached "
                            "(see GEMCODE_MAX_SESSION_TOKENS). Start a new session or raise the limit."
                        )
                    )
                ],
            ),
            turn_complete=True,
        )

    return before_model
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def make_before_model_token_budget_callback(cfg: GemCodeConfig):
    """Short-circuit the next model call after token-budget stop flag."""
    if cfg.token_budget is None:
        return None

    async def before_model(callback_context, llm_request):
        state = callback_context.state
        stop_requested = state.get(TOKEN_BUDGET_STOP_KEY, False)
        if not stop_requested:
            return None

        # Imported lazily so this module stays importable without ADK loaded.
        from google.adk.models.llm_response import LlmResponse
        from google.genai import types

        # First terminal reason wins; never overwrite an earlier one.
        if not state.get(TERMINAL_REASON_KEY):
            state[TERMINAL_REASON_KEY] = "token_budget_stop"

        message = (
            f"GemCode: token budget ({cfg.token_budget}) exhausted for this turn. "
            "Start a new request to continue."
        )
        return LlmResponse(
            content=types.Content(role="model", parts=[types.Part(text=message)]),
            turn_complete=True,
        )

    return before_model
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Live audio engine (Gemini Live API via ADK).
|
|
3
|
+
|
|
4
|
+
This wires GemCode's existing outer session + callbacks into ADK's
|
|
5
|
+
`Runner.run_live()` path for real-time audio input/output.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import sys
|
|
12
|
+
from typing import Optional
|
|
13
|
+
|
|
14
|
+
from google.adk.agents.live_request_queue import LiveRequestQueue
|
|
15
|
+
from google.adk.agents.run_config import RunConfig
|
|
16
|
+
from google.genai import types
|
|
17
|
+
|
|
18
|
+
from gemcode.config import GemCodeConfig
|
|
19
|
+
from gemcode.session_runtime import create_runner
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _mime_type_for_rate(rate: int) -> str:
|
|
23
|
+
# ADK/examples commonly use this mime type.
|
|
24
|
+
return f"audio/pcm;rate={rate}"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _record_mic_pcm_blocking(*, rate: int, seconds: int) -> bytes:
    """Synchronously capture `seconds` of mono 16-bit PCM from the default mic.

    Raises:
        RuntimeError: when the optional capture dependencies are missing.
    """
    try:
        import numpy as np
        import sounddevice as sd
    except ImportError as e:
        raise RuntimeError(
            "Mic capture requires `sounddevice` and `numpy`. Install them to use `gemcode live-audio`."
        ) from e

    frame_count = int(rate * seconds)
    # Record mono int16 samples and block until the buffer is filled.
    recording = sd.rec(frame_count, samplerate=rate, channels=1, dtype="int16")
    sd.wait()
    samples = np.asarray(recording).astype("int16", copy=False)
    return samples.tobytes()
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
async def run_live_audio(
    cfg: GemCodeConfig,
    *,
    session_id: str,
    user_id: str = "local",
    seconds: int = 10,
    input_rate: int = 24_000,
    language_code: Optional[str] = None,
) -> None:
    """
    Record microphone audio for `seconds` and send it to Gemini Live.

    MVP behavior:
      - sends the entire recorded buffer as a single audio blob
      - prints any model text parts it returns (typically transcriptions)

    Args:
        cfg: GemCode configuration used to build the runner.
        session_id: ADK session the live turn runs in.
        user_id: ADK user id.
        seconds: length of the mic recording.
        input_rate: PCM sample rate of the recording.
        language_code: optional speech language override.
    """

    runner = create_runner(cfg)
    try:
        live_queue = LiveRequestQueue()

        speech_config = None
        if language_code:
            speech_config = types.SpeechConfig(language_code=language_code)

        run_config = RunConfig(
            response_modalities=["AUDIO"],
            speech_config=speech_config,
            # Keep SDK defaults for STT/TTS transcription configs.
        )

        agen = runner.run_live(
            user_id=user_id,
            session_id=session_id,
            live_request_queue=live_queue,
            run_config=run_config,
        )

        printed_any = False

        async def _consume_events() -> None:
            """Stream events and echo model-authored text to stdout."""
            nonlocal printed_any
            async for event in agen:
                if not event.content or not event.content.parts:
                    continue
                for part in event.content.parts:
                    part_text = getattr(part, "text", None)
                    # We only print model-authored text to avoid echoing user input.
                    if part_text and getattr(event, "author", None) and event.author != "user":
                        sys.stdout.write(part_text)
                        sys.stdout.flush()
                        printed_any = True

        consumer_task = asyncio.create_task(_consume_events())
        try:
            # Send "user started speaking" signal.
            live_queue.send_activity_start()

            pcm_bytes = await asyncio.to_thread(
                _record_mic_pcm_blocking, rate=input_rate, seconds=seconds
            )
            live_queue.send_realtime(
                types.Blob(data=pcm_bytes, mime_type=_mime_type_for_rate(input_rate))
            )

            # Send "user finished speaking" signal and close the queue.
            live_queue.send_activity_end()
            live_queue.close()
        except BaseException:
            # Mic capture or queue signalling failed: don't leave the
            # consumer task dangling.
            consumer_task.cancel()
            raise

        # Wait for event stream to drain.
        await consumer_task

        if not printed_any:
            print("\n[gemcode live-audio] No model text received (audio may have been silent).")
    finally:
        # Always release runner resources, even when the live turn fails
        # (the original only closed the runner on the success path).
        await runner.close()
|
|
124
|
+
|
gemcode/mcp_loader.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Optional MCP toolsets from `.gemcode/mcp.json`.
|
|
3
|
+
|
|
4
|
+
Schema (example):
|
|
5
|
+
{
|
|
6
|
+
"servers": [
|
|
7
|
+
{
|
|
8
|
+
"name": "docs",
|
|
9
|
+
"stdio": { "command": "npx", "args": ["-y", "@some/mcp-server"] }
|
|
10
|
+
}
|
|
11
|
+
]
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
Requires: pip install gemcode[mcp]
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
import json
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from typing import Any
|
|
22
|
+
|
|
23
|
+
from gemcode.config import GemCodeConfig
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def load_mcp_toolsets(cfg: GemCodeConfig) -> list:
    """Build MCP toolsets declared in `<project_root>/.gemcode/mcp.json`.

    Returns an empty list when the file is absent. Raises ValueError on
    malformed JSON and ImportError when the MCP extras are not installed.
    """
    config_path = cfg.project_root / ".gemcode" / "mcp.json"
    if not config_path.is_file():
        return []

    try:
        data = json.loads(config_path.read_text(encoding="utf-8"))
    except json.JSONDecodeError as e:
        raise ValueError(f"Invalid mcp.json: {e}") from e

    # Import lazily: MCP support is an optional extra.
    try:
        from google.adk.tools.mcp_tool.mcp_toolset import McpToolset
        from mcp import StdioServerParameters
    except ImportError as e:
        raise ImportError("Install MCP extras: pip install gemcode[mcp]") from e

    toolsets: list[Any] = []
    for server in data.get("servers") or []:
        stdio_spec = server.get("stdio") or {}
        command = stdio_spec.get("command")
        if not command:
            # Only stdio servers with an explicit command are supported.
            continue
        toolsets.append(
            McpToolset(
                connection_params=StdioServerParameters(
                    command=command,
                    args=stdio_spec.get("args") or [],
                ),
                tool_name_prefix=server.get("name") or "mcp",
            )
        )
    return toolsets
|
|
File without changes
|