@agentunion/kite 1.0.6 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/cli.js +127 -25
  2. package/core/event_hub/entry.py +384 -61
  3. package/core/event_hub/hub.py +8 -0
  4. package/core/event_hub/module.md +0 -1
  5. package/core/event_hub/server.py +169 -38
  6. package/core/kite_log.py +241 -0
  7. package/core/launcher/entry.py +1306 -425
  8. package/core/launcher/module_scanner.py +10 -9
  9. package/core/launcher/process_manager.py +555 -121
  10. package/core/registry/entry.py +335 -30
  11. package/core/registry/server.py +339 -256
  12. package/core/registry/store.py +13 -2
  13. package/extensions/agents/__init__.py +1 -0
  14. package/extensions/agents/assistant/__init__.py +1 -0
  15. package/extensions/agents/assistant/entry.py +380 -0
  16. package/extensions/agents/assistant/module.md +22 -0
  17. package/extensions/agents/assistant/server.py +236 -0
  18. package/extensions/channels/__init__.py +1 -0
  19. package/extensions/channels/acp_channel/__init__.py +1 -0
  20. package/extensions/channels/acp_channel/entry.py +380 -0
  21. package/extensions/channels/acp_channel/module.md +22 -0
  22. package/extensions/channels/acp_channel/server.py +236 -0
  23. package/{core → extensions}/event_hub_bench/entry.py +664 -371
  24. package/{core → extensions}/event_hub_bench/module.md +4 -2
  25. package/extensions/services/backup/__init__.py +1 -0
  26. package/extensions/services/backup/entry.py +380 -0
  27. package/extensions/services/backup/module.md +22 -0
  28. package/extensions/services/backup/server.py +244 -0
  29. package/extensions/services/model_service/__init__.py +1 -0
  30. package/extensions/services/model_service/entry.py +380 -0
  31. package/extensions/services/model_service/module.md +22 -0
  32. package/extensions/services/model_service/server.py +236 -0
  33. package/extensions/services/watchdog/entry.py +460 -143
  34. package/extensions/services/watchdog/module.md +3 -0
  35. package/extensions/services/watchdog/monitor.py +128 -13
  36. package/extensions/services/watchdog/server.py +75 -13
  37. package/extensions/services/web/__init__.py +1 -0
  38. package/extensions/services/web/config.yaml +149 -0
  39. package/extensions/services/web/entry.py +487 -0
  40. package/extensions/services/web/module.md +24 -0
  41. package/extensions/services/web/routes/__init__.py +1 -0
  42. package/extensions/services/web/routes/routes_call.py +189 -0
  43. package/extensions/services/web/routes/routes_config.py +512 -0
  44. package/extensions/services/web/routes/routes_contacts.py +98 -0
  45. package/extensions/services/web/routes/routes_devlog.py +99 -0
  46. package/extensions/services/web/routes/routes_phone.py +81 -0
  47. package/extensions/services/web/routes/routes_sms.py +48 -0
  48. package/extensions/services/web/routes/routes_stats.py +17 -0
  49. package/extensions/services/web/routes/routes_voicechat.py +554 -0
  50. package/extensions/services/web/routes/schemas.py +216 -0
  51. package/extensions/services/web/server.py +332 -0
  52. package/extensions/services/web/static/css/style.css +1064 -0
  53. package/extensions/services/web/static/index.html +1445 -0
  54. package/extensions/services/web/static/js/app.js +4671 -0
  55. package/extensions/services/web/vendor/__init__.py +1 -0
  56. package/extensions/services/web/vendor/bluetooth/audio.py +348 -0
  57. package/extensions/services/web/vendor/bluetooth/contacts.py +251 -0
  58. package/extensions/services/web/vendor/bluetooth/manager.py +395 -0
  59. package/extensions/services/web/vendor/bluetooth/sms.py +290 -0
  60. package/extensions/services/web/vendor/bluetooth/telephony.py +274 -0
  61. package/extensions/services/web/vendor/config.py +139 -0
  62. package/extensions/services/web/vendor/conversation/__init__.py +0 -0
  63. package/extensions/services/web/vendor/conversation/asr.py +936 -0
  64. package/extensions/services/web/vendor/conversation/engine.py +548 -0
  65. package/extensions/services/web/vendor/conversation/llm.py +534 -0
  66. package/extensions/services/web/vendor/conversation/mcp_tools.py +190 -0
  67. package/extensions/services/web/vendor/conversation/tts.py +322 -0
  68. package/extensions/services/web/vendor/conversation/vad.py +138 -0
  69. package/extensions/services/web/vendor/storage/__init__.py +1 -0
  70. package/extensions/services/web/vendor/storage/identity.py +312 -0
  71. package/extensions/services/web/vendor/storage/store.py +507 -0
  72. package/extensions/services/web/vendor/task/__init__.py +0 -0
  73. package/extensions/services/web/vendor/task/manager.py +864 -0
  74. package/extensions/services/web/vendor/task/models.py +45 -0
  75. package/extensions/services/web/vendor/task/webhook.py +263 -0
  76. package/extensions/services/web/vendor/tools/__init__.py +0 -0
  77. package/extensions/services/web/vendor/tools/registry.py +321 -0
  78. package/main.py +344 -4
  79. package/package.json +11 -2
  80. package/core/__pycache__/__init__.cpython-313.pyc +0 -0
  81. package/core/__pycache__/data_dir.cpython-313.pyc +0 -0
  82. package/core/data_dir.py +0 -62
  83. package/core/event_hub/__pycache__/__init__.cpython-313.pyc +0 -0
  84. package/core/event_hub/__pycache__/bench.cpython-313.pyc +0 -0
  85. package/core/event_hub/__pycache__/bench_perf.cpython-313.pyc +0 -0
  86. package/core/event_hub/__pycache__/dedup.cpython-313.pyc +0 -0
  87. package/core/event_hub/__pycache__/entry.cpython-313.pyc +0 -0
  88. package/core/event_hub/__pycache__/hub.cpython-313.pyc +0 -0
  89. package/core/event_hub/__pycache__/router.cpython-313.pyc +0 -0
  90. package/core/event_hub/__pycache__/server.cpython-313.pyc +0 -0
  91. package/core/event_hub/bench_results/2026-02-28_13-26-48.json +0 -51
  92. package/core/event_hub/bench_results/2026-02-28_13-44-45.json +0 -51
  93. package/core/event_hub/bench_results/2026-02-28_13-45-39.json +0 -51
  94. package/core/launcher/__pycache__/__init__.cpython-313.pyc +0 -0
  95. package/core/launcher/__pycache__/entry.cpython-313.pyc +0 -0
  96. package/core/launcher/__pycache__/module_scanner.cpython-313.pyc +0 -0
  97. package/core/launcher/__pycache__/process_manager.cpython-313.pyc +0 -0
  98. package/core/launcher/data/log/lifecycle.jsonl +0 -1158
  99. package/core/launcher/data/token.txt +0 -1
  100. package/core/registry/__pycache__/__init__.cpython-313.pyc +0 -0
  101. package/core/registry/__pycache__/entry.cpython-313.pyc +0 -0
  102. package/core/registry/__pycache__/server.cpython-313.pyc +0 -0
  103. package/core/registry/__pycache__/store.cpython-313.pyc +0 -0
  104. package/core/registry/data/port.txt +0 -1
  105. package/core/registry/data/port_484.txt +0 -1
  106. package/extensions/__pycache__/__init__.cpython-313.pyc +0 -0
  107. package/extensions/services/__pycache__/__init__.cpython-313.pyc +0 -0
  108. package/extensions/services/watchdog/__pycache__/__init__.cpython-313.pyc +0 -0
  109. package/extensions/services/watchdog/__pycache__/entry.cpython-313.pyc +0 -0
  110. package/extensions/services/watchdog/__pycache__/monitor.cpython-313.pyc +0 -0
  111. package/extensions/services/watchdog/__pycache__/server.cpython-313.pyc +0 -0
  112. package/{core/event_hub/bench_results/.gitkeep → extensions/services/web/vendor/bluetooth/__init__.py} +0 -0
package/extensions/services/web/vendor/conversation/llm.py
@@ -0,0 +1,534 @@
+ """LLM (Large Language Model) abstraction with OpenAI / Claude / Gemini support."""
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ from abc import ABC, abstractmethod
+ from typing import Any
+
+ import httpx
+
+ from .. import config as cfg
+
+ logger = logging.getLogger(__name__)
+
+
+ # ---------------------------------------------------------------------------
+ # Abstract base
+ # ---------------------------------------------------------------------------
+
+ class LLMProvider(ABC):
+     """Base class for all LLM providers."""
+
+     @abstractmethod
+     async def generate(
+         self,
+         messages: list[dict[str, Any]],
+         tools: list[dict[str, Any]] | None = None,
+     ) -> dict[str, Any]:
+         """Generate a response.
+
+         Returns ``{"content": str, "tool_calls": list | None}``.
+         """
+         ...
+
+     @abstractmethod
+     async def list_models(self) -> list[dict[str, Any]]:
+         """List models available on this provider's endpoint."""
+         ...
+
+
+ # ---------------------------------------------------------------------------
+ # Helper utilities shared by all providers
+ # ---------------------------------------------------------------------------
+
+ async def should_end_call(provider: LLMProvider, messages: list[dict[str, Any]]) -> bool:
+     """Ask the LLM whether the conversation should naturally end now.
+
+     Appends a lightweight system prompt asking for a yes/no JSON answer.
+     """
+     check_messages = messages.copy()
+     check_messages.append({
+         "role": "system",
+         "content": (
+             "Based on the conversation so far, should this phone call end now? "
+             "Answer with ONLY a JSON object: {\"should_end\": true} or {\"should_end\": false}. "
+             "The call should end if: the task is complete, the other party said goodbye, "
+             "or there is nothing more to discuss."
+         ),
+     })
+     try:
+         result = await provider.generate(check_messages)
+         text = result.get("content", "")
+         # Try to parse JSON from the response
+         if "{" in text:
+             payload = json.loads(text[text.index("{"):text.rindex("}") + 1])
+             return bool(payload.get("should_end", False))
+     except Exception:
+         logger.debug("should_end_call check failed, defaulting to False")
+     return False
+
+
+ async def generate_summary(provider: LLMProvider, messages: list[dict[str, Any]]) -> str:
+     """Generate a concise call summary from the conversation history."""
+     summary_messages = messages.copy()
+     summary_messages.append({
+         "role": "system",
+         "content": (
+             "Please generate a concise summary of this phone call in Chinese. Include:\n"
+             "1. The purpose of the call\n"
+             "2. Key points discussed\n"
+             "3. Outcome / agreements reached\n"
+             "4. Any follow-up items\n"
+             "Keep it under 300 characters."
+         ),
+     })
+     try:
+         result = await provider.generate(summary_messages)
+         return result.get("content", "")
+     except Exception:
+         logger.exception("Failed to generate call summary")
+         return ""
+
+
+ # ---------------------------------------------------------------------------
+ # OpenAI-compatible provider
+ # ---------------------------------------------------------------------------
+
+ class OpenAILLM(LLMProvider):
+     """OpenAI / OpenAI-compatible chat completions API."""
+
+     def __init__(
+         self,
+         base_url: str,
+         api_key: str,
+         model: str,
+         temperature: float = 0.7,
+         max_tokens: int = 1024,
+     ) -> None:
+         self.base_url = base_url.rstrip("/")
+         self.api_key = api_key
+         self.model = model
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+
+     async def generate(
+         self,
+         messages: list[dict[str, Any]],
+         tools: list[dict[str, Any]] | None = None,
+     ) -> dict[str, Any]:
+         url = f"{self.base_url}/chat/completions"
+         headers = {
+             "Authorization": f"Bearer {self.api_key}",
+             "Content-Type": "application/json",
+         }
+         body: dict[str, Any] = {
+             "model": self.model,
+             "messages": messages,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+         }
+         if tools:
+             body["tools"] = tools
+             body["tool_choice"] = "auto"
+
+         async with httpx.AsyncClient(timeout=60.0) as client:
+             resp = await client.post(url, headers=headers, json=body)
+             resp.raise_for_status()
+             data = resp.json()
+
+         choice = data["choices"][0]
+         message = choice["message"]
+         content = message.get("content", "") or ""
+         tool_calls_raw = message.get("tool_calls")
+
+         tool_calls: list[dict[str, Any]] | None = None
+         if tool_calls_raw:
+             tool_calls = []
+             for tc in tool_calls_raw:
+                 func = tc.get("function", {})
+                 arguments = func.get("arguments", "{}")
+                 if isinstance(arguments, str):
+                     try:
+                         arguments = json.loads(arguments)
+                     except json.JSONDecodeError:
+                         arguments = {}
+                 tool_calls.append({
+                     "id": tc.get("id", ""),
+                     "name": func.get("name", ""),
+                     "arguments": arguments,
+                 })
+
+         return {
+             "content": content,
+             "tool_calls": tool_calls,
+             "raw_tool_calls": tool_calls_raw,
+         }
+
+     async def list_models(self) -> list[dict[str, Any]]:
+         url = f"{self.base_url}/models"
+         headers = {"Authorization": f"Bearer {self.api_key}"}
+         async with httpx.AsyncClient(timeout=30.0) as client:
+             resp = await client.get(url, headers=headers)
+             resp.raise_for_status()
+             data = resp.json()
+         return data.get("data", [])
+
+
+ # ---------------------------------------------------------------------------
+ # Anthropic Claude provider
+ # ---------------------------------------------------------------------------
+
+ class ClaudeLLM(LLMProvider):
+     """Anthropic Claude Messages API."""
+
+     ANTHROPIC_VERSION = "2023-06-01"
+
+     def __init__(
+         self,
+         base_url: str,
+         api_key: str,
+         model: str,
+         temperature: float = 0.7,
+         max_tokens: int = 1024,
+     ) -> None:
+         self.base_url = base_url.rstrip("/")
+         self.api_key = api_key
+         self.model = model
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+
+     async def generate(
+         self,
+         messages: list[dict[str, Any]],
+         tools: list[dict[str, Any]] | None = None,
+     ) -> dict[str, Any]:
+         url = f"{self.base_url}/messages"
+         headers = {
+             "x-api-key": self.api_key,
+             "anthropic-version": self.ANTHROPIC_VERSION,
+             "Content-Type": "application/json",
+         }
+
+         # Convert OpenAI-style messages to Claude format
+         system_text, claude_messages = self._convert_messages(messages)
+
+         body: dict[str, Any] = {
+             "model": self.model,
+             "messages": claude_messages,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+         }
+         if system_text:
+             body["system"] = system_text
+         if tools:
+             body["tools"] = self._convert_tools(tools)
+
+         async with httpx.AsyncClient(timeout=60.0) as client:
+             resp = await client.post(url, headers=headers, json=body)
+             resp.raise_for_status()
+             data = resp.json()
+
+         # Parse Claude response content blocks
+         content_text = ""
+         tool_calls: list[dict[str, Any]] | None = None
+
+         for block in data.get("content", []):
+             if block["type"] == "text":
+                 content_text += block.get("text", "")
+             elif block["type"] == "tool_use":
+                 if tool_calls is None:
+                     tool_calls = []
+                 tool_calls.append({
+                     "id": block.get("id", ""),
+                     "name": block.get("name", ""),
+                     "arguments": block.get("input", {}),
+                 })
+
+         return {"content": content_text, "tool_calls": tool_calls}
+
+     async def list_models(self) -> list[dict[str, Any]]:
+         url = f"{self.base_url}/models"
+         headers = {
+             "x-api-key": self.api_key,
+             "anthropic-version": self.ANTHROPIC_VERSION,
+         }
+         async with httpx.AsyncClient(timeout=30.0) as client:
+             resp = await client.get(url, headers=headers)
+             resp.raise_for_status()
+             data = resp.json()
+         return data.get("data", [])
+
+     # -- format converters --
+
+     @staticmethod
+     def _convert_messages(
+         messages: list[dict[str, Any]],
+     ) -> tuple[str, list[dict[str, Any]]]:
+         """Convert OpenAI-style messages to Claude format.
+
+         Returns (system_text, claude_messages).
+         """
+         system_parts: list[str] = []
+         claude_msgs: list[dict[str, Any]] = []
+
+         for msg in messages:
+             role = msg.get("role", "")
+             content = msg.get("content", "")
+
+             if role == "system":
+                 system_parts.append(content)
+             elif role == "assistant":
+                 # Check if this is a tool-call result message (has tool_calls)
+                 if msg.get("tool_calls"):
+                     # Build content blocks for tool use
+                     blocks: list[dict[str, Any]] = []
+                     if content:
+                         blocks.append({"type": "text", "text": content})
+                     for tc in msg["tool_calls"]:
+                         blocks.append({
+                             "type": "tool_use",
+                             "id": tc.get("id", ""),
+                             "name": tc.get("name", ""),
+                             "input": tc.get("arguments", {}),
+                         })
+                     claude_msgs.append({"role": "assistant", "content": blocks})
+                 else:
+                     claude_msgs.append({"role": "assistant", "content": content})
+             elif role == "tool":
+                 # Tool result -- Claude uses role "user" with tool_result content blocks
+                 claude_msgs.append({
+                     "role": "user",
+                     "content": [{
+                         "type": "tool_result",
+                         "tool_use_id": msg.get("tool_call_id", ""),
+                         "content": content,
+                     }],
+                 })
+             elif role == "user":
+                 claude_msgs.append({"role": "user", "content": content})
+
+         system_text = "\n\n".join(system_parts)
+         return system_text, claude_msgs
+
+     @staticmethod
+     def _convert_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         """Convert OpenAI-style tool definitions to Claude format.
+
+         OpenAI: {"type": "function", "function": {"name": ..., "description": ..., "parameters": ...}}
+         Claude: {"name": ..., "description": ..., "input_schema": ...}
+         """
+         claude_tools: list[dict[str, Any]] = []
+         for tool in tools:
+             if tool.get("type") == "function":
+                 func = tool["function"]
+                 claude_tools.append({
+                     "name": func["name"],
+                     "description": func.get("description", ""),
+                     "input_schema": func.get("parameters", {"type": "object", "properties": {}}),
+                 })
+             else:
+                 # Assume already in a compatible format
+                 claude_tools.append(tool)
+         return claude_tools
+
+
+ # ---------------------------------------------------------------------------
+ # Google Gemini provider
+ # ---------------------------------------------------------------------------
+
+ class GeminiLLM(LLMProvider):
+     """Google Gemini (Generative Language API)."""
+
+     def __init__(
+         self,
+         base_url: str,
+         api_key: str,
+         model: str,
+         temperature: float = 0.7,
+         max_tokens: int = 1024,
+     ) -> None:
+         self.base_url = base_url.rstrip("/")
+         self.api_key = api_key
+         self.model = model
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+
+     async def generate(
+         self,
+         messages: list[dict[str, Any]],
+         tools: list[dict[str, Any]] | None = None,
+     ) -> dict[str, Any]:
+         url = f"{self.base_url}/models/{self.model}:generateContent?key={self.api_key}"
+         headers = {"Content-Type": "application/json"}
+
+         system_instruction, contents = self._convert_messages(messages)
+
+         body: dict[str, Any] = {
+             "contents": contents,
+             "generationConfig": {
+                 "temperature": self.temperature,
+                 "maxOutputTokens": self.max_tokens,
+             },
+         }
+         if system_instruction:
+             body["systemInstruction"] = {"parts": [{"text": system_instruction}]}
+         if tools:
+             body["tools"] = self._convert_tools(tools)
+
+         async with httpx.AsyncClient(timeout=60.0) as client:
+             resp = await client.post(url, headers=headers, json=body)
+             resp.raise_for_status()
+             data = resp.json()
+
+         # Parse Gemini response
+         content_text = ""
+         tool_calls: list[dict[str, Any]] | None = None
+
+         candidates = data.get("candidates", [])
+         if candidates:
+             parts = candidates[0].get("content", {}).get("parts", [])
+             for part in parts:
+                 if "text" in part:
+                     content_text += part["text"]
+                 elif "functionCall" in part:
+                     if tool_calls is None:
+                         tool_calls = []
+                     fc = part["functionCall"]
+                     tool_calls.append({
+                         "id": f"gemini_{fc.get('name', '')}",
+                         "name": fc.get("name", ""),
+                         "arguments": fc.get("args", {}),
+                     })
+
+         return {"content": content_text, "tool_calls": tool_calls}
+
+     async def list_models(self) -> list[dict[str, Any]]:
+         url = f"{self.base_url}/models?key={self.api_key}"
+         async with httpx.AsyncClient(timeout=30.0) as client:
+             resp = await client.get(url)
+             resp.raise_for_status()
+             data = resp.json()
+         return data.get("models", [])
+
+     # -- format converters --
+
+     @staticmethod
+     def _convert_messages(
+         messages: list[dict[str, Any]],
+     ) -> tuple[str, list[dict[str, Any]]]:
+         """Convert OpenAI-style messages to Gemini format.
+
+         Returns (system_instruction_text, contents).
+         """
+         system_parts: list[str] = []
+         contents: list[dict[str, Any]] = []
+
+         for msg in messages:
+             role = msg.get("role", "")
+             content = msg.get("content", "")
+
+             if role == "system":
+                 system_parts.append(content)
+             elif role == "user":
+                 contents.append({"role": "user", "parts": [{"text": content}]})
+             elif role == "assistant":
+                 parts: list[dict[str, Any]] = []
+                 if content:
+                     parts.append({"text": content})
+                 # Include tool calls as functionCall parts
+                 if msg.get("tool_calls"):
+                     for tc in msg["tool_calls"]:
+                         parts.append({
+                             "functionCall": {
+                                 "name": tc.get("name", ""),
+                                 "args": tc.get("arguments", {}),
+                             }
+                         })
+                 if parts:
+                     contents.append({"role": "model", "parts": parts})
+             elif role == "tool":
+                 # Tool result -- Gemini uses functionResponse
+                 contents.append({
+                     "role": "function",
+                     "parts": [{
+                         "functionResponse": {
+                             "name": msg.get("name", ""),
+                             "response": {"result": content},
+                         }
+                     }],
+                 })
+
+         system_text = "\n\n".join(system_parts)
+         return system_text, contents
+
+     @staticmethod
+     def _convert_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         """Convert OpenAI-style tool definitions to Gemini format.
+
+         Gemini expects: [{"function_declarations": [{"name":..., "description":..., "parameters":...}]}]
+         """
+         declarations: list[dict[str, Any]] = []
+         for tool in tools:
+             if tool.get("type") == "function":
+                 func = tool["function"]
+                 declarations.append({
+                     "name": func["name"],
+                     "description": func.get("description", ""),
+                     "parameters": func.get("parameters", {"type": "object", "properties": {}}),
+                 })
+             else:
+                 # Assume it has name/description/parameters directly
+                 declarations.append({
+                     "name": tool.get("name", ""),
+                     "description": tool.get("description", ""),
+                     "parameters": tool.get("parameters", {"type": "object", "properties": {}}),
+                 })
+         return [{"function_declarations": declarations}]
+
+
+ # ---------------------------------------------------------------------------
+ # Factory
+ # ---------------------------------------------------------------------------
+
+ def create_llm_provider() -> LLMProvider:
+     """Create an LLM provider instance based on the active provider configuration."""
+     active = cfg.get("llm.active_provider", "openai")
+     provider_cfg = cfg.get(f"llm.providers.{active}", {})
+
+     if not provider_cfg:
+         raise ValueError(f"No configuration found for LLM provider: {active}")
+
+     base_url = provider_cfg.get("base_url", "")
+     api_key = provider_cfg.get("api_key", "")
+     model = provider_cfg.get("model", "")
+     temperature = provider_cfg.get("temperature", 0.7)
+     max_tokens = provider_cfg.get("max_tokens", 1024)
+
+     if active == "openai":
+         return OpenAILLM(
+             base_url=base_url,
+             api_key=api_key,
+             model=model,
+             temperature=temperature,
+             max_tokens=max_tokens,
+         )
+     elif active == "claude":
+         return ClaudeLLM(
+             base_url=base_url,
+             api_key=api_key,
+             model=model,
+             temperature=temperature,
+             max_tokens=max_tokens,
+         )
+     elif active == "gemini":
+         return GeminiLLM(
+             base_url=base_url,
+             api_key=api_key,
+             model=model,
+             temperature=temperature,
+             max_tokens=max_tokens,
+         )
+     else:
+         raise ValueError(f"Unknown LLM provider: {active}")
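
A minimal usage sketch, not part of the package diff: assuming the web service's config file defines llm.active_provider and llm.providers.<name> keys as read by cfg.get() in the file above, and assuming the module is importable under its packaged path (the import path below is hypothetical), the new factory could be exercised roughly like this:

import asyncio

# Hypothetical import path; the actual package layout may differ.
from extensions.services.web.vendor.conversation.llm import create_llm_provider

async def main() -> None:
    # Returns OpenAILLM, ClaudeLLM, or GeminiLLM depending on llm.active_provider.
    provider = create_llm_provider()
    reply = await provider.generate([{"role": "user", "content": "Hello"}])
    print(reply["content"])      # assistant text
    print(reply["tool_calls"])   # None unless tool definitions were passed

asyncio.run(main())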