kairo-code 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144):
  1. image-service/main.py +178 -0
  2. infra/chat/app/main.py +84 -0
  3. kairo/backend/__init__.py +0 -0
  4. kairo/backend/api/__init__.py +0 -0
  5. kairo/backend/api/admin/__init__.py +23 -0
  6. kairo/backend/api/admin/audit.py +54 -0
  7. kairo/backend/api/admin/content.py +142 -0
  8. kairo/backend/api/admin/incidents.py +148 -0
  9. kairo/backend/api/admin/stats.py +125 -0
  10. kairo/backend/api/admin/system.py +87 -0
  11. kairo/backend/api/admin/users.py +279 -0
  12. kairo/backend/api/agents.py +94 -0
  13. kairo/backend/api/api_keys.py +85 -0
  14. kairo/backend/api/auth.py +116 -0
  15. kairo/backend/api/billing.py +41 -0
  16. kairo/backend/api/chat.py +72 -0
  17. kairo/backend/api/conversations.py +125 -0
  18. kairo/backend/api/device_auth.py +100 -0
  19. kairo/backend/api/files.py +83 -0
  20. kairo/backend/api/health.py +36 -0
  21. kairo/backend/api/images.py +80 -0
  22. kairo/backend/api/openai_compat.py +225 -0
  23. kairo/backend/api/projects.py +102 -0
  24. kairo/backend/api/usage.py +32 -0
  25. kairo/backend/api/webhooks.py +79 -0
  26. kairo/backend/app.py +297 -0
  27. kairo/backend/config.py +179 -0
  28. kairo/backend/core/__init__.py +0 -0
  29. kairo/backend/core/admin_auth.py +24 -0
  30. kairo/backend/core/api_key_auth.py +55 -0
  31. kairo/backend/core/database.py +28 -0
  32. kairo/backend/core/dependencies.py +70 -0
  33. kairo/backend/core/logging.py +23 -0
  34. kairo/backend/core/rate_limit.py +73 -0
  35. kairo/backend/core/security.py +29 -0
  36. kairo/backend/models/__init__.py +19 -0
  37. kairo/backend/models/agent.py +30 -0
  38. kairo/backend/models/api_key.py +25 -0
  39. kairo/backend/models/api_usage.py +29 -0
  40. kairo/backend/models/audit_log.py +26 -0
  41. kairo/backend/models/conversation.py +48 -0
  42. kairo/backend/models/device_code.py +30 -0
  43. kairo/backend/models/feature_flag.py +21 -0
  44. kairo/backend/models/image_generation.py +24 -0
  45. kairo/backend/models/incident.py +28 -0
  46. kairo/backend/models/project.py +28 -0
  47. kairo/backend/models/uptime_record.py +24 -0
  48. kairo/backend/models/usage.py +24 -0
  49. kairo/backend/models/user.py +49 -0
  50. kairo/backend/schemas/__init__.py +0 -0
  51. kairo/backend/schemas/admin/__init__.py +0 -0
  52. kairo/backend/schemas/admin/audit.py +28 -0
  53. kairo/backend/schemas/admin/content.py +53 -0
  54. kairo/backend/schemas/admin/stats.py +77 -0
  55. kairo/backend/schemas/admin/system.py +44 -0
  56. kairo/backend/schemas/admin/users.py +48 -0
  57. kairo/backend/schemas/agent.py +42 -0
  58. kairo/backend/schemas/api_key.py +30 -0
  59. kairo/backend/schemas/auth.py +57 -0
  60. kairo/backend/schemas/chat.py +26 -0
  61. kairo/backend/schemas/conversation.py +39 -0
  62. kairo/backend/schemas/device_auth.py +40 -0
  63. kairo/backend/schemas/image.py +15 -0
  64. kairo/backend/schemas/openai_compat.py +76 -0
  65. kairo/backend/schemas/project.py +21 -0
  66. kairo/backend/schemas/status.py +81 -0
  67. kairo/backend/schemas/usage.py +15 -0
  68. kairo/backend/services/__init__.py +0 -0
  69. kairo/backend/services/admin/__init__.py +0 -0
  70. kairo/backend/services/admin/audit_service.py +78 -0
  71. kairo/backend/services/admin/content_service.py +119 -0
  72. kairo/backend/services/admin/incident_service.py +94 -0
  73. kairo/backend/services/admin/stats_service.py +281 -0
  74. kairo/backend/services/admin/system_service.py +126 -0
  75. kairo/backend/services/admin/user_service.py +157 -0
  76. kairo/backend/services/agent_service.py +107 -0
  77. kairo/backend/services/api_key_service.py +66 -0
  78. kairo/backend/services/api_usage_service.py +126 -0
  79. kairo/backend/services/auth_service.py +101 -0
  80. kairo/backend/services/chat_service.py +501 -0
  81. kairo/backend/services/conversation_service.py +264 -0
  82. kairo/backend/services/device_auth_service.py +193 -0
  83. kairo/backend/services/email_service.py +55 -0
  84. kairo/backend/services/image_service.py +181 -0
  85. kairo/backend/services/llm_service.py +186 -0
  86. kairo/backend/services/project_service.py +109 -0
  87. kairo/backend/services/status_service.py +167 -0
  88. kairo/backend/services/stripe_service.py +78 -0
  89. kairo/backend/services/usage_service.py +150 -0
  90. kairo/backend/services/web_search_service.py +96 -0
  91. kairo/migrations/env.py +60 -0
  92. kairo/migrations/versions/001_initial.py +55 -0
  93. kairo/migrations/versions/002_usage_tracking_and_indexes.py +66 -0
  94. kairo/migrations/versions/003_username_to_email.py +21 -0
  95. kairo/migrations/versions/004_add_plans_and_verification.py +67 -0
  96. kairo/migrations/versions/005_add_projects.py +52 -0
  97. kairo/migrations/versions/006_add_image_generation.py +63 -0
  98. kairo/migrations/versions/007_add_admin_portal.py +107 -0
  99. kairo/migrations/versions/008_add_device_code_auth.py +76 -0
  100. kairo/migrations/versions/009_add_status_page.py +65 -0
  101. kairo/tools/extract_claude_data.py +465 -0
  102. kairo/tools/filter_claude_data.py +303 -0
  103. kairo/tools/generate_curated_data.py +157 -0
  104. kairo/tools/mix_training_data.py +295 -0
  105. kairo_code/__init__.py +3 -0
  106. kairo_code/agents/__init__.py +25 -0
  107. kairo_code/agents/architect.py +98 -0
  108. kairo_code/agents/audit.py +100 -0
  109. kairo_code/agents/base.py +463 -0
  110. kairo_code/agents/coder.py +155 -0
  111. kairo_code/agents/database.py +77 -0
  112. kairo_code/agents/docs.py +88 -0
  113. kairo_code/agents/explorer.py +62 -0
  114. kairo_code/agents/guardian.py +80 -0
  115. kairo_code/agents/planner.py +66 -0
  116. kairo_code/agents/reviewer.py +91 -0
  117. kairo_code/agents/security.py +94 -0
  118. kairo_code/agents/terraform.py +88 -0
  119. kairo_code/agents/testing.py +97 -0
  120. kairo_code/agents/uiux.py +88 -0
  121. kairo_code/auth.py +232 -0
  122. kairo_code/config.py +172 -0
  123. kairo_code/conversation.py +173 -0
  124. kairo_code/heartbeat.py +63 -0
  125. kairo_code/llm.py +291 -0
  126. kairo_code/logging_config.py +156 -0
  127. kairo_code/main.py +818 -0
  128. kairo_code/router.py +217 -0
  129. kairo_code/sandbox.py +248 -0
  130. kairo_code/settings.py +183 -0
  131. kairo_code/tools/__init__.py +51 -0
  132. kairo_code/tools/analysis.py +509 -0
  133. kairo_code/tools/base.py +417 -0
  134. kairo_code/tools/code.py +58 -0
  135. kairo_code/tools/definitions.py +617 -0
  136. kairo_code/tools/files.py +315 -0
  137. kairo_code/tools/review.py +390 -0
  138. kairo_code/tools/search.py +185 -0
  139. kairo_code/ui.py +418 -0
  140. kairo_code-0.1.0.dist-info/METADATA +13 -0
  141. kairo_code-0.1.0.dist-info/RECORD +144 -0
  142. kairo_code-0.1.0.dist-info/WHEEL +5 -0
  143. kairo_code-0.1.0.dist-info/entry_points.txt +2 -0
  144. kairo_code-0.1.0.dist-info/top_level.txt +4 -0
@@ -0,0 +1,63 @@
1
+ """Agent heartbeat for Kairo Code CLI.
2
+
3
+ Sends periodic heartbeat to the Kairo backend to register this CLI
4
+ instance as an active agent. Requires a valid API key and agent ID.
5
+ """
6
+
7
+ import threading
8
+ import time
9
+ import logging
10
+
11
+ import httpx
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+ HEARTBEAT_INTERVAL = 30 # seconds
16
+ BACKEND_URL = "https://app.kaironlabs.io"
17
+
18
+
19
class Heartbeat:
    """Background thread that periodically reports this CLI instance as online.

    POSTs ``{"agent_id": ..., "status": "online"}`` to
    ``{BACKEND_URL}/agents/heartbeat`` every HEARTBEAT_INTERVAL seconds,
    authenticated with a Bearer API key. Failures are logged and ignored.
    """

    def __init__(self, api_key: str, agent_id: str):
        # Both credentials must be non-empty for start() to do anything.
        self.api_key = api_key
        self.agent_id = agent_id
        self._stop = threading.Event()
        self._thread: threading.Thread | None = None

    def start(self) -> None:
        """Start the heartbeat background thread.

        No-op when credentials are missing or a sender thread is already
        running (the original could spawn a second thread on double-start).
        """
        if not self.api_key or not self.agent_id:
            return
        # Guard against double-start: never run two sender threads at once.
        if self._thread is not None and self._thread.is_alive():
            return
        # Allow restart after a previous stop().
        self._stop.clear()
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()
        logger.info("Heartbeat started for agent %s", self.agent_id)

    def stop(self) -> None:
        """Signal the loop to exit and wait (bounded) for the thread to end."""
        self._stop.set()
        if self._thread:
            self._thread.join(timeout=5)
        logger.info("Heartbeat stopped")

    def _loop(self) -> None:
        """Heartbeat loop running in the background thread.

        NOTE: the first beat is sent after one full interval, not immediately.
        """
        while not self._stop.is_set():
            # Event.wait doubles as an interruptible sleep: stop() wakes us early.
            self._stop.wait(HEARTBEAT_INTERVAL)
            if self._stop.is_set():
                break

            try:
                resp = httpx.post(
                    f"{BACKEND_URL}/agents/heartbeat",
                    json={"agent_id": self.agent_id, "status": "online"},
                    headers={"Authorization": f"Bearer {self.api_key}"},
                    timeout=10,
                )
                if resp.status_code == 200:
                    logger.debug("Heartbeat OK for agent %s", self.agent_id)
                else:
                    logger.warning("Heartbeat returned %s", resp.status_code)
            except Exception as e:
                # Best-effort: a failed beat must never crash the CLI.
                logger.warning("Heartbeat failed: %s", e)
kairo_code/llm.py ADDED
@@ -0,0 +1,291 @@
1
+ """Cloud LLM wrapper with streaming support via OpenAI-compatible API (vLLM)"""
2
+
3
+ import time
4
+ from typing import Generator, Any
5
+
6
+ from openai import OpenAI
7
+
8
+ from .config import Config
9
+ from .logging_config import get_llm_logger, log_model_selection
10
+
11
+ logger = get_llm_logger()
12
+
13
+
14
class LLM:
    """Wrapper around an OpenAI-compatible API (vLLM) with streaming and
    per-category model selection."""

    def __init__(self):
        # Project configuration: endpoint, API key, model preferences.
        self.config = Config()
        # OpenAI SDK client pointed at the configured vLLM endpoint.
        self.client = OpenAI(
            api_key=self.config.cloud_api_key,
            base_url=self.config.cloud_endpoint,
        )
        # Lazily-populated cache of models advertised by the endpoint.
        self._available_models: list[str] | None = None
        # Per-category resolved model cache; emptied by clear_cache().
        self._resolved_models: dict[str, str] = {}
26
+
27
def check_health(self) -> bool:
    """Return True if the vLLM backend answers its /health endpoint.

    Any transport error (timeout, DNS failure, refused connection) is
    treated as unhealthy rather than raised.
    """
    import httpx
    try:
        # BUG FIX: str.rstrip("/v1") strips *characters* from the set
        # {'/', 'v', '1'}, so "http://host:8001/v1" became
        # "http://host:800". removesuffix trims the literal suffix only.
        base = self.config.cloud_endpoint.rstrip("/").removesuffix("/v1").rstrip("/")
        resp = httpx.get(f"{base}/health", timeout=5.0)
        return resp.status_code == 200
    except Exception:
        return False
36
+
37
+ def clear_cache(self):
38
+ """Clear model cache to force re-resolution."""
39
+ self._available_models = None
40
+ self._resolved_models = {}
41
+
42
+ def _get_available_models(self) -> list[str]:
43
+ """Get list of available models (cached)."""
44
+ if self._available_models is None:
45
+ self._available_models = self.list_models()
46
+ return self._available_models
47
+
48
+ def _model_matches(self, available: str, wanted: str) -> bool:
49
+ """Check if an available model matches a wanted model name."""
50
+ # Exact match
51
+ if available == wanted:
52
+ return True
53
+ # Partial match (e.g., "deepseek-coder-33b" matches "deepseek")
54
+ if available.startswith(wanted) or wanted in available:
55
+ return True
56
+ return False
57
+
58
def select_model(self, category: str) -> str:
    """Resolve the model to use for *category* ('coder', 'router', 'chat').

    Resolution order: cached result, explicitly configured model,
    first preference matching an available model, first available model,
    and finally the configured default model.
    """
    logger.debug(f"Selecting model for category: {category}")

    # Already resolved this session?
    if category in self._resolved_models:
        logger.debug(f"Using cached model: {self._resolved_models[category]}")
        return self._resolved_models[category]

    configured = self.config.get(f"models.{category}", "auto")
    logger.debug(f"Configured model: {configured}")

    # An explicit (non-"auto") configuration wins outright.
    if configured != "auto":
        self._resolved_models[category] = configured
        logger.info(f"Model selection: {category} -> {configured} (explicit)")
        return configured

    preferences = self.config.get(f"models.preferences.{category}", [])
    available = self._get_available_models()
    logger.debug(f"Available models: {available}")
    logger.debug(f"Preferences for {category}: {preferences}")

    # First preference (in order) that matches any available model.
    for wanted in preferences:
        match = next((m for m in available if self._model_matches(m, wanted)), None)
        if match is not None:
            self._resolved_models[category] = match
            logger.info(f"Model selection: {category} -> {match} (matched {wanted})")
            log_model_selection(category, match, available)
            return match

    # No preference matched: take whatever the endpoint offers first.
    if available:
        first = available[0]
        self._resolved_models[category] = first
        logger.warning(f"Model selection: {category} -> {first} (fallback)")
        return first

    # Endpoint reported nothing usable: fall back to the configured default.
    default_model = self.config.get("cloud.default_model", "deepseek-ai/deepseek-coder-33b-instruct")
    self._resolved_models[category] = default_model
    logger.warning(f"Model selection: {category} -> {default_model} (default)")
    return default_model
111
+
112
+ def generate(
113
+ self,
114
+ prompt: str,
115
+ model: str | None = None,
116
+ system: str | None = None,
117
+ stream: bool = True,
118
+ ) -> Generator[str, None, None] | str:
119
+ """
120
+ Generate a response from the LLM.
121
+
122
+ Args:
123
+ prompt: The user prompt
124
+ model: Model to use (defaults to coder model)
125
+ system: System prompt (defaults to config system prompt)
126
+ stream: Whether to stream the response
127
+
128
+ Returns:
129
+ Generator of tokens if streaming, otherwise full response string
130
+ """
131
+ model = model or self.select_model("coder")
132
+ system = system or self.config.system_prompt
133
+
134
+ # Convert to chat format for OpenAI API
135
+ messages = []
136
+ if system:
137
+ messages.append({"role": "system", "content": system})
138
+ messages.append({"role": "user", "content": prompt})
139
+
140
+ if stream:
141
+ return self._stream_chat(messages, model)
142
+ else:
143
+ response = self.client.chat.completions.create(
144
+ model=model,
145
+ messages=messages,
146
+ max_tokens=self.config.get("cloud.max_tokens", 4096),
147
+ temperature=self.config.get("cloud.temperature", 0.1),
148
+ )
149
+ return response.choices[0].message.content
150
+
151
+ def _stream_generate(
152
+ self,
153
+ prompt: str,
154
+ model: str,
155
+ system: str,
156
+ ) -> Generator[str, None, None]:
157
+ """Stream tokens from the LLM."""
158
+ messages = []
159
+ if system:
160
+ messages.append({"role": "system", "content": system})
161
+ messages.append({"role": "user", "content": prompt})
162
+
163
+ return self._stream_chat(messages, model)
164
+
165
+ def chat(
166
+ self,
167
+ messages: list[dict[str, str]],
168
+ model: str | None = None,
169
+ stream: bool = True,
170
+ ) -> Generator[str, None, None] | str:
171
+ """
172
+ Chat with conversation history.
173
+
174
+ Args:
175
+ messages: List of message dicts with 'role' and 'content'
176
+ model: Model to use (defaults to coder model)
177
+ stream: Whether to stream the response
178
+
179
+ Returns:
180
+ Generator of tokens if streaming, otherwise full response string
181
+ """
182
+ model = model or self.select_model("coder")
183
+
184
+ if stream:
185
+ return self._stream_chat(messages, model)
186
+ else:
187
+ response = self.client.chat.completions.create(
188
+ model=model,
189
+ messages=messages,
190
+ max_tokens=self.config.get("cloud.max_tokens", 4096),
191
+ temperature=self.config.get("cloud.temperature", 0.1),
192
+ )
193
+ return response.choices[0].message.content
194
+
195
def _stream_chat(
    self,
    messages: list[dict[str, str]],
    model: str,
) -> Generator[str, None, None]:
    """Yield text chunks from a streaming chat completion.

    Logs request context, chunk count and elapsed time; any API error is
    logged and re-raised.
    """
    logger.info(f"Starting streaming chat with model: {model}")
    logger.debug(f"Message count: {len(messages)}")
    if messages:
        tail = messages[-1]
        logger.debug(f"Last message ({tail.get('role')}): {tail.get('content', '')[:200]}...")

    started = time.time()
    chunks_seen = 0

    try:
        logger.debug("Calling cloud API...")
        stream = self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=self.config.get("cloud.max_tokens", 4096),
            temperature=self.config.get("cloud.temperature", 0.1),
            stream=True,
        )

        logger.debug("Stream started, receiving tokens...")
        for chunk in stream:
            # Skip keep-alive/empty deltas; yield only real text.
            delta = chunk.choices[0].delta.content if chunk.choices else None
            if delta:
                chunks_seen += 1
                yield delta

        logger.info(f"Chat completed: {chunks_seen} chunks in {time.time() - started:.2f}s")

    except Exception as e:
        logger.error(f"Chat failed after {time.time() - started:.2f}s: {e}")
        raise
233
+
234
def classify(self, prompt: str, categories: list[str] | None = None) -> str:
    """
    Use the router model to classify intent.

    Args:
        prompt: The user's input to classify
        categories: Optional list of valid categories

    Returns:
        The classified intent category, lower-cased. When *categories* is
        given and none of them appears in the model output, falls back to
        "chat".
    """
    logger.info(f"Classifying intent for: {prompt[:100]}...")
    system = self.config.router_prompt
    router_model = self.select_model("router")

    start_time = time.time()
    logger.debug(f"Calling router model: {router_model}")

    messages = [
        {"role": "system", "content": system},
        {"role": "user", "content": prompt},
    ]

    # Deterministic, short completion: we only need a category word.
    response = self.client.chat.completions.create(
        model=router_model,
        messages=messages,
        max_tokens=50,
        temperature=0.0,
    )

    elapsed = time.time() - start_time
    # FIX: message.content can be None (e.g. empty/refused completion);
    # the original crashed on .strip() in that case.
    result = (response.choices[0].message.content or "").strip().lower()
    logger.info(f"Classification result: '{result}' (took {elapsed:.2f}s)")

    # Validate against categories if provided
    if categories:
        for cat in categories:
            if cat.lower() in result:
                return cat.lower()
        # Default to chat if no match
        return "chat"

    return result
277
+
278
def list_models(self) -> list[str]:
    """Return the model ids advertised by the endpoint.

    Falls back to the configured default model when the endpoint cannot
    be queried, so callers always get a non-empty list.
    """
    try:
        return [entry.id for entry in self.client.models.list().data]
    except Exception as e:
        logger.warning(f"Failed to list models: {e}")
        # Return default model if listing fails
        return [self.config.get("cloud.default_model", "deepseek-ai/deepseek-coder-33b-instruct")]
287
+
288
+ def check_model(self, model: str) -> bool:
289
+ """Check if a model is available."""
290
+ available = self.list_models()
291
+ return any(model in m for m in available)
@@ -0,0 +1,156 @@
1
+ """Logging configuration for Kairo Code"""
2
+
3
+ import logging
4
+ import os
5
+ import stat
6
+ import sys
7
+ from datetime import datetime
8
+ from pathlib import Path
9
+ from typing import Any, Optional
10
+
11
+
12
+ # Log directory
13
+ LOG_DIR = Path.home() / ".kairo_code" / "logs"
14
+
15
+
16
def setup_logging(
    level: int = logging.DEBUG,
    console_level: int = logging.WARNING,
    log_file: Optional[Path] = None,
) -> logging.Logger:
    """
    Configure the "kairo_code" logger with a file and a console handler.

    Args:
        level: File logging level (default DEBUG for full detail)
        console_level: Console logging level (default WARNING to not clutter output)
        log_file: Optional custom log file path

    Returns:
        Configured logger instance (also exposes the chosen path as
        ``logger.log_file``).
    """
    # Log directory must exist and be private to the current user.
    LOG_DIR.mkdir(parents=True, exist_ok=True)
    try:
        os.chmod(LOG_DIR, stat.S_IRWXU)  # 0700 — owner only
    except OSError:
        # Best effort — e.g. filesystems that don't support chmod.
        pass

    # Default to a fresh, timestamped per-session log file.
    if log_file is None:
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        log_file = LOG_DIR / f"kairo_{stamp}.log"

    session_logger = logging.getLogger("kairo_code")
    session_logger.setLevel(logging.DEBUG)  # capture everything; handlers filter
    session_logger.handlers.clear()  # idempotent re-configuration

    # Detailed file handler.
    to_file = logging.FileHandler(log_file, encoding="utf-8")
    to_file.setLevel(level)
    to_file.setFormatter(logging.Formatter(
        "%(asctime)s | %(levelname)-8s | %(name)s.%(funcName)s:%(lineno)d | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    ))
    session_logger.addHandler(to_file)

    # Quiet console handler on stderr (warnings/errors only, to not clutter UI).
    to_console = logging.StreamHandler(sys.stderr)
    to_console.setLevel(console_level)
    to_console.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
    session_logger.addHandler(to_console)

    # Expose the active log file path on the logger for callers.
    session_logger.log_file = log_file

    session_logger.info("=== Kairo Code Session Started ===")
    session_logger.info(f"Log file: {log_file}")

    return session_logger
75
+
76
+
77
def get_logger(name: str = "kairo_code") -> logging.Logger:
    """Return the named logger; defaults to the root "kairo_code" logger."""
    return logging.getLogger(name)
80
+
81
+
82
# Component-specific child loggers of "kairo_code".
def get_llm_logger() -> logging.Logger:
    """Logger for LLM request/response activity."""
    return logging.getLogger("kairo_code.llm")

def get_tool_logger() -> logging.Logger:
    """Logger for tool invocations."""
    return logging.getLogger("kairo_code.tools")

def get_agent_logger() -> logging.Logger:
    """Logger for agent activity."""
    return logging.getLogger("kairo_code.agents")

def get_router_logger() -> logging.Logger:
    """Logger for routing decisions."""
    return logging.getLogger("kairo_code.router")
94
+
95
+
96
class LogContext:
    """Context manager that brackets a log section with START/END lines.

    On normal exit the END line is logged at *level*; if the body raised,
    it is logged as an error including the exception value. Exceptions are
    never suppressed.
    """

    def __init__(self, logger: logging.Logger, section: str, level: int = logging.DEBUG):
        self.logger = logger
        self.section = section
        self.level = level

    def __enter__(self):
        self.logger.log(self.level, f">>> START: {self.section}")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.logger.log(self.level, f"<<< END: {self.section}")
        else:
            self.logger.error(f"<<< END: {self.section} (ERROR: {exc_val})")
        return False  # never swallow the exception
114
+
115
+
116
def log_tool_call(tool_name: str, params: dict, result: "Any") -> None:
    """Log one tool invocation: name at INFO, params/output at DEBUG.

    *result* is expected to expose ``success``, ``output`` and ``error``
    attributes (the project's tool-result object) — confirm against callers.
    """
    logger = logging.getLogger("kairo_code.tools")  # same as get_tool_logger()
    logger.info(f"TOOL CALL: {tool_name}")
    logger.debug(f"  Params: {params}")
    if not result.success:
        logger.warning(f"  Result: FAILED - {result.error}")
        return
    logger.debug(f"  Result: SUCCESS")
    # Truncate long output so logs stay readable.
    text = result.output
    if len(text) > 500:
        text = text[:500] + "..."
    logger.debug(f"  Output: {text}")
128
+
129
+
130
+ def log_llm_request(model: str, messages: list, system: str = None) -> None:
131
+ """Log an LLM request."""
132
+ logger = get_llm_logger()
133
+ logger.info(f"LLM REQUEST: model={model}")
134
+ if system:
135
+ logger.debug(f" System prompt: {system[:200]}...")
136
+ logger.debug(f" Messages: {len(messages)} total")
137
+ for i, msg in enumerate(messages[-3:]): # Log last 3 messages
138
+ content = msg.get('content', '')[:200]
139
+ logger.debug(f" [{i}] {msg.get('role')}: {content}...")
140
+
141
+
142
+ def log_llm_response(model: str, response: str, tokens: int = None) -> None:
143
+ """Log an LLM response."""
144
+ logger = get_llm_logger()
145
+ truncated = response[:500] + "..." if len(response) > 500 else response
146
+ logger.info(f"LLM RESPONSE: model={model}, length={len(response)}")
147
+ logger.debug(f" Content: {truncated}")
148
+ if tokens:
149
+ logger.debug(f" Tokens: {tokens}")
150
+
151
+
152
def log_model_selection(category: str, selected: str, available: list) -> None:
    """Record which model auto-selection picked for *category*."""
    log = logging.getLogger("kairo_code.llm")  # same as get_llm_logger()
    log.info(f"MODEL SELECTION: {category} -> {selected}")
    log.debug(f"  Available models: {available}")