router-maestro 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. router_maestro/__init__.py +3 -0
  2. router_maestro/__main__.py +6 -0
  3. router_maestro/auth/__init__.py +18 -0
  4. router_maestro/auth/github_oauth.py +181 -0
  5. router_maestro/auth/manager.py +136 -0
  6. router_maestro/auth/storage.py +91 -0
  7. router_maestro/cli/__init__.py +1 -0
  8. router_maestro/cli/auth.py +167 -0
  9. router_maestro/cli/client.py +322 -0
  10. router_maestro/cli/config.py +132 -0
  11. router_maestro/cli/context.py +146 -0
  12. router_maestro/cli/main.py +42 -0
  13. router_maestro/cli/model.py +288 -0
  14. router_maestro/cli/server.py +117 -0
  15. router_maestro/cli/stats.py +76 -0
  16. router_maestro/config/__init__.py +72 -0
  17. router_maestro/config/contexts.py +29 -0
  18. router_maestro/config/paths.py +50 -0
  19. router_maestro/config/priorities.py +93 -0
  20. router_maestro/config/providers.py +34 -0
  21. router_maestro/config/server.py +115 -0
  22. router_maestro/config/settings.py +76 -0
  23. router_maestro/providers/__init__.py +31 -0
  24. router_maestro/providers/anthropic.py +203 -0
  25. router_maestro/providers/base.py +123 -0
  26. router_maestro/providers/copilot.py +346 -0
  27. router_maestro/providers/openai.py +188 -0
  28. router_maestro/providers/openai_compat.py +175 -0
  29. router_maestro/routing/__init__.py +5 -0
  30. router_maestro/routing/router.py +526 -0
  31. router_maestro/server/__init__.py +5 -0
  32. router_maestro/server/app.py +87 -0
  33. router_maestro/server/middleware/__init__.py +11 -0
  34. router_maestro/server/middleware/auth.py +66 -0
  35. router_maestro/server/oauth_sessions.py +159 -0
  36. router_maestro/server/routes/__init__.py +8 -0
  37. router_maestro/server/routes/admin.py +358 -0
  38. router_maestro/server/routes/anthropic.py +228 -0
  39. router_maestro/server/routes/chat.py +142 -0
  40. router_maestro/server/routes/models.py +34 -0
  41. router_maestro/server/schemas/__init__.py +57 -0
  42. router_maestro/server/schemas/admin.py +87 -0
  43. router_maestro/server/schemas/anthropic.py +246 -0
  44. router_maestro/server/schemas/openai.py +107 -0
  45. router_maestro/server/translation.py +636 -0
  46. router_maestro/stats/__init__.py +14 -0
  47. router_maestro/stats/heatmap.py +154 -0
  48. router_maestro/stats/storage.py +228 -0
  49. router_maestro/stats/tracker.py +73 -0
  50. router_maestro/utils/__init__.py +16 -0
  51. router_maestro/utils/logging.py +81 -0
  52. router_maestro/utils/tokens.py +51 -0
  53. router_maestro-0.1.2.dist-info/METADATA +383 -0
  54. router_maestro-0.1.2.dist-info/RECORD +57 -0
  55. router_maestro-0.1.2.dist-info/WHEEL +4 -0
  56. router_maestro-0.1.2.dist-info/entry_points.txt +2 -0
  57. router_maestro-0.1.2.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,154 @@
1
+ """Terminal heatmap generation for token usage."""
2
+
3
+ from datetime import datetime, timedelta
4
+
5
+ import plotext as plt
6
+ from rich.console import Console
7
+ from rich.table import Table
8
+
9
+ from router_maestro.stats.storage import StatsStorage
10
+
11
+ console = Console()
12
+
13
+
14
def generate_heatmap(days: int = 7, provider: str | None = None, model: str | None = None) -> None:
    """Render a terminal view of token usage over the recent period.

    Args:
        days: Number of days to show
        provider: Filter by provider (optional)
        model: Filter by model (optional)
    """
    storage = StatsStorage()
    hourly_data = storage.get_usage_by_hour(days=days, provider=provider, model=model)

    if not hourly_data:
        console.print("[dim]No usage data available for the specified period.[/dim]")
        return

    # Dates covered by the window, oldest first.
    today = datetime.now().date()
    dates = [today - timedelta(days=offset) for offset in reversed(range(days))]
    date_to_idx = {day: idx for idx, day in enumerate(dates)}

    # days x 24 grid of token totals, zero-filled then populated from the rows.
    matrix = [[0] * 24 for _ in dates]
    for record in hourly_data:
        record_date = datetime.fromisoformat(record["date"]).date()
        idx = date_to_idx.get(record_date)
        if idx is not None:
            matrix[idx][record["hour"]] = record["total_tokens"]

    plt.clear_figure()
    plt.title(f"Token Usage Heatmap (Last {days} Days)")

    # plotext has no native heatmap, so per-day totals are drawn as a bar chart.
    daily_data = storage.get_usage_by_day(days=days, provider=provider, model=model)
    if daily_data:
        labels = [row["date"] for row in daily_data]
        totals = [row["total_tokens"] for row in daily_data]
        plt.bar(labels, totals)
        plt.xlabel("Date")
        plt.ylabel("Total Tokens")
        plt.show()

    # Complement the bar chart with an hour-by-hour text heatmap.
    _display_text_heatmap(dates, matrix)
64
+
65
+
66
def _display_text_heatmap(dates: list, matrix: list[list[int]]) -> None:
    """Print an hour-by-hour activity heatmap using shaded block characters.

    Args:
        dates: List of dates
        matrix: 2D matrix of token counts (days x hours)
    """
    console.print("\n[bold]Hourly Activity Heatmap:[/bold]")

    # Scale against the busiest hour; guard against empty or all-zero data.
    peak = max(max(row) for row in matrix) if matrix else 1
    peak = peak or 1

    intensity_chars = " ░▒▓█"
    top_level = len(intensity_chars) - 1

    # Hour axis: a two-character label every other hour, padded past the date column.
    header = "      " + "".join(f"{h:2d}" for h in range(0, 24, 2))
    console.print(f"[dim]{header}[/dim]")

    for row_idx, day in enumerate(dates):
        cells = []
        for hour in range(24):
            level = int((matrix[row_idx][hour] / peak) * top_level)
            cells.append(intensity_chars[level])
        console.print(f"{day.strftime('%m/%d')} " + "".join(cells))

    console.print(f"\n[dim]Legend: {' '.join(intensity_chars)} (low to high)[/dim]")
98
+
99
+
100
def display_stats_summary(days: int = 7) -> None:
    """Print summary tables of token usage statistics.

    Args:
        days: Number of days to summarize
    """
    storage = StatsStorage()
    total = storage.get_total_usage(days=days)
    by_model = storage.get_usage_by_model(days=days)

    # SUM aggregates come back NULL when there are no rows in the window.
    if not total or total.get("total_tokens") is None:
        console.print("[dim]No usage data available.[/dim]")
        return

    console.print(f"\n[bold]Token Usage Summary (Last {days} Days)[/bold]\n")

    summary_table = Table(show_header=False, box=None)
    summary_table.add_column("Metric", style="cyan")
    summary_table.add_column("Value", style="green", justify="right")

    for label, key in (
        ("Total Requests", "request_count"),
        ("Successful", "success_count"),
        ("Total Tokens", "total_tokens"),
        ("  Prompt", "prompt_tokens"),
        ("  Completion", "completion_tokens"),
    ):
        summary_table.add_row(label, f"{total.get(key, 0):,}")

    if total.get("avg_latency_ms"):
        summary_table.add_row("Avg Latency", f"{total.get('avg_latency_ms', 0):.0f} ms")

    console.print(summary_table)

    if not by_model:
        return

    # Per-model breakdown, ordered by the storage query (total tokens desc).
    console.print("\n[bold]Usage by Model[/bold]\n")

    model_table = Table()
    model_table.add_column("Model", style="cyan")
    model_table.add_column("Provider", style="magenta")
    model_table.add_column("Requests", justify="right")
    model_table.add_column("Total Tokens", justify="right", style="green")
    model_table.add_column("Avg Latency", justify="right")

    for row in by_model:
        avg_latency = row.get("avg_latency_ms")
        model_table.add_row(
            row["model"],
            row["provider"],
            f"{row['request_count']:,}",
            f"{row['total_tokens']:,}",
            f"{avg_latency:.0f} ms" if avg_latency else "-",
        )

    console.print(model_table)
@@ -0,0 +1,228 @@
1
+ """SQLite storage for token usage statistics."""
2
+
3
import sqlite3
from collections.abc import Iterator
from contextlib import contextmanager
from datetime import datetime, timedelta
from pathlib import Path

from pydantic import BaseModel

from router_maestro.config import STATS_DB_FILE
12
+
13
+
14
class UsageRecord(BaseModel):
    """A single token usage record."""

    # Row id assigned by SQLite; None until the record is persisted.
    id: int | None = None
    # When the request completed; persisted as ISO-8601 text by StatsStorage.
    timestamp: datetime
    provider: str
    model: str
    prompt_tokens: int
    completion_tokens: int
    # prompt_tokens + completion_tokens (callers supply the sum).
    total_tokens: int
    # True when the upstream request succeeded; stored as 0/1 in SQLite.
    success: bool
    # Request latency in milliseconds, when measured.
    latency_ms: int | None = None
26
+
27
+
28
class StatsStorage:
    """SQLite storage for token usage statistics.

    Timestamps are written as local-time ISO-8601 strings (``UsageRecord``
    timestamps come from ``datetime.now()``), so look-back cutoffs are
    computed in Python with the same local clock. The previous SQL filter
    ``timestamp >= datetime('now', ?)`` compared against SQLite's UTC clock
    (and a space-separated text format), skewing every window by the local
    UTC offset.
    """

    def __init__(self, db_path: Path = STATS_DB_FILE) -> None:
        """Open the stats database at db_path, creating the schema if needed."""
        self.db_path = db_path
        self._init_db()

    def _init_db(self) -> None:
        """Initialize the database schema (idempotent)."""
        self.db_path.parent.mkdir(parents=True, exist_ok=True)

        with self._get_connection() as conn:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS usage (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp TEXT NOT NULL,
                    provider TEXT NOT NULL,
                    model TEXT NOT NULL,
                    prompt_tokens INTEGER NOT NULL,
                    completion_tokens INTEGER NOT NULL,
                    total_tokens INTEGER NOT NULL,
                    success INTEGER NOT NULL,
                    latency_ms INTEGER
                )
            """)
            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_usage_timestamp ON usage(timestamp)
            """)
            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_usage_provider ON usage(provider)
            """)
            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_usage_model ON usage(model)
            """)
            conn.commit()

    @contextmanager
    def _get_connection(self) -> Iterator[sqlite3.Connection]:
        """Yield a connection with dict-like rows; always closed on exit."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        try:
            yield conn
        finally:
            conn.close()

    @staticmethod
    def _cutoff_iso(days: int) -> str:
        """Return the local-time ISO-8601 cutoff for an N-day look-back.

        Matches the format written by record(), so lexicographic comparison
        in SQL is equivalent to chronological comparison.
        """
        return (datetime.now() - timedelta(days=days)).isoformat()

    @staticmethod
    def _append_filters(
        query: str, params: list, provider: str | None, model: str | None
    ) -> str:
        """Append optional provider/model predicates to query, extending params."""
        if provider:
            query += " AND provider = ?"
            params.append(provider)
        if model:
            query += " AND model = ?"
            params.append(model)
        return query

    def record(self, record: UsageRecord) -> None:
        """Record a usage event."""
        with self._get_connection() as conn:
            conn.execute(
                """
                INSERT INTO usage (
                    timestamp, provider, model,
                    prompt_tokens, completion_tokens, total_tokens,
                    success, latency_ms
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                """,
                (
                    record.timestamp.isoformat(),
                    record.provider,
                    record.model,
                    record.prompt_tokens,
                    record.completion_tokens,
                    record.total_tokens,
                    1 if record.success else 0,
                    record.latency_ms,
                ),
            )
            conn.commit()

    def get_usage_by_day(
        self, days: int = 7, provider: str | None = None, model: str | None = None
    ) -> list[dict]:
        """Get usage aggregated by day.

        Args:
            days: Number of days to look back
            provider: Filter by provider (optional)
            model: Filter by model (optional)

        Returns:
            List of dicts with date, total_tokens, request_count
        """
        with self._get_connection() as conn:
            query = """
                SELECT
                    DATE(timestamp) as date,
                    SUM(total_tokens) as total_tokens,
                    SUM(prompt_tokens) as prompt_tokens,
                    SUM(completion_tokens) as completion_tokens,
                    COUNT(*) as request_count,
                    SUM(CASE WHEN success = 1 THEN 1 ELSE 0 END) as success_count
                FROM usage
                WHERE timestamp >= ?
            """
            params: list = [self._cutoff_iso(days)]
            query = self._append_filters(query, params, provider, model)
            query += " GROUP BY DATE(timestamp) ORDER BY date"

            cursor = conn.execute(query, params)
            return [dict(row) for row in cursor.fetchall()]

    def get_usage_by_hour(
        self, days: int = 7, provider: str | None = None, model: str | None = None
    ) -> list[dict]:
        """Get usage aggregated by hour.

        Args:
            days: Number of days to look back
            provider: Filter by provider (optional)
            model: Filter by model (optional)

        Returns:
            List of dicts with date, hour, total_tokens, request_count
        """
        with self._get_connection() as conn:
            query = """
                SELECT
                    DATE(timestamp) as date,
                    CAST(strftime('%H', timestamp) AS INTEGER) as hour,
                    SUM(total_tokens) as total_tokens,
                    COUNT(*) as request_count
                FROM usage
                WHERE timestamp >= ?
            """
            params: list = [self._cutoff_iso(days)]
            query = self._append_filters(query, params, provider, model)
            query += " GROUP BY DATE(timestamp), hour ORDER BY date, hour"

            cursor = conn.execute(query, params)
            return [dict(row) for row in cursor.fetchall()]

    def get_usage_by_model(self, days: int = 7) -> list[dict]:
        """Get usage aggregated by model.

        Args:
            days: Number of days to look back

        Returns:
            List of dicts with model, provider, total_tokens, request_count
        """
        with self._get_connection() as conn:
            cursor = conn.execute(
                """
                SELECT
                    model,
                    provider,
                    SUM(total_tokens) as total_tokens,
                    SUM(prompt_tokens) as prompt_tokens,
                    SUM(completion_tokens) as completion_tokens,
                    COUNT(*) as request_count,
                    AVG(latency_ms) as avg_latency_ms
                FROM usage
                WHERE timestamp >= ?
                GROUP BY model, provider
                ORDER BY total_tokens DESC
                """,
                (self._cutoff_iso(days),),
            )
            return [dict(row) for row in cursor.fetchall()]

    def get_total_usage(self, days: int = 7) -> dict:
        """Get total usage statistics.

        Args:
            days: Number of days to look back

        Returns:
            Dict with total statistics (aggregates are NULL when no rows match)
        """
        with self._get_connection() as conn:
            cursor = conn.execute(
                """
                SELECT
                    SUM(total_tokens) as total_tokens,
                    SUM(prompt_tokens) as prompt_tokens,
                    SUM(completion_tokens) as completion_tokens,
                    COUNT(*) as request_count,
                    SUM(CASE WHEN success = 1 THEN 1 ELSE 0 END) as success_count,
                    AVG(latency_ms) as avg_latency_ms
                FROM usage
                WHERE timestamp >= ?
                """,
                (self._cutoff_iso(days),),
            )
            row = cursor.fetchone()
            return dict(row) if row else {}
@@ -0,0 +1,73 @@
1
+ """Token usage tracker."""
2
+
3
+ import time
4
+ from datetime import datetime
5
+
6
+ from router_maestro.stats.storage import StatsStorage, UsageRecord
7
+
8
+
9
class UsageTracker:
    """Tracks token usage for requests."""

    # Process-wide singleton, created lazily by get_instance().
    _instance: "UsageTracker | None" = None

    def __init__(self) -> None:
        self.storage = StatsStorage()

    @classmethod
    def get_instance(cls) -> "UsageTracker":
        """Get the singleton instance."""
        inst = cls._instance
        if inst is None:
            inst = cls._instance = cls()
        return inst

    def record(
        self,
        provider: str,
        model: str,
        prompt_tokens: int,
        completion_tokens: int,
        success: bool = True,
        latency_ms: int | None = None,
    ) -> None:
        """Record a usage event.

        Args:
            provider: Provider name
            model: Model name
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens
            success: Whether the request was successful
            latency_ms: Latency in milliseconds
        """
        # Timestamp the event now; total is always the sum of the two counts.
        self.storage.record(
            UsageRecord(
                timestamp=datetime.now(),
                provider=provider,
                model=model,
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
                success=success,
                latency_ms=latency_ms,
            )
        )
54
+
55
+
56
class RequestTimer:
    """Context manager that measures a request's wall-clock duration."""

    def __init__(self) -> None:
        # perf_counter readings; both stay 0.0 until the timer is used.
        self.start_time: float = 0
        self.end_time: float = 0

    def __enter__(self) -> "RequestTimer":
        self.start_time = time.perf_counter()
        return self

    def __exit__(self, *args) -> None:
        self.end_time = time.perf_counter()

    @property
    def elapsed_ms(self) -> int:
        """Elapsed time between enter and exit, in whole milliseconds."""
        delta_seconds = self.end_time - self.start_time
        return int(delta_seconds * 1000)
@@ -0,0 +1,16 @@
1
+ """Utils module for router-maestro."""
2
+
3
+ from router_maestro.utils.logging import get_logger, setup_logging
4
+ from router_maestro.utils.tokens import (
5
+ estimate_tokens,
6
+ estimate_tokens_from_char_count,
7
+ map_openai_stop_reason_to_anthropic,
8
+ )
9
+
10
+ __all__ = [
11
+ "get_logger",
12
+ "setup_logging",
13
+ "estimate_tokens",
14
+ "estimate_tokens_from_char_count",
15
+ "map_openai_stop_reason_to_anthropic",
16
+ ]
@@ -0,0 +1,81 @@
1
+ """Unified logging configuration for router-maestro."""
2
+
3
+ import logging
4
+ from logging.handlers import RotatingFileHandler
5
+ from pathlib import Path
6
+
7
+ from rich.logging import RichHandler
8
+
9
+ from router_maestro.config.paths import get_data_dir
10
+
11
+
12
def get_log_dir() -> Path:
    """Return ~/.local/share/router-maestro/logs/, creating it if absent."""
    path = get_data_dir() / "logs"
    path.mkdir(parents=True, exist_ok=True)
    return path
17
+
18
+
19
def setup_logging(
    level: str = "INFO",
    console: bool = True,
    file: bool = True,
) -> None:
    """Configure unified logging with console (Rich) and file output.

    Args:
        level: Log level (DEBUG, INFO, WARNING, ERROR)
        console: Enable Rich console handler with colors
        file: Enable rotating file handler
    """
    # Resolve the level name once (the original repeated this lookup for the
    # logger and each handler); unknown names fall back to INFO.
    log_level = getattr(logging, level.upper(), logging.INFO)

    # Package root logger: every module logger hangs off "router_maestro".
    logger = logging.getLogger("router_maestro")
    logger.setLevel(log_level)

    # Remove existing handlers to avoid duplicates on reconfig
    logger.handlers.clear()

    # File format (no colors)
    file_formatter = logging.Formatter(
        "%(asctime)s | %(levelname)-8s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Console handler (Rich with colors)
    if console:
        console_handler = RichHandler(
            show_time=True,
            show_path=False,
            rich_tracebacks=True,
            markup=True,
        )
        console_handler.setLevel(log_level)
        logger.addHandler(console_handler)

    # File handler (rotating, 10MB max, 5 backups)
    if file:
        log_file = get_log_dir() / "router-maestro.log"
        file_handler = RotatingFileHandler(
            log_file,
            maxBytes=10 * 1024 * 1024,  # 10MB
            backupCount=5,
            encoding="utf-8",
        )
        file_handler.setLevel(log_level)
        file_handler.setFormatter(file_formatter)
        logger.addHandler(file_handler)

    # Prevent propagation to root logger
    logger.propagate = False
70
+
71
+
72
def get_logger(name: str) -> logging.Logger:
    """Return the package-scoped logger for a module.

    Args:
        name: Module name (will be prefixed with 'router_maestro.')

    Returns:
        Logger instance for the module
    """
    qualified = f"router_maestro.{name}"
    return logging.getLogger(qualified)
@@ -0,0 +1,51 @@
1
+ """Token estimation utilities."""
2
+
3
# Approximate characters per token for English text
CHARS_PER_TOKEN = 4


def estimate_tokens(text: str) -> int:
    """Estimate how many tokens a piece of text contains.

    Uses a rough approximation of ~4 characters per token for English text.
    This provides an estimate for context display before actual usage is known.

    Args:
        text: The text to estimate tokens for

    Returns:
        Estimated token count
    """
    char_count = len(text)
    return char_count // CHARS_PER_TOKEN
20
+
21
+
22
def estimate_tokens_from_char_count(char_count: int) -> int:
    """Estimate token count given only a character count.

    Args:
        char_count: Number of characters

    Returns:
        Estimated token count
    """
    # Floor division by the module-wide chars-per-token approximation.
    estimated, _remainder = divmod(char_count, CHARS_PER_TOKEN)
    return estimated
32
+
33
+
34
+ def map_openai_stop_reason_to_anthropic(openai_reason: str | None) -> str | None:
35
+ """Map OpenAI finish reason to Anthropic stop reason.
36
+
37
+ Args:
38
+ openai_reason: OpenAI finish reason (stop, length, tool_calls, content_filter)
39
+
40
+ Returns:
41
+ Anthropic stop reason (end_turn, max_tokens, tool_use)
42
+ """
43
+ if openai_reason is None:
44
+ return None
45
+ mapping = {
46
+ "stop": "end_turn",
47
+ "length": "max_tokens",
48
+ "tool_calls": "tool_use",
49
+ "content_filter": "end_turn",
50
+ }
51
+ return mapping.get(openai_reason, "end_turn")