kubiya-control-plane-api 0.1.0__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kubiya-control-plane-api might be problematic.

Files changed (185)
  1. control_plane_api/README.md +266 -0
  2. control_plane_api/__init__.py +0 -0
  3. control_plane_api/__version__.py +1 -0
  4. control_plane_api/alembic/README +1 -0
  5. control_plane_api/alembic/env.py +98 -0
  6. control_plane_api/alembic/script.py.mako +28 -0
  7. control_plane_api/alembic/versions/1382bec74309_initial_migration_with_all_models.py +251 -0
  8. control_plane_api/alembic/versions/1f54bc2a37e3_add_analytics_tables.py +162 -0
  9. control_plane_api/alembic/versions/2e4cb136dc10_rename_toolset_ids_to_skill_ids_in_teams.py +30 -0
  10. control_plane_api/alembic/versions/31cd69a644ce_add_skill_templates_table.py +28 -0
  11. control_plane_api/alembic/versions/89e127caa47d_add_jobs_and_job_executions_tables.py +161 -0
  12. control_plane_api/alembic/versions/add_llm_models_table.py +51 -0
  13. control_plane_api/alembic/versions/b0e10697f212_add_runtime_column_to_teams_simple.py +42 -0
  14. control_plane_api/alembic/versions/ce43b24b63bf_add_execution_trigger_source_and_fix_.py +155 -0
  15. control_plane_api/alembic/versions/d4eaf16e3f8d_rename_toolsets_to_skills.py +84 -0
  16. control_plane_api/alembic/versions/efa2dc427da1_rename_metadata_to_custom_metadata.py +32 -0
  17. control_plane_api/alembic/versions/f973b431d1ce_add_workflow_executor_to_skill_types.py +44 -0
  18. control_plane_api/alembic.ini +148 -0
  19. control_plane_api/api/index.py +12 -0
  20. control_plane_api/app/__init__.py +11 -0
  21. control_plane_api/app/activities/__init__.py +20 -0
  22. control_plane_api/app/activities/agent_activities.py +379 -0
  23. control_plane_api/app/activities/team_activities.py +410 -0
  24. control_plane_api/app/activities/temporal_cloud_activities.py +577 -0
  25. control_plane_api/app/config/__init__.py +35 -0
  26. control_plane_api/app/config/api_config.py +354 -0
  27. control_plane_api/app/config/model_pricing.py +318 -0
  28. control_plane_api/app/config.py +95 -0
  29. control_plane_api/app/database.py +135 -0
  30. control_plane_api/app/exceptions.py +408 -0
  31. control_plane_api/app/lib/__init__.py +11 -0
  32. control_plane_api/app/lib/job_executor.py +312 -0
  33. control_plane_api/app/lib/kubiya_client.py +235 -0
  34. control_plane_api/app/lib/litellm_pricing.py +166 -0
  35. control_plane_api/app/lib/planning_tools/__init__.py +22 -0
  36. control_plane_api/app/lib/planning_tools/agents.py +155 -0
  37. control_plane_api/app/lib/planning_tools/base.py +189 -0
  38. control_plane_api/app/lib/planning_tools/environments.py +214 -0
  39. control_plane_api/app/lib/planning_tools/resources.py +240 -0
  40. control_plane_api/app/lib/planning_tools/teams.py +198 -0
  41. control_plane_api/app/lib/policy_enforcer_client.py +939 -0
  42. control_plane_api/app/lib/redis_client.py +436 -0
  43. control_plane_api/app/lib/supabase.py +71 -0
  44. control_plane_api/app/lib/temporal_client.py +138 -0
  45. control_plane_api/app/lib/validation/__init__.py +20 -0
  46. control_plane_api/app/lib/validation/runtime_validation.py +287 -0
  47. control_plane_api/app/main.py +128 -0
  48. control_plane_api/app/middleware/__init__.py +8 -0
  49. control_plane_api/app/middleware/auth.py +513 -0
  50. control_plane_api/app/middleware/exception_handler.py +267 -0
  51. control_plane_api/app/middleware/rate_limiting.py +384 -0
  52. control_plane_api/app/middleware/request_id.py +202 -0
  53. control_plane_api/app/models/__init__.py +27 -0
  54. control_plane_api/app/models/agent.py +79 -0
  55. control_plane_api/app/models/analytics.py +206 -0
  56. control_plane_api/app/models/associations.py +81 -0
  57. control_plane_api/app/models/environment.py +63 -0
  58. control_plane_api/app/models/execution.py +93 -0
  59. control_plane_api/app/models/job.py +179 -0
  60. control_plane_api/app/models/llm_model.py +75 -0
  61. control_plane_api/app/models/presence.py +49 -0
  62. control_plane_api/app/models/project.py +47 -0
  63. control_plane_api/app/models/session.py +38 -0
  64. control_plane_api/app/models/team.py +66 -0
  65. control_plane_api/app/models/workflow.py +55 -0
  66. control_plane_api/app/policies/README.md +121 -0
  67. control_plane_api/app/policies/approved_users.rego +62 -0
  68. control_plane_api/app/policies/business_hours.rego +51 -0
  69. control_plane_api/app/policies/rate_limiting.rego +100 -0
  70. control_plane_api/app/policies/tool_restrictions.rego +86 -0
  71. control_plane_api/app/routers/__init__.py +4 -0
  72. control_plane_api/app/routers/agents.py +364 -0
  73. control_plane_api/app/routers/agents_v2.py +1260 -0
  74. control_plane_api/app/routers/analytics.py +1014 -0
  75. control_plane_api/app/routers/context_manager.py +562 -0
  76. control_plane_api/app/routers/environment_context.py +270 -0
  77. control_plane_api/app/routers/environments.py +715 -0
  78. control_plane_api/app/routers/execution_environment.py +517 -0
  79. control_plane_api/app/routers/executions.py +1911 -0
  80. control_plane_api/app/routers/health.py +92 -0
  81. control_plane_api/app/routers/health_v2.py +326 -0
  82. control_plane_api/app/routers/integrations.py +274 -0
  83. control_plane_api/app/routers/jobs.py +1344 -0
  84. control_plane_api/app/routers/models.py +82 -0
  85. control_plane_api/app/routers/models_v2.py +361 -0
  86. control_plane_api/app/routers/policies.py +639 -0
  87. control_plane_api/app/routers/presence.py +234 -0
  88. control_plane_api/app/routers/projects.py +902 -0
  89. control_plane_api/app/routers/runners.py +379 -0
  90. control_plane_api/app/routers/runtimes.py +172 -0
  91. control_plane_api/app/routers/secrets.py +155 -0
  92. control_plane_api/app/routers/skills.py +1001 -0
  93. control_plane_api/app/routers/skills_definitions.py +140 -0
  94. control_plane_api/app/routers/task_planning.py +1256 -0
  95. control_plane_api/app/routers/task_queues.py +654 -0
  96. control_plane_api/app/routers/team_context.py +270 -0
  97. control_plane_api/app/routers/teams.py +1400 -0
  98. control_plane_api/app/routers/worker_queues.py +1545 -0
  99. control_plane_api/app/routers/workers.py +935 -0
  100. control_plane_api/app/routers/workflows.py +204 -0
  101. control_plane_api/app/runtimes/__init__.py +6 -0
  102. control_plane_api/app/runtimes/validation.py +344 -0
  103. control_plane_api/app/schemas/job_schemas.py +295 -0
  104. control_plane_api/app/services/__init__.py +1 -0
  105. control_plane_api/app/services/agno_service.py +619 -0
  106. control_plane_api/app/services/litellm_service.py +190 -0
  107. control_plane_api/app/services/policy_service.py +525 -0
  108. control_plane_api/app/services/temporal_cloud_provisioning.py +150 -0
  109. control_plane_api/app/skills/__init__.py +44 -0
  110. control_plane_api/app/skills/base.py +229 -0
  111. control_plane_api/app/skills/business_intelligence.py +189 -0
  112. control_plane_api/app/skills/data_visualization.py +154 -0
  113. control_plane_api/app/skills/docker.py +104 -0
  114. control_plane_api/app/skills/file_generation.py +94 -0
  115. control_plane_api/app/skills/file_system.py +110 -0
  116. control_plane_api/app/skills/python.py +92 -0
  117. control_plane_api/app/skills/registry.py +65 -0
  118. control_plane_api/app/skills/shell.py +102 -0
  119. control_plane_api/app/skills/workflow_executor.py +469 -0
  120. control_plane_api/app/utils/workflow_executor.py +354 -0
  121. control_plane_api/app/workflows/__init__.py +11 -0
  122. control_plane_api/app/workflows/agent_execution.py +507 -0
  123. control_plane_api/app/workflows/agent_execution_with_skills.py +222 -0
  124. control_plane_api/app/workflows/namespace_provisioning.py +326 -0
  125. control_plane_api/app/workflows/team_execution.py +399 -0
  126. control_plane_api/scripts/seed_models.py +239 -0
  127. control_plane_api/worker/__init__.py +0 -0
  128. control_plane_api/worker/activities/__init__.py +0 -0
  129. control_plane_api/worker/activities/agent_activities.py +1241 -0
  130. control_plane_api/worker/activities/approval_activities.py +234 -0
  131. control_plane_api/worker/activities/runtime_activities.py +388 -0
  132. control_plane_api/worker/activities/skill_activities.py +267 -0
  133. control_plane_api/worker/activities/team_activities.py +1217 -0
  134. control_plane_api/worker/config/__init__.py +31 -0
  135. control_plane_api/worker/config/worker_config.py +275 -0
  136. control_plane_api/worker/control_plane_client.py +529 -0
  137. control_plane_api/worker/examples/analytics_integration_example.py +362 -0
  138. control_plane_api/worker/models/__init__.py +1 -0
  139. control_plane_api/worker/models/inputs.py +89 -0
  140. control_plane_api/worker/runtimes/__init__.py +31 -0
  141. control_plane_api/worker/runtimes/base.py +789 -0
  142. control_plane_api/worker/runtimes/claude_code_runtime.py +1443 -0
  143. control_plane_api/worker/runtimes/default_runtime.py +617 -0
  144. control_plane_api/worker/runtimes/factory.py +173 -0
  145. control_plane_api/worker/runtimes/validation.py +93 -0
  146. control_plane_api/worker/services/__init__.py +1 -0
  147. control_plane_api/worker/services/agent_executor.py +422 -0
  148. control_plane_api/worker/services/agent_executor_v2.py +383 -0
  149. control_plane_api/worker/services/analytics_collector.py +457 -0
  150. control_plane_api/worker/services/analytics_service.py +464 -0
  151. control_plane_api/worker/services/approval_tools.py +310 -0
  152. control_plane_api/worker/services/approval_tools_agno.py +207 -0
  153. control_plane_api/worker/services/cancellation_manager.py +177 -0
  154. control_plane_api/worker/services/data_visualization.py +827 -0
  155. control_plane_api/worker/services/jira_tools.py +257 -0
  156. control_plane_api/worker/services/runtime_analytics.py +328 -0
  157. control_plane_api/worker/services/session_service.py +194 -0
  158. control_plane_api/worker/services/skill_factory.py +175 -0
  159. control_plane_api/worker/services/team_executor.py +574 -0
  160. control_plane_api/worker/services/team_executor_v2.py +465 -0
  161. control_plane_api/worker/services/workflow_executor_tools.py +1418 -0
  162. control_plane_api/worker/tests/__init__.py +1 -0
  163. control_plane_api/worker/tests/e2e/__init__.py +0 -0
  164. control_plane_api/worker/tests/e2e/test_execution_flow.py +571 -0
  165. control_plane_api/worker/tests/integration/__init__.py +0 -0
  166. control_plane_api/worker/tests/integration/test_control_plane_integration.py +308 -0
  167. control_plane_api/worker/tests/unit/__init__.py +0 -0
  168. control_plane_api/worker/tests/unit/test_control_plane_client.py +401 -0
  169. control_plane_api/worker/utils/__init__.py +1 -0
  170. control_plane_api/worker/utils/chunk_batcher.py +305 -0
  171. control_plane_api/worker/utils/retry_utils.py +60 -0
  172. control_plane_api/worker/utils/streaming_utils.py +373 -0
  173. control_plane_api/worker/worker.py +753 -0
  174. control_plane_api/worker/workflows/__init__.py +0 -0
  175. control_plane_api/worker/workflows/agent_execution.py +589 -0
  176. control_plane_api/worker/workflows/team_execution.py +429 -0
  177. kubiya_control_plane_api-0.3.4.dist-info/METADATA +229 -0
  178. kubiya_control_plane_api-0.3.4.dist-info/RECORD +182 -0
  179. kubiya_control_plane_api-0.3.4.dist-info/entry_points.txt +2 -0
  180. kubiya_control_plane_api-0.3.4.dist-info/top_level.txt +1 -0
  181. kubiya_control_plane_api-0.1.0.dist-info/METADATA +0 -66
  182. kubiya_control_plane_api-0.1.0.dist-info/RECORD +0 -5
  183. kubiya_control_plane_api-0.1.0.dist-info/top_level.txt +0 -1
  184. {kubiya_control_plane_api-0.1.0.dist-info/licenses → control_plane_api}/LICENSE +0 -0
  185. {kubiya_control_plane_api-0.1.0.dist-info → kubiya_control_plane_api-0.3.4.dist-info}/WHEEL +0 -0
control_plane_api/app/routers/analytics.py (new file)
@@ -0,0 +1,1014 @@
+ """
+ Analytics router for execution metrics and reporting.
+
+ This router provides endpoints for:
+ 1. Persisting analytics data from workers (turns, tool calls, tasks)
+ 2. Querying aggregated analytics for reporting
+ 3. Organization-level metrics and cost tracking
+ """
+
+ from fastapi import APIRouter, Depends, HTTPException, status, Request, Query
+ from typing import List, Optional
+ from datetime import datetime, timedelta
+ from pydantic import BaseModel, Field
+ import structlog
+ import uuid as uuid_lib
+
+ from control_plane_api.app.middleware.auth import get_current_organization
+ from control_plane_api.app.lib.supabase import get_supabase
+
+ logger = structlog.get_logger()
+
+ router = APIRouter()
+
+
+ # ============================================================================
+ # Pydantic Schemas for Analytics Data
+ # ============================================================================
+
+ class TurnMetricsCreate(BaseModel):
+     """Schema for creating a turn metrics record"""
+     execution_id: str
+     turn_number: int
+     turn_id: Optional[str] = None
+     model: str
+     model_provider: Optional[str] = None
+     started_at: str  # ISO timestamp
+     completed_at: Optional[str] = None
+     duration_ms: Optional[int] = None
+     input_tokens: int = 0
+     output_tokens: int = 0
+     cache_read_tokens: int = 0
+     cache_creation_tokens: int = 0
+     total_tokens: int = 0
+     input_cost: float = 0.0
+     output_cost: float = 0.0
+     cache_read_cost: float = 0.0
+     cache_creation_cost: float = 0.0
+     total_cost: float = 0.0
+     finish_reason: Optional[str] = None
+     response_preview: Optional[str] = None
+     tools_called_count: int = 0
+     tools_called_names: List[str] = Field(default_factory=list)
+     error_message: Optional[str] = None
+     metrics: dict = Field(default_factory=dict)
+     # Agentic Engineering Minutes (AEM) fields
+     runtime_minutes: float = 0.0
+     model_weight: float = 1.0
+     tool_calls_weight: float = 1.0
+     aem_value: float = 0.0
+     aem_cost: float = 0.0
+
+
+ class ToolCallCreate(BaseModel):
+     """Schema for creating a tool call record"""
+     execution_id: str
+     turn_id: Optional[str] = None  # UUID of the turn (if available)
+     tool_name: str
+     tool_use_id: Optional[str] = None
+     started_at: str  # ISO timestamp
+     completed_at: Optional[str] = None
+     duration_ms: Optional[int] = None
+     tool_input: Optional[dict] = None
+     tool_output: Optional[str] = None
+     tool_output_size: Optional[int] = None
+     success: bool = True
+     error_message: Optional[str] = None
+     error_type: Optional[str] = None
+     metadata: dict = Field(default_factory=dict)
+
+
+ class TaskCreate(BaseModel):
+     """Schema for creating a task record"""
+     execution_id: str
+     task_number: Optional[int] = None
+     task_id: Optional[str] = None
+     task_description: str
+     task_type: Optional[str] = None
+     status: str = "pending"
+     started_at: Optional[str] = None
+     completed_at: Optional[str] = None
+     duration_ms: Optional[int] = None
+     result: Optional[str] = None
+     error_message: Optional[str] = None
+     metadata: dict = Field(default_factory=dict)
+
+
+ class TaskUpdate(BaseModel):
+     """Schema for updating a task's status"""
+     status: Optional[str] = None
+     completed_at: Optional[str] = None
+     duration_ms: Optional[int] = None
+     result: Optional[str] = None
+     error_message: Optional[str] = None
+
+
+ class BatchAnalyticsCreate(BaseModel):
+     """Schema for batch creating analytics data (used by workers to send all data at once)"""
+     execution_id: str
+     turns: List[TurnMetricsCreate] = Field(default_factory=list)
+     tool_calls: List[ToolCallCreate] = Field(default_factory=list)
+     tasks: List[TaskCreate] = Field(default_factory=list)
+
+
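For orientation, here is a minimal sketch of how a worker might submit one of these records over HTTP. This is hypothetical client code, not part of the package: the base URL, route prefix, auth header, and example field values are all assumptions (the package's own client lives in control_plane_api/worker/control_plane_client.py), and pydantic v2's model_dump() is assumed (on v1 it would be .dict()).

import httpx

from control_plane_api.app.routers.analytics import TurnMetricsCreate

# Assumed deployment details -- adjust to the real control-plane URL and auth scheme.
BASE_URL = "https://control-plane.example.com/api/v1/analytics"
HEADERS = {"Authorization": "Bearer ORG_API_TOKEN"}

turn = TurnMetricsCreate(
    execution_id="execution-uuid-here",  # must already exist and belong to the caller's org
    turn_number=1,
    model="claude-sonnet-4",
    started_at="2025-01-01T12:00:00Z",
    input_tokens=1200,
    output_tokens=350,
    total_tokens=1550,
    total_cost=0.0123,
)

resp = httpx.post(f"{BASE_URL}/turns", json=turn.model_dump(), headers=HEADERS)
resp.raise_for_status()
print(resp.json())  # e.g. {"success": True, "turn_id": "..."}
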
+ # ============================================================================
+ # Data Persistence Endpoints (Used by Workers)
+ # ============================================================================
+
+ @router.post("/turns", status_code=status.HTTP_201_CREATED)
+ async def create_turn_metrics(
+     turn_data: TurnMetricsCreate,
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+ ):
+     """
+     Create a turn metrics record.
+
+     This endpoint is called by workers to persist per-turn LLM metrics
+     including tokens, cost, duration, and tool usage.
+     """
+     try:
+         client = get_supabase()
+
+         # Verify execution belongs to organization
+         exec_result = client.table("executions").select("id").eq("id", turn_data.execution_id).eq("organization_id", organization["id"]).execute()
+         if not exec_result.data:
+             raise HTTPException(status_code=404, detail="Execution not found")
+
+         turn_record = {
+             "id": str(uuid_lib.uuid4()),
+             "organization_id": organization["id"],
+             "execution_id": turn_data.execution_id,
+             "turn_number": turn_data.turn_number,
+             "turn_id": turn_data.turn_id,
+             "model": turn_data.model,
+             "model_provider": turn_data.model_provider,
+             "started_at": turn_data.started_at,
+             "completed_at": turn_data.completed_at,
+             "duration_ms": turn_data.duration_ms,
+             "input_tokens": turn_data.input_tokens,
+             "output_tokens": turn_data.output_tokens,
+             "cache_read_tokens": turn_data.cache_read_tokens,
+             "cache_creation_tokens": turn_data.cache_creation_tokens,
+             "total_tokens": turn_data.total_tokens,
+             "input_cost": turn_data.input_cost,
+             "output_cost": turn_data.output_cost,
+             "cache_read_cost": turn_data.cache_read_cost,
+             "cache_creation_cost": turn_data.cache_creation_cost,
+             "total_cost": turn_data.total_cost,
+             "finish_reason": turn_data.finish_reason,
+             "response_preview": turn_data.response_preview[:500] if turn_data.response_preview else None,
+             "tools_called_count": turn_data.tools_called_count,
+             "tools_called_names": turn_data.tools_called_names,
+             "error_message": turn_data.error_message,
+             "metrics": turn_data.metrics,
+             # AEM fields
+             "runtime_minutes": turn_data.runtime_minutes,
+             "model_weight": turn_data.model_weight,
+             "tool_calls_weight": turn_data.tool_calls_weight,
+             "aem_value": turn_data.aem_value,
+             "aem_cost": turn_data.aem_cost,
+         }
+
+         result = client.table("execution_turns").insert(turn_record).execute()
+
+         if not result.data:
+             raise HTTPException(status_code=500, detail="Failed to create turn metrics")
+
+         logger.info(
+             "turn_metrics_created",
+             execution_id=turn_data.execution_id,
+             turn_number=turn_data.turn_number,
+             model=turn_data.model,
+             tokens=turn_data.total_tokens,
+             cost=turn_data.total_cost,
+             org_id=organization["id"]
+         )
+
+         return {"success": True, "turn_id": result.data[0]["id"]}
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("turn_metrics_create_failed", error=str(e), execution_id=turn_data.execution_id)
+         raise HTTPException(status_code=500, detail=f"Failed to create turn metrics: {str(e)}")
+
+
+ @router.post("/tool-calls", status_code=status.HTTP_201_CREATED)
+ async def create_tool_call(
+     tool_call_data: ToolCallCreate,
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+ ):
+     """
+     Create a tool call record.
+
+     This endpoint is called by workers to persist tool execution details
+     including timing, success/failure, and error information.
+     """
+     try:
+         client = get_supabase()
+
+         # Verify execution belongs to organization
+         exec_result = client.table("executions").select("id").eq("id", tool_call_data.execution_id).eq("organization_id", organization["id"]).execute()
+         if not exec_result.data:
+             raise HTTPException(status_code=404, detail="Execution not found")
+
+         # Truncate tool_output if too large (store first 10KB)
+         tool_output = tool_call_data.tool_output
+         tool_output_size = len(tool_output) if tool_output else 0
+         if tool_output and len(tool_output) > 10000:
+             tool_output = tool_output[:10000] + "... [truncated]"
+
+         tool_call_record = {
+             "id": str(uuid_lib.uuid4()),
+             "organization_id": organization["id"],
+             "execution_id": tool_call_data.execution_id,
+             "turn_id": tool_call_data.turn_id,
+             "tool_name": tool_call_data.tool_name,
+             "tool_use_id": tool_call_data.tool_use_id,
+             "started_at": tool_call_data.started_at,
+             "completed_at": tool_call_data.completed_at,
+             "duration_ms": tool_call_data.duration_ms,
+             "tool_input": tool_call_data.tool_input,
+             "tool_output": tool_output,
+             "tool_output_size": tool_output_size,
+             "success": tool_call_data.success,
+             "error_message": tool_call_data.error_message,
+             "error_type": tool_call_data.error_type,
+             "custom_metadata": tool_call_data.metadata,
+         }
+
+         result = client.table("execution_tool_calls").insert(tool_call_record).execute()
+
+         if not result.data:
+             raise HTTPException(status_code=500, detail="Failed to create tool call record")
+
+         logger.info(
+             "tool_call_created",
+             execution_id=tool_call_data.execution_id,
+             tool_name=tool_call_data.tool_name,
+             success=tool_call_data.success,
+             duration_ms=tool_call_data.duration_ms,
+             org_id=organization["id"]
+         )
+
+         return {"success": True, "tool_call_id": result.data[0]["id"]}
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("tool_call_create_failed", error=str(e), execution_id=tool_call_data.execution_id)
+         raise HTTPException(status_code=500, detail=f"Failed to create tool call: {str(e)}")
+
+
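The schemas above leave timing to the caller: started_at/completed_at are ISO strings and duration_ms is supplied by the worker (the endpoint also truncates tool_output to 10,000 characters while preserving the original length in tool_output_size). A small helper a client might use to derive duration_ms before posting, shown only as an illustration:

from datetime import datetime

def iso_duration_ms(started_at: str, completed_at: str) -> int:
    """Millisecond difference between two ISO-8601 timestamps (a trailing Z is tolerated)."""
    start = datetime.fromisoformat(started_at.replace("Z", "+00:00"))
    end = datetime.fromisoformat(completed_at.replace("Z", "+00:00"))
    return int((end - start).total_seconds() * 1000)

iso_duration_ms("2025-01-01T12:00:00Z", "2025-01-01T12:00:01.250Z")  # 1250
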
+ @router.post("/tasks", status_code=status.HTTP_201_CREATED)
+ async def create_task(
+     task_data: TaskCreate,
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+ ):
+     """
+     Create a task record.
+
+     This endpoint is called by workers to persist task tracking information.
+     """
+     try:
+         client = get_supabase()
+
+         # Verify execution belongs to organization
+         exec_result = client.table("executions").select("id").eq("id", task_data.execution_id).eq("organization_id", organization["id"]).execute()
+         if not exec_result.data:
+             raise HTTPException(status_code=404, detail="Execution not found")
+
+         task_record = {
+             "id": str(uuid_lib.uuid4()),
+             "organization_id": organization["id"],
+             "execution_id": task_data.execution_id,
+             "task_number": task_data.task_number,
+             "task_id": task_data.task_id,
+             "task_description": task_data.task_description,
+             "task_type": task_data.task_type,
+             "status": task_data.status,
+             "started_at": task_data.started_at,
+             "completed_at": task_data.completed_at,
+             "duration_ms": task_data.duration_ms,
+             "result": task_data.result,
+             "error_message": task_data.error_message,
+             "custom_metadata": task_data.metadata,
+         }
+
+         result = client.table("execution_tasks").insert(task_record).execute()
+
+         if not result.data:
+             raise HTTPException(status_code=500, detail="Failed to create task")
+
+         logger.info(
+             "task_created",
+             execution_id=task_data.execution_id,
+             task_description=task_data.task_description[:100],
+             status=task_data.status,
+             org_id=organization["id"]
+         )
+
+         return {"success": True, "task_id": result.data[0]["id"]}
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("task_create_failed", error=str(e), execution_id=task_data.execution_id)
+         raise HTTPException(status_code=500, detail=f"Failed to create task: {str(e)}")
+
+
+ @router.post("/batch", status_code=status.HTTP_201_CREATED)
+ async def create_batch_analytics(
+     batch_data: BatchAnalyticsCreate,
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+ ):
+     """
+     Create analytics data in batch.
+
+     This endpoint allows workers to send all analytics data (turns, tool calls, tasks)
+     in a single request, reducing round trips and improving performance.
+     """
+     try:
+         client = get_supabase()
+
+         # Verify execution belongs to organization
+         exec_result = client.table("executions").select("id").eq("id", batch_data.execution_id).eq("organization_id", organization["id"]).execute()
+         if not exec_result.data:
+             raise HTTPException(status_code=404, detail="Execution not found")
+
+         results = {
+             "turns_created": 0,
+             "tool_calls_created": 0,
+             "tasks_created": 0,
+             "errors": []
+         }
+
+         # Create turns
+         if batch_data.turns:
+             for turn in batch_data.turns:
+                 try:
+                     turn_record = {
+                         "id": str(uuid_lib.uuid4()),
+                         "organization_id": organization["id"],
+                         "execution_id": batch_data.execution_id,
+                         "turn_number": turn.turn_number,
+                         "turn_id": turn.turn_id,
+                         "model": turn.model,
+                         "model_provider": turn.model_provider,
+                         "started_at": turn.started_at,
+                         "completed_at": turn.completed_at,
+                         "duration_ms": turn.duration_ms,
+                         "input_tokens": turn.input_tokens,
+                         "output_tokens": turn.output_tokens,
+                         "cache_read_tokens": turn.cache_read_tokens,
+                         "cache_creation_tokens": turn.cache_creation_tokens,
+                         "total_tokens": turn.total_tokens,
+                         "input_cost": turn.input_cost,
+                         "output_cost": turn.output_cost,
+                         "cache_read_cost": turn.cache_read_cost,
+                         "cache_creation_cost": turn.cache_creation_cost,
+                         "total_cost": turn.total_cost,
+                         "finish_reason": turn.finish_reason,
+                         "response_preview": turn.response_preview[:500] if turn.response_preview else None,
+                         "tools_called_count": turn.tools_called_count,
+                         "tools_called_names": turn.tools_called_names,
+                         "error_message": turn.error_message,
+                         "metrics": turn.metrics,
+                     }
+                     client.table("execution_turns").insert(turn_record).execute()
+                     results["turns_created"] += 1
+                 except Exception as e:
+                     results["errors"].append(f"Turn {turn.turn_number}: {str(e)}")
+
+         # Create tool calls
+         if batch_data.tool_calls:
+             for tool_call in batch_data.tool_calls:
+                 try:
+                     tool_output = tool_call.tool_output
+                     tool_output_size = len(tool_output) if tool_output else 0
+                     if tool_output and len(tool_output) > 10000:
+                         tool_output = tool_output[:10000] + "... [truncated]"
+
+                     tool_call_record = {
+                         "id": str(uuid_lib.uuid4()),
+                         "organization_id": organization["id"],
+                         "execution_id": batch_data.execution_id,
+                         "turn_id": tool_call.turn_id,
+                         "tool_name": tool_call.tool_name,
+                         "tool_use_id": tool_call.tool_use_id,
+                         "started_at": tool_call.started_at,
+                         "completed_at": tool_call.completed_at,
+                         "duration_ms": tool_call.duration_ms,
+                         "tool_input": tool_call.tool_input,
+                         "tool_output": tool_output,
+                         "tool_output_size": tool_output_size,
+                         "success": tool_call.success,
+                         "error_message": tool_call.error_message,
+                         "error_type": tool_call.error_type,
+                         "custom_metadata": tool_call.metadata,
+                     }
+                     client.table("execution_tool_calls").insert(tool_call_record).execute()
+                     results["tool_calls_created"] += 1
+                 except Exception as e:
+                     results["errors"].append(f"Tool call {tool_call.tool_name}: {str(e)}")
+
+         # Create tasks
+         if batch_data.tasks:
+             for task in batch_data.tasks:
+                 try:
+                     task_record = {
+                         "id": str(uuid_lib.uuid4()),
+                         "organization_id": organization["id"],
+                         "execution_id": batch_data.execution_id,
+                         "task_number": task.task_number,
+                         "task_id": task.task_id,
+                         "task_description": task.task_description,
+                         "task_type": task.task_type,
+                         "status": task.status,
+                         "started_at": task.started_at,
+                         "completed_at": task.completed_at,
+                         "duration_ms": task.duration_ms,
+                         "result": task.result,
+                         "error_message": task.error_message,
+                         "custom_metadata": task.metadata,
+                     }
+                     client.table("execution_tasks").insert(task_record).execute()
+                     results["tasks_created"] += 1
+                 except Exception as e:
+                     results["errors"].append(f"Task {task.task_description[:50]}: {str(e)}")
+
+         logger.info(
+             "batch_analytics_created",
+             execution_id=batch_data.execution_id,
+             turns_created=results["turns_created"],
+             tool_calls_created=results["tool_calls_created"],
+             tasks_created=results["tasks_created"],
+             errors=len(results["errors"]),
+             org_id=organization["id"]
+         )
+
+         return {
+             "success": len(results["errors"]) == 0,
+             "execution_id": batch_data.execution_id,
+             **results
+         }
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("batch_analytics_create_failed", error=str(e), execution_id=batch_data.execution_id)
+         raise HTTPException(status_code=500, detail=f"Failed to create batch analytics: {str(e)}")
+
+
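Note that the batch endpoint inserts row by row and collects per-item failures rather than rolling back, so "success" in the response is true only when the errors list is empty. A hedged sketch of how a caller might handle a partial failure, reusing the hypothetical BASE_URL, HEADERS, and turn object from the earlier sketch:

from control_plane_api.app.routers.analytics import BatchAnalyticsCreate

batch = BatchAnalyticsCreate(
    execution_id=turn.execution_id,
    turns=[turn],
    tool_calls=[],
    tasks=[],
)

resp = httpx.post(f"{BASE_URL}/batch", json=batch.model_dump(), headers=HEADERS)
body = resp.json()
if not body["success"]:
    # Some rows failed; the others were still persisted.
    for err in body["errors"]:
        print("analytics item failed:", err)
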
+ @router.patch("/tasks/{task_id}", status_code=status.HTTP_200_OK)
+ async def update_task(
+     task_id: str,
+     task_update: TaskUpdate,
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+ ):
+     """
+     Update a task's status and completion information.
+
+     This endpoint is called by workers to update task progress.
+     """
+     try:
+         client = get_supabase()
+
+         update_data = {}
+         if task_update.status is not None:
+             update_data["status"] = task_update.status
+         if task_update.completed_at is not None:
+             update_data["completed_at"] = task_update.completed_at
+         if task_update.duration_ms is not None:
+             update_data["duration_ms"] = task_update.duration_ms
+         if task_update.result is not None:
+             update_data["result"] = task_update.result
+         if task_update.error_message is not None:
+             update_data["error_message"] = task_update.error_message
+
+         update_data["updated_at"] = datetime.utcnow().isoformat()
+
+         result = client.table("execution_tasks").update(update_data).eq("id", task_id).eq("organization_id", organization["id"]).execute()
+
+         if not result.data:
+             raise HTTPException(status_code=404, detail="Task not found")
+
+         logger.info(
+             "task_updated",
+             task_id=task_id,
+             status=task_update.status,
+             org_id=organization["id"]
+         )
+
+         return {"success": True, "task_id": task_id}
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("task_update_failed", error=str(e), task_id=task_id)
+         raise HTTPException(status_code=500, detail=f"Failed to update task: {str(e)}")
+
+
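A worker closing out a task would typically PATCH it with the terminal status and timing. Again a hypothetical call with the same URL/auth assumptions as above; only the fields that are not None are applied, and updated_at is stamped server-side:

from control_plane_api.app.routers.analytics import TaskUpdate

task_id = "..."  # id returned by POST /tasks

update = TaskUpdate(
    status="completed",
    completed_at="2025-01-01T12:05:00Z",
    duration_ms=300_000,
    result="Deployment finished",
)
resp = httpx.patch(
    f"{BASE_URL}/tasks/{task_id}",
    json=update.model_dump(exclude_none=True),
    headers=HEADERS,
)
resp.raise_for_status()
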
+ # ============================================================================
+ # Reporting Endpoints (For Analytics Dashboard)
+ # ============================================================================
+
+ @router.get("/executions/{execution_id}/details")
+ async def get_execution_analytics(
+     execution_id: str,
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+ ):
+     """
+     Get comprehensive analytics for a specific execution.
+
+     Returns:
+     - Execution summary
+     - Per-turn metrics
+     - Tool call details
+     - Task breakdown
+     - Total costs and token usage
+     """
+     try:
+         client = get_supabase()
+
+         # Get execution
+         exec_result = client.table("executions").select("*").eq("id", execution_id).eq("organization_id", organization["id"]).single().execute()
+         if not exec_result.data:
+             raise HTTPException(status_code=404, detail="Execution not found")
+
+         execution = exec_result.data
+
+         # Get turns
+         turns_result = client.table("execution_turns").select("*").eq("execution_id", execution_id).eq("organization_id", organization["id"]).order("turn_number").execute()
+         turns = turns_result.data if turns_result.data else []
+
+         # Get tool calls
+         tool_calls_result = client.table("execution_tool_calls").select("*").eq("execution_id", execution_id).eq("organization_id", organization["id"]).order("started_at").execute()
+         tool_calls = tool_calls_result.data if tool_calls_result.data else []
+
+         # Get tasks
+         tasks_result = client.table("execution_tasks").select("*").eq("execution_id", execution_id).eq("organization_id", organization["id"]).order("task_number").execute()
+         tasks = tasks_result.data if tasks_result.data else []
+
+         # Calculate aggregated metrics
+         total_turns = len(turns)
+         total_tokens = sum(turn.get("total_tokens", 0) for turn in turns)
+         total_cost = sum(turn.get("total_cost", 0.0) for turn in turns)
+         total_duration_ms = sum(turn.get("duration_ms", 0) or 0 for turn in turns)
+
+         total_tool_calls = len(tool_calls)
+         successful_tool_calls = sum(1 for tc in tool_calls if tc.get("success", False))
+         failed_tool_calls = total_tool_calls - successful_tool_calls
+
+         unique_tools_used = list(set(tc.get("tool_name") for tc in tool_calls))
+
+         # Task statistics
+         total_tasks = len(tasks)
+         completed_tasks = sum(1 for task in tasks if task.get("status") == "completed")
+         failed_tasks = sum(1 for task in tasks if task.get("status") == "failed")
+         pending_tasks = sum(1 for task in tasks if task.get("status") in ["pending", "in_progress"])
+
+         return {
+             "execution": execution,
+             "summary": {
+                 "execution_id": execution_id,
+                 "total_turns": total_turns,
+                 "total_tokens": total_tokens,
+                 "total_cost": total_cost,
+                 "total_duration_ms": total_duration_ms,
+                 "total_tool_calls": total_tool_calls,
+                 "successful_tool_calls": successful_tool_calls,
+                 "failed_tool_calls": failed_tool_calls,
+                 "unique_tools_used": unique_tools_used,
+                 "total_tasks": total_tasks,
+                 "completed_tasks": completed_tasks,
+                 "failed_tasks": failed_tasks,
+                 "pending_tasks": pending_tasks,
+             },
+             "turns": turns,
+             "tool_calls": tool_calls,
+             "tasks": tasks,
+         }
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("get_execution_analytics_failed", error=str(e), execution_id=execution_id)
+         raise HTTPException(status_code=500, detail=f"Failed to get execution analytics: {str(e)}")
+
+
+ @router.get("/summary")
+ async def get_organization_analytics_summary(
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+     days: int = Query(default=30, ge=1, le=365, description="Number of days to include in the summary"),
+ ):
+     """
+     Get aggregated analytics summary for the organization.
+
+     Returns high-level metrics over the specified time period:
+     - Total executions
+     - Total cost
+     - Total tokens used
+     - Model usage breakdown
+     - Tool usage statistics
+     - Success rates
+     """
+     try:
+         client = get_supabase()
+
+         # Calculate date range
+         end_date = datetime.utcnow()
+         start_date = end_date - timedelta(days=days)
+         start_date_iso = start_date.isoformat()
+
+         # Get executions in date range
+         executions_result = client.table("executions").select("id, status, created_at").eq("organization_id", organization["id"]).gte("created_at", start_date_iso).execute()
+         executions = executions_result.data if executions_result.data else []
+         execution_ids = [exec["id"] for exec in executions]
+
+         if not execution_ids:
+             return {
+                 "period_days": days,
+                 "start_date": start_date_iso,
+                 "end_date": end_date.isoformat(),
+                 "total_executions": 0,
+                 "total_cost": 0.0,
+                 "total_tokens": 0,
+                 "total_turns": 0,
+                 "total_tool_calls": 0,
+                 "models_used": {},
+                 "tools_used": {},
+                 "success_rate": 0.0,
+             }
+
+         # Get all turns for these executions
+         turns_result = client.table("execution_turns").select("*").eq("organization_id", organization["id"]).gte("created_at", start_date_iso).execute()
+         turns = turns_result.data if turns_result.data else []
+
+         # Get all tool calls for these executions
+         tool_calls_result = client.table("execution_tool_calls").select("tool_name, success, duration_ms").eq("organization_id", organization["id"]).gte("created_at", start_date_iso).execute()
+         tool_calls = tool_calls_result.data if tool_calls_result.data else []
+
+         # Calculate aggregates
+         total_executions = len(executions)
+         successful_executions = sum(1 for exec in executions if exec.get("status") == "completed")
+         success_rate = (successful_executions / total_executions * 100) if total_executions > 0 else 0.0
+
+         total_turns = len(turns)
+         total_tokens = sum(turn.get("total_tokens", 0) for turn in turns)
+         total_cost = sum(turn.get("total_cost", 0.0) for turn in turns)
+
+         # Model usage breakdown
+         models_used = {}
+         for turn in turns:
+             model = turn.get("model", "unknown")
+             if model not in models_used:
+                 models_used[model] = {
+                     "count": 0,
+                     "total_tokens": 0,
+                     "total_cost": 0.0,
+                 }
+             models_used[model]["count"] += 1
+             models_used[model]["total_tokens"] += turn.get("total_tokens", 0)
+             models_used[model]["total_cost"] += turn.get("total_cost", 0.0)
+
+         # Tool usage breakdown
+         tools_used = {}
+         total_tool_calls = len(tool_calls)
+         for tool_call in tool_calls:
+             tool_name = tool_call.get("tool_name", "unknown")
+             if tool_name not in tools_used:
+                 tools_used[tool_name] = {
+                     "count": 0,
+                     "success_count": 0,
+                     "fail_count": 0,
+                     "avg_duration_ms": 0,
+                     "total_duration_ms": 0,
+                 }
+             tools_used[tool_name]["count"] += 1
+             if tool_call.get("success", False):
+                 tools_used[tool_name]["success_count"] += 1
+             else:
+                 tools_used[tool_name]["fail_count"] += 1
+
+             duration = tool_call.get("duration_ms", 0) or 0
+             tools_used[tool_name]["total_duration_ms"] += duration
+
+         # Calculate average durations
+         for tool_name, stats in tools_used.items():
+             if stats["count"] > 0:
+                 stats["avg_duration_ms"] = stats["total_duration_ms"] / stats["count"]
+
+         return {
+             "period_days": days,
+             "start_date": start_date_iso,
+             "end_date": end_date.isoformat(),
+             "total_executions": total_executions,
+             "successful_executions": successful_executions,
+             "failed_executions": total_executions - successful_executions,
+             "success_rate": round(success_rate, 2),
+             "total_cost": round(total_cost, 4),
+             "total_tokens": total_tokens,
+             "total_turns": total_turns,
+             "total_tool_calls": total_tool_calls,
+             "models_used": models_used,
+             "tools_used": tools_used,
+         }
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("get_analytics_summary_failed", error=str(e), org_id=organization["id"])
+         raise HTTPException(status_code=500, detail=f"Failed to get analytics summary: {str(e)}")
+
+
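Two details of the summary are worth spelling out: success_rate is a plain percentage of executions whose status is "completed", and the turn/tool-call totals are filtered only by organization_id and created_at (not by the listed execution IDs). A tiny worked example of the rate calculation:

# Example: 10 executions in the window, 8 of them completed
successful_executions = 8
total_executions = 10
success_rate = round(successful_executions / total_executions * 100, 2)  # 80.0
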
+ @router.get("/costs")
+ async def get_cost_breakdown(
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+     days: int = Query(default=30, ge=1, le=365, description="Number of days to include"),
+     group_by: str = Query(default="day", regex="^(day|week|month)$", description="Group costs by time period"),
+ ):
+     """
+     Get detailed cost breakdown over time.
+
+     Returns cost metrics grouped by the specified time period.
+     """
+     try:
+         client = get_supabase()
+
+         # Calculate date range
+         end_date = datetime.utcnow()
+         start_date = end_date - timedelta(days=days)
+         start_date_iso = start_date.isoformat()
+
+         # Get all turns in date range
+         turns_result = client.table("execution_turns").select("created_at, model, total_cost, total_tokens, input_tokens, output_tokens").eq("organization_id", organization["id"]).gte("created_at", start_date_iso).order("created_at").execute()
+         turns = turns_result.data if turns_result.data else []
+
+         # Group by time period
+         cost_by_period = {}
+         for turn in turns:
+             created_at = datetime.fromisoformat(turn["created_at"].replace("Z", "+00:00"))
+
+             # Determine period key
+             if group_by == "day":
+                 period_key = created_at.strftime("%Y-%m-%d")
+             elif group_by == "week":
+                 period_key = created_at.strftime("%Y-W%U")
+             else:  # month
+                 period_key = created_at.strftime("%Y-%m")
+
+             if period_key not in cost_by_period:
+                 cost_by_period[period_key] = {
+                     "period": period_key,
+                     "total_cost": 0.0,
+                     "total_tokens": 0,
+                     "total_input_tokens": 0,
+                     "total_output_tokens": 0,
+                     "turn_count": 0,
+                     "models": {},
+                 }
+
+             cost_by_period[period_key]["total_cost"] += turn.get("total_cost", 0.0)
+             cost_by_period[period_key]["total_tokens"] += turn.get("total_tokens", 0)
+             cost_by_period[period_key]["total_input_tokens"] += turn.get("input_tokens", 0)
+             cost_by_period[period_key]["total_output_tokens"] += turn.get("output_tokens", 0)
+             cost_by_period[period_key]["turn_count"] += 1
+
+             # Track by model
+             model = turn.get("model", "unknown")
+             if model not in cost_by_period[period_key]["models"]:
+                 cost_by_period[period_key]["models"][model] = {
+                     "cost": 0.0,
+                     "tokens": 0,
+                     "turns": 0,
+                 }
+             cost_by_period[period_key]["models"][model]["cost"] += turn.get("total_cost", 0.0)
+             cost_by_period[period_key]["models"][model]["tokens"] += turn.get("total_tokens", 0)
+             cost_by_period[period_key]["models"][model]["turns"] += 1
+
+         # Convert to list and sort
+         cost_breakdown = sorted(cost_by_period.values(), key=lambda x: x["period"])
+
+         # Calculate totals
+         total_cost = sum(period["total_cost"] for period in cost_breakdown)
+         total_tokens = sum(period["total_tokens"] for period in cost_breakdown)
+
+         return {
+             "period_days": days,
+             "group_by": group_by,
+             "start_date": start_date_iso,
+             "end_date": end_date.isoformat(),
+             "total_cost": round(total_cost, 4),
+             "total_tokens": total_tokens,
+             "breakdown": cost_breakdown,
+         }
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("get_cost_breakdown_failed", error=str(e), org_id=organization["id"])
+         raise HTTPException(status_code=500, detail=f"Failed to get cost breakdown: {str(e)}")
+
+
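The grouping keys come straight from strftime, so a single turn timestamp maps to buckets like the ones below; note that %U counts weeks from the first Sunday of the year, so days before that Sunday fall into week 00:

from datetime import datetime

ts = datetime.fromisoformat("2025-03-05T14:30:00+00:00")
ts.strftime("%Y-%m-%d")  # '2025-03-05'  (group_by=day)
ts.strftime("%Y-W%U")    # '2025-W09'    (group_by=week)
ts.strftime("%Y-%m")     # '2025-03'     (group_by=month)
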
+ @router.get("/aem/summary")
+ async def get_aem_summary(
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+     days: int = Query(default=30, ge=1, le=365, description="Number of days to include"),
+ ):
+     """
+     Get Agentic Engineering Minutes (AEM) summary.
+
+     Returns:
+     - Total AEM consumed
+     - Total AEM cost
+     - Breakdown by model tier (Opus, Sonnet, Haiku)
+     - Average runtime, model weight, tool complexity
+     """
+     try:
+         client = get_supabase()
+
+         # Calculate date range
+         end_date = datetime.utcnow()
+         start_date = end_date - timedelta(days=days)
+         start_date_iso = start_date.isoformat()
+
+         # Get all turns with AEM data
+         turns_result = client.table("execution_turns").select(
+             "runtime_minutes, model_weight, tool_calls_weight, aem_value, aem_cost, model, model_provider"
+         ).eq("organization_id", organization["id"]).gte("created_at", start_date_iso).execute()
+         turns = turns_result.data if turns_result.data else []
+
+         if not turns:
+             return {
+                 "period_days": days,
+                 "total_aem": 0.0,
+                 "total_aem_cost": 0.0,
+                 "total_runtime_minutes": 0.0,
+                 "turn_count": 0,
+                 "by_model_tier": {},
+                 "average_model_weight": 0.0,
+                 "average_tool_complexity": 0.0,
+             }
+
+         # Calculate totals
+         total_aem = sum(turn.get("aem_value", 0.0) for turn in turns)
+         total_aem_cost = sum(turn.get("aem_cost", 0.0) for turn in turns)
+         total_runtime_minutes = sum(turn.get("runtime_minutes", 0.0) for turn in turns)
+         total_model_weight = sum(turn.get("model_weight", 1.0) for turn in turns)
+         total_tool_weight = sum(turn.get("tool_calls_weight", 1.0) for turn in turns)
+
+         # Breakdown by model tier
+         by_tier = {}
+         for turn in turns:
+             weight = turn.get("model_weight", 1.0)
+
+             # Classify into tiers
+             if weight >= 1.5:
+                 tier = "opus"
+             elif weight >= 0.8:
+                 tier = "sonnet"
+             else:
+                 tier = "haiku"
+
+             if tier not in by_tier:
+                 by_tier[tier] = {
+                     "tier": tier,
+                     "turn_count": 0,
+                     "total_aem": 0.0,
+                     "total_aem_cost": 0.0,
+                     "total_runtime_minutes": 0.0,
+                     "models": set(),
+                 }
+
+             by_tier[tier]["turn_count"] += 1
+             by_tier[tier]["total_aem"] += turn.get("aem_value", 0.0)
+             by_tier[tier]["total_aem_cost"] += turn.get("aem_cost", 0.0)
+             by_tier[tier]["total_runtime_minutes"] += turn.get("runtime_minutes", 0.0)
+             by_tier[tier]["models"].add(turn.get("model", "unknown"))
+
+         # Convert sets to lists for JSON serialization
+         for tier_data in by_tier.values():
+             tier_data["models"] = list(tier_data["models"])
+
+         return {
+             "period_days": days,
+             "start_date": start_date_iso,
+             "end_date": end_date.isoformat(),
+             "total_aem": round(total_aem, 2),
+             "total_aem_cost": round(total_aem_cost, 2),
+             "total_runtime_minutes": round(total_runtime_minutes, 2),
+             "turn_count": len(turns),
+             "average_aem_per_turn": round(total_aem / len(turns), 2) if turns else 0.0,
+             "average_model_weight": round(total_model_weight / len(turns), 2) if turns else 0.0,
+             "average_tool_complexity": round(total_tool_weight / len(turns), 2) if turns else 0.0,
+             "by_model_tier": by_tier,
+         }
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("get_aem_summary_failed", error=str(e), org_id=organization["id"])
+         raise HTTPException(status_code=500, detail=f"Failed to get AEM summary: {str(e)}")
+
+
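The tier split is driven only by model_weight, using the thresholds in the loop above; aem_value and aem_cost themselves arrive precomputed from workers (see TurnMetricsCreate) and are merely summed here. Pulled out as a standalone helper, purely for illustration, the classification reads:

def classify_model_tier(model_weight: float) -> str:
    """Map a turn's model_weight onto the tiers reported by /aem/summary."""
    if model_weight >= 1.5:
        return "opus"
    elif model_weight >= 0.8:
        return "sonnet"
    return "haiku"

classify_model_tier(2.0)   # 'opus'
classify_model_tier(1.0)   # 'sonnet'
classify_model_tier(0.25)  # 'haiku'
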
+ @router.get("/aem/trends")
+ async def get_aem_trends(
+     request: Request,
+     organization: dict = Depends(get_current_organization),
+     days: int = Query(default=30, ge=1, le=365, description="Number of days to include"),
+     group_by: str = Query(default="day", regex="^(day|week|month)$", description="Group by time period"),
+ ):
+     """
+     Get AEM trends over time.
+
+     Returns AEM consumption grouped by time period for trend analysis.
+     """
+     try:
+         client = get_supabase()
+
+         # Calculate date range
+         end_date = datetime.utcnow()
+         start_date = end_date - timedelta(days=days)
+         start_date_iso = start_date.isoformat()
+
+         # Get all turns with AEM data
+         turns_result = client.table("execution_turns").select(
+             "created_at, runtime_minutes, model_weight, tool_calls_weight, aem_value, aem_cost, model"
+         ).eq("organization_id", organization["id"]).gte("created_at", start_date_iso).order("created_at").execute()
+         turns = turns_result.data if turns_result.data else []
+
+         # Group by time period
+         aem_by_period = {}
+         for turn in turns:
+             created_at = datetime.fromisoformat(turn["created_at"].replace("Z", "+00:00"))
+
+             # Determine period key
+             if group_by == "day":
+                 period_key = created_at.strftime("%Y-%m-%d")
+             elif group_by == "week":
+                 period_key = created_at.strftime("%Y-W%U")
+             else:  # month
+                 period_key = created_at.strftime("%Y-%m")
+
+             if period_key not in aem_by_period:
+                 aem_by_period[period_key] = {
+                     "period": period_key,
+                     "total_aem": 0.0,
+                     "total_aem_cost": 0.0,
+                     "total_runtime_minutes": 0.0,
+                     "turn_count": 0,
+                     "average_model_weight": 0.0,
+                     "average_tool_complexity": 0.0,
+                 }
+
+             aem_by_period[period_key]["total_aem"] += turn.get("aem_value", 0.0)
+             aem_by_period[period_key]["total_aem_cost"] += turn.get("aem_cost", 0.0)
+             aem_by_period[period_key]["total_runtime_minutes"] += turn.get("runtime_minutes", 0.0)
+             aem_by_period[period_key]["turn_count"] += 1
+
+         # Calculate averages
+         for period_data in aem_by_period.values():
+             if period_data["turn_count"] > 0:
+                 # Get turns for this period to calculate weighted averages
+                 period_turns = [t for t in turns if datetime.fromisoformat(t["created_at"].replace("Z", "+00:00")).strftime(
+                     "%Y-%m-%d" if group_by == "day" else "%Y-W%U" if group_by == "week" else "%Y-%m"
+                 ) == period_data["period"]]
+
+                 total_weight = sum(t.get("model_weight", 1.0) for t in period_turns)
+                 total_tool_weight = sum(t.get("tool_calls_weight", 1.0) for t in period_turns)
+
+                 period_data["average_model_weight"] = round(total_weight / len(period_turns), 2)
+                 period_data["average_tool_complexity"] = round(total_tool_weight / len(period_turns), 2)
+
+         # Convert to list and sort
+         aem_trends = sorted(aem_by_period.values(), key=lambda x: x["period"])
+
+         # Calculate totals
+         total_aem = sum(period["total_aem"] for period in aem_trends)
+         total_aem_cost = sum(period["total_aem_cost"] for period in aem_trends)
+
+         return {
+             "period_days": days,
+             "group_by": group_by,
+             "start_date": start_date_iso,
+             "end_date": end_date.isoformat(),
+             "total_aem": round(total_aem, 2),
+             "total_aem_cost": round(total_aem_cost, 2),
+             "trends": aem_trends,
+         }
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error("get_aem_trends_failed", error=str(e), org_id=organization["id"])
+         raise HTTPException(status_code=500, detail=f"Failed to get AEM trends: {str(e)}")
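
Finally, all of the paths above are relative to wherever the application mounts this router. A minimal sketch of the wiring, with the "/analytics" prefix as an assumption (the actual mounting happens in control_plane_api/app/main.py, which is not shown in this diff):

from fastapi import FastAPI

from control_plane_api.app.routers import analytics

app = FastAPI()
app.include_router(analytics.router, prefix="/analytics", tags=["analytics"])
# e.g. POST /analytics/turns, GET /analytics/summary?days=7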