synth-ai 0.2.9.dev2__py3-none-any.whl → 0.2.9.dev4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of synth-ai might be problematic.
- examples/analyze_semantic_words.sh +17 -0
- examples/common_old/backend.py +21 -0
- examples/crafter_debug_render.py +180 -0
- examples/evals_old/README.md +98 -0
- examples/evals_old/__init__.py +6 -0
- examples/evals_old/compare_models.py +1037 -0
- examples/evals_old/example_log.md +145 -0
- examples/evals_old/run_demo.sh +126 -0
- examples/evals_old/trace_analysis.py +270 -0
- examples/finetuning_old/_backup_synth_qwen/config.toml +29 -0
- examples/finetuning_old/_backup_synth_qwen/example_log.md +324 -0
- examples/finetuning_old/_backup_synth_qwen/filter_traces.py +60 -0
- examples/finetuning_old/_backup_synth_qwen/filter_traces_achievements.py +239 -0
- examples/finetuning_old/_backup_synth_qwen/purge_v3_traces.py +109 -0
- examples/finetuning_old/_backup_synth_qwen/react_agent_lm.py +1924 -0
- examples/finetuning_old/_backup_synth_qwen/readme.md +49 -0
- examples/finetuning_old/_backup_synth_qwen/run_crafter_qwen4b.py +114 -0
- examples/finetuning_old/_backup_synth_qwen/run_demo.sh +195 -0
- examples/finetuning_old/_backup_synth_qwen/sft_kickoff.py +118 -0
- examples/finetuning_old/synth_qwen_v1/README.md +68 -0
- examples/finetuning_old/synth_qwen_v1/filter_traces.py +60 -0
- examples/finetuning_old/synth_qwen_v1/filter_traces_achievements.py +239 -0
- examples/finetuning_old/synth_qwen_v1/finetune.py +46 -0
- examples/finetuning_old/synth_qwen_v1/hello_ft_model.py +71 -0
- examples/finetuning_old/synth_qwen_v1/infer.py +37 -0
- examples/finetuning_old/synth_qwen_v1/poll.py +44 -0
- examples/finetuning_old/synth_qwen_v1/prepare_data.py +35 -0
- examples/finetuning_old/synth_qwen_v1/purge_v3_traces.py +109 -0
- examples/finetuning_old/synth_qwen_v1/react_agent_lm.py +1932 -0
- examples/finetuning_old/synth_qwen_v1/run_crafter_sft_job.py +207 -0
- examples/finetuning_old/synth_qwen_v1/run_ft_job.py +232 -0
- examples/finetuning_old/synth_qwen_v1/upload_data.py +34 -0
- examples/finetuning_old/synth_qwen_v1/util.py +147 -0
- examples/rl/README.md +169 -0
- examples/rl/configs/eval_base_qwen.toml +15 -0
- examples/rl/configs/eval_rl_qwen.toml +11 -0
- examples/rl/configs/rl_from_base_qwen.toml +35 -0
- examples/rl/configs/rl_from_base_qwen17.toml +74 -0
- examples/rl/configs/rl_from_ft_qwen.toml +35 -0
- examples/rl/download_dataset.py +64 -0
- examples/rl/run_eval.py +435 -0
- examples/rl/run_rl_and_save.py +94 -0
- examples/rl/task_app/README.md +22 -0
- {synth_ai/task/apps → examples/rl/task_app}/math_single_step.py +8 -8
- examples/rl/task_app/math_task_app.py +107 -0
- examples/rl_old/task_app.py +962 -0
- examples/run_crafter_demo.sh +10 -0
- examples/warming_up_to_rl/analyze_trace_db.py +420 -0
- examples/warming_up_to_rl/configs/crafter_fft.toml +48 -0
- examples/warming_up_to_rl/configs/crafter_fft_4b.toml +54 -0
- examples/warming_up_to_rl/configs/eval_fft_qwen4b.toml +20 -0
- examples/warming_up_to_rl/configs/eval_groq_qwen32b.toml +13 -0
- examples/warming_up_to_rl/configs/eval_modal_qwen4b.toml +23 -0
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +73 -0
- examples/warming_up_to_rl/configs/rl_from_ft.toml +56 -0
- examples/warming_up_to_rl/export_trace_sft.py +541 -0
- examples/warming_up_to_rl/groq_test.py +88 -0
- examples/warming_up_to_rl/manage_secrets.py +127 -0
- examples/warming_up_to_rl/old/event_rewards.md +234 -0
- examples/warming_up_to_rl/old/notes.md +73 -0
- examples/warming_up_to_rl/readme.md +172 -0
- examples/warming_up_to_rl/run_eval.py +434 -0
- examples/warming_up_to_rl/run_fft_and_save.py +309 -0
- examples/warming_up_to_rl/run_local_rollout.py +188 -0
- examples/warming_up_to_rl/run_local_rollout_modal.py +160 -0
- examples/warming_up_to_rl/run_local_rollout_parallel.py +342 -0
- examples/warming_up_to_rl/run_local_rollout_traced.py +372 -0
- examples/warming_up_to_rl/run_rl_and_save.py +101 -0
- examples/warming_up_to_rl/run_rollout_remote.py +129 -0
- examples/warming_up_to_rl/task_app/README.md +38 -0
- {synth_ai/task/apps → examples/warming_up_to_rl/task_app}/grpo_crafter.py +7 -7
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +165 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/README.md +173 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +145 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +1271 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +429 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +442 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +96 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +302 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +202 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +512 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +102 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +985 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +197 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +1749 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +217 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +160 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +146 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_stepwise_rewards.py +58 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +61 -0
- synth_ai/api/train/config_finder.py +18 -18
- synth_ai/api/train/env_resolver.py +28 -1
- synth_ai/cli/task_apps.py +264 -55
- synth_ai/demo_registry.py +7 -7
- synth_ai/demos/demo_task_apps/crafter/__init__.py +1 -0
- synth_ai/demos/demo_task_apps/crafter/configs/crafter_fft_4b.toml +54 -0
- synth_ai/demos/demo_task_apps/crafter/configs/rl_from_base_qwen4b.toml +73 -0
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +165 -0
- synth_ai/task/apps/__init__.py +54 -13
- {synth_ai-0.2.9.dev2.dist-info → synth_ai-0.2.9.dev4.dist-info}/METADATA +1 -1
- {synth_ai-0.2.9.dev2.dist-info → synth_ai-0.2.9.dev4.dist-info}/RECORD +112 -13
- {synth_ai-0.2.9.dev2.dist-info → synth_ai-0.2.9.dev4.dist-info}/top_level.txt +1 -0
- {synth_ai-0.2.9.dev2.dist-info → synth_ai-0.2.9.dev4.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.9.dev2.dist-info → synth_ai-0.2.9.dev4.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.9.dev2.dist-info → synth_ai-0.2.9.dev4.dist-info}/licenses/LICENSE +0 -0
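The hunk below reproduces one of the new modules in full: examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py, the FastAPI policy router for the hosted task app (its 985 added lines match that file's entry in the list above).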
examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py
@@ -0,0 +1,985 @@
from __future__ import annotations

import logging
from datetime import datetime
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, HTTPException, Request
from pydantic import BaseModel

from .envs.crafter.policy import CrafterPolicy
from .inference.openai_client import create_inference_client
from .registry import registry
from .storage.volume import storage
import os
from typing import Tuple

# Token budgeting (shared logic with inference server)
try:
    from ..core.algorithms.gspo.inference.token_limits import (
        clamp_effective_max_ctx,
    )
except Exception:  # pragma: no cover - defensive import path fallback
    clamp_effective_max_ctx = None  # type: ignore

try:
    import tiktoken  # type: ignore
except Exception:  # pragma: no cover
    tiktoken = None  # type: ignore

logger = logging.getLogger(__name__)

router = APIRouter()

class PolicyCreateRequest(BaseModel):
    policy_name: str
    config: Dict[str, Any] = {}
    parent_policy_id: Optional[str] = None
    rl_run_id: str
    bound_env_id: Optional[str] = None


class PolicyCreateResponse(BaseModel):
    policy_id: str


class PolicyStepRequest(BaseModel):
    policy_id: str
    observation: Dict[str, Any]
    state: Optional[Dict[str, Any]] = None
    metadata: Optional[Dict[str, Any]] = None
    dry_run: bool = False


class PolicyStepResponse(BaseModel):
    tool_calls: List[Dict[str, Any]]
    meta: Dict[str, Any]


class PolicySnapshotRequest(BaseModel):
    policy_id: str


class PolicySnapshotResponse(BaseModel):
    snapshot_id: str
    path: str
    rl_run_id: str
    size: int


class PolicyRestoreRequest(BaseModel):
    snapshot_id: str


class PolicyRestoreResponse(BaseModel):
    policy_id: str


class PolicyTerminateRequest(BaseModel):
    policy_id: str


class PolicyTerminateResponse(BaseModel):
    ok: bool

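# The Pydantic models above are the wire contract for the routes below. For
# illustration only (all values are made up), a /create body looks like
#   {"policy_name": "crafter-react", "config": {}, "rl_run_id": "run-123"}
# and a /step body like
#   {"policy_id": "<id from /create>", "observation": {...}, "dry_run": true}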
@router.post("/create", response_model=PolicyCreateResponse)
async def create_policy(
    request: PolicyCreateRequest,
    req: Request,
) -> PolicyCreateResponse:
    """Create a new policy instance."""
    try:
        task_app = req.app.state.task_app

        # Set defaults from TaskApp if not provided
        config = request.config.copy()
        if "inference_url" not in config:
            config["inference_url"] = task_app.vllm_base_url
        if "model" not in config and task_app.default_model:
            config["model"] = task_app.default_model

        # Create policy instance based on name
        pname = request.policy_name.lower()
        if pname in ["crafter-react", "crafter"]:
            policy = CrafterPolicy(
                inference_url=config["inference_url"],
                model=config["model"],
            )
            await policy.initialize(config)
        elif pname in ["wordle-react", "wordle"]:
            try:
                from .envs.wordle.policy import WordlePolicy as _WordlePolicy
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Wordle policy unavailable: {e}")

            policy = _WordlePolicy(
                inference_url=config["inference_url"],
                model=config["model"],
                word_length=int(config["word_length"]),
                max_guesses=int(config["max_guesses"]),
            )
            await policy.initialize(config)
        elif pname in ["sokoban-react", "sokoban"]:
            try:
                from .envs.sokoban.policy import SokobanPolicy as _SokobanPolicy
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Sokoban policy unavailable: {e}")

            policy = _SokobanPolicy(
                inference_url=config["inference_url"],
                model=config["model"],
            )
            await policy.initialize(config)
        elif pname in ["math-react", "math"]:
            try:
                from .envs.math.policy import MathPolicy as _MathPolicy
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Math policy unavailable: {e}")

            policy = _MathPolicy(
                inference_url=config["inference_url"],
                model=config["model"],
            )
            await policy.initialize(config)
        else:
            raise HTTPException(
                status_code=422,
                detail=f"Unknown policy name: {request.policy_name}",
            )

        # Register in memory
        policy_id = registry.register_policy(
            policy=policy,
            rl_run_id=request.rl_run_id,
            bound_env_id=request.bound_env_id,
        )

        return PolicyCreateResponse(policy_id=policy_id)

    except Exception as e:
        logger.error(f"Failed to create policy: {e}")
        raise HTTPException(status_code=500, detail=str(e))

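# Note: create_policy dispatches on the lowercased policy name. The Wordle
# branch additionally requires config["word_length"] and config["max_guesses"]
# (int(...) raises if they are absent, surfacing as a 500 via the outer
# except), while the Crafter, Sokoban and Math branches need only
# inference_url and model.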
@router.post("/step", response_model=PolicyStepResponse)
async def step_policy(
    request: PolicyStepRequest,
    req: Request,
) -> PolicyStepResponse:
    """Execute a policy step to generate actions."""
    handle = registry.get_policy(request.policy_id)
    if not handle:
        raise HTTPException(
            status_code=404, detail=f"Policy {request.policy_id} not found"
        )

    try:
        task_app = req.app.state.task_app
        policy = handle.policy
        tracing_context = getattr(req.state, "rollout_tracing", None)

        # Format observation text conditionally for each env
        if isinstance(request.observation, dict):
            if isinstance(policy, CrafterPolicy):
                from .envs.crafter.shared import format_observation as format_crafter

                obs_text = format_crafter(request.observation)
            elif True:
                try:
                    from .envs.wordle.policy import WordlePolicy as _WordlePolicy
                except Exception:
                    _WordlePolicy = None  # type: ignore

                if _WordlePolicy is not None and isinstance(policy, _WordlePolicy):
                    from .envs.wordle.shared import format_observation_wordle

                    # ASSERTION: Validate observation structure
                    assert request.observation is not None, (
                        "request.observation cannot be None"
                    )
                    assert isinstance(request.observation, dict), (
                        f"request.observation must be dict, got {type(request.observation)}"
                    )

                    # Required keys for Wordle observation
                    required_keys = {
                        "text",
                        "status",
                        "remaining_guesses",
                        "guesses",
                        "feedback",
                        "reward_last",
                        "total_reward",
                        "terminated",
                    }
                    missing_keys = required_keys - set(request.observation.keys())
                    assert not missing_keys, (
                        f"Wordle observation missing required keys: {missing_keys}"
                    )

                    print("DEBUG POLICY_ROUTES: About to format Wordle observation")
                    print(
                        f"DEBUG POLICY_ROUTES: Observation type: {type(request.observation)}"
                    )
                    print(
                        f"DEBUG POLICY_ROUTES: Observation keys: {list(request.observation.keys())}"
                    )
                    feedback_val = request.observation["feedback"]
                    print(f"DEBUG POLICY_ROUTES: Observation feedback: {feedback_val}")
                    print(
                        f"DEBUG POLICY_ROUTES: Observation guesses: {request.observation['guesses']}"
                    )
                    print(
                        f"DEBUG POLICY_ROUTES: Observation text length: {len(request.observation['text'])}"
                    )

                    # ASSERTION: Validate feedback data
                    guesses = request.observation["guesses"]
                    feedback = request.observation["feedback"]
                    assert isinstance(guesses, list), (
                        f"guesses must be list, got {type(guesses)}"
                    )
                    assert isinstance(feedback, list), (
                        f"feedback must be list, got {type(feedback)}"
                    )
                    # Note: We don't assert equal lengths here since the environment is broken

                    obs_text = format_observation_wordle(request.observation)

                    # ASSERTION: Validate formatted output
                    assert isinstance(obs_text, str), (
                        f"obs_text must be string, got {type(obs_text)}"
                    )
                    assert len(obs_text) > 0, "obs_text cannot be empty"
                    assert "WORDLE" in obs_text, "obs_text must contain 'WORDLE' header"
                    assert "Respond with a single tool call" in obs_text, (
                        "obs_text must contain instruction text"
                    )

                    print(
                        f"DEBUG POLICY_ROUTES: Formatted obs_text length: {len(obs_text)}"
                    )
                    print(
                        f"DEBUG POLICY_ROUTES: Formatted obs_text contains 🟩: {'🟩' in obs_text}"
                    )
                    print(
                        f"DEBUG POLICY_ROUTES: Formatted obs_text contains 🟨: {'🟨' in obs_text}"
                    )
                    print(
                        f"DEBUG POLICY_ROUTES: Formatted obs_text contains ⬛: {'⬛' in obs_text}"
                    )
                    print(
                        f"DEBUG POLICY_ROUTES: Formatted obs_text first 200 chars: {obs_text[:200]}"
                    )
                elif True:
                    try:
                        from .envs.sokoban.policy import SokobanPolicy as _SokobanPolicy
                    except Exception:
                        _SokobanPolicy = None  # type: ignore

                    if _SokobanPolicy is not None and isinstance(policy, _SokobanPolicy):
                        from .envs.sokoban.shared import format_observation_sokoban

                        obs_text = format_observation_sokoban(request.observation)
                    elif True:
                        try:
                            from .envs.math.policy import MathPolicy as _MathPolicy
                        except Exception:
                            _MathPolicy = None  # type: ignore
                        if _MathPolicy is not None and isinstance(policy, _MathPolicy):
                            # Simple extraction of problem text
                            try:
                                obs_text = str(request.observation.get("problem_text") or request.observation)
                            except Exception:
                                obs_text = str(request.observation)
                        else:
                            obs_text = str(request.observation)
        else:
            obs_text = request.observation

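        # Each env policy above is imported lazily inside an `elif True:` ladder,
        # so a missing optional env cannot break the others; any dict observation
        # that matches no known policy falls back to str(request.observation).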
        # Execute policy step to get inference request
        tool_calls, meta = await policy.step(
            observation_text=obs_text,
            state=request.state,
            metadata=request.metadata,
        )

        # If not dry run, perform inference
        if not request.dry_run and "inference_request" in meta:
            # CRITICAL: Validate that the inference request contains the correct prompts for the policy
            inf_req = meta["inference_request"]
            msgs = inf_req["messages"]
            model_name = inf_req.get("model") or getattr(policy, "model", None) or ""
            system_messages: List[str] = []
            user_messages: List[str] = []
            if msgs and len(msgs) > 0 and msgs[0]["role"] == "system":
                sys_text = msgs[0]["content"]
                policy_name = (
                    getattr(policy, "name", "") or type(policy).__name__.lower()
                )

                # Assert environment-specific prompts match the policy
                if policy_name in ("wordle-react", "wordle"):
                    if "Wordle" not in sys_text:
                        raise ValueError(
                            f"PROMPT MISMATCH: Wordle policy {policy_name} received system prompt without 'Wordle' keyword: {sys_text[:200]}..."
                        )
                    if "Crafter" in sys_text:
                        raise ValueError(
                            f"PROMPT MISMATCH: Wordle policy {policy_name} received Crafter system prompt: {sys_text[:200]}..."
                        )

                elif policy_name in ("crafter-react", "crafter") or isinstance(
                    policy, CrafterPolicy
                ):
                    if "Crafter" not in sys_text:
                        raise ValueError(
                            f"PROMPT MISMATCH: Crafter policy {policy_name} received system prompt without 'Crafter' keyword: {sys_text[:200]}..."
                        )
                    if "Wordle" in sys_text:
                        raise ValueError(
                            f"PROMPT MISMATCH: Crafter policy {policy_name} received Wordle system prompt: {sys_text[:200]}..."
                        )

                elif policy_name in ("sokoban-react", "sokoban"):
                    if "Sokoban" not in sys_text:
                        raise ValueError(
                            f"PROMPT MISMATCH: Sokoban policy {policy_name} received system prompt without 'Sokoban' keyword: {sys_text[:200]}..."
                        )
                    if "Crafter" in sys_text or "Wordle" in sys_text:
                        raise ValueError(
                            f"PROMPT MISMATCH: Sokoban policy {policy_name} received wrong environment system prompt: {sys_text[:200]}..."
                        )

                logger.info(
                    f"✅ PROMPT VALIDATION: {policy_name} policy has correct system prompt containing expected environment keywords"
                )
            else:
                logger.warning(
                    f"⚠️ PROMPT VALIDATION: No system message found in inference request for policy {getattr(policy, 'name', type(policy).__name__)}"
                )

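            # A PROMPT MISMATCH ValueError raised above is not handled locally; it
            # propagates to step_policy's outer `except Exception` and surfaces as
            # an HTTP 500.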
            # Emit full system/user prompts for observability (no secrets included)
            try:
                def _as_text(content: object) -> str:
                    if isinstance(content, str):
                        return content
                    if isinstance(content, list):
                        # Concatenate any dict segments that resemble OpenAI content parts
                        parts: list[str] = []
                        for seg in content:
                            try:
                                if isinstance(seg, dict):
                                    txt = seg.get("text") or seg.get("content") or ""
                                    if isinstance(txt, str):
                                        parts.append(txt)
                            except Exception:
                                continue
                        return "".join(parts)
                    return str(content)

                system_messages: list[str] = []
                user_messages: list[str] = []
                for message in msgs:
                    role = message.get("role")
                    content = _as_text(message.get("content"))
                    if role == "system":
                        system_messages.append(content)
                    elif role == "user":
                        user_messages.append(content)

                if system_messages:
                    logger.info("PROMPT_DUMP_SYSTEM_BEGIN")
                    for idx, smsg in enumerate(system_messages):
                        logger.info(f"SYSTEM[{idx}]\n{smsg}")
                    logger.info("PROMPT_DUMP_SYSTEM_END")

                if user_messages:
                    logger.info("PROMPT_DUMP_USER_BEGIN")
                    for idx, umsg in enumerate(user_messages):
                        logger.info(f"USER[{idx}]\n{umsg}")
                    logger.info("PROMPT_DUMP_USER_END")
                # Print concise preview for visibility in standard logs
                try:
                    last_user = user_messages[-1] if user_messages else ""
                    #preview = last_user[:400] if isinstance(last_user, str) else str(last_user)[:400]
                    print(f"[task:crafter] user prompt: {last_user}", flush=True)
                except Exception:
                    pass
            except Exception as e:
                logger.warning(f"PROMPT_DUMP_FAILED: {e}")

            if tracing_context is not None:
                try:
                    await tracing_context.record_policy_prompts(system_messages, user_messages)
                except Exception as exc:
                    logger.debug(f"TRACING_PROMPTS_FAIL: {exc}")

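            # The PROMPT_DUMP_*_BEGIN/END markers bracket full prompt bodies in the
            # logs, apparently so they are easy to grep out; failures here are
            # deliberately non-fatal (warning only).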
            # Create inference client (choose API key by target provider)
            # Require inference_url to be set explicitly by the rollout policy config.
            target_url = (
                meta.get("inference_url")
                or getattr(policy, "inference_url", None)
                or getattr(task_app, "vllm_base_url", None)
            )

            # Ensure meta carries the final target URL for downstream logging/clients
            try:
                meta["inference_url"] = target_url
            except Exception:
                pass

            # Select API key based on resolved target URL
            api_key_override = None
            try:
                import os as _os
                if isinstance(target_url, str):
                    low_url = target_url.lower()
                    if "openai.com" in low_url:
                        api_key_override = _os.getenv("OPENAI_API_KEY") or getattr(task_app, "openai_api_key", None)
                    elif "groq.com" in low_url:
                        api_key_override = _os.getenv("GROQ_API_KEY")
                    else:
                        api_key_override = _os.getenv("SYNTH_API_KEY") or _os.getenv("OPENAI_API_KEY") or getattr(task_app, "openai_api_key", None)
                else:
                    api_key_override = _os.getenv("SYNTH_API_KEY") or _os.getenv("OPENAI_API_KEY") or getattr(task_app, "openai_api_key", None)
            except Exception:
                api_key_override = None

            if api_key_override:
                try:
                    masked = f"{api_key_override[:6]}…{api_key_override[-4:]}"
                except Exception:
                    masked = "<masked>"
                logger.debug(f"INFERENCE_AUTH: Using bearer key {masked}")
            else:
                logger.warning("INFERENCE_AUTH: No API key resolved for inference request; downstream may 401")

            client = create_inference_client(task_app, api_key=api_key_override)

            # Add policy identification header for observability
            policy_name = getattr(policy, "name", "") or type(policy).__name__.lower()
            extra_headers = {"X-Policy-Name": policy_name}

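            # Key resolution order above: OPENAI_API_KEY for openai.com targets,
            # GROQ_API_KEY for groq.com, otherwise SYNTH_API_KEY, then
            # OPENAI_API_KEY, then the task app's openai_api_key attribute.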
            # Apply input truncation to avoid 422 from inference server
            try:
                model_name = inf_req.get("model") or getattr(policy, "model", None) or ""
                env_max_ctx = None
                try:
                    _env_max = int(os.getenv("CHAT_MAX_MODEL_LEN", "0") or 0)
                    env_max_ctx = _env_max if _env_max > 0 else None
                except Exception:
                    env_max_ctx = None
                # Compute effective max context and safety margin
                eff_ctx = None
                if clamp_effective_max_ctx is not None:
                    eff_ctx = clamp_effective_max_ctx(
                        model_name=model_name,
                        configured_max_model_len=None,
                        env_max_model_len=env_max_ctx,
                    )
                # Hard lower-only chat input cap if provided
                try:
                    hard_input_cap = int(os.getenv("CHAT_MAX_INPUT_TOKENS", "0") or 0)
                    hard_input_cap = hard_input_cap if hard_input_cap > 0 else None
                except Exception:
                    hard_input_cap = None
                try:
                    safety_margin = int(os.getenv("CHAT_BUDGET_SAFETY", "64").strip() or 64)
                except Exception:
                    safety_margin = 64

                # Determine budget
                budget = None
                if isinstance(eff_ctx, int) and eff_ctx > 0:
                    budget = max(256, eff_ctx - safety_margin)
                if isinstance(hard_input_cap, int) and hard_input_cap > 0:
                    budget = min(budget, hard_input_cap) if budget is not None else hard_input_cap

                if budget is not None and budget > 0 and isinstance(msgs, list):
                    # Choose tokenizer
                    enc = None
                    if tiktoken is not None:
                        try:
                            if model_name:
                                enc = tiktoken.encoding_for_model(model_name)
                            else:
                                enc = tiktoken.get_encoding("cl100k_base")
                        except Exception:
                            try:
                                enc = tiktoken.get_encoding("cl100k_base")
                            except Exception:
                                enc = None

                    def _content_to_text(content: object) -> str:
                        if isinstance(content, str):
                            return content
                        if isinstance(content, list):
                            parts: list[str] = []
                            for seg in content:
                                try:
                                    if isinstance(seg, dict):
                                        txt = seg.get("text") or seg.get("content") or ""
                                        if isinstance(txt, str):
                                            parts.append(txt)
                                except Exception:
                                    continue
                            return "".join(parts)
                        try:
                            return str(content)
                        except Exception:
                            return ""

                    def _count_tokens(text: str) -> int:
                        if enc is None:
                            # Fall back to character count heuristic (~4 chars per token)
                            try:
                                return max(1, int(len(text) / 4))
                            except Exception:
                                return len(text)
                        try:
                            return len(enc.encode(text))
                        except Exception:
                            return max(1, int(len(text) / 4))

                    def _count_messages_tokens(messages: List[Dict[str, Any]]) -> int:
                        total = 0
                        for m in messages:
                            total += _count_tokens(_content_to_text(m.get("content")))
                        return total

                    def _truncate_messages_to_budget(
                        messages: List[Dict[str, Any]],
                        max_tokens: int,
                    ) -> Tuple[List[Dict[str, Any]], int, int, int]:
                        before = _count_messages_tokens(messages)
                        if before <= max_tokens:
                            return messages, before, before, len(messages)
                        # Always try to preserve the first system message if present
                        system_msg = None
                        start_idx = 0
                        if messages and messages[0].get("role") == "system":
                            system_msg = messages[0]
                            start_idx = 1
                        kept_rev: List[Dict[str, Any]] = []
                        total = _count_messages_tokens([system_msg] if system_msg else [])
                        # Walk from the end keeping most recent messages
                        for m in reversed(messages[start_idx:]):
                            t = _count_tokens(_content_to_text(m.get("content")))
                            if total + t <= max_tokens:
                                kept_rev.append(m)
                                total += t
                            else:
                                # Try to keep a truncated version of this message if we have some budget left
                                remaining = max_tokens - total
                                if remaining > 16:  # keep at least a little context
                                    txt = _content_to_text(m.get("content"))
                                    # Binary search-ish trim by tokens
                                    low, high = 0, len(txt)
                                    best = None
                                    while low <= high:
                                        mid = (low + high) // 2
                                        candidate = txt[-mid:]
                                        if _count_tokens(candidate) <= remaining:
                                            best = candidate
                                            low = mid + 1
                                        else:
                                            high = mid - 1
                                    if best is not None and best:
                                        m2 = dict(m)
                                        m2["content"] = best
                                        kept_rev.append(m2)
                                        total += _count_tokens(best)
                                break
                        kept = list(reversed(kept_rev))
                        if system_msg is not None:
                            kept = [system_msg] + kept
                        after = _count_messages_tokens(kept)
                        return kept, before, after, len(kept)

                    new_msgs, before_toks, after_toks, kept_count = _truncate_messages_to_budget(
                        msgs, int(budget)
                    )
                    if new_msgs is not msgs:
                        inf_req["messages"] = new_msgs
                        try:
                            logger.info(
                                {
                                    "chat_truncated": True,
                                    "token_budget": int(budget),
                                    "before_tokens": int(before_toks),
                                    "after_tokens": int(after_toks),
                                    "kept_msgs": int(kept_count),
                                }
                            )
                        except Exception:
                            pass
            except Exception as _trunc_e:
                logger.warning(f"CHAT_TRUNCATION_FAILED: {type(_trunc_e).__name__}: {_trunc_e}")

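            # Truncation strategy above: keep the first system message, walk the
            # remaining messages newest-first keeping whole messages while they
            # fit, tail-trim the first message that overflows with a binary
            # search over its character suffix, and drop everything older.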
            # Formal assertion: If tools are expected, ensure tool_choice and tools are set
            if policy_name in (
                "wordle-react",
                "sokoban-react",
                "crafter-react",
            ) and getattr(policy, "use_tools", True):
                req_tools = meta["inference_request"]["tools"]
                req_tool_choice = meta["inference_request"]["tool_choice"]
                req_stop_after = meta["inference_request"]["stop_after_tool_calls"]
                logger.info(
                    f"TOOLCALL_CONFIG: policy={policy_name} tools_present={bool(req_tools)} tool_choice={req_tool_choice} stop_after={req_stop_after}"
                )
                if not req_tools or req_tool_choice != "required":
                    raise HTTPException(
                        status_code=500,
                        detail=f"TOOLCALL_ASSERTION_FAIL: Missing tools or tool_choice!=required for policy {policy_name}",
                    )

            # Call inference service with retries for Flash cold-start (503)
            import time as _t

            # Prompt diagnostics before sending to inference: build chat template locally,
            # count tokens, and log the first 10k tokens if oversized. Also stash a
            # compact preview in meta so the trainer can surface it.
            try:
                req_for_diag = meta.get("inference_request", {})
                model_for_diag = req_for_diag.get("model") or getattr(policy, "model", None) or ""
                messages_for_diag = req_for_diag.get("messages") or []
                if model_for_diag and messages_for_diag:
                    try:
                        from transformers import AutoTokenizer
                        tok = AutoTokenizer.from_pretrained(model_for_diag)
                        prompt_preview = tok.apply_chat_template(
                            messages_for_diag,
                            add_generation_prompt=True,
                            tokenize=False,
                        )
                        ids = tok.encode(prompt_preview, add_special_tokens=False)
                        max_len = getattr(tok, "model_max_length", None)
                        over_limit = False
                        try:
                            over_limit = isinstance(max_len, int) and max_len > 0 and len(ids) > int(max_len)
                        except Exception:
                            over_limit = False
                        if over_limit or len(ids) > 10000:
                            preview_ids = ids[:10000]
                            preview_text = tok.decode(preview_ids, skip_special_tokens=False)
                            try:
                                logger.warning(
                                    {
                                        "prompt_token_overflow_local": True,
                                        "model": str(model_for_diag),
                                        "token_count": int(len(ids)),
                                        "model_max_length": int(max_len) if isinstance(max_len, int) else None,
                                        "preview_tokens_logged": int(len(preview_ids)),
                                        "prompt_preview_first_10k_tokens": preview_text,
                                    }
                                )
                            except Exception:
                                pass
                            try:
                                meta["prompt_debug"] = {
                                    "token_count": int(len(ids)),
                                    "model_max_length": int(max_len) if isinstance(max_len, int) else None,
                                    "preview_first_10k_tokens": preview_text,
                                }
                            except Exception:
                                pass
                    except Exception:
                        pass
            except Exception:
                pass

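            # The diagnostics above are best-effort: transformers is imported
            # lazily and any failure (missing package, unknown model id) is
            # swallowed so prompt-size logging can never block the step itself.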
            # Emit the exact prompt/messages and tools before calling the LLM (bounded preview)
            try:
                req_dump = meta.get("inference_request", {})
                msgs = req_dump.get("messages")
                tools_dump = req_dump.get("tools")
                if isinstance(msgs, list):
                    # Print compact messages structure and tool schema with bounded length
                    import json as _json
                    msgs_compact = _json.dumps(msgs)[:20000]
                    tools_compact = _json.dumps(tools_dump)[:8000] if tools_dump is not None else None
                    print({
                        "llm.call": True,
                        "policy": str(policy_name),
                        "messages_preview": msgs_compact,
                        "tools_preview": tools_compact,
                    })
            except Exception:
                pass

            # Normalize request for non-OpenAI endpoints (strict schemas)
            try:
                base = str(target_url or "")
                is_openai_dotcom = "openai.com" in base.lower()
                if not is_openai_dotcom:
                    req_body = meta.get("inference_request", {})
                    if isinstance(req_body, dict):
                        # Force structured tool_choice if a bare "required" is present
                        if req_body.get("tool_choice") == "required":
                            func_name = "interact_many"
                            try:
                                tools_arr = req_body.get("tools") or []
                                if isinstance(tools_arr, list) and tools_arr:
                                    f = tools_arr[0].get("function") if isinstance(tools_arr[0], dict) else None
                                    cand = (f or {}).get("name") if isinstance(f, dict) else None
                                    if isinstance(cand, str) and cand:
                                        func_name = cand
                            except Exception:
                                pass
                            req_body["tool_choice"] = {"type": "function", "function": {"name": func_name}}
                            req_body["parallel_tool_calls"] = False
                            req_body.setdefault("function_call", {"name": func_name})
                        # Inject extra_body for thinking controls expected by Modal service
                        try:
                            tb = req_body.get("thinking_budget")
                            tm = str(req_body.get("thinking_mode") or "").lower()
                            enable_thinking = bool(tb) or tm == "think"
                            extra = dict(req_body.get("extra_body") or {})
                            chat_kwargs = dict(extra.get("chat_template_kwargs") or {})
                            if enable_thinking:
                                chat_kwargs["enable_thinking"] = True
                            if isinstance(tb, (int, float, str)) and str(tb).strip():
                                try:
                                    chat_kwargs["thinking_budget"] = int(tb)
                                except Exception:
                                    pass
                            if chat_kwargs:
                                extra["chat_template_kwargs"] = chat_kwargs
                            # Ensure stop_after_tool_calls honored via extra_body for stricter servers
                            extra.setdefault("stop_after_tool_calls", 1)
                            if extra:
                                req_body["extra_body"] = extra
                        except Exception:
                            pass
                        # Provide a conservative default temperature if missing
                        if "temperature" not in req_body:
                            req_body["temperature"] = 0.1
                        meta["inference_request"] = req_body
            except Exception:
                pass

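            # For non-openai.com targets the request is normalized for stricter
            # schemas: a bare tool_choice="required" becomes a structured function
            # choice (first declared tool, defaulting to "interact_many"), parallel
            # tool calls are disabled, and thinking controls ride along in extra_body.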
            _t_start = _t.time()
            call_started_at = datetime.utcnow()
            inference_response = await client.generate_with_retries(
                request=meta["inference_request"],
                base_url=meta["inference_url"],
                max_retries=12,
                backoff_factor=2.0,
                extra_headers=extra_headers,
            )
            meta["inference_ms"] = int((_t.time() - _t_start) * 1000)
            call_completed_at = datetime.utcnow()

            provider_url = str(meta.get("inference_url") or "")
            low_url = provider_url.lower()
            if "groq" in low_url:
                provider_name = "groq"
            elif "openai" in low_url:
                provider_name = "openai"
            else:
                provider_name = "custom"

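            # generate_with_retries gets a generous budget (12 attempts, factor-2
            # backoff) because, per the comment above, the first request may hit a
            # cold inference server that answers 503.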
            # Parse response to tool calls
            tool_calls = policy.parse_response_to_tool_calls(
                response=inference_response,
                use_tools=getattr(policy, "use_tools", True),
            )

            # Debug logging (echo tool calls)
            if not tool_calls:
                # Structured error log with small preview; avoid dumping full response repeatedly
                preview = str(inference_response)[:400]
                logger.error(
                    f"TOOLCALL_PARSE_FAIL: policy={policy_name} parsed=0 preview={preview}"
                )
            else:
                try:
                    import json as _json
                    print({
                        "tool_calls_parsed": int(len(tool_calls)),
                        "tool_calls_preview": _json.dumps(tool_calls)[:20000],
                    })
                except Exception:
                    logger.info(f"Parsed {len(tool_calls)} tool calls: {tool_calls}")

            # Add response to metadata
            # Parse tool calls from model response using policy-specific parser
            try:
                if hasattr(policy, "parse_response_to_tool_calls"):
                    parsed = policy.parse_response_to_tool_calls(
                        inference_response, getattr(policy, "use_tools", True)
                    )
                else:
                    parsed = policy.parse_model_response(
                        inference_response, request.observation
                    )
                # Replace tool_calls with parsed result
                if isinstance(parsed, list):
                    tool_calls = parsed
                    try:
                        logger.info(
                            "TOOLCALL_PARSE: parsed=%d has_tools=%s example=%r",
                            len(tool_calls) if isinstance(tool_calls, list) else -1,
                            bool(getattr(policy, "use_tools", True)),
                            (tool_calls[0] if isinstance(tool_calls, list) and tool_calls else None),
                        )
                    except Exception:
                        pass
            except Exception as _pe:
                logger.warning(f"Failed to parse tool calls: {str(_pe)}")
            # Attach raw response + usage for observability
            meta["raw_response"] = inference_response
            if "usage" in inference_response:
                meta["usage"] = inference_response["usage"]

            if tracing_context is not None:
                try:
                    await tracing_context.record_llm_call(
                        inference_request=meta["inference_request"],
                        inference_response=inference_response,
                        tool_calls=tool_calls,
                        provider=provider_name,
                        model_name=model_name,
                        started_at=call_started_at,
                        completed_at=call_completed_at,
                        latency_ms=meta.get("inference_ms"),
                    )
                except Exception as exc:
                    logger.debug(f"TRACING_LLM_FAIL: {exc}")

        return PolicyStepResponse(
            tool_calls=tool_calls,
            meta=meta,
        )

    except Exception as e:
        logger.error(f"Failed to step policy {request.policy_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))

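# Note that step_policy parses tool calls twice: once directly after the
# inference call and once in the hasattr(...) block, whose result replaces the
# first parse whenever it returns a list; the second pass also tolerates
# policies that only expose parse_model_response.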
@router.post("/snapshot", response_model=PolicySnapshotResponse)
async def snapshot_policy(request: PolicySnapshotRequest) -> PolicySnapshotResponse:
    """Create a snapshot of the policy state."""
    handle = registry.get_policy(request.policy_id)
    if not handle:
        raise HTTPException(
            status_code=404, detail=f"Policy {request.policy_id} not found"
        )

    try:
        # Serialize policy state
        state_dict = await handle.policy.serialize()

        # Save to volume
        snapshot_id, path, size = storage.save_snapshot(
            rl_run_id=handle.rl_run_id,
            kind="policy",
            state_dict=state_dict,
        )

        # Register snapshot
        registry.register_snapshot(
            kind="policy",
            rl_run_id=handle.rl_run_id,
            size=size,
            path=path,
        )

        return PolicySnapshotResponse(
            snapshot_id=snapshot_id,
            path=path,
            rl_run_id=handle.rl_run_id,
            size=size,
        )

    except Exception as e:
        logger.error(f"Failed to snapshot policy {request.policy_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))

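# storage.save_snapshot persists the serialized policy under the run's
# directory on the backing volume and returns (snapshot_id, path, size); the
# registry entry is what lets /restore look the snapshot up by id later.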
@router.post("/restore", response_model=PolicyRestoreResponse)
async def restore_policy(request: PolicyRestoreRequest) -> PolicyRestoreResponse:
    """Restore a policy from a snapshot."""
    snapshot = registry.get_snapshot(request.snapshot_id)
    if not snapshot:
        raise HTTPException(
            status_code=404, detail=f"Snapshot {request.snapshot_id} not found"
        )

    if snapshot.kind != "policy":
        raise HTTPException(
            status_code=422,
            detail=f"Snapshot {request.snapshot_id} is not a policy snapshot",
        )

    try:
        # Load snapshot from volume
        state_dict, meta = storage.load_snapshot(
            rl_run_id=snapshot.rl_run_id,
            kind="policy",
            snapshot_id=request.snapshot_id,
        )

        # Recreate policy
        policy_name = state_dict["name"]
        low = policy_name.lower()
        if low in ["crafter-react", "crafter"]:
            policy = await CrafterPolicy.deserialize(state_dict)
        elif low in ["wordle-react", "wordle"]:
            try:
                from .envs.wordle.policy import WordlePolicy as _WordlePolicy
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Wordle policy unavailable: {e}")
            policy = await _WordlePolicy.deserialize(state_dict)
        elif low in ["sokoban-react", "sokoban"]:
            try:
                from .envs.sokoban.policy import SokobanPolicy as _SokobanPolicy
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Sokoban policy unavailable: {e}")
            policy = await _SokobanPolicy.deserialize(state_dict)
        else:
            raise HTTPException(
                status_code=422,
                detail=f"Unknown policy name in snapshot: {policy_name}",
            )

        # Register new instance
        policy_id = registry.register_policy(
            policy=policy,
            rl_run_id=snapshot.rl_run_id,
        )

        return PolicyRestoreResponse(policy_id=policy_id)

    except Exception as e:
        logger.error(
            f"Failed to restore policy from snapshot {request.snapshot_id}: {e}"
        )
        raise HTTPException(status_code=500, detail=str(e))

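# Asymmetry worth noting: create_policy accepts "math"/"math-react", but the
# restore dispatch above has no Math branch, so a math policy snapshot would be
# rejected with a 422 "Unknown policy name in snapshot".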
@router.post("/terminate", response_model=PolicyTerminateResponse)
async def terminate_policy(request: PolicyTerminateRequest) -> PolicyTerminateResponse:
    """Terminate a policy and clean up resources."""
    handle = registry.get_policy(request.policy_id)
    if not handle:
        raise HTTPException(
            status_code=404, detail=f"Policy {request.policy_id} not found"
        )

    try:
        # Call terminate on the policy
        await handle.policy.terminate()

        # Remove from registry
        registry.remove_policy(request.policy_id)

        return PolicyTerminateResponse(ok=True)

    except Exception as e:
        logger.error(f"Failed to terminate policy {request.policy_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
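
Taken together, the routes above form a small policy lifecycle API: create, step (optionally as a dry run), snapshot, restore, terminate. A minimal client sketch follows. It assumes the router is mounted under /policy on a locally running task app (the mount prefix is assigned where the router is included and is not shown in this diff), and the policy name, run id, and observation payload are illustrative, not taken from the package:

import asyncio

import httpx

BASE_URL = "http://localhost:8000/policy"  # assumed mount point for this router


async def demo() -> None:
    async with httpx.AsyncClient(base_url=BASE_URL, timeout=60.0) as client:
        # Create a policy; "crafter-react" is one of the names create_policy accepts.
        created = await client.post(
            "/create",
            json={"policy_name": "crafter-react", "config": {}, "rl_run_id": "run-demo"},
        )
        policy_id = created.json()["policy_id"]

        # Dry-run step: builds the inference request without calling the model.
        stepped = await client.post(
            "/step",
            json={
                "policy_id": policy_id,
                "observation": {"text": "example observation"},  # illustrative shape
                "dry_run": True,
            },
        )
        print(stepped.json()["meta"].get("inference_request", {}))

        # Snapshot the policy state, then tear the instance down.
        snap = (await client.post("/snapshot", json={"policy_id": policy_id})).json()
        print("snapshot:", snap["snapshot_id"], snap["size"])
        await client.post("/terminate", json={"policy_id": policy_id})


asyncio.run(demo())

An empty config can suffice for /create because, as shown in create_policy, inference_url and model default from the hosting task app's vllm_base_url and default_model when omitted.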