tactus 0.31.0__py3-none-any.whl → 0.34.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tactus/__init__.py +1 -1
- tactus/adapters/__init__.py +18 -1
- tactus/adapters/broker_log.py +127 -34
- tactus/adapters/channels/__init__.py +153 -0
- tactus/adapters/channels/base.py +174 -0
- tactus/adapters/channels/broker.py +179 -0
- tactus/adapters/channels/cli.py +448 -0
- tactus/adapters/channels/host.py +225 -0
- tactus/adapters/channels/ipc.py +297 -0
- tactus/adapters/channels/sse.py +305 -0
- tactus/adapters/cli_hitl.py +223 -1
- tactus/adapters/control_loop.py +879 -0
- tactus/adapters/file_storage.py +35 -2
- tactus/adapters/ide_log.py +7 -1
- tactus/backends/http_backend.py +0 -1
- tactus/broker/client.py +31 -1
- tactus/broker/server.py +416 -92
- tactus/cli/app.py +270 -7
- tactus/cli/control.py +393 -0
- tactus/core/config_manager.py +33 -6
- tactus/core/dsl_stubs.py +102 -18
- tactus/core/execution_context.py +265 -8
- tactus/core/lua_sandbox.py +8 -9
- tactus/core/registry.py +19 -2
- tactus/core/runtime.py +235 -27
- tactus/docker/Dockerfile.pypi +49 -0
- tactus/docs/__init__.py +33 -0
- tactus/docs/extractor.py +326 -0
- tactus/docs/html_renderer.py +72 -0
- tactus/docs/models.py +121 -0
- tactus/docs/templates/base.html +204 -0
- tactus/docs/templates/index.html +58 -0
- tactus/docs/templates/module.html +96 -0
- tactus/dspy/agent.py +403 -22
- tactus/dspy/broker_lm.py +57 -6
- tactus/dspy/config.py +14 -3
- tactus/dspy/history.py +2 -1
- tactus/dspy/module.py +136 -11
- tactus/dspy/signature.py +0 -1
- tactus/ide/config_server.py +536 -0
- tactus/ide/server.py +345 -21
- tactus/primitives/human.py +619 -47
- tactus/primitives/system.py +0 -1
- tactus/protocols/__init__.py +25 -0
- tactus/protocols/control.py +427 -0
- tactus/protocols/notification.py +207 -0
- tactus/sandbox/container_runner.py +79 -11
- tactus/sandbox/docker_manager.py +23 -0
- tactus/sandbox/entrypoint.py +26 -0
- tactus/sandbox/protocol.py +3 -0
- tactus/stdlib/README.md +77 -0
- tactus/stdlib/__init__.py +27 -1
- tactus/stdlib/classify/__init__.py +165 -0
- tactus/stdlib/classify/classify.spec.tac +195 -0
- tactus/stdlib/classify/classify.tac +257 -0
- tactus/stdlib/classify/fuzzy.py +282 -0
- tactus/stdlib/classify/llm.py +319 -0
- tactus/stdlib/classify/primitive.py +287 -0
- tactus/stdlib/core/__init__.py +57 -0
- tactus/stdlib/core/base.py +320 -0
- tactus/stdlib/core/confidence.py +211 -0
- tactus/stdlib/core/models.py +161 -0
- tactus/stdlib/core/retry.py +171 -0
- tactus/stdlib/core/validation.py +274 -0
- tactus/stdlib/extract/__init__.py +125 -0
- tactus/stdlib/extract/llm.py +330 -0
- tactus/stdlib/extract/primitive.py +256 -0
- tactus/stdlib/tac/tactus/classify/base.tac +51 -0
- tactus/stdlib/tac/tactus/classify/fuzzy.tac +87 -0
- tactus/stdlib/tac/tactus/classify/index.md +77 -0
- tactus/stdlib/tac/tactus/classify/init.tac +29 -0
- tactus/stdlib/tac/tactus/classify/llm.tac +150 -0
- tactus/stdlib/tac/tactus/classify.spec.tac +191 -0
- tactus/stdlib/tac/tactus/extract/base.tac +138 -0
- tactus/stdlib/tac/tactus/extract/index.md +96 -0
- tactus/stdlib/tac/tactus/extract/init.tac +27 -0
- tactus/stdlib/tac/tactus/extract/llm.tac +201 -0
- tactus/stdlib/tac/tactus/extract.spec.tac +153 -0
- tactus/stdlib/tac/tactus/generate/base.tac +142 -0
- tactus/stdlib/tac/tactus/generate/index.md +195 -0
- tactus/stdlib/tac/tactus/generate/init.tac +28 -0
- tactus/stdlib/tac/tactus/generate/llm.tac +169 -0
- tactus/stdlib/tac/tactus/generate.spec.tac +210 -0
- tactus/testing/behave_integration.py +171 -7
- tactus/testing/context.py +0 -1
- tactus/testing/evaluation_runner.py +0 -1
- tactus/testing/gherkin_parser.py +0 -1
- tactus/testing/mock_hitl.py +0 -1
- tactus/testing/mock_tools.py +0 -1
- tactus/testing/models.py +0 -1
- tactus/testing/steps/builtin.py +0 -1
- tactus/testing/steps/custom.py +81 -22
- tactus/testing/steps/registry.py +0 -1
- tactus/testing/test_runner.py +7 -1
- tactus/validation/semantic_visitor.py +11 -5
- tactus/validation/validator.py +0 -1
- {tactus-0.31.0.dist-info → tactus-0.34.1.dist-info}/METADATA +16 -2
- {tactus-0.31.0.dist-info → tactus-0.34.1.dist-info}/RECORD +101 -49
- {tactus-0.31.0.dist-info → tactus-0.34.1.dist-info}/WHEEL +0 -0
- {tactus-0.31.0.dist-info → tactus-0.34.1.dist-info}/entry_points.txt +0 -0
- {tactus-0.31.0.dist-info → tactus-0.34.1.dist-info}/licenses/LICENSE +0 -0
tactus/dspy/broker_lm.py
CHANGED

```diff
@@ -10,6 +10,7 @@ while still supporting streaming via DSPy's `streamify()` mechanism.
 
 from __future__ import annotations
 
+import logging
 from typing import Any
 
 import dspy
@@ -19,6 +20,8 @@ from litellm import ModelResponse, ModelResponseStream
 
 from tactus.broker.client import BrokerClient
 
+logger = logging.getLogger(__name__)
+
 
 def _split_provider_model(model: str) -> tuple[str, str]:
     if "/" not in model:
@@ -99,8 +102,16 @@ class BrokeredLM(dspy.BaseLM):
         caller_predict = dspy.settings.caller_predict
         caller_predict_id = id(caller_predict) if caller_predict else None
 
+        # Extract tools and tool_choice from kwargs
+        tools = merged_kwargs.get("tools")
+        tool_choice = merged_kwargs.get("tool_choice")
+
+        logger.debug(
+            f"[BROKER_LM] Calling LM with streaming={send_stream is not None}, tools={len(tools) if tools else 0}"
+        )
         if send_stream is not None:
             chunks: list[ModelResponseStream] = []
+            tool_calls_data = None
             async for event in self._client.llm_chat(
                 provider="openai",
                 model=model_id,
@@ -108,6 +119,8 @@ class BrokeredLM(dspy.BaseLM):
                 temperature=temperature,
                 max_tokens=max_tokens,
                 stream=True,
+                tools=tools,
+                tool_choice=tool_choice,
             ):
                 event_type = event.get("event")
                 if event_type == "delta":
@@ -125,23 +138,51 @@
                     continue
 
                 if event_type == "done":
+                    # Capture tool calls from done event
+                    data = event.get("data") or {}
+                    tool_calls_data = data.get("tool_calls")
+                    logger.debug(
+                        f"[BROKER_LM] Stream complete with {len(tool_calls_data) if tool_calls_data else 0} tool calls"
+                    )
                     break
 
                 if event_type == "error":
                     err = event.get("error") or {}
                     raise RuntimeError(err.get("message") or "Broker LLM error")
 
+            # Build response manually to ensure tool_calls stay as plain dicts
+            # (stream_chunk_builder might convert them to typed objects)
+            full_text = ""
             if chunks:
-
+                final_response = litellm.stream_chunk_builder(chunks)
+                if final_response.choices:
+                    message = (
+                        final_response.choices[0].get("message")
+                        if isinstance(final_response.choices[0], dict)
+                        else getattr(final_response.choices[0], "message", None)
+                    )
+                    if message:
+                        full_text = (
+                            message.get("content")
+                            if isinstance(message, dict)
+                            else getattr(message, "content", "") or ""
+                        )
+
+            message_data = {"role": "assistant", "content": full_text}
+            finish_reason = "stop"
+
+            if tool_calls_data:
+                # Keep tool calls as plain dictionaries (already in OpenAI format from broker)
+                message_data["tool_calls"] = tool_calls_data
+                finish_reason = "tool_calls"
 
-            # No streamed chunks; return an empty completion.
             return ModelResponse(
                 model=model_id,
                 choices=[
                     {
                         "index": 0,
-                        "finish_reason":
-                        "message":
+                        "finish_reason": finish_reason,
+                        "message": message_data,
                     }
                 ],
                 usage={"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
@@ -149,6 +190,7 @@
 
         # Non-streaming path
         final_text = ""
+        tool_calls_data = None
         usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
         async for event in self._client.llm_chat(
             provider="openai",
@@ -157,24 +199,33 @@
             temperature=temperature,
             max_tokens=max_tokens,
             stream=False,
+            tools=tools,
+            tool_choice=tool_choice,
         ):
             event_type = event.get("event")
             if event_type == "done":
                 data = event.get("data") or {}
                 final_text = data.get("text") or ""
+                tool_calls_data = data.get("tool_calls")
                 usage = data.get("usage") or usage
                 break
             if event_type == "error":
                 err = event.get("error") or {}
                 raise RuntimeError(err.get("message") or "Broker LLM error")
 
+        # Build message response with tool calls if present
+        message_data = {"role": "assistant", "content": final_text}
+        if tool_calls_data:
+            # Keep tool calls as plain dictionaries (already in OpenAI format from broker)
+            message_data["tool_calls"] = tool_calls_data
+
         return ModelResponse(
             model=model_id,
             choices=[
                 {
                     "index": 0,
-                    "finish_reason": "stop",
-                    "message":
+                    "finish_reason": "tool_calls" if tool_calls_data else "stop",
+                    "message": message_data,
                 }
             ],
             usage=usage,
```
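Taken together, these broker_lm.py changes thread `tools` and `tool_choice` through both the streaming and non-streaming broker calls, and lift any tool calls reported in the broker's `done` event into the returned `ModelResponse`, switching `finish_reason` to `"tool_calls"`. A minimal sketch of that mapping, runnable on its own; the event payload below is illustrative, inferred from the handlers in this diff rather than a documented broker schema:

```python
from litellm import ModelResponse

# Hypothetical "done" event, shaped like the ones the handlers above read.
done_event = {
    "event": "done",
    "data": {
        "text": "",
        "tool_calls": [  # plain dicts, already in OpenAI format
            {
                "id": "call_abc",
                "type": "function",
                "function": {"name": "get_weather", "arguments": '{"city": "Oslo"}'},
            }
        ],
        "usage": {"prompt_tokens": 42, "completion_tokens": 7, "total_tokens": 49},
    },
}

data = done_event["data"]
message_data = {"role": "assistant", "content": data.get("text") or ""}
if data.get("tool_calls"):
    # Keep tool calls as plain dicts so downstream code can serialize them.
    message_data["tool_calls"] = data["tool_calls"]

response = ModelResponse(
    model="gpt-4o-mini",
    choices=[
        {
            "index": 0,
            "finish_reason": "tool_calls" if data.get("tool_calls") else "stop",
            "message": message_data,
        }
    ],
    usage=data.get("usage"),
)
print(response.choices[0].finish_reason)  # -> "tool_calls"
```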
tactus/dspy/config.py
CHANGED

```diff
@@ -9,7 +9,6 @@ from typing import Optional, Any
 
 import dspy
 
-
 # Global reference to the current LM configuration
 _current_lm: Optional[dspy.BaseLM] = None
 
@@ -98,8 +97,20 @@ def configure_lm(
     # Create and configure the standard DSPy LM (LiteLLM-backed)
     lm = dspy.LM(model, **lm_kwargs)
 
-    #
-    dspy.
+    # Create adapter with native function calling enabled
+    from dspy.adapters.chat_adapter import ChatAdapter
+    import logging
+
+    logger = logging.getLogger(__name__)
+
+    adapter = ChatAdapter(use_native_function_calling=True)
+    logger.info(
+        f"[ADAPTER] Created ChatAdapter with use_native_function_calling={adapter.use_native_function_calling}"
+    )
+
+    # Set as global default with adapter
+    dspy.configure(lm=lm, adapter=adapter)
+    logger.info(f"[ADAPTER] Configured DSPy with adapter: {adapter}")
    _current_lm = lm
 
     return lm
```
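The config.py change replaces the bare global LM configuration with one that also installs a `ChatAdapter` configured for native function calling, so tool schemas travel through the provider's function-calling API instead of being serialized into prompt text. A sketch of the equivalent standalone setup; the model name is a placeholder, since `configure_lm()` builds the LM from its own arguments:

```python
import dspy
from dspy.adapters.chat_adapter import ChatAdapter

# Placeholder model; configure_lm() derives this from its parameters.
lm = dspy.LM("openai/gpt-4o-mini")

# Native function calling: tools are sent as structured schemas,
# not formatted into the prompt.
adapter = ChatAdapter(use_native_function_calling=True)

dspy.configure(lm=lm, adapter=adapter)
```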
tactus/dspy/history.py
CHANGED

```diff
@@ -79,7 +79,8 @@ class TactusHistory:
             raise ValueError("Message must include 'content' key")
 
         # Validate role
-
+        # Note: "tool" role is required for OpenAI function calling responses
+        valid_roles = ["system", "user", "assistant", "tool"]
         if message["role"] not in valid_roles:
             raise ValueError(f"Invalid role. Must be one of {valid_roles}")
 
```
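Adding `"tool"` to `valid_roles` lets tool results round-trip through `TactusHistory`; previously an OpenAI-style tool-result message would have been rejected during role validation. The shape that now passes, using the standard OpenAI format (the enclosing add/validate method is outside this hunk):

```python
# Standard OpenAI tool-result message; "role": "tool" is now accepted.
tool_result = {
    "role": "tool",
    "tool_call_id": "call_abc",   # must match the assistant's tool call id
    "name": "get_weather",
    "content": '{"temperature_c": 4}',
}

valid_roles = ["system", "user", "assistant", "tool"]
assert tool_result["role"] in valid_roles
assert "content" in tool_result  # the other check this method enforces
```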
tactus/dspy/module.py
CHANGED

```diff
@@ -64,7 +64,13 @@ class RawModule(dspy.Module):
         return [field.strip() for field in output_part.split(",")]
 
     def forward(
-        self,
+        self,
+        system_prompt: str,
+        history,
+        user_message: str,
+        available_tools: str = "",
+        tools=None,
+        **kwargs,
     ):
         """
         Forward pass with direct LM call (no formatting delimiters).
@@ -73,7 +79,8 @@
             system_prompt: System prompt (overrides init if provided)
             history: Conversation history (dspy.History, TactusHistory, or string)
             user_message: Current user message
-            available_tools: Optional tools description (for agents with tools)
+            available_tools: Optional tools description (for agents with tools) - legacy, prefer tools param
+            tools: Optional list of dspy.Tool objects for native function calling
             **kwargs: Additional args passed to LM
 
         Returns:
@@ -92,8 +99,52 @@
         # Add history messages
         if history:
             if hasattr(history, "messages"):
-                # It's a History object -
-
+                # It's a History object - sanitize messages to ensure JSON serializability
+                for msg in history.messages:
+                    logger.debug(f"[RAWMODULE] Sanitizing history message: role={msg.get('role')}")
+                    sanitized_msg = {"role": msg.get("role"), "content": msg.get("content")}
+
+                    # If message is a tool result, preserve tool_call_id and name
+                    if msg.get("role") == "tool":
+                        if "tool_call_id" in msg:
+                            sanitized_msg["tool_call_id"] = msg["tool_call_id"]
+                            logger.debug("[RAWMODULE] Preserved tool_call_id for tool message")
+                        if "name" in msg:
+                            sanitized_msg["name"] = msg["name"]
+
+                    # If message has tool_calls, ensure they're plain dicts
+                    if "tool_calls" in msg:
+                        tool_calls = msg["tool_calls"]
+                        # Convert any non-dict tool calls to dicts
+                        if tool_calls and not isinstance(tool_calls, list):
+                            tool_calls = [tool_calls]
+                        if tool_calls:
+                            sanitized_tool_calls = []
+                            for tc in tool_calls:
+                                if isinstance(tc, dict):
+                                    sanitized_tool_calls.append(tc)
+                                else:
+                                    # It's a typed object - convert to dict
+                                    tc_dict = {
+                                        "id": getattr(tc, "id", ""),
+                                        "type": getattr(tc, "type", "function"),
+                                        "function": {
+                                            "name": (
+                                                getattr(tc.function, "name", "")
+                                                if hasattr(tc, "function")
+                                                else ""
+                                            ),
+                                            "arguments": (
+                                                getattr(tc.function, "arguments", "{}")
+                                                if hasattr(tc, "function")
+                                                else "{}"
+                                            ),
+                                        },
+                                    }
+                                    logger.debug("[RAWMODULE] Converted typed tool call to dict")
+                                    sanitized_tool_calls.append(tc_dict)
+                            sanitized_msg["tool_calls"] = sanitized_tool_calls
+                    messages.append(sanitized_msg)
             elif isinstance(history, str) and history.strip():
                 # It's a formatted string - parse it
                 for line in history.strip().split("\n"):
@@ -104,7 +155,7 @@
 
         # Add current user message
         if user_message:
-            # If tools are available, include them in the user message
+            # If tools are available (legacy string format), include them in the user message
             if available_tools and "available_tools" in self.signature:
                 user_content = f"{user_message}\n\nAvailable tools:\n{available_tools}"
                 messages.append({"role": "user", "content": user_content})
@@ -116,20 +167,94 @@
         if lm is None:
             raise RuntimeError("No LM configured. Call dspy.configure(lm=...) first.")
 
+        # Convert DSPy Tool objects to LiteLLM format for native function calling
+        if tools and isinstance(tools, list) and len(tools) > 0:
+            litellm_tools = []
+            for tool in tools:
+                if hasattr(tool, "format_as_litellm_function_call"):
+                    litellm_tools.append(tool.format_as_litellm_function_call())
+            if litellm_tools:
+                kwargs["tools"] = litellm_tools
+                # Ensure tool_choice is passed if set on the LM
+                if (
+                    hasattr(lm, "kwargs")
+                    and "tool_choice" in lm.kwargs
+                    and "tool_choice" not in kwargs
+                ):
+                    kwargs["tool_choice"] = lm.kwargs["tool_choice"]
+                logger.debug(
+                    f"[RAWMODULE] Passing {len(litellm_tools)} tools to LM with tool_choice={kwargs.get('tool_choice')}"
+                )
+
+        # Log summary of messages being sent
+        logger.debug(f"[RAWMODULE] Sending {len(messages)} messages to LM")
+
         # Call LM directly - streamify() will intercept this call if streaming is enabled
         response = lm(messages=messages, **kwargs)
 
-        # Extract response text from LM result
-        # LM returns
-
+        # Extract response text and tool calls from LM result
+        # LM returns either:
+        # - list of strings (when no tool calls): ["response text"]
+        # - list of dicts (when tool calls present): [{"text": "...", "tool_calls": [...]}]
+        response_text = ""
+        tool_calls_from_lm = None
+
+        if isinstance(response, list) and len(response) > 0:
+            first_output = response[0]
+            if isinstance(first_output, dict):
+                # Response is a dict with text and possibly tool_calls
+                response_text = first_output.get("text", "")
+                tool_calls_from_lm = first_output.get("tool_calls")
+                logger.debug(
+                    f"[RAWMODULE] Extracted response with {len(tool_calls_from_lm) if tool_calls_from_lm else 0} tool calls"
+                )
+            else:
+                # Response is a plain string
+                response_text = str(first_output)
+        else:
+            response_text = str(response)
 
         # Build prediction result based on signature
         prediction_kwargs = {"response": response_text}
 
-        # If signature includes tool_calls,
-        # (Real tool call parsing would happen here in a full implementation)
+        # If signature includes tool_calls, use the tool_calls we extracted from the LM response
         if "tool_calls" in self.output_fields:
-
+            if tool_calls_from_lm:
+                # Convert to DSPy ToolCalls format
+                # tool_calls_from_lm is a list of ChatCompletionMessageToolCall objects from LiteLLM
+                from dspy.adapters.types.tool import ToolCalls
+                import json
+
+                tool_calls_list = []
+                for tc in tool_calls_from_lm:
+                    # Handle both dict and object access patterns
+                    func_name = (
+                        tc.get("function", {}).get("name")
+                        if isinstance(tc, dict)
+                        else tc.function.name
+                    )
+                    func_args = (
+                        tc.get("function", {}).get("arguments")
+                        if isinstance(tc, dict)
+                        else tc.function.arguments
+                    )
+                    tool_calls_list.append(
+                        {
+                            "name": func_name,
+                            "args": (
+                                json.loads(func_args) if isinstance(func_args, str) else func_args
+                            ),
+                        }
+                    )
+                prediction_kwargs["tool_calls"] = ToolCalls.from_dict_list(tool_calls_list)
+                logger.debug(
+                    f"[RAWMODULE] Converted {len(tool_calls_list)} tool calls to DSPy format"
+                )
+            else:
+                # No tool calls in response
+                from dspy.adapters.types.tool import ToolCalls
+
+                prediction_kwargs["tool_calls"] = ToolCalls.from_dict_list([])
 
         # Return as Prediction for DSPy compatibility
         return dspy.Prediction(**prediction_kwargs)
```
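The core of the module.py change is the last hunk: raw LM output, which may now be a dict carrying OpenAI-format tool-call dicts, is converted into DSPy's `ToolCalls` type before being returned in the `Prediction`. The conversion in isolation, with input shaped like the broker output above (tool name and arguments are illustrative):

```python
import json
from dspy.adapters.types.tool import ToolCalls

# OpenAI-format tool calls as plain dicts, as the LM/broker returns them.
raw_tool_calls = [
    {
        "id": "call_abc",
        "type": "function",
        "function": {"name": "get_weather", "arguments": '{"city": "Oslo"}'},
    }
]

# Reduce each call to the {"name", "args"} shape ToolCalls expects,
# decoding the JSON-encoded arguments string along the way.
tool_calls_list = [
    {
        "name": tc["function"]["name"],
        "args": json.loads(tc["function"]["arguments"]),
    }
    for tc in raw_tool_calls
]

tool_calls = ToolCalls.from_dict_list(tool_calls_list)
```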