planar-0.7.0-py3-none-any.whl → planar-0.9.0-py3-none-any.whl
- planar/_version.py +1 -1
- planar/ai/agent.py +169 -318
- planar/ai/agent_base.py +166 -0
- planar/ai/agent_utils.py +4 -69
- planar/ai/models.py +30 -0
- planar/ai/pydantic_ai.py +86 -17
- planar/ai/test_agent_serialization.py +1 -1
- planar/app.py +1 -7
- planar/config.py +2 -0
- planar/data/__init__.py +17 -0
- planar/data/config.py +49 -0
- planar/data/dataset.py +272 -0
- planar/data/exceptions.py +19 -0
- planar/data/test_dataset.py +354 -0
- planar/dependencies.py +30 -0
- planar/routers/agents_router.py +52 -4
- planar/routers/test_agents_router.py +1 -1
- planar/routers/test_routes_security.py +3 -2
- planar/rules/__init__.py +12 -18
- planar/scaffold_templates/planar.dev.yaml.j2 +9 -0
- planar/scaffold_templates/planar.prod.yaml.j2 +14 -0
- planar/testing/workflow_observer.py +2 -2
- planar/workflows/notifications.py +39 -3
- {planar-0.7.0.dist-info → planar-0.9.0.dist-info}/METADATA +5 -1
- {planar-0.7.0.dist-info → planar-0.9.0.dist-info}/RECORD +27 -24
- planar/ai/providers.py +0 -1088
- planar/ai/pydantic_ai_agent.py +0 -329
- planar/ai/test_agent.py +0 -1298
- planar/ai/test_providers.py +0 -463
- {planar-0.7.0.dist-info → planar-0.9.0.dist-info}/WHEEL +0 -0
- {planar-0.7.0.dist-info → planar-0.9.0.dist-info}/entry_points.txt +0 -0
planar/ai/pydantic_ai_agent.py
DELETED
@@ -1,329 +0,0 @@
-import inspect
-from dataclasses import dataclass
-from typing import Any, Type, cast
-
-from pydantic import BaseModel
-from pydantic_ai import models
-
-from planar.ai.agent import AgentBase
-from planar.ai.agent_utils import (
-    AgentEventType,
-    ToolCallResult,
-    create_tool_definition,
-    extract_files_from_model,
-    get_agent_config,
-    render_template,
-)
-from planar.ai.models import (
-    AgentRunResult,
-    AssistantMessage,
-    ModelMessage,
-    SystemMessage,
-    ToolDefinition,
-    ToolMessage,
-    ToolResponse,
-    UserMessage,
-)
-from planar.ai.providers import ModelSpec
-from planar.ai.pydantic_ai import ModelRunResponse, model_run
-from planar.logging import get_logger
-from planar.utils import utc_now
-from planar.workflows.models import StepType
-
-logger = get_logger(__name__)
-
-
-@dataclass
-class Agent[
-    TInput: BaseModel | str,
-    TOutput: BaseModel | str,
-](AgentBase[TInput, TOutput]):
-    model: models.KnownModelName | models.Model = "openai:gpt-4o"
-
-    async def run_step(
-        self,
-        input_value: TInput,
-    ) -> AgentRunResult[TOutput]:
-        """Execute the agent with the provided inputs.
-
-        Args:
-            input_value: The primary input value to the agent, can be a string or Pydantic model
-            **kwargs: Alternative way to pass inputs as keyword arguments
-
-        Returns:
-            AgentRunResult containing the agent's response
-        """
-        event_emitter = self.event_emitter
-        logger.debug(
-            "agent run_step called", agent_name=self.name, input_type=type(input_value)
-        )
-        result = None
-
-        config = await get_agent_config(self.name, self.to_config())
-        logger.debug("agent using config", agent_name=self.name, config=config)
-
-        input_map: dict[str, str | dict[str, Any]] = {}
-
-        files = extract_files_from_model(input_value)
-        logger.debug(
-            "extracted files from input for agent",
-            num_files=len(files),
-            agent_name=self.name,
-        )
-        match input_value:
-            case BaseModel():
-                if self.input_type and not isinstance(input_value, self.input_type):
-                    logger.warning(
-                        "input value type mismatch for agent",
-                        agent_name=self.name,
-                        expected_type=self.input_type,
-                        got_type=type(input_value),
-                    )
-                    raise ValueError(
-                        f"Input value must be of type {self.input_type}, but got {type(input_value)}"
-                    )
-                input_map["input"] = cast(BaseModel, input_value).model_dump()
-            case str():
-                input_map["input"] = input_value
-            case _:
-                logger.warning(
-                    "unexpected input value type for agent",
-                    agent_name=self.name,
-                    type=type(input_value),
-                )
-                raise ValueError(f"Unexpected input value type: {type(input_value)}")
-
-        # Add built-in variables
-        # TODO: Make deterministic or step
-        built_in_vars = {
-            "datetime_now": utc_now().isoformat(),
-            "date_today": utc_now().date().isoformat(),
-        }
-        input_map.update(built_in_vars)
-
-        # Format the prompts with the provided arguments using Jinja templates
-        try:
-            formatted_system_prompt = (
-                render_template(config.system_prompt, input_map)
-                if config.system_prompt
-                else ""
-            )
-            formatted_user_prompt = (
-                render_template(config.user_prompt, input_map)
-                if config.user_prompt
-                else ""
-            )
-        except ValueError as e:
-            logger.exception("error formatting prompts for agent", agent_name=self.name)
-            raise ValueError(f"Missing required parameter for prompt formatting: {e}")
-
-        # Get the LLM provider and model
-        if isinstance(self.model, str):
-            model = models.infer_model(self.model)
-        else:
-            model = self.model
-
-        # Apply model parameters if specified
-        model_settings = None
-        if config.model_parameters:
-            model_settings = config.model_parameters
-
-        # Prepare structured messages
-        messages: list[ModelMessage] = []
-        if formatted_system_prompt:
-            messages.append(SystemMessage(content=formatted_system_prompt))
-
-        if formatted_user_prompt:
-            messages.append(UserMessage(content=formatted_user_prompt, files=files))
-
-        # Prepare tools if provided
-        tool_definitions = None
-        if self.tools:
-            tool_definitions = [create_tool_definition(tool) for tool in self.tools]
-
-        # Determine output type for the agent call
-        # Pass the Pydantic model type if output_type is a subclass of BaseModel,
-        # otherwise pass None (indicating string output is expected).
-        output_type: Type[BaseModel] | None = None
-        # Use issubclass safely by checking if output_type is a type first
-        if inspect.isclass(self.output_type) and issubclass(
-            self.output_type, BaseModel
-        ):
-            output_type = cast(Type[BaseModel], self.output_type)
-
-        # Execute the LLM call
-        max_turns = config.max_turns
-
-        # We use this inner function to pass "model" and "event_emitter",
-        # which are not serializable as step parameters.
-        async def agent_run_step(
-            model_spec: ModelSpec,
-            messages: list[ModelMessage],
-            turns_left: int,
-            tools: list[ToolDefinition] | None = None,
-            output_type: Type[BaseModel] | None = None,
-        ):
-            logger.debug(
-                "agent running",
-                agent_name=self.name,
-                model=model_spec,
-                model_settings=model_settings,
-                output_type=output_type,
-            )
-            if output_type is None:
-                return await model_run(
-                    model=model,
-                    max_extra_turns=turns_left,
-                    model_settings=model_settings,
-                    messages=messages,
-                    tools=tools or [],
-                    event_handler=cast(Any, event_emitter),
-                )
-            else:
-                return await model_run(
-                    model=model,
-                    max_extra_turns=turns_left,
-                    model_settings=model_settings,
-                    messages=messages,
-                    output_type=output_type,
-                    tools=tools or [],
-                    event_handler=cast(Any, event_emitter),
-                )
-
-        model_spec = ModelSpec(
-            model_id=str(model),
-            parameters=config.model_parameters,
-        )
-        result = None
-        logger.debug(
-            "agent performing multi-turn completion with tools",
-            agent_name=self.name,
-            max_turns=max_turns,
-        )
-        turns_left = max_turns
-        while turns_left > 0:
-            turns_left -= 1
-            logger.debug("agent turn", agent_name=self.name, turns_left=turns_left)
-
-            # Get model response
-            run_response = await self.as_step_if_durable(
-                agent_run_step,
-                step_type=StepType.AGENT,
-                return_type=ModelRunResponse[output_type or str],
-            )(
-                model_spec=model_spec,
-                messages=messages,
-                turns_left=turns_left,
-                output_type=output_type,
-                tools=tool_definitions or [],
-            )
-            response = run_response.response
-            turns_left -= run_response.extra_turns_used
-
-            # Emit response event if event_emitter is provided
-            if event_emitter:
-                event_emitter.emit(AgentEventType.RESPONSE, response.content)
-
-            # If no tool calls or last turn, return content
-            if not response.tool_calls or turns_left == 0:
-                logger.debug(
-                    "agent completion: no tool calls or last turn",
-                    agent_name=self.name,
-                    has_content=response.content is not None,
-                )
-                result = response.content
-                break
-
-            # Process tool calls
-            logger.debug(
-                "agent received tool calls",
-                agent_name=self.name,
-                num_tool_calls=len(response.tool_calls),
-            )
-            assistant_message = AssistantMessage(
-                content=None,
-                tool_calls=response.tool_calls,
-            )
-            messages.append(assistant_message)
-
-            # Execute each tool and add tool responses to messages
-            for tool_call_idx, tool_call in enumerate(response.tool_calls):
-                logger.debug(
-                    "agent processing tool call",
-                    agent_name=self.name,
-                    tool_call_index=tool_call_idx + 1,
-                    tool_call_id=tool_call.id,
-                    tool_call_name=tool_call.name,
-                )
-                # Find the matching tool function
-                tool_fn = next(
-                    (t for t in self.tools if t.__name__ == tool_call.name),
-                    None,
-                )
-
-                if not tool_fn:
-                    tool_result = f"Error: Tool '{tool_call.name}' not found."
-                    logger.warning(
-                        "tool not found for agent",
-                        tool_name=tool_call.name,
-                        agent_name=self.name,
-                    )
-                else:
-                    # Execute the tool with the provided arguments
-                    tool_result = await self.as_step_if_durable(
-                        tool_fn,
-                        step_type=StepType.TOOL_CALL,
-                    )(**tool_call.arguments)
-                    logger.info(
-                        "tool executed by agent",
-                        tool_name=tool_call.name,
-                        agent_name=self.name,
-                        result_type=type(tool_result),
-                    )
-
-                # Create a tool response
-                tool_response = ToolResponse(
-                    tool_call_id=tool_call.id or "call_1", content=str(tool_result)
-                )
-
-                # Emit tool response event if event_emitter is provided
-                if event_emitter:
-                    event_emitter.emit(
-                        AgentEventType.TOOL_RESPONSE,
-                        ToolCallResult(
-                            tool_call_id=tool_call.id or "call_1",
-                            tool_call_name=tool_call.name,
-                            content=tool_result,
-                        ),
-                    )
-
-                tool_message = ToolMessage(
-                    content=tool_response.content,
-                    tool_call_id=tool_response.tool_call_id or "call_1",
-                )
-                messages.append(tool_message)
-
-            # Continue to next turn
-
-        if result is None:
-            logger.warning(
-                "agent completed tool interactions but result is none",
-                agent_name=self.name,
-                expected_type=self.output_type,
-            )
-            raise ValueError(
-                f"Expected result of type {self.output_type} but got none after tool interactions."
-            )
-
-        if event_emitter:
-            event_emitter.emit(AgentEventType.COMPLETED, result)
-
-        logger.info(
-            "agent completed",
-            agent_name=self.name,
-            final_result_type=type(result),
-        )
-        return AgentRunResult[TOutput](output=cast(TOutput, result))
-
-    def get_model_str(self) -> str:
-        return str(self.model)