planar-0.5.0-py3-none-any.whl → planar-0.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- planar/_version.py +1 -1
- planar/ai/agent.py +155 -283
- planar/ai/agent_base.py +170 -0
- planar/ai/agent_utils.py +7 -0
- planar/ai/pydantic_ai.py +638 -0
- planar/ai/test_agent_serialization.py +1 -1
- planar/app.py +64 -20
- planar/cli.py +39 -27
- planar/config.py +45 -36
- planar/db/db.py +2 -1
- planar/files/storage/azure_blob.py +343 -0
- planar/files/storage/base.py +7 -0
- planar/files/storage/config.py +70 -7
- planar/files/storage/s3.py +6 -6
- planar/files/storage/test_azure_blob.py +435 -0
- planar/logging/formatter.py +17 -4
- planar/logging/test_formatter.py +327 -0
- planar/registry_items.py +2 -1
- planar/routers/agents_router.py +3 -1
- planar/routers/files.py +11 -2
- planar/routers/models.py +14 -1
- planar/routers/test_agents_router.py +1 -1
- planar/routers/test_files_router.py +49 -0
- planar/routers/test_routes_security.py +5 -7
- planar/routers/test_workflow_router.py +270 -3
- planar/routers/workflow.py +95 -36
- planar/rules/models.py +36 -39
- planar/rules/test_data/account_dormancy_management.json +223 -0
- planar/rules/test_data/airline_loyalty_points_calculator.json +262 -0
- planar/rules/test_data/applicant_risk_assessment.json +435 -0
- planar/rules/test_data/booking_fraud_detection.json +407 -0
- planar/rules/test_data/cellular_data_rollover_system.json +258 -0
- planar/rules/test_data/clinical_trial_eligibility_screener.json +437 -0
- planar/rules/test_data/customer_lifetime_value.json +143 -0
- planar/rules/test_data/import_duties_calculator.json +289 -0
- planar/rules/test_data/insurance_prior_authorization.json +443 -0
- planar/rules/test_data/online_check_in_eligibility_system.json +254 -0
- planar/rules/test_data/order_consolidation_system.json +375 -0
- planar/rules/test_data/portfolio_risk_monitor.json +471 -0
- planar/rules/test_data/supply_chain_risk.json +253 -0
- planar/rules/test_data/warehouse_cross_docking.json +237 -0
- planar/rules/test_rules.py +750 -6
- planar/scaffold_templates/planar.dev.yaml.j2 +6 -6
- planar/scaffold_templates/planar.prod.yaml.j2 +9 -5
- planar/scaffold_templates/pyproject.toml.j2 +1 -1
- planar/security/auth_context.py +21 -0
- planar/security/{jwt_middleware.py → auth_middleware.py} +70 -17
- planar/security/authorization.py +9 -15
- planar/security/tests/test_auth_middleware.py +162 -0
- planar/sse/proxy.py +4 -9
- planar/test_app.py +92 -1
- planar/test_cli.py +81 -59
- planar/test_config.py +17 -14
- planar/testing/fixtures.py +325 -0
- planar/testing/planar_test_client.py +5 -2
- planar/utils.py +41 -1
- planar/workflows/execution.py +1 -1
- planar/workflows/orchestrator.py +5 -0
- planar/workflows/serialization.py +12 -6
- planar/workflows/step_core.py +3 -1
- planar/workflows/test_serialization.py +9 -1
- {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/METADATA +30 -5
- planar-0.8.0.dist-info/RECORD +166 -0
- planar/.__init__.py.un~ +0 -0
- planar/._version.py.un~ +0 -0
- planar/.app.py.un~ +0 -0
- planar/.cli.py.un~ +0 -0
- planar/.config.py.un~ +0 -0
- planar/.context.py.un~ +0 -0
- planar/.db.py.un~ +0 -0
- planar/.di.py.un~ +0 -0
- planar/.engine.py.un~ +0 -0
- planar/.files.py.un~ +0 -0
- planar/.log_context.py.un~ +0 -0
- planar/.log_metadata.py.un~ +0 -0
- planar/.logging.py.un~ +0 -0
- planar/.object_registry.py.un~ +0 -0
- planar/.otel.py.un~ +0 -0
- planar/.server.py.un~ +0 -0
- planar/.session.py.un~ +0 -0
- planar/.sqlalchemy.py.un~ +0 -0
- planar/.task_local.py.un~ +0 -0
- planar/.test_app.py.un~ +0 -0
- planar/.test_config.py.un~ +0 -0
- planar/.test_object_config.py.un~ +0 -0
- planar/.test_sqlalchemy.py.un~ +0 -0
- planar/.test_utils.py.un~ +0 -0
- planar/.util.py.un~ +0 -0
- planar/.utils.py.un~ +0 -0
- planar/ai/.__init__.py.un~ +0 -0
- planar/ai/._models.py.un~ +0 -0
- planar/ai/.agent.py.un~ +0 -0
- planar/ai/.agent_utils.py.un~ +0 -0
- planar/ai/.events.py.un~ +0 -0
- planar/ai/.files.py.un~ +0 -0
- planar/ai/.models.py.un~ +0 -0
- planar/ai/.providers.py.un~ +0 -0
- planar/ai/.pydantic_ai.py.un~ +0 -0
- planar/ai/.pydantic_ai_agent.py.un~ +0 -0
- planar/ai/.pydantic_ai_provider.py.un~ +0 -0
- planar/ai/.step.py.un~ +0 -0
- planar/ai/.test_agent.py.un~ +0 -0
- planar/ai/.test_agent_serialization.py.un~ +0 -0
- planar/ai/.test_providers.py.un~ +0 -0
- planar/ai/.utils.py.un~ +0 -0
- planar/ai/providers.py +0 -1088
- planar/ai/test_agent.py +0 -1298
- planar/ai/test_providers.py +0 -463
- planar/db/.db.py.un~ +0 -0
- planar/files/.config.py.un~ +0 -0
- planar/files/.local.py.un~ +0 -0
- planar/files/.local_filesystem.py.un~ +0 -0
- planar/files/.model.py.un~ +0 -0
- planar/files/.models.py.un~ +0 -0
- planar/files/.s3.py.un~ +0 -0
- planar/files/.storage.py.un~ +0 -0
- planar/files/.test_files.py.un~ +0 -0
- planar/files/storage/.__init__.py.un~ +0 -0
- planar/files/storage/.base.py.un~ +0 -0
- planar/files/storage/.config.py.un~ +0 -0
- planar/files/storage/.context.py.un~ +0 -0
- planar/files/storage/.local_directory.py.un~ +0 -0
- planar/files/storage/.test_local_directory.py.un~ +0 -0
- planar/files/storage/.test_s3.py.un~ +0 -0
- planar/human/.human.py.un~ +0 -0
- planar/human/.test_human.py.un~ +0 -0
- planar/logging/.__init__.py.un~ +0 -0
- planar/logging/.attributes.py.un~ +0 -0
- planar/logging/.formatter.py.un~ +0 -0
- planar/logging/.logger.py.un~ +0 -0
- planar/logging/.otel.py.un~ +0 -0
- planar/logging/.tracer.py.un~ +0 -0
- planar/modeling/.mixin.py.un~ +0 -0
- planar/modeling/.storage.py.un~ +0 -0
- planar/modeling/orm/.planar_base_model.py.un~ +0 -0
- planar/object_config/.object_config.py.un~ +0 -0
- planar/routers/.__init__.py.un~ +0 -0
- planar/routers/.agents_router.py.un~ +0 -0
- planar/routers/.crud.py.un~ +0 -0
- planar/routers/.decision.py.un~ +0 -0
- planar/routers/.event.py.un~ +0 -0
- planar/routers/.file_attachment.py.un~ +0 -0
- planar/routers/.files.py.un~ +0 -0
- planar/routers/.files_router.py.un~ +0 -0
- planar/routers/.human.py.un~ +0 -0
- planar/routers/.info.py.un~ +0 -0
- planar/routers/.models.py.un~ +0 -0
- planar/routers/.object_config_router.py.un~ +0 -0
- planar/routers/.rule.py.un~ +0 -0
- planar/routers/.test_object_config_router.py.un~ +0 -0
- planar/routers/.test_workflow_router.py.un~ +0 -0
- planar/routers/.workflow.py.un~ +0 -0
- planar/rules/.decorator.py.un~ +0 -0
- planar/rules/.runner.py.un~ +0 -0
- planar/rules/.test_rules.py.un~ +0 -0
- planar/security/.jwt_middleware.py.un~ +0 -0
- planar/sse/.constants.py.un~ +0 -0
- planar/sse/.example.html.un~ +0 -0
- planar/sse/.hub.py.un~ +0 -0
- planar/sse/.model.py.un~ +0 -0
- planar/sse/.proxy.py.un~ +0 -0
- planar/testing/.client.py.un~ +0 -0
- planar/testing/.memory_storage.py.un~ +0 -0
- planar/testing/.planar_test_client.py.un~ +0 -0
- planar/testing/.predictable_tracer.py.un~ +0 -0
- planar/testing/.synchronizable_tracer.py.un~ +0 -0
- planar/testing/.test_memory_storage.py.un~ +0 -0
- planar/testing/.workflow_observer.py.un~ +0 -0
- planar/workflows/.__init__.py.un~ +0 -0
- planar/workflows/.builtin_steps.py.un~ +0 -0
- planar/workflows/.concurrency_tracing.py.un~ +0 -0
- planar/workflows/.context.py.un~ +0 -0
- planar/workflows/.contrib.py.un~ +0 -0
- planar/workflows/.decorators.py.un~ +0 -0
- planar/workflows/.durable_test.py.un~ +0 -0
- planar/workflows/.errors.py.un~ +0 -0
- planar/workflows/.events.py.un~ +0 -0
- planar/workflows/.exceptions.py.un~ +0 -0
- planar/workflows/.execution.py.un~ +0 -0
- planar/workflows/.human.py.un~ +0 -0
- planar/workflows/.lock.py.un~ +0 -0
- planar/workflows/.misc.py.un~ +0 -0
- planar/workflows/.model.py.un~ +0 -0
- planar/workflows/.models.py.un~ +0 -0
- planar/workflows/.notifications.py.un~ +0 -0
- planar/workflows/.orchestrator.py.un~ +0 -0
- planar/workflows/.runtime.py.un~ +0 -0
- planar/workflows/.serialization.py.un~ +0 -0
- planar/workflows/.step.py.un~ +0 -0
- planar/workflows/.step_core.py.un~ +0 -0
- planar/workflows/.sub_workflow_runner.py.un~ +0 -0
- planar/workflows/.sub_workflow_scheduler.py.un~ +0 -0
- planar/workflows/.test_concurrency.py.un~ +0 -0
- planar/workflows/.test_concurrency_detection.py.un~ +0 -0
- planar/workflows/.test_human.py.un~ +0 -0
- planar/workflows/.test_lock_timeout.py.un~ +0 -0
- planar/workflows/.test_orchestrator.py.un~ +0 -0
- planar/workflows/.test_race_conditions.py.un~ +0 -0
- planar/workflows/.test_serialization.py.un~ +0 -0
- planar/workflows/.test_suspend_deserialization.py.un~ +0 -0
- planar/workflows/.test_workflow.py.un~ +0 -0
- planar/workflows/.tracing.py.un~ +0 -0
- planar/workflows/.types.py.un~ +0 -0
- planar/workflows/.util.py.un~ +0 -0
- planar/workflows/.utils.py.un~ +0 -0
- planar/workflows/.workflow.py.un~ +0 -0
- planar/workflows/.workflow_wrapper.py.un~ +0 -0
- planar/workflows/.wrappers.py.un~ +0 -0
- planar-0.5.0.dist-info/RECORD +0 -289
- {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/WHEEL +0 -0
- {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/entry_points.txt +0 -0
planar/ai/providers.py
DELETED
@@ -1,1088 +0,0 @@
-"""
-Provider module for AI model integrations.
-"""
-
-import base64
-import json
-import re
-from abc import ABC, abstractmethod
-from typing import Any, Literal, Type, TypeAlias
-
-# TODO: Make provider imports lazy based on providers instealled
-from openai import AsyncOpenAI
-from pydantic import BaseModel
-from pydantic_ai import messages as pydantic_ai
-from pydantic_ai._output import OutputObjectDefinition
-from pydantic_ai.direct import model_request as pydantic_ai_model_request
-from pydantic_ai.models import (
-    ModelRequestParameters as PydanticAIModelRequestParameters,
-)
-from pydantic_ai.tools import ToolDefinition as PydanticAIToolDefinition
-
-from planar.logging import get_logger
-from planar.session import get_config
-
-from .models import (
-    AssistantMessage,
-    Base64Content,
-    CompletionResponse,
-    FileContent,
-    FileIdContent,
-    FileMap,
-    ModelMessage,
-    SystemMessage,
-    T,
-    ToolCall,
-    ToolDefinition,
-    ToolMessage,
-    ToolResponse,
-    UserMessage,
-)
-
-logger = get_logger(__name__)
-
-AnthropicKwargs: TypeAlias = dict[Literal["api_key", "base_url"], str]
-
-
-class ModelSpec(BaseModel):
-    """Pydantic model for AI model specifications."""
-
-    model_id: str
-    parameters: dict[str, Any] = {}
-
-
-class Model:
-    """Base class for AI model specifications."""
-
-    provider_class: Type["Provider"]  # set by subclasses
-    name: str
-
-    def __init__(self, model_id: str):
-        self.model_spec = ModelSpec(model_id=model_id)
-
-    def with_parameters(self, **kwargs) -> "Model":
-        updated_params = self.model_spec.parameters.copy()
-        updated_params.update(kwargs)
-        new_instance = self.__class__(self.model_spec.model_id)
-        new_instance.model_spec.parameters = updated_params
-        return new_instance
-
-    def __str__(self) -> str:
-        return f"{self.name}:{self.model_spec.model_id}"
-
-    def __repr__(self) -> str:
-        return self.__str__()
-
-
-class Provider(ABC):
-    """Base class for AI model providers with tool support."""
-
-    @staticmethod
-    @abstractmethod
-    async def complete(
-        model_spec: ModelSpec,
-        messages: list[ModelMessage],
-        output_type: Type[T] | None = None,
-        tools: list[ToolDefinition] | None = None,
-    ) -> CompletionResponse[T | str]:
-        """
-        Generate a completion, potentially using tools.
-
-        Args:
-            model_spec: The model specification to use.
-            messages: List of structured messages for the model.
-            output_type: Optional desired output type (Pydantic model) for structured output.
-            tools: Optional list of tools the model can use.
-
-        Returns:
-            CompletionResponse containing either content or tool calls.
-        """
-        pass
-
-    @staticmethod
-    @abstractmethod
-    def model(model_id: str) -> Model:
-        """Create a model instance for a custom model ID."""
-        pass
-
-    @staticmethod
-    @abstractmethod
-    def format_tool_response(tool_response: ToolResponse) -> ToolMessage:
-        """Format a tool response into a message for the provider.
-
-        Args:
-            tool_response: The tool response to format.
-
-        Returns:
-            A formatted tool message for the provider.
-        """
-        raise NotImplementedError("Subclasses must implement format_tool_response")
-
-    @staticmethod
-    @abstractmethod
-    def prepare_messages(
-        messages: list[ModelMessage], file_map: FileMap
-    ) -> list[dict[str, Any]]:
-        """Prepare messages from Planar representations into the format expected by the provider, including file upload or conversion.
-
-        Args:
-            messages: List of structured messages.
-
-        Returns:
-            List of messages in the format expected by the provider.
-        """
-        raise NotImplementedError("Subclasses must implement prepare_messages")
-
-
-class OpenAIProvider(Provider):
-    """OpenAI provider implementation."""
-
-    @staticmethod
-    def format_tool_response(tool_response: ToolResponse) -> ToolMessage:
-        """Format a tool response into a message for OpenAI.
-
-        Args:
-            tool_response: The tool response to format.
-
-        Returns:
-            A formatted tool message.
-        """
-        return ToolMessage(
-            content=tool_response.content,
-            tool_call_id=tool_response.tool_call_id or "call_1",
-        )
-
-    @staticmethod
-    def prepare_messages(
-        messages: list[ModelMessage], file_map: FileMap | None = None
-    ) -> list[dict[str, Any]]:
-        """Prepare messages from Planar representations into the format expected by the provider, including file upload or conversion.
-
-        Args:
-            messages: List of structured messages.
-
-        Returns:
-            List of messages in OpenAI format.
-        """
-
-        formatted_messages = []
-
-        for message in messages:
-            if isinstance(message, SystemMessage):
-                formatted_messages.append(
-                    {"role": "system", "content": message.content}
-                )
-            elif isinstance(message, UserMessage):
-                content = []
-                files: list[FileContent] = []
-                if message.files:
-                    if not file_map:
-                        raise ValueError("File map empty while user message has files.")
-                    for file in message.files:
-                        if str(file.id) not in file_map.mapping:
-                            raise ValueError(
-                                f"File {file} not found in file map {file_map}."
-                            )
-                        files.append(file_map.mapping[str(file.id)])
-
-                if files:
-                    for file in files:
-                        match file:
-                            case Base64Content():
-                                content.extend(
-                                    [
-                                        {
-                                            "type": "image_url",
-                                            "image_url": {
-                                                "url": f"data:{file.content_type};base64,{file.content}",
-                                            },
-                                        }
-                                    ]
-                                )
-                            case FileIdContent():
-                                content.extend(
-                                    [
-                                        {
-                                            "type": "file",
-                                            "file": {"file_id": file.content},
-                                        }
-                                    ]
-                                )
-                            case _:
-                                raise ValueError(f"Unsupported file type: {type(file)}")
-
-                content.append({"type": "text", "text": message.content})
-                formatted_messages.append({"role": "user", "content": content})
-            elif isinstance(message, ToolMessage):
-                formatted_messages.append(
-                    {
-                        "role": "tool",
-                        "tool_call_id": message.tool_call_id,
-                        "content": message.content,
-                    }
-                )
-            elif isinstance(message, AssistantMessage):
-                if message.tool_calls:
-                    assistant_msg = {
-                        "role": "assistant",
-                        "content": message.content,
-                        "tool_calls": [],
-                    }
-
-                    for tool_call in message.tool_calls:
-                        formatted_tool_call = {
-                            "id": tool_call.id
-                            or f"call_{len(assistant_msg['tool_calls']) + 1}",
-                            "type": "function",
-                            "function": {
-                                "name": tool_call.name,
-                                "arguments": json.dumps(tool_call.arguments),
-                            },
-                        }
-                        assistant_msg["tool_calls"].append(formatted_tool_call)
-
-                    formatted_messages.append(assistant_msg)
-                else:
-                    formatted_messages.append(
-                        {"role": "assistant", "content": message.content}
-                    )
-
-        return formatted_messages
-
-    @staticmethod
-    async def _build_file_map(
-        client: AsyncOpenAI, messages: list[ModelMessage]
-    ) -> FileMap:
-        logger.debug("building file map", num_messages=len(messages))
-        file_dict = {}
-        for message_idx, message in enumerate(messages):
-            if isinstance(message, UserMessage) and message.files:
-                logger.debug(
-                    "processing files in message",
-                    num_files=len(message.files),
-                    message_index=message_idx,
-                )
-                for file_idx, file in enumerate(message.files):
-                    logger.debug(
-                        "processing file",
-                        file_index=file_idx,
-                        file_id=file.id,
-                        content_type=file.content_type,
-                    )
-                    match file.content_type:
-                        case "application/pdf":
-                            logger.debug(
-                                "uploading pdf file to openai", filename=file.filename
-                            )
-                            # upload the file to the provider
-                            openai_file = await client.files.create(
-                                file=(
-                                    file.filename,
-                                    await file.get_content(),
-                                    file.content_type,
-                                ),
-                                purpose="user_data",
-                            )
-                            logger.info(
-                                "uploaded pdf file to openai",
-                                filename=file.filename,
-                                openai_file_id=openai_file.id,
-                            )
-                            file_dict[str(file.id)] = FileIdContent(
-                                content=openai_file.id
-                            )
-                        case "image/png" | "image/jpeg" | "image/gif" | "image/webp":
-                            logger.debug(
-                                "encoding image file to base64", filename=file.filename
-                            )
-                            file_dict[str(file.id)] = Base64Content(
-                                content=base64.b64encode(
-                                    await file.get_content()
-                                ).decode("utf-8"),
-                                content_type=file.content_type,
-                            )
-                        case _:
-                            logger.warning(
-                                "unsupported file type for openai",
-                                content_type=file.content_type,
-                            )
-                            raise ValueError(
-                                f"Unsupported file type: {file.content_type}"
-                            )
-        logger.debug("file map built", num_entries=len(file_dict))
-        return FileMap(mapping=file_dict)
-
-    @staticmethod
-    async def complete(
-        model_spec: ModelSpec,
-        messages: list[ModelMessage],
-        output_type: Type[T] | None = None,
-        tools: list[ToolDefinition] | None = None,
-    ) -> CompletionResponse[T | str]:
-        """
-        Generate a completion using OpenAI.
-
-        Args:
-            model_spec: The model specification to use.
-            messages: List of structured messages.
-            output_type: Optional desired output type (Pydantic model) for structured output.
-            tools: Optional list of tools the model can use.
-
-        Returns:
-            CompletionResponse containing either content or tool calls.
-        """
-        logger.debug(
-            "openaiprovider.complete called",
-            model_spec=model_spec,
-            output_type=output_type,
-            has_tools=tools is not None,
-        )
-        try:
-            from openai import AsyncOpenAI  # noqa: PLC0415
-        except ImportError as e:
-            logger.exception("openai package not installed")
-            raise ImportError(
-                "OpenAI package is not installed. Install it with 'pip install openai'"
-            ) from e
-
-        try:
-            # Get config from context
-            config = get_config()
-
-            # Check if OpenAI config is available
-            if not config or not config.ai_providers or not config.ai_providers.openai:
-                logger.warning("openai configuration is missing in planarconfig")
-                raise ValueError(
-                    "OpenAI configuration is missing. Please provide OpenAI credentials in your config."
-                )
-
-            openai_config = config.ai_providers.openai
-            logger.debug("openai client configured from planarconfig")
-            client = AsyncOpenAI(
-                api_key=openai_config.api_key.get_secret_value(),
-                base_url=openai_config.base_url,
-                organization=openai_config.organization,
-            )
-        except (RuntimeError, ValueError) as e:
-            # Fallback to environment variables when running outside of HTTP context
-            # or when configuration is incomplete
-            # client = AsyncOpenAI()  # Uses OPENAI_API_KEY from environment
-            logger.exception(
-                "failed to configure openai client from planarconfig or context"
-            )
-            raise ValueError("OpenAI configuration is missing.") from e
-
-        file_map = await OpenAIProvider._build_file_map(client, messages)
-
-        formatted_messages = OpenAIProvider.prepare_messages(messages, file_map)
-
-        # TODO: Properly validate parameters
-        kwargs = {
-            "model": model_spec.model_id,
-            "messages": formatted_messages,
-            **model_spec.parameters,
-        }
-
-        # Handle function calling via tools
-        if tools:
-            formatted_tools = []
-            for tool in tools:
-                # Convert our Pydantic model to OpenAI's expected format
-                schema = tool.parameters
-                openai_params = {
-                    "type": "object",
-                    "properties": schema.get("properties", {}),
-                    "required": schema.get("required", []),
-                    "additionalProperties": False,
-                }
-
-                formatted_tools.append(
-                    {
-                        "type": "function",
-                        "function": {
-                            "name": tool.name,
-                            "description": tool.description,
-                            "parameters": openai_params,
-                            "strict": True,
-                        },
-                    }
-                )
-            kwargs["tools"] = formatted_tools
-
-        # Handle structured output if output_type is provided
-        completion = None
-        if output_type is not None:
-            if not issubclass(output_type, BaseModel):
-                raise ValueError("Non-Pydantic structured output not supported yet.")
-
-            # Verify name conforms to regex, otherwise OpenAI will throw an error
-            if not re.match(r"^[a-zA-Z0-9_-]+$", output_type.__name__):
-                output_type.__name__ = re.sub(
-                    r"[^a-zA-Z0-9_-]", "_", output_type.__name__
-                )
-
-            completion = await client.beta.chat.completions.parse(
-                response_format=output_type, **kwargs
-            )
-            logger.debug(
-                "called openai beta.chat.completions.parse for structured output"
-            )
-        else:
-            # Make the API call
-            completion = await client.chat.completions.create(**kwargs)
-            logger.debug("called openai chat.completions.create for standard output")
-
-        assert completion
-        # Process the response
-        choice = completion.choices[0]
-        logger.debug("openai completion choice", choice=choice)
-
-        # Check for tool calls
-        if choice.message.tool_calls:
-            logger.debug(
-                "openai response contains tool calls",
-                num_tool_calls=len(choice.message.tool_calls),
-            )
-            tool_calls = []
-            for tool_call_idx, tool_call in enumerate(choice.message.tool_calls):
-                # Parse the function arguments from JSON string
-                try:
-                    arguments = json.loads(tool_call.function.arguments)
-                except json.JSONDecodeError:
-                    logger.exception(
-                        "failed to parse json arguments for tool call",
-                        tool_name=tool_call.function.name,
-                        arguments=tool_call.function.arguments,
-                    )
-                    arguments = {"raw_arguments": tool_call.function.arguments}
-
-                tool_calls.append(
-                    ToolCall(
-                        id=tool_call.id,
-                        name=tool_call.function.name,
-                        arguments=arguments,
-                    )
-                )
-
-            return CompletionResponse(content=None, tool_calls=tool_calls)
-
-        # Process regular content
-        content = choice.message.content
-
-        # Parse JSON content if needed
-        if output_type and issubclass(output_type, BaseModel) and content:
-            try:
-                if isinstance(content, str):
-                    parsed_content = json.loads(content)
-                    content = output_type.model_validate(parsed_content)
-            except Exception:
-                # If parsing fails, return the raw content
-                logger.exception(
-                    "failed to parse/validate structured output content",
-                    content=content,
-                )
-                pass
-        logger.debug("openai completion successful", content_type=type(content))
-        return CompletionResponse(content=content, tool_calls=None)
-
-
-class OpenAIModel(Model):
-    """OpenAI-specific model implementation."""
-
-    provider_class = OpenAIProvider
-    name = "OpenAI"
-
-    def __init__(self, model_id: str):
-        super().__init__(model_id)
-
-
-class OpenAI:
-    # builder of OpenAI models
-    @staticmethod
-    def model(model_id: str) -> OpenAIModel:
-        """Create a model instance for a custom OpenAI model ID."""
-        return OpenAIModel(model_id)
-
-    # OpenAI models using the model method
-    gpt_4o = model("gpt-4o")
-    gpt_4_1 = model("gpt-4.1")
-    gpt_4_turbo = model("gpt-4-turbo")
-
-
-class AnthropicProvider(Provider):
-    """Anthropic provider implementation."""
-
-    @staticmethod
-    def model(model_id: str) -> "AnthropicModel":
-        """Create a model instance for a custom Anthropic model ID."""
-        return AnthropicModel(model_id)
-
-    @staticmethod
-    def format_tool_response(tool_response: ToolResponse) -> ToolMessage:
-        """Format a tool response into a message for Anthropic.
-
-        Args:
-            tool_response: The tool response to format.
-
-        Returns:
-            A formatted tool message.
-        """
-        return ToolMessage(
-            content=tool_response.content,
-            tool_call_id=tool_response.tool_call_id or "call_1",
-        )
-
-    @staticmethod
-    def prepare_messages(
-        messages: list[ModelMessage], file_map: FileMap | None = None
-    ) -> list[dict[str, Any]]:
-        """Prepare messages from Planar representations into the format expected by the provider, including file upload or conversion.
-
-        Args:
-            messages: List of structured messages.
-
-        Returns:
-            List of messages in Anthropic format.
-        """
-        formatted_messages = []
-
-        for message in messages:
-            if isinstance(message, SystemMessage):
-                formatted_messages.append(
-                    {"role": "system", "content": message.content}
-                )
-            elif isinstance(message, UserMessage):
-                formatted_messages.append({"role": "user", "content": message.content})
-            elif isinstance(message, ToolMessage):
-                formatted_messages.append(
-                    {
-                        "role": "tool",
-                        "tool_call_id": message.tool_call_id,
-                        "content": message.content,
-                    }
-                )
-            elif isinstance(message, AssistantMessage):
-                if message.tool_calls:
-                    assistant_msg = {
-                        "role": "assistant",
-                        "content": message.content,
-                        "tool_calls": [],
-                    }
-
-                    for tool_call in message.tool_calls:
-                        formatted_tool_call = {
-                            "id": tool_call.id
-                            or f"call_{len(assistant_msg['tool_calls']) + 1}",
-                            "type": "function",
-                            "function": {
-                                "name": tool_call.name,
-                                "arguments": json.dumps(tool_call.arguments),
-                            },
-                        }
-                        assistant_msg["tool_calls"].append(formatted_tool_call)
-
-                    formatted_messages.append(assistant_msg)
-                else:
-                    formatted_messages.append(
-                        {"role": "assistant", "content": message.content}
-                    )
-
-        return formatted_messages
-
-    @staticmethod
-    async def complete(
-        model_spec: ModelSpec,
-        messages: list[ModelMessage],
-        output_type: Type[T] | None = None,
-        tools: list[ToolDefinition] | None = None,
-    ) -> CompletionResponse[T | str]:
-        """
-        Generate a completion using Anthropic.
-
-        Args:
-            model_spec: The model specification to use.
-            messages: List of structured message objects.
-            output_type: Optional desired output type (Pydantic model) for structured output.
-            tools: Optional list of tools the model can use.
-
-        Returns:
-            CompletionResponse containing either content or tool calls.
-        """
-        logger.debug(
-            "anthropicprovider.complete called",
-            model_spec=model_spec,
-            output_type=output_type,
-            has_tools=tools is not None,
-        )
-        try:
-            import anthropic  # noqa: PLC0415
-        except ImportError as e:
-            logger.exception("anthropic package not installed")
-            raise ImportError(
-                "Anthropic package is not installed. Install it with 'pip install anthropic'"
-            ) from e
-
-        try:
-            # Get config from context
-            config = get_config()
-
-            # Check if Anthropic config is available
-            if (
-                not config
-                or not config.ai_providers
-                or not config.ai_providers.anthropic
-            ):
-                logger.warning("anthropic configuration is missing in planarconfig")
-                raise ValueError(
-                    "Anthropic configuration is missing. Please provide Anthropic credentials in your config."
-                )
-
-            anthropic_config = config.ai_providers.anthropic
-            logger.debug("anthropic client configured from planarconfig")
-            # Initialize Anthropic client with credentials from config
-            client_kwargs: AnthropicKwargs = {
-                "api_key": anthropic_config.api_key.get_secret_value(),
-            }
-
-            # Add optional parameters if they exist
-            if anthropic_config.base_url:
-                client_kwargs["base_url"] = anthropic_config.base_url
-
-            # Initialize client - currently unused in stub implementation
-            _ = anthropic.Anthropic(**client_kwargs)
-        except (RuntimeError, ValueError) as e:
-            # Fallback to environment variables when running outside of HTTP context
-            # or when configuration is incomplete
-            # client = anthropic.Anthropic()  # Uses ANTHROPIC_API_KEY from environment
-            logger.exception(
-                "failed to configure anthropic client from planarconfig or context"
-            )
-            raise ValueError("Anthropic configuration is missing.") from e
-
-        # Format messages for Anthropic
-        file_map = None  # TODO: Implement file map
-        formatted_messages = AnthropicProvider.prepare_messages(messages, file_map)
-
-        # Prepare API call parameters
-        kwargs = {
-            "model": model_spec.model_id,
-            "messages": formatted_messages,
-            **model_spec.parameters,
-        }
-
-        # Handle tools
-        if tools:
-            formatted_tools = []
-            for tool in tools:
-                # Convert our Pydantic model to Anthropic's expected format
-                schema = tool.parameters
-                anthropic_params = {
-                    "type": "object",
-                    "properties": schema.get("properties", {}),
-                    "required": schema.get("required", []),
-                }
-
-                formatted_tools.append(
-                    {
-                        "name": tool.name,
-                        "description": tool.description,
-                        "input_schema": anthropic_params,
-                    }
-                )
-
-            kwargs["tools"] = formatted_tools
-
-        # Handle structured output if output_type is provided
-        if output_type is not None:
-            if not issubclass(output_type, BaseModel):
-                raise ValueError("Non-Pydantic structured output not supported yet.")
-
-            schema_json = output_type.model_json_schema()
-            kwargs["system"] = (
-                f"You must respond with valid JSON that matches the following schema:\n{schema_json}"
-            )
-
-        # This is a stub implementation that would be filled out with the actual API call
-        # In a real implementation, would make an API call to Anthropic:
-        # message = await client.messages.create(
-        #     **kwargs
-        # )
-
-        # Process tool calls (stub implementation)
-        # if message.content[0].type == "tool_use":
-        #     tool_calls = []
-        #     for tool_use in message.content:
-        #         if tool_use.type == "tool_use":
-        #             tool_calls.append(
-        #                 ToolCall(
-        #                     id=tool_use.id,
-        #                     name=tool_use.name,
-        #                     arguments=tool_use.input,
-        #                 )
-        #             )
-        #     return CompletionResponse(content=None, tool_calls=tool_calls)
-        # else:
-        #     content = message.content[0].text
-
-        # For now, return a stub response
-        return CompletionResponse(content="Anthropic response", tool_calls=None)
-
-
-class AnthropicModel(Model):
-    """Anthropic-specific model implementation."""
-
-    provider_class = AnthropicProvider
-    name = "Anthropic"
-
-    def __init__(self, model_id: str):
-        super().__init__(model_id)
-
-
-class Anthropic:
-    # builder of Anthropic models
-    @staticmethod
-    def model(model_id: str) -> AnthropicModel:
-        """Create a model instance for a custom Anthropic model ID."""
-        return AnthropicModel(model_id)
-
-    # Class-level models
-    claude_3_opus = model("claude-3-opus")
-    claude_3_sonnet = model("claude-3-sonnet")
-    claude_3_haiku = model("claude-3-haiku")
-    claude_sonnet_4_20250514 = model("claude-sonnet-4-20250514")
-    claude_opus_4_20250514 = model("claude-opus-4-20250514")
-    claude_sonnet_4 = model("claude-sonnet-4")
-    claude_opus_4 = model("claude-opus-4")
-
-
-class GeminiProvider(Provider):
-    """Gemini provider implementation using PydanticAI."""
-
-    @staticmethod
-    def model(model_id: str) -> "GeminiModel":
-        """Create a model instance for a custom Gemini model ID."""
-        return GeminiModel(model_id)
-
-    @staticmethod
-    def format_tool_response(tool_response: ToolResponse) -> ToolMessage:
-        """Format a tool response into a message for Gemini.
-
-        Args:
-            tool_response: The tool response to format.
-
-        Returns:
-            A formatted tool message.
-        """
-        return ToolMessage(
-            content=tool_response.content,
-            tool_call_id=tool_response.tool_call_id or "call_1",
-        )
-
-    @staticmethod
-    def prepare_messages(
-        messages: list[ModelMessage], file_map: FileMap | None = None
-    ) -> list[Any]:
-        """Prepare messages from Planar representations into the format expected by PydanticAI.
-
-        Args:
-            messages: List of structured messages.
-            file_map: Optional file map for file content.
-
-        Returns:
-            List of messages in PydanticAI format for Gemini.
-        """
-        pydantic_messages: list[pydantic_ai.ModelMessage] = []
-
-        def append_request_part(part: pydantic_ai.ModelRequestPart):
-            last = (
-                pydantic_messages[-1]
-                if pydantic_messages
-                and isinstance(pydantic_messages[-1], pydantic_ai.ModelRequest)
-                else None
-            )
-            if not last:
-                last = pydantic_ai.ModelRequest(parts=[])
-                pydantic_messages.append(last)
-            last.parts.append(part)
-
-        def append_response_part(part: pydantic_ai.ModelResponsePart):
-            last = (
-                pydantic_messages[-1]
-                if pydantic_messages
-                and isinstance(pydantic_messages[-1], pydantic_ai.ModelResponse)
-                else None
-            )
-            if not last:
-                last = pydantic_ai.ModelResponse(parts=[])
-                pydantic_messages.append(last)
-            last.parts.append(part)
-
-        for message in messages:
-            if isinstance(message, SystemMessage):
-                append_request_part(
-                    pydantic_ai.SystemPromptPart(content=message.content or "")
-                )
-            elif isinstance(message, UserMessage):
-                user_content: list[pydantic_ai.UserContent] = []
-                files: list[FileContent] = []
-                if message.files:
-                    if not file_map:
-                        raise ValueError("File map empty while user message has files.")
-                    for file in message.files:
-                        if str(file.id) not in file_map.mapping:
-                            raise ValueError(
-                                f"File {file} not found in file map {file_map}."
-                            )
-                        files.append(file_map.mapping[str(file.id)])
-                for file in files:
-                    match file:
-                        case Base64Content():
-                            user_content.append(
-                                pydantic_ai.BinaryContent(
-                                    data=base64.b64decode(file.content),
-                                    media_type=file.content_type,
-                                )
-                            )
-                        case FileIdContent():
-                            raise Exception(
-                                "file id handling not implemented yet for Gemini"
-                            )
-                if message.content is not None:
-                    user_content.append(message.content)
-                append_request_part(pydantic_ai.UserPromptPart(content=user_content))
-            elif isinstance(message, ToolMessage):
-                append_request_part(
-                    pydantic_ai.ToolReturnPart(
-                        tool_name="unknown",  # FIXME: Planar's ToolMessage doesn't include tool name
-                        content=message.content,
-                        tool_call_id=message.tool_call_id,
-                    )
-                )
-            elif isinstance(message, AssistantMessage):
-                if message.content:
-                    append_response_part(
-                        pydantic_ai.TextPart(content=message.content or "")
-                    )
-                if message.tool_calls:
-                    for tc in message.tool_calls:
-                        append_response_part(
-                            pydantic_ai.ToolCallPart(
-                                tool_name=tc.name, args=tc.arguments
-                            )
-                        )
-
-        return pydantic_messages
-
-    @staticmethod
-    async def _build_file_map(messages: list[ModelMessage]) -> FileMap:
-        """Build file map for Gemini, converting files to base64 for multi-modal support."""
-        logger.debug("building file map for gemini", num_messages=len(messages))
-        file_dict = {}
-
-        for message_idx, message in enumerate(messages):
-            if isinstance(message, UserMessage) and message.files:
-                logger.debug(
-                    "processing files in message for gemini",
-                    num_files=len(message.files),
-                    message_index=message_idx,
-                )
-                for file_idx, file in enumerate(message.files):
-                    logger.debug(
-                        "processing file for gemini",
-                        file_index=file_idx,
-                        file_id=file.id,
-                        content_type=file.content_type,
-                    )
-
-                    # For now we are not using uploaded files with Gemini, so convert all to base64
-                    if file.content_type.startswith(
-                        ("image/", "audio/", "video/", "application/pdf")
-                    ):
-                        logger.debug(
-                            "encoding file to base64 for gemini",
-                            filename=file.filename,
-                            content_type=file.content_type,
-                        )
-                        file_dict[str(file.id)] = Base64Content(
-                            content=base64.b64encode(await file.get_content()).decode(
-                                "utf-8"
-                            ),
-                            content_type=file.content_type,
-                        )
-                    else:
-                        logger.warning(
-                            "unsupported file type for gemini",
-                            content_type=file.content_type,
-                        )
-                        raise ValueError(
-                            f"Unsupported file type for Gemini: {file.content_type}"
-                        )
-
-        logger.debug("file map built for gemini", num_entries=len(file_dict))
-        return FileMap(mapping=file_dict)
-
-    @staticmethod
-    async def complete(
-        model_spec: ModelSpec,
-        messages: list[ModelMessage],
-        output_type: Type[T] | None = None,
-        tools: list[ToolDefinition] | None = None,
-    ) -> CompletionResponse[T | str]:
-        """
-        Generate a completion using Gemini via PydanticAI.
-
-        Args:
-            model_spec: The model specification to use.
-            messages: List of structured messages.
-            output_type: Optional desired output type (Pydantic model) for structured output.
-            tools: Optional list of tools the model can use.
-
-        Returns:
-            CompletionResponse containing either content or tool calls.
-        """
-        logger.debug(
-            "gemini completion started",
-            model_spec=model_spec,
-            output_type=output_type,
-            has_tools=tools is not None,
-        )
-
-        try:
-            # Get config from context
-            config = get_config()
-
-            # Check if Gemini config is available
-            if not config or not config.ai_providers or not config.ai_providers.gemini:
-                logger.warning("gemini configuration is missing in planarconfig")
-                raise ValueError(
-                    "Gemini configuration is missing. Please provide Gemini credentials in your config."
-                )
-
-            gemini_config = config.ai_providers.gemini
-            logger.debug("gemini configured from planarconfig")
-
-            # PydanticAI handles client initialization internally using GEMINI_API_KEY env var
-            # We need to ensure the API key is available in the environment
-            import os
-
-            os.environ["GEMINI_API_KEY"] = gemini_config.api_key.get_secret_value()
-
-        except (RuntimeError, ValueError) as e:
-            logger.exception(
-                "failed to configure gemini client from planarconfig or context"
-            )
-            raise ValueError("Gemini configuration is missing.") from e
-
-        # Build file map for multi-modal support
-        file_map = await GeminiProvider._build_file_map(messages)
-
-        # Format messages for PydanticAI
-        pydantic_ai_messages_list = GeminiProvider.prepare_messages(messages, file_map)
-
-        # Prepare model request parameters
-        model_request_parameters = PydanticAIModelRequestParameters()
-
-        # Add model-specific parameters
-        if model_spec.parameters:
-            # Apply any model parameters (temperature, etc.)
-            for key, value in model_spec.parameters.items():
-                setattr(model_request_parameters, key, value)
-
-        # Handle tools if provided
-        if tools:
-            pydantic_ai_tools = []
-            for tool in tools:
-                pydantic_ai_tools.append(
-                    PydanticAIToolDefinition(
-                        name=tool.name,
-                        description=tool.description,
-                        parameters_json_schema=tool.parameters,
-                    )
-                )
-            model_request_parameters.function_tools = pydantic_ai_tools
-
-        # Handle structured output if output_type is provided
-        if output_type and issubclass(output_type, BaseModel):
-            model_request_parameters.output_mode = "native"
-            model_request_parameters.output_object = OutputObjectDefinition(
-                name=output_type.__name__,
-                description=output_type.__doc__ or "",
-                json_schema=output_type.model_json_schema(),
-            )
-
-        # Make the API call using PydanticAI
-        try:
-            pydantic_ai_response = await pydantic_ai_model_request(
-                model=f"google-gla:{model_spec.model_id}",
-                messages=pydantic_ai_messages_list,
-                model_request_parameters=model_request_parameters,
-            )
-            logger.debug("gemini completion successful via pydantic_ai")
-        except Exception as e:
-            logger.exception("gemini api call failed")
-            raise ValueError(f"Gemini API call failed: {e}") from e
-
-        # Process the response
-        response_content: Any = None
-        response_tool_calls = []
-
-        for part in pydantic_ai_response.parts:
-            if isinstance(part, pydantic_ai.TextPart):
-                response_content = part.content
-            elif isinstance(part, pydantic_ai.ToolCallPart):
-                response_tool_calls.append(
-                    ToolCall(
-                        id=part.tool_call_id,
-                        name=part.tool_name,
-                        arguments=part.args
-                        if isinstance(part.args, dict)
-                        else json.loads(part.args or "{}"),
-                    )
-                )
-
-        # Handle structured output parsing
-        if (
-            output_type
-            and issubclass(output_type, BaseModel)
-            and isinstance(response_content, str)
-        ):
-            try:
-                response_content = output_type.model_validate_json(response_content)
-            except Exception:
-                logger.exception(
-                    "failed to parse gemini response into structured output"
-                )
-                # Keep as string if parsing fails
-
-        logger.debug(
-            "gemini completion processed",
-            content_type=type(response_content),
-            num_tool_calls=len(response_tool_calls),
-        )
-
-        return CompletionResponse(
-            content=response_content, tool_calls=response_tool_calls or None
-        )
-
-
-class GeminiModel(Model):
-    """Gemini-specific model implementation."""
-
-    provider_class = GeminiProvider
-    name = "Gemini"
-
-    def __init__(self, model_id: str):
-        super().__init__(model_id)
-
-
-class Gemini:
-    """Builder of Gemini models."""
-
-    @staticmethod
-    def model(model_id: str) -> GeminiModel:
-        """Create a model instance for a custom Gemini model ID."""
-        return GeminiModel(model_id)
-
-    # Class-level models
-    gemini_2_5_flash = model("gemini-2.5-flash")
-    gemini_2_5_pro = model("gemini-2.5-pro")