planar 0.5.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (211)
  1. planar/_version.py +1 -1
  2. planar/ai/agent.py +155 -283
  3. planar/ai/agent_base.py +170 -0
  4. planar/ai/agent_utils.py +7 -0
  5. planar/ai/pydantic_ai.py +638 -0
  6. planar/ai/test_agent_serialization.py +1 -1
  7. planar/app.py +64 -20
  8. planar/cli.py +39 -27
  9. planar/config.py +45 -36
  10. planar/db/db.py +2 -1
  11. planar/files/storage/azure_blob.py +343 -0
  12. planar/files/storage/base.py +7 -0
  13. planar/files/storage/config.py +70 -7
  14. planar/files/storage/s3.py +6 -6
  15. planar/files/storage/test_azure_blob.py +435 -0
  16. planar/logging/formatter.py +17 -4
  17. planar/logging/test_formatter.py +327 -0
  18. planar/registry_items.py +2 -1
  19. planar/routers/agents_router.py +3 -1
  20. planar/routers/files.py +11 -2
  21. planar/routers/models.py +14 -1
  22. planar/routers/test_agents_router.py +1 -1
  23. planar/routers/test_files_router.py +49 -0
  24. planar/routers/test_routes_security.py +5 -7
  25. planar/routers/test_workflow_router.py +270 -3
  26. planar/routers/workflow.py +95 -36
  27. planar/rules/models.py +36 -39
  28. planar/rules/test_data/account_dormancy_management.json +223 -0
  29. planar/rules/test_data/airline_loyalty_points_calculator.json +262 -0
  30. planar/rules/test_data/applicant_risk_assessment.json +435 -0
  31. planar/rules/test_data/booking_fraud_detection.json +407 -0
  32. planar/rules/test_data/cellular_data_rollover_system.json +258 -0
  33. planar/rules/test_data/clinical_trial_eligibility_screener.json +437 -0
  34. planar/rules/test_data/customer_lifetime_value.json +143 -0
  35. planar/rules/test_data/import_duties_calculator.json +289 -0
  36. planar/rules/test_data/insurance_prior_authorization.json +443 -0
  37. planar/rules/test_data/online_check_in_eligibility_system.json +254 -0
  38. planar/rules/test_data/order_consolidation_system.json +375 -0
  39. planar/rules/test_data/portfolio_risk_monitor.json +471 -0
  40. planar/rules/test_data/supply_chain_risk.json +253 -0
  41. planar/rules/test_data/warehouse_cross_docking.json +237 -0
  42. planar/rules/test_rules.py +750 -6
  43. planar/scaffold_templates/planar.dev.yaml.j2 +6 -6
  44. planar/scaffold_templates/planar.prod.yaml.j2 +9 -5
  45. planar/scaffold_templates/pyproject.toml.j2 +1 -1
  46. planar/security/auth_context.py +21 -0
  47. planar/security/{jwt_middleware.py → auth_middleware.py} +70 -17
  48. planar/security/authorization.py +9 -15
  49. planar/security/tests/test_auth_middleware.py +162 -0
  50. planar/sse/proxy.py +4 -9
  51. planar/test_app.py +92 -1
  52. planar/test_cli.py +81 -59
  53. planar/test_config.py +17 -14
  54. planar/testing/fixtures.py +325 -0
  55. planar/testing/planar_test_client.py +5 -2
  56. planar/utils.py +41 -1
  57. planar/workflows/execution.py +1 -1
  58. planar/workflows/orchestrator.py +5 -0
  59. planar/workflows/serialization.py +12 -6
  60. planar/workflows/step_core.py +3 -1
  61. planar/workflows/test_serialization.py +9 -1
  62. {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/METADATA +30 -5
  63. planar-0.8.0.dist-info/RECORD +166 -0
  64. planar/.__init__.py.un~ +0 -0
  65. planar/._version.py.un~ +0 -0
  66. planar/.app.py.un~ +0 -0
  67. planar/.cli.py.un~ +0 -0
  68. planar/.config.py.un~ +0 -0
  69. planar/.context.py.un~ +0 -0
  70. planar/.db.py.un~ +0 -0
  71. planar/.di.py.un~ +0 -0
  72. planar/.engine.py.un~ +0 -0
  73. planar/.files.py.un~ +0 -0
  74. planar/.log_context.py.un~ +0 -0
  75. planar/.log_metadata.py.un~ +0 -0
  76. planar/.logging.py.un~ +0 -0
  77. planar/.object_registry.py.un~ +0 -0
  78. planar/.otel.py.un~ +0 -0
  79. planar/.server.py.un~ +0 -0
  80. planar/.session.py.un~ +0 -0
  81. planar/.sqlalchemy.py.un~ +0 -0
  82. planar/.task_local.py.un~ +0 -0
  83. planar/.test_app.py.un~ +0 -0
  84. planar/.test_config.py.un~ +0 -0
  85. planar/.test_object_config.py.un~ +0 -0
  86. planar/.test_sqlalchemy.py.un~ +0 -0
  87. planar/.test_utils.py.un~ +0 -0
  88. planar/.util.py.un~ +0 -0
  89. planar/.utils.py.un~ +0 -0
  90. planar/ai/.__init__.py.un~ +0 -0
  91. planar/ai/._models.py.un~ +0 -0
  92. planar/ai/.agent.py.un~ +0 -0
  93. planar/ai/.agent_utils.py.un~ +0 -0
  94. planar/ai/.events.py.un~ +0 -0
  95. planar/ai/.files.py.un~ +0 -0
  96. planar/ai/.models.py.un~ +0 -0
  97. planar/ai/.providers.py.un~ +0 -0
  98. planar/ai/.pydantic_ai.py.un~ +0 -0
  99. planar/ai/.pydantic_ai_agent.py.un~ +0 -0
  100. planar/ai/.pydantic_ai_provider.py.un~ +0 -0
  101. planar/ai/.step.py.un~ +0 -0
  102. planar/ai/.test_agent.py.un~ +0 -0
  103. planar/ai/.test_agent_serialization.py.un~ +0 -0
  104. planar/ai/.test_providers.py.un~ +0 -0
  105. planar/ai/.utils.py.un~ +0 -0
  106. planar/ai/providers.py +0 -1088
  107. planar/ai/test_agent.py +0 -1298
  108. planar/ai/test_providers.py +0 -463
  109. planar/db/.db.py.un~ +0 -0
  110. planar/files/.config.py.un~ +0 -0
  111. planar/files/.local.py.un~ +0 -0
  112. planar/files/.local_filesystem.py.un~ +0 -0
  113. planar/files/.model.py.un~ +0 -0
  114. planar/files/.models.py.un~ +0 -0
  115. planar/files/.s3.py.un~ +0 -0
  116. planar/files/.storage.py.un~ +0 -0
  117. planar/files/.test_files.py.un~ +0 -0
  118. planar/files/storage/.__init__.py.un~ +0 -0
  119. planar/files/storage/.base.py.un~ +0 -0
  120. planar/files/storage/.config.py.un~ +0 -0
  121. planar/files/storage/.context.py.un~ +0 -0
  122. planar/files/storage/.local_directory.py.un~ +0 -0
  123. planar/files/storage/.test_local_directory.py.un~ +0 -0
  124. planar/files/storage/.test_s3.py.un~ +0 -0
  125. planar/human/.human.py.un~ +0 -0
  126. planar/human/.test_human.py.un~ +0 -0
  127. planar/logging/.__init__.py.un~ +0 -0
  128. planar/logging/.attributes.py.un~ +0 -0
  129. planar/logging/.formatter.py.un~ +0 -0
  130. planar/logging/.logger.py.un~ +0 -0
  131. planar/logging/.otel.py.un~ +0 -0
  132. planar/logging/.tracer.py.un~ +0 -0
  133. planar/modeling/.mixin.py.un~ +0 -0
  134. planar/modeling/.storage.py.un~ +0 -0
  135. planar/modeling/orm/.planar_base_model.py.un~ +0 -0
  136. planar/object_config/.object_config.py.un~ +0 -0
  137. planar/routers/.__init__.py.un~ +0 -0
  138. planar/routers/.agents_router.py.un~ +0 -0
  139. planar/routers/.crud.py.un~ +0 -0
  140. planar/routers/.decision.py.un~ +0 -0
  141. planar/routers/.event.py.un~ +0 -0
  142. planar/routers/.file_attachment.py.un~ +0 -0
  143. planar/routers/.files.py.un~ +0 -0
  144. planar/routers/.files_router.py.un~ +0 -0
  145. planar/routers/.human.py.un~ +0 -0
  146. planar/routers/.info.py.un~ +0 -0
  147. planar/routers/.models.py.un~ +0 -0
  148. planar/routers/.object_config_router.py.un~ +0 -0
  149. planar/routers/.rule.py.un~ +0 -0
  150. planar/routers/.test_object_config_router.py.un~ +0 -0
  151. planar/routers/.test_workflow_router.py.un~ +0 -0
  152. planar/routers/.workflow.py.un~ +0 -0
  153. planar/rules/.decorator.py.un~ +0 -0
  154. planar/rules/.runner.py.un~ +0 -0
  155. planar/rules/.test_rules.py.un~ +0 -0
  156. planar/security/.jwt_middleware.py.un~ +0 -0
  157. planar/sse/.constants.py.un~ +0 -0
  158. planar/sse/.example.html.un~ +0 -0
  159. planar/sse/.hub.py.un~ +0 -0
  160. planar/sse/.model.py.un~ +0 -0
  161. planar/sse/.proxy.py.un~ +0 -0
  162. planar/testing/.client.py.un~ +0 -0
  163. planar/testing/.memory_storage.py.un~ +0 -0
  164. planar/testing/.planar_test_client.py.un~ +0 -0
  165. planar/testing/.predictable_tracer.py.un~ +0 -0
  166. planar/testing/.synchronizable_tracer.py.un~ +0 -0
  167. planar/testing/.test_memory_storage.py.un~ +0 -0
  168. planar/testing/.workflow_observer.py.un~ +0 -0
  169. planar/workflows/.__init__.py.un~ +0 -0
  170. planar/workflows/.builtin_steps.py.un~ +0 -0
  171. planar/workflows/.concurrency_tracing.py.un~ +0 -0
  172. planar/workflows/.context.py.un~ +0 -0
  173. planar/workflows/.contrib.py.un~ +0 -0
  174. planar/workflows/.decorators.py.un~ +0 -0
  175. planar/workflows/.durable_test.py.un~ +0 -0
  176. planar/workflows/.errors.py.un~ +0 -0
  177. planar/workflows/.events.py.un~ +0 -0
  178. planar/workflows/.exceptions.py.un~ +0 -0
  179. planar/workflows/.execution.py.un~ +0 -0
  180. planar/workflows/.human.py.un~ +0 -0
  181. planar/workflows/.lock.py.un~ +0 -0
  182. planar/workflows/.misc.py.un~ +0 -0
  183. planar/workflows/.model.py.un~ +0 -0
  184. planar/workflows/.models.py.un~ +0 -0
  185. planar/workflows/.notifications.py.un~ +0 -0
  186. planar/workflows/.orchestrator.py.un~ +0 -0
  187. planar/workflows/.runtime.py.un~ +0 -0
  188. planar/workflows/.serialization.py.un~ +0 -0
  189. planar/workflows/.step.py.un~ +0 -0
  190. planar/workflows/.step_core.py.un~ +0 -0
  191. planar/workflows/.sub_workflow_runner.py.un~ +0 -0
  192. planar/workflows/.sub_workflow_scheduler.py.un~ +0 -0
  193. planar/workflows/.test_concurrency.py.un~ +0 -0
  194. planar/workflows/.test_concurrency_detection.py.un~ +0 -0
  195. planar/workflows/.test_human.py.un~ +0 -0
  196. planar/workflows/.test_lock_timeout.py.un~ +0 -0
  197. planar/workflows/.test_orchestrator.py.un~ +0 -0
  198. planar/workflows/.test_race_conditions.py.un~ +0 -0
  199. planar/workflows/.test_serialization.py.un~ +0 -0
  200. planar/workflows/.test_suspend_deserialization.py.un~ +0 -0
  201. planar/workflows/.test_workflow.py.un~ +0 -0
  202. planar/workflows/.tracing.py.un~ +0 -0
  203. planar/workflows/.types.py.un~ +0 -0
  204. planar/workflows/.util.py.un~ +0 -0
  205. planar/workflows/.utils.py.un~ +0 -0
  206. planar/workflows/.workflow.py.un~ +0 -0
  207. planar/workflows/.workflow_wrapper.py.un~ +0 -0
  208. planar/workflows/.wrappers.py.un~ +0 -0
  209. planar-0.5.0.dist-info/RECORD +0 -289
  210. {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/WHEEL +0 -0
  211. {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/entry_points.txt +0 -0
planar/ai/test_providers.py DELETED
@@ -1,463 +0,0 @@
- import json
- from unittest.mock import AsyncMock, Mock, patch
- from uuid import UUID
-
- import pytest
- from pydantic import BaseModel, SecretStr
-
- from planar.ai.models import (
-     AssistantMessage,
-     Base64Content,
-     FileIdContent,
-     FileMap,
-     ModelMessage,
-     SystemMessage,
-     ToolCall,
-     ToolMessage,
-     ToolResponse,
-     UserMessage,
- )
- from planar.ai.providers import Anthropic, ModelSpec, OpenAI, OpenAIProvider
- from planar.config import (
-     AIProvidersConfig,
-     AppConfig,
-     OpenAIConfig,
-     PlanarConfig,
-     SQLiteConfig,
- )
- from planar.files.models import PlanarFile
- from planar.session import config_var
-
-
- class DummyOutput(BaseModel):
-     value: str
-     score: int
-
-
- class DummyGenericOutput[T: BaseModel](BaseModel):
-     value: T
-
-
- # Mock classes for OpenAI client
- class MockResponse:
-     def __init__(
-         self, content="Test response", tool_calls=None, structured_output=None
-     ):
-         message_content = (
-             structured_output if structured_output is not None else content
-         )
-         self.choices = [
-             Mock(message=Mock(content=message_content, tool_calls=tool_calls))
-         ]
-
-
- class MockCompletions:
-     def __init__(self):
-         self.captured_kwargs = None
-
-     async def create(self, **kwargs):
-         self.captured_kwargs = kwargs
-         return MockResponse()
-
-
- class MockBetaCompletions:
-     def __init__(self):
-         self.captured_kwargs = None
-
-     async def parse(self, response_format=None, **kwargs):
-         """Handle structured output parsing"""
-         self.captured_kwargs = kwargs.copy()
-         self.captured_kwargs["response_format"] = response_format
-         # If there's a response_format, create structured output based on it
-         if response_format:
-             if hasattr(response_format, "model_validate"):
-                 # Create an instance of the response format model with test data
-                 if response_format == DummyGenericOutput[DummyOutput]:
-                     structured_output = DummyGenericOutput[DummyOutput](
-                         value=DummyOutput(value="test value", score=95)
-                     )
-                 else:
-                     # Generic values for any other model
-                     structured_output = response_format.model_validate(
-                         {"value": "test", "score": 100}
-                     )
-                 return MockResponse(structured_output=structured_output)
-         return MockResponse()
-
-
- class MockChat:
-     def __init__(self):
-         self.completions = MockCompletions()
-
-
- class MockBetaChat:
-     def __init__(self):
-         self.completions = MockBetaCompletions()
-
-
- class MockBeta:
-     def __init__(self):
-         self.chat = MockBetaChat()
-
-
- class MockClient:
-     def __init__(self, **kwargs):
-         self.chat = MockChat()
-         self.beta = MockBeta()
-
-
- @pytest.fixture(name="mock_openai_client")
- def mock_openai_client_fixture(monkeypatch):
-     """Set up a mock OpenAI client for testing."""
-     mock_client = MockClient()
-     monkeypatch.setattr("openai.AsyncOpenAI", lambda **kwargs: mock_client)
-     return mock_client
-
-
- @pytest.fixture(name="fake_config")
- def fake_config_fixture():
-     """Set up a fake config for testing."""
-     # Create a minimal PlanarConfig for testing
-     # We're using actual PlanarConfig classes to maintain type compatibility
-     # Create config objects
-     openai_config = OpenAIConfig(
-         api_key=SecretStr("mock_key"),
-         base_url="https://api.openai.com/v1",
-         organization=None,
-     )
-
-     ai_providers = AIProvidersConfig(openai=openai_config)
-
-     # Create a minimal valid PlanarConfig for testing
-     mock_config = PlanarConfig(
-         db_connections={"app": SQLiteConfig(path=":memory:")},
-         app=AppConfig(db_connection="app"),
-         ai_providers=ai_providers,
-     )
-
-     # Set the config in the context variable
-     token = config_var.set(mock_config)
-     yield mock_config
-     # Reset when done
-     config_var.reset(token)
-
-
- class TestOpenAIProvider:
-     """Test suite for the OpenAIProvider implementation."""
-
-     def test_format_tool_response(self):
-         """Test that tool responses are correctly formatted."""
-         # Test with all fields
-         response1 = ToolResponse(tool_call_id="call_123", content="Test result")
-         message1 = OpenAIProvider.format_tool_response(response1)
-
-         assert isinstance(message1, ToolMessage)
-         assert message1.tool_call_id == "call_123"
-         assert message1.content == "Test result"
-
-         # Test with missing ID (should generate default)
-         response2 = ToolResponse(content="Another result")
-         message2 = OpenAIProvider.format_tool_response(response2)
-
-         assert isinstance(message2, ToolMessage)
-         assert message2.tool_call_id == "call_1" # Default ID
-         assert message2.content == "Another result"
-
-     def test_format_messages(self):
-         """Test that messages are correctly formatted for the OpenAI API."""
-         # Create a list of different message types
-         messages: list[ModelMessage] = [
-             SystemMessage(content="You are a helpful assistant"),
-             UserMessage(
-                 content="Hello",
-                 files=[
-                     PlanarFile(
-                         id=UUID("11111111-1111-1111-1111-111111111111"),
-                         filename="test_image.jpg",
-                         content_type="image/jpeg",
-                         size=1024,
-                     ),
-                     PlanarFile(
-                         id=UUID("22222222-2222-2222-2222-222222222222"),
-                         filename="test_doc.pdf",
-                         content_type="application/pdf",
-                         size=2048,
-                     ),
-                 ],
-             ),
-             AssistantMessage(content="How can I help?"),
-             ToolMessage(tool_call_id="call_1", content="Tool result"),
-             AssistantMessage(
-                 content=None,
-                 tool_calls=[
-                     ToolCall(
-                         id="call_2",
-                         name="test_tool",
-                         arguments={"param1": "value1"},
-                     )
-                 ],
-             ),
-         ]
-
-         file_map = FileMap(
-             mapping={
-                 "11111111-1111-1111-1111-111111111111": Base64Content(
-                     content_type="image/jpeg", content="fake content"
-                 ),
-                 "22222222-2222-2222-2222-222222222222": FileIdContent(
-                     content="file-123"
-                 ),
-             }
-         )
-         # Format the messages
-         formatted = OpenAIProvider.prepare_messages(messages, file_map)
-
-         # Check the results
-         assert len(formatted) == 5
-
-         # Check system message
-         assert formatted[0] == {
-             "role": "system",
-             "content": "You are a helpful assistant",
-         }
-
-         # Check user message - note that content is now a list with text item
-         assert formatted[1]["role"] == "user"
-         assert isinstance(formatted[1]["content"], list)
-         assert len(formatted[1]["content"]) == 3
-         assert formatted[1]["content"] == [
-             {
-                 "image_url": {"url": "data:image/jpeg;base64,fake content"},
-                 "type": "image_url",
-             },
-             {"file": {"file_id": "file-123"}, "type": "file"},
-             {"text": "Hello", "type": "text"},
-         ]
-
-         # Check assistant message
-         assert formatted[2] == {"role": "assistant", "content": "How can I help?"}
-
-         # Check tool message
-         assert formatted[3] == {
-             "role": "tool",
-             "tool_call_id": "call_1",
-             "content": "Tool result",
-         }
-
-         # Check assistant message with tool calls
-         assert formatted[4]["role"] == "assistant"
-         assert formatted[4]["content"] is None
-         assert len(formatted[4]["tool_calls"]) == 1
-         assert formatted[4]["tool_calls"][0]["id"] == "call_2"
-         assert formatted[4]["tool_calls"][0]["type"] == "function"
-         assert formatted[4]["tool_calls"][0]["function"]["name"] == "test_tool"
-         # Verify JSON arguments
-         tool_args = json.loads(formatted[4]["tool_calls"][0]["function"]["arguments"])
-         assert tool_args == {"param1": "value1"}
-
-     def test_tool_call_with_missing_id(self):
-         """Test that tool calls without IDs get auto-generated IDs."""
-         # Create a message with tool calls that have no IDs
-         message = AssistantMessage(
-             content=None,
-             tool_calls=[
-                 ToolCall(
-                     name="tool1",
-                     arguments={"arg1": "val1"},
-                 ),
-                 ToolCall(
-                     name="tool2",
-                     arguments={"arg2": "val2"},
-                 ),
-             ],
-         )
-
-         # Format the message
-         formatted = OpenAIProvider.prepare_messages([message])
-
-         # Check that IDs were auto-generated
-         assert len(formatted) == 1
-         assert formatted[0]["role"] == "assistant"
-         assert len(formatted[0]["tool_calls"]) == 2
-         assert formatted[0]["tool_calls"][0]["id"] == "call_1"
-         assert formatted[0]["tool_calls"][1]["id"] == "call_2"
-
-     def test_model_spec_handling(self):
-         """Test that ModelSpec is correctly initialized and parameters are handled."""
-         # Create a model spec with parameters
-         spec = ModelSpec(
-             model_id="gpt-4.1", parameters={"temperature": 0.7, "top_p": 0.95}
-         )
-
-         # Check values
-         assert spec.model_id == "gpt-4.1"
-         assert spec.parameters == {"temperature": 0.7, "top_p": 0.95}
-
-         # Test updating parameters
-         spec.parameters["temperature"] = 0.5
-         assert spec.parameters["temperature"] == 0.5
-
-     def test_model_str_and_repr(self):
-         """Test that Model can be converted to a string and repr."""
-         spec = OpenAI.gpt_4_1
-         assert str(spec) == "OpenAI:gpt-4.1"
-         assert repr(spec) == "OpenAI:gpt-4.1"
-
-         spec = OpenAI.gpt_4_turbo
-         assert str(spec) == "OpenAI:gpt-4-turbo"
-         assert repr(spec) == "OpenAI:gpt-4-turbo"
-
-         spec = Anthropic.claude_3_haiku
-         assert str(spec) == "Anthropic:claude-3-haiku"
-         assert repr(spec) == "Anthropic:claude-3-haiku"
-
-         spec = Anthropic.claude_sonnet_4_20250514
-         assert str(spec) == "Anthropic:claude-sonnet-4-20250514"
-         assert repr(spec) == "Anthropic:claude-sonnet-4-20250514"
-
-         spec = Anthropic.claude_opus_4_20250514
-         assert str(spec) == "Anthropic:claude-opus-4-20250514"
-         assert repr(spec) == "Anthropic:claude-opus-4-20250514"
-
-         spec = Anthropic.claude_sonnet_4
-         assert str(spec) == "Anthropic:claude-sonnet-4"
-         assert repr(spec) == "Anthropic:claude-sonnet-4"
-
-         spec = Anthropic.claude_opus_4
-         assert str(spec) == "Anthropic:claude-opus-4"
-         assert repr(spec) == "Anthropic:claude-opus-4"
-
-     async def test_planar_files(self, fake_config, mock_openai_client):
-         """Test that PlanarFile objects are correctly handled and formatted."""
-         # Create PlanarFile test objects
-         image_file = PlanarFile(
-             id=UUID("11111111-1111-1111-1111-111111111111"),
-             filename="test_image.jpg",
-             content_type="image/jpeg",
-             size=1024,
-         )
-
-         pdf_file = PlanarFile(
-             id=UUID("22222222-2222-2222-2222-222222222222"),
-             filename="test_doc.pdf",
-             content_type="application/pdf",
-             size=2048,
-         )
-
-         messages = [
-             SystemMessage(content="You are a helpful assistant"),
-             UserMessage(content="Describe this file", files=[image_file]),
-         ]
-
-         # Configure mock to return a specific response
-         file_response = "This is a file description"
-         mock_openai_client = Mock()
-         mock_openai_client.chat.completions.create = AsyncMock(
-             return_value=MockResponse(content=file_response)
-         )
-         mock_openai_client.files = Mock()
-         mock_openai_client.files.create = AsyncMock(return_value=Mock(id="file-123"))
-         mock_openai_client.beta = Mock()
-         mock_openai_client.beta.chat = Mock()
-         mock_openai_client.beta.chat.completions = Mock()
-
-         # Replace the original mock with our configured one
-         with (
-             patch(
-                 "planar.files.models.PlanarFile.get_content",
-                 AsyncMock(return_value=b"fake content"),
-             ),
-             patch(
-                 "planar.files.models.PlanarFile.get_metadata",
-                 AsyncMock(return_value=None),
-             ),
-             pytest.MonkeyPatch().context() as m,
-         ):
-             m.setattr("openai.AsyncOpenAI", lambda **kwargs: mock_openai_client)
-
-             # Test with a single image file
-             result = await OpenAIProvider.complete(
-                 model_spec=ModelSpec(model_id="gpt-4.1"),
-                 messages=messages,
-             )
-
-             # Verify the returned value
-             assert result.content == file_response
-             assert result.tool_calls is None
-
-             # Test with multiple files
-             # Create a new message with multiple files
-             messages[-1] = UserMessage(
-                 content="Describe these files", files=[image_file, pdf_file]
-             )
-
-             multiple_file_response = "This describes multiple files"
-             mock_openai_client.chat.completions.create = AsyncMock(
-                 return_value=MockResponse(content=multiple_file_response)
-             )
-
-             # Make the API call with multiple files
-             result = await OpenAIProvider.complete(
-                 model_spec=ModelSpec(model_id="gpt-4.1"),
-                 messages=messages,
-             )
-
-             # Verify the returned value
-             assert result.content == multiple_file_response
-             assert result.tool_calls is None
-
-             # Test with both files and structured output
-             class FileOutput(BaseModel):
-                 description: str
-
-             structured_file_result = FileOutput(
-                 description="A PDF document",
-             )
-
-             mock_openai_client.beta.chat.completions.parse = AsyncMock(
-                 return_value=MockResponse(structured_output=structured_file_result)
-             )
-
-             # Make the API call with file and structured output
-             result = await OpenAIProvider.complete(
-                 model_spec=ModelSpec(model_id="gpt-4.1"),
-                 messages=messages,
-                 output_type=FileOutput,
-             )
-
-             # Verify the structured output with file
-             assert isinstance(result.content, FileOutput)
-             assert result.content.description == "A PDF document"
-
-     async def test_structured_output(self, fake_config, mock_openai_client):
-         """Test that structured output is correctly handled."""
-         # Create test messages
-         messages = [
-             SystemMessage(content="You are a helpful assistant"),
-             UserMessage(content="Analyze this data"),
-         ]
-
-         # Test structured output with DummyOutput model
-         result = await OpenAIProvider.complete(
-             model_spec=ModelSpec(model_id="gpt-4.1"),
-             messages=messages,
-             output_type=DummyGenericOutput[DummyOutput],
-         )
-
-         # Verify the completion method used
-         assert mock_openai_client.beta.chat.completions.captured_kwargs is not None
-         captured_kwargs = mock_openai_client.beta.chat.completions.captured_kwargs
-
-         # Verify the output is of the correct type
-         assert isinstance(result.content, DummyGenericOutput)
-         assert result.content.value == DummyOutput(value="test value", score=95)
-         assert result.tool_calls is None
-
-         # Verify the response_format parameter was correctly set
-         assert "response_format" in captured_kwargs
-         assert captured_kwargs["response_format"] == DummyGenericOutput[DummyOutput]
-         # Verify we're sanitizing the name correctly as OpenAI expects
-         assert (
-             captured_kwargs["response_format"].__name__
-             == "DummyGenericOutput_DummyOutput_"
-         )
planar/db/.db.py.un~ DELETED
Binary file
planar/files/.s3.py.un~ DELETED
Binary file
planar/sse/.hub.py.un~ DELETED
Binary file
planar/sse/.model.py.un~ DELETED
Binary file
planar/sse/.proxy.py.un~ DELETED
Binary file
(The remaining deleted .un~ files listed above are likewise binary files with no diff content shown.)