planar 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. planar/app.py +18 -6
  2. planar/routers/info.py +79 -36
  3. planar/scaffold_templates/pyproject.toml.j2 +1 -1
  4. planar/testing/fixtures.py +7 -4
  5. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/METADATA +9 -1
  6. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/RECORD +8 -60
  7. planar/ai/test_agent_serialization.py +0 -229
  8. planar/ai/test_agent_tool_step_display.py +0 -78
  9. planar/data/test_dataset.py +0 -358
  10. planar/files/storage/test_azure_blob.py +0 -435
  11. planar/files/storage/test_local_directory.py +0 -162
  12. planar/files/storage/test_s3.py +0 -299
  13. planar/files/test_files.py +0 -282
  14. planar/human/test_human.py +0 -385
  15. planar/logging/test_formatter.py +0 -327
  16. planar/modeling/mixins/test_auditable.py +0 -97
  17. planar/modeling/mixins/test_timestamp.py +0 -134
  18. planar/modeling/mixins/test_uuid_primary_key.py +0 -52
  19. planar/routers/test_agents_router.py +0 -174
  20. planar/routers/test_dataset_router.py +0 -429
  21. planar/routers/test_files_router.py +0 -49
  22. planar/routers/test_object_config_router.py +0 -367
  23. planar/routers/test_routes_security.py +0 -168
  24. planar/routers/test_rule_router.py +0 -470
  25. planar/routers/test_workflow_router.py +0 -564
  26. planar/rules/test_data/account_dormancy_management.json +0 -223
  27. planar/rules/test_data/airline_loyalty_points_calculator.json +0 -262
  28. planar/rules/test_data/applicant_risk_assessment.json +0 -435
  29. planar/rules/test_data/booking_fraud_detection.json +0 -407
  30. planar/rules/test_data/cellular_data_rollover_system.json +0 -258
  31. planar/rules/test_data/clinical_trial_eligibility_screener.json +0 -437
  32. planar/rules/test_data/customer_lifetime_value.json +0 -143
  33. planar/rules/test_data/import_duties_calculator.json +0 -289
  34. planar/rules/test_data/insurance_prior_authorization.json +0 -443
  35. planar/rules/test_data/online_check_in_eligibility_system.json +0 -254
  36. planar/rules/test_data/order_consolidation_system.json +0 -375
  37. planar/rules/test_data/portfolio_risk_monitor.json +0 -471
  38. planar/rules/test_data/supply_chain_risk.json +0 -253
  39. planar/rules/test_data/warehouse_cross_docking.json +0 -237
  40. planar/rules/test_rules.py +0 -1494
  41. planar/security/tests/test_auth_middleware.py +0 -162
  42. planar/security/tests/test_authorization_context.py +0 -78
  43. planar/security/tests/test_cedar_basics.py +0 -41
  44. planar/security/tests/test_cedar_policies.py +0 -158
  45. planar/security/tests/test_jwt_principal_context.py +0 -179
  46. planar/test_app.py +0 -142
  47. planar/test_cli.py +0 -394
  48. planar/test_config.py +0 -515
  49. planar/test_object_config.py +0 -527
  50. planar/test_object_registry.py +0 -14
  51. planar/test_sqlalchemy.py +0 -193
  52. planar/test_utils.py +0 -105
  53. planar/testing/test_memory_storage.py +0 -143
  54. planar/workflows/test_concurrency_detection.py +0 -120
  55. planar/workflows/test_lock_timeout.py +0 -140
  56. planar/workflows/test_serialization.py +0 -1203
  57. planar/workflows/test_suspend_deserialization.py +0 -231
  58. planar/workflows/test_workflow.py +0 -2005
  59. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/WHEEL +0 -0
  60. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/entry_points.txt +0 -0
planar/ai/test_agent_tool_step_display.py
@@ -1,78 +0,0 @@
- import os
- from unittest.mock import patch
-
- from sqlmodel import col, select
-
- from planar.ai import models as m
- from planar.ai.agent import Agent
- from planar.ai.pydantic_ai import ModelRunResponse
- from planar.workflows.decorators import workflow
- from planar.workflows.execution import execute
- from planar.workflows.models import StepType, WorkflowStep
-
-
- async def test_agent_tool_step_has_display_name(session):
-     async def add(a: int, b: int) -> int:
-         return a + b
-
-     # Prepare mocked model responses: first triggers a tool call, then returns final content
-     first = ModelRunResponse[str](
-         response=m.CompletionResponse[str](
-             content=None,
-             tool_calls=[
-                 m.ToolCall(id="call_1", name="add", arguments={"a": 2, "b": 3})
-             ],
-             text_content="",
-             reasoning_content=None,
-         ),
-         extra_turns_used=0,
-     )
-     second = ModelRunResponse[str](
-         response=m.CompletionResponse[str](
-             content="5",
-             tool_calls=[],
-             text_content="5",
-             reasoning_content=None,
-         ),
-         extra_turns_used=0,
-     )
-
-     responses = [first, second]
-
-     async def fake_model_run(*args, **kwargs):
-         assert responses, "No more fake responses configured"
-         return responses.pop(0)
-
-     # Patch the model run to avoid any network/model dependency
-     # Use unittest.mock.patch context managers to ensure cleanup
-     with (
-         patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}, clear=False),
-         patch("planar.ai.agent.model_run", side_effect=fake_model_run),
-     ):
-         agent = Agent[str, str, None](
-             name="test_agent",
-             system_prompt="",
-             user_prompt="",
-             model="openai:gpt-4o-mini",
-             tools=[add],
-             max_turns=3,
-         )
-
-         @workflow()
-         async def run_agent():
-             result = await agent("please add")
-             return result.output
-
-         wf = await run_agent.start()
-         result = await execute(wf)
-         assert result == "5"
-
-         steps = (
-             await session.exec(select(WorkflowStep).order_by(col(WorkflowStep.step_id)))
-         ).all()
-         # Ensure there is a tool call step with the display name set to the tool name
-         tool_steps = [s for s in steps if s.step_type == StepType.TOOL_CALL]
-         assert tool_steps, "Expected at least one TOOL_CALL step recorded"
-         assert any(s.display_name == "add" for s in tool_steps), (
-             f"Expected a TOOL_CALL step with display_name 'add', got {[s.display_name for s in tool_steps]}"
-         )
planar/data/test_dataset.py
@@ -1,358 +0,0 @@
- """Tests for PlanarDataset."""
-
- import polars as pl
- import pyarrow as pa
- import pytest
- from ibis import literal
-
- from planar.data import PlanarDataset
- from planar.data.exceptions import (
-     DataError,
-     DatasetAlreadyExistsError,
-     DatasetNotFoundError,
- )
- from planar.workflows import step
-
-
- @pytest.fixture(name="app")
- def app_fixture(app_with_data):
-     """Use the shared app_with_data fixture as 'app' for this test module."""
-     return app_with_data
-
-
- async def test_dataset_create(client):
-     """Test creating a dataset reference."""
-     dataset = await PlanarDataset.create("test_table")
-     assert dataset.name == "test_table"
-
-     # Dataset reference exists but table isn't created until first write
-     assert not await dataset.exists()
-
-     # Write some data to actually create the table
-     df = pl.DataFrame({"id": [1], "name": ["test"]})
-     await dataset.write(df, mode="overwrite")
-
-     # Now it should exist
-     assert await dataset.exists()
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_dataset_create_if_not_exists(client):
-     """Test creating a dataset with if_not_exists behavior."""
-     # Create dataset and write data to make it exist
-     dataset1 = await PlanarDataset.create("test_table")
-     df = pl.DataFrame({"id": [1], "name": ["test"]})
-     await dataset1.write(df, mode="overwrite")
-
-     # Create again with if_not_exists=True (default) - should not raise
-     dataset2 = await PlanarDataset.create("test_table", if_not_exists=True)
-     assert dataset2.name == dataset1.name
-
-     # Create again with if_not_exists=False - should raise
-     with pytest.raises(DatasetAlreadyExistsError):
-         await PlanarDataset.create("test_table", if_not_exists=False)
-
-     # Cleanup
-     await dataset1.delete()
-
-
- async def test_dataset_write_and_read_polars(client):
-     """Test writing and reading data with Polars."""
-     dataset = await PlanarDataset.create("test_polars")
-
-     # Create test data
-     df = pl.DataFrame(
-         {
-             "id": [1, 2, 3],
-             "name": ["Alice", "Bob", "Charlie"],
-             "amount": [100.5, 200.0, 150.75],
-         }
-     )
-
-     # Write data
-     await dataset.write(df, mode="overwrite")
-
-     # Read data back
-     result = await dataset.to_polars()
-
-     # Verify
-     assert result.shape == df.shape
-     assert set(result.columns) == set(df.columns)
-     assert result["id"].to_list() == [1, 2, 3]
-     assert result["name"].to_list() == ["Alice", "Bob", "Charlie"]
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_dataset_write_and_read_pyarrow(client):
-     """Test writing and reading data with PyArrow."""
-     dataset = await PlanarDataset.create("test_pyarrow")
-
-     # Create test data
-     table = pa.table(
-         {
-             "id": [1, 2, 3],
-             "name": ["Alice", "Bob", "Charlie"],
-             "amount": [100.5, 200.0, 150.75],
-         }
-     )
-
-     # Write data
-     await dataset.write(table, mode="overwrite")
-
-     # Read data back
-     result = await dataset.to_pyarrow()
-
-     # Verify
-     assert result.num_rows == table.num_rows
-     assert result.column_names == table.column_names
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_dataset_write_and_read_lazyframe(client):
-     """Test writing and reading data with Polars LazyFrame."""
-     dataset = await PlanarDataset.create("test_lazyframe")
-
-     # Create test data as LazyFrame with computed columns
-     lf = pl.LazyFrame(
-         {
-             "id": range(5),
-             "name": ["Alice", "Bob", "Charlie", "David", "Eve"],
-             "value": [10.5, 20.3, 30.1, 40.7, 50.9],
-         }
-     ).with_columns(
-         # Use native polars expressions for efficiency
-         pl.format("user_{}", pl.col("id")).alias("username"),
-         pl.col("value").round(1).alias("rounded_value"),
-     )
-
-     # Write LazyFrame data
-     await dataset.write(lf, mode="overwrite")
-
-     # Read data back
-     result = await dataset.to_polars()
-
-     # Verify shape and columns
-     assert result.shape == (5, 5)
-     assert set(result.columns) == {"id", "name", "value", "username", "rounded_value"}
-
-     # Verify the computed columns work correctly
-     assert result["username"].to_list() == [
-         "user_0",
-         "user_1",
-         "user_2",
-         "user_3",
-         "user_4",
-     ]
-     assert result["rounded_value"].to_list() == [10.5, 20.3, 30.1, 40.7, 50.9]
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_dataset_append_mode(client):
-     """Test appending data to a dataset."""
-     dataset = await PlanarDataset.create("test_append")
-
-     # Write initial data
-     df1 = pl.DataFrame({"id": [1, 2], "value": ["a", "b"]})
-     await dataset.write(df1, mode="overwrite")
-
-     # Append more data
-     df2 = pl.DataFrame({"id": [3, 4], "value": ["c", "d"]})
-     await dataset.write(df2, mode="append")
-
-     result = await dataset.to_polars()
-
-     # Verify
-     assert len(result) == 4
-     assert set(result["id"].to_list()) == {1, 2, 3, 4}
-     assert set(result["value"].to_list()) == {"a", "b", "c", "d"}
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_dataset_overwrite_replaces_existing(client):
-     """Overwrite should replace existing rows completely."""
-     dataset = await PlanarDataset.create("test_overwrite")
-
-     df1 = pl.DataFrame({"id": [1, 2], "value": ["a", "b"]})
-     await dataset.write(df1, mode="overwrite")
-     result1 = await dataset.to_polars()
-     assert result1.shape == (2, 2)
-
-     df2 = pl.DataFrame({"id": [3], "value": ["c"]})
-     await dataset.write(df2, mode="overwrite")
-     result2 = await dataset.to_polars()
-     assert result2.shape == (1, 2)
-     assert result2["id"].to_list() == [3]
-     assert result2["value"].to_list() == ["c"]
-
-     await dataset.delete()
-
-
- async def test_dataset_read_with_filter(client):
-     """Test reading data with Ibis filtering."""
-     dataset = await PlanarDataset.create("test_filter")
-
-     # Write test data
-     df = pl.DataFrame({"id": range(1, 11), "value": range(10, 101, 10)})
-     await dataset.write(df, mode="overwrite")
-
-     table = await dataset.read()
-     filtered_table = table.filter(table.value > literal(50))
-     filtered_df = filtered_table.to_polars()
-
-     assert len(filtered_df) == 5
-     assert all(v > 50 for v in filtered_df["value"].to_list())
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_dataset_read_with_columns_and_limit(client):
-     """Test reading specific columns with limit."""
-     dataset = await PlanarDataset.create("test_select")
-
-     # Write test data
-     df = pl.DataFrame(
-         {
-             "id": range(1, 11),
-             "name": [f"user_{i}" for i in range(1, 11)],
-             "value": range(10, 101, 10),
-         }
-     )
-     await dataset.write(df, mode="overwrite")
-
-     # Read specific columns with limit
-     table = await dataset.read(columns=["id", "name"], limit=5)
-     result_df = table.to_polars()
-
-     # Verify
-     assert len(result_df) == 5
-     assert set(result_df.columns) == {"id", "name"}
-     assert "value" not in result_df.columns
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_dataset_not_found(client):
-     """Test reading from non-existent dataset."""
-     dataset = PlanarDataset(name="nonexistent")
-
-     # Check exists returns False
-     assert not await dataset.exists()
-
-     # Try to read - should raise
-     with pytest.raises(DatasetNotFoundError):
-         await dataset.read()
-
-
- async def test_dataset_delete(client):
-     """Test deleting a dataset."""
-     dataset = await PlanarDataset.create("test_delete")
-
-     # Write some data
-     df = pl.DataFrame({"id": [1, 2, 3]})
-     await dataset.write(df)
-
-     # Verify it exists
-     assert await dataset.exists()
-
-     # Delete it
-     await dataset.delete()
-
-     # Verify it's gone
-     assert not await dataset.exists()
-
-
- async def test_dataset_write_list_of_dicts(client):
-     """Write list-of-dicts input and read back with Polars."""
-     dataset = await PlanarDataset.create("test_list_of_dicts")
-
-     rows = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]
-     await dataset.write(rows, mode="overwrite")
-
-     result = await dataset.to_polars()
-     assert set(result.columns) == {"id", "name"}
-     assert sorted(result["id"].to_list()) == [1, 2]
-
-     await dataset.delete()
-
-
- async def test_dataset_write_dict_of_lists(client):
-     """Write dict-of-lists input and read back with Polars."""
-     dataset = await PlanarDataset.create("test_dict_of_lists")
-
-     data = {"id": [1, 2], "name": ["a", "b"]}
-     await dataset.write(data, mode="overwrite")
-
-     result = await dataset.to_polars()
-     assert result.shape == (2, 2)
-     assert set(result["name"].to_list()) == {"a", "b"}
-
-     await dataset.delete()
-
-
- async def test_dataset_workflow_serialization(client):
-     """Test that PlanarDataset can be used as workflow input/output."""
-
-     @step()
-     async def create_data() -> PlanarDataset:
-         """Create a dataset with sample data."""
-         dataset = await PlanarDataset.create("workflow_data")
-
-         df = pl.DataFrame(
-             {"product": ["A", "B", "C", "D"], "sales": [100, 200, 150, 300]}
-         )
-         await dataset.write(df, mode="overwrite")
-
-         return dataset
-
-     @step()
-     async def analyze_data(dataset: PlanarDataset) -> float:
-         """Analyze the dataset and return total sales."""
-         df = await dataset.to_polars()
-         return float(df["sales"].sum())
-
-     # Test basic workflow functionality without API
-     dataset = await create_data()
-     total = await analyze_data(dataset)
-
-     # Verify results
-     assert total == 750.0  # Sum of [100, 200, 150, 300]
-
-     # Cleanup
-     await dataset.delete()
-
-
- async def test_no_data_config_error(client):
-     """Test error when data config is not set."""
-     # Remove data config
-     client.app.config.data = None
-
-     dataset = PlanarDataset(name="test")
-
-     with pytest.raises(DataError, match="Data configuration not found"):
-         await dataset.exists()
-
-
- async def test_write_with_invalid_input_raises(client):
-     """Unknown input types to write() should raise a DataError."""
-
-     class Foo:
-         pass
-
-     dataset = await PlanarDataset.create("test_invalid_input")
-
-     with pytest.raises(DataError):
-         await dataset.write(Foo(), mode="overwrite")  # type: ignore
-
-     await dataset.delete()