planar 0.9.3__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. planar/ai/agent.py +2 -1
  2. planar/ai/agent_base.py +24 -5
  3. planar/ai/state.py +17 -0
  4. planar/app.py +18 -1
  5. planar/data/connection.py +108 -0
  6. planar/data/dataset.py +11 -104
  7. planar/data/utils.py +89 -0
  8. planar/db/alembic/env.py +25 -1
  9. planar/files/storage/azure_blob.py +1 -1
  10. planar/registry_items.py +2 -0
  11. planar/routers/dataset_router.py +213 -0
  12. planar/routers/info.py +79 -36
  13. planar/routers/models.py +1 -0
  14. planar/routers/workflow.py +2 -0
  15. planar/scaffold_templates/pyproject.toml.j2 +1 -1
  16. planar/security/authorization.py +31 -3
  17. planar/security/default_policies.cedar +25 -0
  18. planar/testing/fixtures.py +34 -1
  19. planar/testing/planar_test_client.py +1 -1
  20. planar/workflows/decorators.py +2 -1
  21. planar/workflows/wrappers.py +1 -0
  22. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/METADATA +9 -1
  23. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/RECORD +25 -72
  24. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/WHEEL +1 -1
  25. planar/ai/test_agent_serialization.py +0 -229
  26. planar/ai/test_agent_tool_step_display.py +0 -78
  27. planar/data/test_dataset.py +0 -354
  28. planar/files/storage/test_azure_blob.py +0 -435
  29. planar/files/storage/test_local_directory.py +0 -162
  30. planar/files/storage/test_s3.py +0 -299
  31. planar/files/test_files.py +0 -282
  32. planar/human/test_human.py +0 -385
  33. planar/logging/test_formatter.py +0 -327
  34. planar/modeling/mixins/test_auditable.py +0 -97
  35. planar/modeling/mixins/test_timestamp.py +0 -134
  36. planar/modeling/mixins/test_uuid_primary_key.py +0 -52
  37. planar/routers/test_agents_router.py +0 -174
  38. planar/routers/test_files_router.py +0 -49
  39. planar/routers/test_object_config_router.py +0 -367
  40. planar/routers/test_routes_security.py +0 -168
  41. planar/routers/test_rule_router.py +0 -470
  42. planar/routers/test_workflow_router.py +0 -539
  43. planar/rules/test_data/account_dormancy_management.json +0 -223
  44. planar/rules/test_data/airline_loyalty_points_calculator.json +0 -262
  45. planar/rules/test_data/applicant_risk_assessment.json +0 -435
  46. planar/rules/test_data/booking_fraud_detection.json +0 -407
  47. planar/rules/test_data/cellular_data_rollover_system.json +0 -258
  48. planar/rules/test_data/clinical_trial_eligibility_screener.json +0 -437
  49. planar/rules/test_data/customer_lifetime_value.json +0 -143
  50. planar/rules/test_data/import_duties_calculator.json +0 -289
  51. planar/rules/test_data/insurance_prior_authorization.json +0 -443
  52. planar/rules/test_data/online_check_in_eligibility_system.json +0 -254
  53. planar/rules/test_data/order_consolidation_system.json +0 -375
  54. planar/rules/test_data/portfolio_risk_monitor.json +0 -471
  55. planar/rules/test_data/supply_chain_risk.json +0 -253
  56. planar/rules/test_data/warehouse_cross_docking.json +0 -237
  57. planar/rules/test_rules.py +0 -1494
  58. planar/security/tests/test_auth_middleware.py +0 -162
  59. planar/security/tests/test_authorization_context.py +0 -78
  60. planar/security/tests/test_cedar_basics.py +0 -41
  61. planar/security/tests/test_cedar_policies.py +0 -158
  62. planar/security/tests/test_jwt_principal_context.py +0 -179
  63. planar/test_app.py +0 -142
  64. planar/test_cli.py +0 -394
  65. planar/test_config.py +0 -515
  66. planar/test_object_config.py +0 -527
  67. planar/test_object_registry.py +0 -14
  68. planar/test_sqlalchemy.py +0 -193
  69. planar/test_utils.py +0 -105
  70. planar/testing/test_memory_storage.py +0 -143
  71. planar/workflows/test_concurrency_detection.py +0 -120
  72. planar/workflows/test_lock_timeout.py +0 -140
  73. planar/workflows/test_serialization.py +0 -1203
  74. planar/workflows/test_suspend_deserialization.py +0 -231
  75. planar/workflows/test_workflow.py +0 -2005
  76. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/entry_points.txt +0 -0
@@ -1,539 +0,0 @@
1
- import asyncio
2
- from uuid import UUID, uuid4
3
-
4
- import pytest
5
- from pydantic import BaseModel, Field
6
- from sqlalchemy.ext.asyncio import AsyncEngine
7
- from sqlmodel import select
8
- from sqlmodel.ext.asyncio.session import AsyncSession
9
-
10
- from examples.expense_approval_workflow.models import (
11
- Expense,
12
- ExpenseStatus,
13
- )
14
- from planar import PlanarApp, get_session, sqlite_config
15
- from planar.db import new_session
16
- from planar.files.models import PlanarFile, PlanarFileMetadata
17
- from planar.files.storage.base import Storage
18
- from planar.testing.planar_test_client import PlanarTestClient
19
- from planar.testing.workflow_observer import WorkflowObserver
20
- from planar.workflows import step, workflow
21
- from planar.workflows.models import (
22
- StepStatus,
23
- StepType,
24
- Workflow,
25
- WorkflowStatus,
26
- WorkflowStep,
27
- )
28
-
29
- # ------ SETUP ------
30
-
31
-
32
- async def get_expense(expense_id: str) -> Expense:
33
- session = get_session()
34
- expense = (
35
- await session.exec(select(Expense).where(Expense.id == UUID(expense_id)))
36
- ).first()
37
- if not expense:
38
- raise ValueError(f"Expense {expense_id} not found")
39
- return expense
40
-
41
-
42
- @workflow(name="test_expense_approval_workflow")
43
- async def expense_approval_workflow(expense_id: str):
44
- """
45
- Main workflow that orchestrates the expense approval process
46
- """
47
- await validate_expense(expense_id)
48
-
49
- expense = await get_expense(expense_id)
50
-
51
- return expense
52
-
53
-
54
- @step()
55
- async def validate_expense(expense_id: str):
56
- expense = await get_expense(expense_id)
57
-
58
- if expense.status != ExpenseStatus.SUBMITTED:
59
- raise ValueError(f"Expense {expense_id} is not in SUBMITTED status")
60
-
61
-
62
- @step()
63
- async def dummy_step_1():
64
- pass
65
-
66
-
67
- @step()
68
- async def dummy_step_2():
69
- pass
70
-
71
-
72
- @step()
73
- async def dummy_step_3():
74
- pass
75
-
76
-
77
- @step(display_name="failing_step")
78
- async def failing_step():
79
- raise ValueError("This step is designed to fail")
80
-
81
-
82
- @workflow(name="successful_workflow_3_steps")
83
- async def successful_workflow_3_steps():
84
- await dummy_step_1()
85
- await dummy_step_2()
86
- await dummy_step_3()
87
-
88
-
89
- @workflow(name="failing_workflow_3_steps")
90
- async def failing_workflow_3_steps():
91
- await dummy_step_1()
92
- await failing_step()
93
- await dummy_step_3()
94
-
95
-
96
- class FileProcessingResult(BaseModel):
97
- """Result of processing a text file."""
98
-
99
- filename: str = Field(description="Original filename")
100
- character_count: int = Field(description="Number of characters in the file")
101
- content_preview: str = Field(description="Preview of the file content")
102
- file_id: UUID = Field(description="ID of the processed file")
103
-
104
-
105
- @workflow(name="test_file_processing_workflow")
106
- async def file_processing_workflow(file: PlanarFile):
107
- """
108
- Workflow that processes a text file and returns basic information about it.
109
- """
110
- file_content = await file.get_content()
111
- char_count = len(file_content)
112
- preview = file_content[:100].decode("utf-8")
113
-
114
- # Return structured result
115
- return FileProcessingResult(
116
- filename=file.filename,
117
- character_count=char_count,
118
- content_preview=preview,
119
- file_id=file.id,
120
- )
121
-
122
-
123
- # ------ TESTS ------
124
-
125
-
126
- @pytest.fixture(name="app")
127
- def app_fixture(tmp_db_path: str):
128
- app = PlanarApp(
129
- config=sqlite_config(tmp_db_path),
130
- title="Test Workflow Router API",
131
- description="API for testing workflow routers",
132
- )
133
- # Re-register workflows since ObjectRegistry gets reset before each test
134
- app.register_workflow(expense_approval_workflow)
135
- app.register_workflow(file_processing_workflow)
136
- app.register_workflow(successful_workflow_3_steps)
137
- app.register_workflow(failing_workflow_3_steps)
138
- yield app
139
-
140
-
141
- @pytest.fixture
142
- async def planar_file(storage: Storage) -> PlanarFile:
143
- """Create a PlanarFile instance for testing."""
144
- # Store test content
145
- test_data = b"This is a test file for the workflow router API test."
146
- mime_type = "text/plain"
147
-
148
- # Store the file and get a reference
149
- storage_ref = await storage.put_bytes(test_data, mime_type=mime_type)
150
-
151
- # Create and store the file metadata
152
- session = get_session()
153
- file_metadata = PlanarFileMetadata(
154
- filename="router_test_file.txt",
155
- content_type=mime_type,
156
- size=len(test_data),
157
- storage_ref=storage_ref,
158
- )
159
- session.add(file_metadata)
160
- await session.commit()
161
-
162
- # Return a PlanarFile reference (not the full metadata)
163
- return PlanarFile(
164
- id=file_metadata.id,
165
- filename=file_metadata.filename,
166
- content_type=file_metadata.content_type,
167
- size=file_metadata.size,
168
- )
169
-
170
-
171
- async def create_test_workflow_run(
172
- engine: AsyncEngine,
173
- workflow_name: str,
174
- status: WorkflowStatus,
175
- completed_steps: int = 0,
176
- total_steps: int = 0,
177
- error: dict | None = None,
178
- ) -> Workflow:
179
- """Helper to directly create a workflow run and its steps in the DB."""
180
- async with new_session(engine) as session:
181
- workflow = Workflow(
182
- function_name=workflow_name,
183
- status=status,
184
- error=error,
185
- args=[],
186
- kwargs={},
187
- )
188
- session.add(workflow)
189
-
190
- failed_steps = 0
191
- if status == WorkflowStatus.FAILED:
192
- failed_steps = 1
193
-
194
- running_steps = total_steps - completed_steps - failed_steps
195
-
196
- for i in range(completed_steps):
197
- step = WorkflowStep(
198
- workflow_id=workflow.id,
199
- step_id=i + 1,
200
- status=StepStatus.SUCCEEDED,
201
- function_name=f"dummy_step_{i + 1}",
202
- step_type=StepType.COMPUTE,
203
- args=[],
204
- kwargs={},
205
- )
206
- session.add(step)
207
-
208
- for i in range(failed_steps):
209
- step = WorkflowStep(
210
- workflow_id=workflow.id,
211
- step_id=completed_steps + i + 1,
212
- status=StepStatus.FAILED,
213
- function_name=f"dummy_step_{completed_steps + i + 1}",
214
- step_type=StepType.COMPUTE,
215
- args=[],
216
- kwargs={},
217
- )
218
- session.add(step)
219
-
220
- for i in range(running_steps):
221
- step = WorkflowStep(
222
- workflow_id=workflow.id,
223
- step_id=completed_steps + failed_steps + i + 1,
224
- status=StepStatus.RUNNING,
225
- function_name=f"dummy_step_{completed_steps + failed_steps + i + 1}",
226
- step_type=StepType.COMPUTE,
227
- args=[],
228
- kwargs={},
229
- )
230
- session.add(step)
231
-
232
- await session.commit()
233
- await session.refresh(workflow)
234
- return workflow
235
-
236
-
237
- async def test_list_workflows(client: PlanarTestClient):
238
- """
239
- Test that the workflow management router correctly lists registered workflows.
240
- """
241
- # Call the workflow management endpoint to list workflows
242
- response = await client.get("/planar/v1/workflows/")
243
-
244
- # Verify the response status code
245
- assert response.status_code == 200
246
-
247
- # Parse the response data
248
- data = response.json()
249
-
250
- # Verify that two workflows are returned
251
- assert data["total"] == 4
252
- assert len(data["items"]) == 4
253
-
254
- assert data["offset"] == 0
255
- assert data["limit"] == 10
256
-
257
- # Verify the expense workflow details
258
- expense_workflow = next(
259
- item
260
- for item in data["items"]
261
- if item["name"] == "test_expense_approval_workflow"
262
- )
263
- assert expense_workflow["fully_qualified_name"] == "test_expense_approval_workflow"
264
- assert (
265
- "Main workflow that orchestrates the expense approval process"
266
- in expense_workflow["description"]
267
- )
268
-
269
- # Verify the file workflow details
270
- file_workflow = next(
271
- item
272
- for item in data["items"]
273
- if item["name"] == "test_file_processing_workflow"
274
- )
275
- assert file_workflow["fully_qualified_name"] == "test_file_processing_workflow"
276
- assert "Workflow that processes a text file" in file_workflow["description"]
277
-
278
- # Verify that the workflows have input and output schemas
279
- assert "input_schema" in expense_workflow
280
- assert "output_schema" in expense_workflow
281
- assert "input_schema" in file_workflow
282
- assert "output_schema" in file_workflow
283
-
284
- # Verify that the file workflow input schema includes file parameter
285
- assert "file" in file_workflow["input_schema"]["properties"]
286
-
287
- # Verify run statistics are present
288
- assert "total_runs" in expense_workflow
289
- assert "run_statuses" in expense_workflow
290
- assert "total_runs" in file_workflow
291
- assert "run_statuses" in file_workflow
292
-
293
-
294
- async def test_list_workflow_runs_no_runs(client: PlanarTestClient):
295
- """Test listing runs for a workflow that has not been run."""
296
- response = await client.get(
297
- "/planar/v1/workflows/test_expense_approval_workflow/runs"
298
- )
299
- assert response.status_code == 200
300
- data = response.json()
301
- assert data["total"] == 0
302
- assert len(data["items"]) == 0
303
-
304
-
305
- async def test_list_workflow_runs_multiple_runs(
306
- client: PlanarTestClient, tmp_db_engine: AsyncEngine
307
- ):
308
- """Test listing runs for a workflow with a mix of succeeded and failed runs."""
309
- await asyncio.gather(
310
- # Run 1: Successful
311
- create_test_workflow_run(
312
- tmp_db_engine,
313
- workflow_name="test_expense_approval_workflow",
314
- status=WorkflowStatus.SUCCEEDED,
315
- completed_steps=1,
316
- total_steps=1,
317
- ),
318
- create_test_workflow_run(
319
- tmp_db_engine,
320
- workflow_name="test_expense_approval_workflow",
321
- status=WorkflowStatus.SUCCEEDED,
322
- completed_steps=1,
323
- total_steps=1,
324
- ),
325
- # Run 3: Failed
326
- create_test_workflow_run(
327
- tmp_db_engine,
328
- workflow_name="test_expense_approval_workflow",
329
- status=WorkflowStatus.FAILED,
330
- completed_steps=0,
331
- total_steps=1,
332
- error={"type": "ValueError", "message": "Forced failure for test"},
333
- ),
334
- )
335
-
336
- # List runs
337
- response = await client.get(
338
- "/planar/v1/workflows/test_expense_approval_workflow/runs"
339
- )
340
- assert response.status_code == 200
341
- data = response.json()
342
-
343
- assert data["total"] == 3
344
- assert len(data["items"]) == 3
345
-
346
- succeeded_runs = [r for r in data["items"] if r["status"] == "succeeded"]
347
- failed_runs = [r for r in data["items"] if r["status"] == "failed"]
348
-
349
- assert len(succeeded_runs) == 2
350
- assert len(failed_runs) == 1
351
-
352
- # Assert succeeded run details
353
- assert succeeded_runs[0]["step_stats"]["completed"] == 1
354
- assert succeeded_runs[1]["step_stats"]["completed"] == 1
355
- assert succeeded_runs[0]["step_stats"]["failed"] == 0
356
- assert succeeded_runs[1]["step_stats"]["failed"] == 0
357
- assert succeeded_runs[0]["step_stats"]["running"] == 0
358
- assert succeeded_runs[1]["step_stats"]["running"] == 0
359
-
360
- # Assert failed run details
361
- assert failed_runs[0]["step_stats"]["completed"] == 0
362
- assert failed_runs[0]["step_stats"]["failed"] == 1
363
- assert "ValueError" in failed_runs[0]["error"]["type"]
364
-
365
-
366
- async def test_get_workflow_run_succeeded(
367
- client: PlanarTestClient, tmp_db_engine: AsyncEngine
368
- ):
369
- """Test getting a single succeeded workflow run."""
370
- workflow = await create_test_workflow_run(
371
- tmp_db_engine,
372
- workflow_name="successful_workflow_3_steps",
373
- status=WorkflowStatus.SUCCEEDED,
374
- completed_steps=3,
375
- total_steps=3,
376
- )
377
-
378
- run_resp = await client.get(
379
- f"/planar/v1/workflows/successful_workflow_3_steps/runs/{workflow.id}"
380
- )
381
- assert run_resp.status_code == 200
382
- run_data = run_resp.json()
383
-
384
- assert run_data["id"] == str(workflow.id)
385
- assert run_data["status"] == "succeeded"
386
- assert run_data["step_stats"]["completed"] == 3
387
- assert run_data["step_stats"]["failed"] == 0
388
- assert run_data["step_stats"]["running"] == 0
389
- assert run_data["error"] is None
390
-
391
-
392
- async def test_get_workflow_run_failed(
393
- client: PlanarTestClient, tmp_db_engine: AsyncEngine
394
- ):
395
- """Test getting a single failed workflow run."""
396
- workflow = await create_test_workflow_run(
397
- tmp_db_engine,
398
- workflow_name="failing_workflow_3_steps",
399
- status=WorkflowStatus.FAILED,
400
- completed_steps=1,
401
- total_steps=2, # 1 succeeded, 1 failed
402
- error={
403
- "type": "ValueError",
404
- "message": "This step is designed to fail",
405
- },
406
- )
407
-
408
- run_resp = await client.get(
409
- f"/planar/v1/workflows/failing_workflow_3_steps/runs/{workflow.id}"
410
- )
411
- assert run_resp.status_code == 200
412
- run_data = run_resp.json()
413
-
414
- assert run_data["id"] == str(workflow.id)
415
- assert run_data["status"] == "failed"
416
- assert run_data["step_stats"]["completed"] == 1
417
- assert run_data["step_stats"]["failed"] == 1
418
- assert run_data["step_stats"]["running"] == 0
419
- assert run_data["error"] is not None
420
- assert "ValueError" in run_data["error"]["type"]
421
- assert "This step is designed to fail" in run_data["error"]["message"]
422
-
423
-
424
- async def test_get_workflow_run_pending_with_running_step(
425
- client: PlanarTestClient, tmp_db_engine: AsyncEngine
426
- ):
427
- """Test getting a pending workflow with completed and running steps."""
428
- workflow = await create_test_workflow_run(
429
- tmp_db_engine,
430
- workflow_name="pending_workflow_with_running_steps",
431
- status=WorkflowStatus.PENDING,
432
- completed_steps=3,
433
- total_steps=4, # 3 completed, 1 running
434
- )
435
-
436
- run_resp = await client.get(
437
- f"/planar/v1/workflows/pending_workflow_with_running_steps/runs/{workflow.id}"
438
- )
439
- assert run_resp.status_code == 200
440
- run_data = run_resp.json()
441
-
442
- assert run_data["id"] == str(workflow.id)
443
- assert run_data["status"] == "pending"
444
- assert run_data["step_stats"]["completed"] == 3
445
- assert run_data["step_stats"]["running"] == 1
446
- assert run_data["step_stats"]["failed"] == 0
447
- assert run_data["error"] is None
448
-
449
-
450
- async def test_start_file_workflow(
451
- client: PlanarTestClient,
452
- planar_file: PlanarFile,
453
- observer: WorkflowObserver,
454
- session: AsyncSession,
455
- ):
456
- """Test starting a workflow with a PlanarFile through the API."""
457
- # Prepare the request payload with the file reference
458
- payload = {
459
- "file": {
460
- "id": str(planar_file.id),
461
- "filename": planar_file.filename,
462
- "content_type": planar_file.content_type,
463
- "size": planar_file.size,
464
- }
465
- }
466
-
467
- response = await client.post(
468
- "/planar/v1/workflows/test_file_processing_workflow/start",
469
- json=payload,
470
- )
471
-
472
- # Verify the response status code
473
- assert response.status_code == 200
474
-
475
- data = response.json()
476
-
477
- assert "id" in data
478
- workflow_id = data["id"]
479
-
480
- await observer.wait("workflow-succeeded", workflow_id=workflow_id)
481
-
482
- workflow = await session.get(Workflow, UUID(workflow_id))
483
- await session.commit()
484
- assert workflow
485
-
486
- # Verify the workflow completed successfully
487
- assert workflow.status == WorkflowStatus.SUCCEEDED
488
-
489
- # Check the workflow result
490
- result = workflow.result
491
- assert result
492
- assert result["filename"] == planar_file.filename
493
- assert result["character_count"] == planar_file.size
494
- assert "This is a test file" in result["content_preview"]
495
- assert result["file_id"] == str(planar_file.id)
496
-
497
-
498
- async def test_get_compute_step(
499
- client: PlanarTestClient, session: AsyncSession, observer: WorkflowObserver
500
- ):
501
- """Ensure compute steps can be retrieved without metadata."""
502
-
503
- expense = Expense(
504
- title="Test Expense",
505
- amount=100.0,
506
- description="test",
507
- status=ExpenseStatus.SUBMITTED,
508
- submitter_id=uuid4(),
509
- category="misc",
510
- )
511
- session.add(expense)
512
- await session.commit()
513
-
514
- payload = {"expense_id": str(expense.id)}
515
- resp = await client.post(
516
- "/planar/v1/workflows/test_expense_approval_workflow/start",
517
- json=payload,
518
- )
519
- assert resp.status_code == 200
520
- wf_id = resp.json()["id"]
521
-
522
- await observer.wait("workflow-succeeded", workflow_id=wf_id)
523
-
524
- step = (
525
- await session.exec(
526
- select(WorkflowStep).where(WorkflowStep.workflow_id == UUID(wf_id))
527
- )
528
- ).first()
529
- await session.commit()
530
- assert step
531
- assert step.step_type == StepType.COMPUTE
532
-
533
- resp = await client.get(
534
- f"/planar/v1/workflows/test_expense_approval_workflow/runs/{wf_id}/steps/{step.step_id}"
535
- )
536
- assert resp.status_code == 200
537
- data = resp.json()
538
- assert "meta" in data
539
- assert data["meta"] is None