planar 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. planar/app.py +18 -6
  2. planar/routers/info.py +79 -36
  3. planar/scaffold_templates/pyproject.toml.j2 +1 -1
  4. planar/testing/fixtures.py +7 -4
  5. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/METADATA +9 -1
  6. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/RECORD +8 -60
  7. planar/ai/test_agent_serialization.py +0 -229
  8. planar/ai/test_agent_tool_step_display.py +0 -78
  9. planar/data/test_dataset.py +0 -358
  10. planar/files/storage/test_azure_blob.py +0 -435
  11. planar/files/storage/test_local_directory.py +0 -162
  12. planar/files/storage/test_s3.py +0 -299
  13. planar/files/test_files.py +0 -282
  14. planar/human/test_human.py +0 -385
  15. planar/logging/test_formatter.py +0 -327
  16. planar/modeling/mixins/test_auditable.py +0 -97
  17. planar/modeling/mixins/test_timestamp.py +0 -134
  18. planar/modeling/mixins/test_uuid_primary_key.py +0 -52
  19. planar/routers/test_agents_router.py +0 -174
  20. planar/routers/test_dataset_router.py +0 -429
  21. planar/routers/test_files_router.py +0 -49
  22. planar/routers/test_object_config_router.py +0 -367
  23. planar/routers/test_routes_security.py +0 -168
  24. planar/routers/test_rule_router.py +0 -470
  25. planar/routers/test_workflow_router.py +0 -564
  26. planar/rules/test_data/account_dormancy_management.json +0 -223
  27. planar/rules/test_data/airline_loyalty_points_calculator.json +0 -262
  28. planar/rules/test_data/applicant_risk_assessment.json +0 -435
  29. planar/rules/test_data/booking_fraud_detection.json +0 -407
  30. planar/rules/test_data/cellular_data_rollover_system.json +0 -258
  31. planar/rules/test_data/clinical_trial_eligibility_screener.json +0 -437
  32. planar/rules/test_data/customer_lifetime_value.json +0 -143
  33. planar/rules/test_data/import_duties_calculator.json +0 -289
  34. planar/rules/test_data/insurance_prior_authorization.json +0 -443
  35. planar/rules/test_data/online_check_in_eligibility_system.json +0 -254
  36. planar/rules/test_data/order_consolidation_system.json +0 -375
  37. planar/rules/test_data/portfolio_risk_monitor.json +0 -471
  38. planar/rules/test_data/supply_chain_risk.json +0 -253
  39. planar/rules/test_data/warehouse_cross_docking.json +0 -237
  40. planar/rules/test_rules.py +0 -1494
  41. planar/security/tests/test_auth_middleware.py +0 -162
  42. planar/security/tests/test_authorization_context.py +0 -78
  43. planar/security/tests/test_cedar_basics.py +0 -41
  44. planar/security/tests/test_cedar_policies.py +0 -158
  45. planar/security/tests/test_jwt_principal_context.py +0 -179
  46. planar/test_app.py +0 -142
  47. planar/test_cli.py +0 -394
  48. planar/test_config.py +0 -515
  49. planar/test_object_config.py +0 -527
  50. planar/test_object_registry.py +0 -14
  51. planar/test_sqlalchemy.py +0 -193
  52. planar/test_utils.py +0 -105
  53. planar/testing/test_memory_storage.py +0 -143
  54. planar/workflows/test_concurrency_detection.py +0 -120
  55. planar/workflows/test_lock_timeout.py +0 -140
  56. planar/workflows/test_serialization.py +0 -1203
  57. planar/workflows/test_suspend_deserialization.py +0 -231
  58. planar/workflows/test_workflow.py +0 -2005
  59. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/WHEEL +0 -0
  60. {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/entry_points.txt +0 -0
@@ -1,385 +0,0 @@
1
- from datetime import datetime, timedelta
2
- from unittest.mock import AsyncMock, patch
3
- from uuid import UUID, uuid4
4
-
5
- import pytest
6
- from pydantic import BaseModel, Field
7
- from sqlmodel import col, select
8
- from sqlmodel.ext.asyncio.session import AsyncSession
9
-
10
- from planar.human.human import (
11
- Human,
12
- HumanTask,
13
- HumanTaskStatus,
14
- Timeout,
15
- complete_human_task,
16
- )
17
- from planar.workflows import suspend
18
- from planar.workflows.decorators import workflow
19
- from planar.workflows.execution import execute
20
- from planar.workflows.models import StepType, Workflow, WorkflowStatus, WorkflowStep
21
- from planar.workflows.step_core import Suspend
22
-
23
-
24
- # Test data models
25
- class ExpenseRequest(BaseModel):
26
- """An expense request submitted by an employee."""
27
-
28
- request_id: str = Field(description="Unique identifier for the request")
29
- amount: float = Field(description="Amount requested in dollars")
30
- requester: str = Field(description="Name of the person requesting")
31
- department: str = Field(description="Department the requester belongs to")
32
- purpose: str = Field(description="Purpose of the expense")
33
-
34
-
35
- class ExpenseDecision(BaseModel):
36
- """A decision made by a human approver on an expense request."""
37
-
38
- approved: bool = Field(description="Whether the expense is approved")
39
- approved_amount: float = Field(
40
- description="Amount approved (may be less than requested)"
41
- )
42
- notes: str = Field(description="Explanation for decision", default="")
43
-
44
-
45
- class HumanResponse(BaseModel):
46
- response: str = Field(description="A message from the human")
47
-
48
-
49
- @pytest.fixture
50
- def expense_approval():
51
- """Returns a Human task definition for expense approval testing."""
52
- return Human(
53
- name="expense_approval",
54
- title="Expense Approval",
55
- description="Review expense request and approve, adjust, or reject",
56
- input_type=ExpenseRequest,
57
- output_type=ExpenseDecision,
58
- timeout=Timeout(timedelta(hours=24)),
59
- )
60
-
61
-
62
- @pytest.fixture
63
- def expense_approval_no_input():
64
- """Returns a Human task definition for expense approval testing."""
65
- return Human(
66
- name="expense_approval_no_input",
67
- title="Expense Approval (No Input)",
68
- description="Review expense request and approve, adjust, or reject",
69
- output_type=ExpenseDecision,
70
- timeout=Timeout(timedelta(hours=24)),
71
- )
72
-
73
-
74
- # Create a fixture for sample expense request data
75
- @pytest.fixture
76
- def expense_request_data():
77
- """Returns sample expense request data for testing."""
78
- return {
79
- "request_id": "EXP-123",
80
- "amount": 750.00,
81
- "requester": "Jane Smith",
82
- "department": "Engineering",
83
- "purpose": "Conference travel expenses",
84
- }
85
-
86
-
87
- async def test_human_initialization():
88
- """Test that the Human class initializes with correct parameters."""
89
- human = Human(
90
- name="test_human",
91
- title="Test Human Task",
92
- output_type=ExpenseDecision,
93
- description="Test description",
94
- input_type=ExpenseRequest,
95
- timeout=Timeout(timedelta(hours=1)),
96
- )
97
-
98
- # Verify initialization
99
- assert human.name == "test_human"
100
- assert human.title == "Test Human Task"
101
- assert human.description == "Test description"
102
- assert human.input_type == ExpenseRequest
103
- assert human.output_type == ExpenseDecision
104
- assert human.timeout is not None
105
- assert human.timeout.get_seconds() == 3600 # 1 hour in seconds
106
-
107
-
108
- async def test_human_initialization_validation():
109
- """Test that the Human class validates output_type is a Pydantic model."""
110
- with pytest.raises(ValueError, match="output_type must be a Pydantic model"):
111
- Human(
112
- name="test_human",
113
- title="Test Human Task",
114
- # Invalid: not a Pydantic model
115
- output_type=str, # type: ignore
116
- )
117
-
118
-
119
- async def test_human_initialization_validation_no_input(session: AsyncSession):
120
- human_no_input = Human(
121
- name="test_human",
122
- title="Test Human Task",
123
- output_type=HumanResponse,
124
- )
125
-
126
- @workflow()
127
- async def expense_workflow():
128
- result = await human_no_input(message="Hello, world!")
129
- return result.output.response
130
-
131
- wf = await expense_workflow.start()
132
- result = await execute(wf)
133
- assert isinstance(result, Suspend)
134
-
135
- steps = (
136
- await session.exec(select(WorkflowStep).order_by(col(WorkflowStep.step_id)))
137
- ).all()
138
- assert len(steps) == 3
139
- assert "Create Human Task" in [s.display_name for s in steps]
140
- assert "Wait for event" in [s.display_name for s in steps]
141
-
142
- assert StepType.HUMAN_IN_THE_LOOP in [s.step_type for s in steps]
143
- assert steps[0].args == [None, "Hello, world!", None]
144
-
145
- # Get HumanTask from database
146
- human_task = (await session.exec(select(HumanTask))).one()
147
- assert human_task is not None
148
- assert human_task.name == "test_human"
149
- assert human_task.title == "Test Human Task"
150
- assert human_task.output_schema == HumanResponse.model_json_schema()
151
- assert human_task.input_schema is None
152
- assert human_task.message == "Hello, world!"
153
-
154
- await complete_human_task(human_task.id, {"response": "Approved"})
155
- result = await execute(wf)
156
- assert result == "Approved"
157
-
158
-
159
- async def test_human_basic_workflow(
160
- session: AsyncSession, expense_approval, expense_request_data
161
- ):
162
- """Test that a Human step can be used in a workflow with input data."""
163
-
164
- @workflow()
165
- async def expense_workflow(request_data: dict):
166
- request = ExpenseRequest(**request_data)
167
- result = await expense_approval(request)
168
- # Add a suspend to ensure the workflow correctly
169
- # deserializes the result of human task on subsequent executions
170
- await suspend(interval=timedelta(seconds=0))
171
- return {
172
- "request_id": request.request_id,
173
- "approved": result.output.approved,
174
- "amount": result.output.approved_amount,
175
- "notes": result.output.notes,
176
- }
177
-
178
- # Start the workflow and run until it suspends
179
- wf = await expense_workflow.start(expense_request_data)
180
- result = await execute(wf)
181
- assert isinstance(result, Suspend)
182
-
183
- # Query workflows and steps from the database
184
- updated_wf = await session.get(Workflow, wf.id)
185
- assert updated_wf is not None
186
- assert updated_wf.status == WorkflowStatus.PENDING
187
- assert updated_wf.waiting_for_event is not None
188
- assert "human_task_completed:" in updated_wf.waiting_for_event
189
-
190
- steps = (
191
- await session.exec(select(WorkflowStep).order_by(col(WorkflowStep.step_id)))
192
- ).all()
193
- assert len(steps) == 4
194
- assert "expense_approval" in [s.display_name for s in steps]
195
-
196
- # Get HumanTask from database and verify fields
197
- human_task = (await session.exec(select(HumanTask))).one()
198
- assert human_task is not None
199
- assert human_task.name == "expense_approval"
200
- assert human_task.title == "Expense Approval"
201
- assert human_task.workflow_id == wf.id
202
- assert human_task.status == HumanTaskStatus.PENDING
203
- assert human_task.input_schema == ExpenseRequest.model_json_schema()
204
- assert human_task.input_data is not None
205
- assert human_task.input_data["request_id"] == "EXP-123"
206
- assert human_task.input_data["amount"] == 750.00
207
- assert human_task.message is None
208
- assert human_task.output_schema == ExpenseDecision.model_json_schema()
209
- assert human_task.output_data is None
210
-
211
- # Complete the human task
212
- output_data = {
213
- "approved": True,
214
- "approved_amount": 700.00,
215
- "notes": "Approved with reduced amount",
216
- }
217
- await complete_human_task(human_task.id, output_data, completed_by="test_user")
218
-
219
- # Check the human task was updated correctly
220
- await session.refresh(human_task)
221
- assert human_task.status == HumanTaskStatus.COMPLETED
222
- assert human_task.output_data == output_data
223
- assert human_task.completed_by == "test_user"
224
- assert human_task.completed_at is not None
225
-
226
- # Resume and complete the workflow
227
- result = await execute(wf)
228
- assert isinstance(result, Suspend)
229
- result = await execute(wf)
230
-
231
- # Verify workflow completed successfully with expected result
232
- updated_wf = await session.get(Workflow, wf.id)
233
- assert updated_wf is not None
234
- assert updated_wf.status == WorkflowStatus.SUCCEEDED
235
- expected_result = {
236
- "request_id": expense_request_data["request_id"],
237
- "approved": output_data["approved"],
238
- "amount": output_data["approved_amount"],
239
- "notes": output_data["notes"],
240
- }
241
- assert updated_wf.result == expected_result
242
-
243
-
244
- async def test_human_task_completion_validation(session: AsyncSession):
245
- """Test validation when completing a human task."""
246
-
247
- workflow = Workflow(
248
- function_name="test_workflow",
249
- status=WorkflowStatus.PENDING,
250
- args=[],
251
- kwargs={},
252
- )
253
- session.add(workflow)
254
- await session.commit()
255
- # Create a human task
256
- task = HumanTask(
257
- id=uuid4(),
258
- name="test_task",
259
- title="Test Task",
260
- workflow_id=workflow.id,
261
- workflow_name="test_workflow",
262
- output_schema=ExpenseDecision.model_json_schema(),
263
- status=HumanTaskStatus.PENDING,
264
- )
265
-
266
- session.add(task)
267
- await session.commit()
268
- task_id = task.id
269
-
270
- # Test completing a non-existent task
271
- with pytest.raises(ValueError, match="not found"):
272
- await complete_human_task(UUID("00000000-0000-0000-0000-000000000000"), {})
273
-
274
- # Test completing a task that's not in pending state
275
- task.status = HumanTaskStatus.CANCELLED
276
- session.add(task)
277
- await session.commit()
278
-
279
- with pytest.raises(ValueError, match="not pending"):
280
- await complete_human_task(task_id, {})
281
-
282
- # Reset to pending for the next test
283
- task.status = HumanTaskStatus.PENDING
284
- session.add(task)
285
- await session.commit()
286
-
287
- # Mock emit_event for normal completion test
288
- with patch("planar.workflows.events.emit_event", AsyncMock()):
289
- # Complete with valid data
290
- output_data = {
291
- "approved": True,
292
- "approved_amount": 150.00,
293
- "notes": "Approved",
294
- }
295
-
296
- await complete_human_task(task_id, output_data)
297
-
298
- # Verify task state
299
- await session.refresh(task)
300
- assert task.status == HumanTaskStatus.COMPLETED
301
- assert task.output_data == output_data
302
-
303
-
304
- async def test_timeout_class():
305
- """Test the Timeout helper class functionality."""
306
- # Test with various durations
307
- one_hour = Timeout(timedelta(hours=1))
308
- assert one_hour.get_seconds() == 3600
309
- assert one_hour.get_timedelta() == timedelta(hours=1)
310
-
311
- five_minutes = Timeout(timedelta(minutes=5))
312
- assert five_minutes.get_seconds() == 300
313
- assert five_minutes.get_timedelta() == timedelta(minutes=5)
314
-
315
-
316
- async def test_human_task_with_suggested_data(session: AsyncSession):
317
- """Test that a Human step can be used with suggested_data."""
318
- human_with_suggestions = Human(
319
- name="test_human_suggestions",
320
- title="Test Human Task with Suggestions",
321
- output_type=ExpenseDecision,
322
- )
323
-
324
- @workflow()
325
- async def expense_workflow():
326
- result = await human_with_suggestions(
327
- message="Please review the expense",
328
- suggested_data=ExpenseDecision(
329
- approved=True,
330
- approved_amount=500.0,
331
- notes="Pre-approved amount",
332
- ),
333
- )
334
- return result.output.notes
335
-
336
- wf = await expense_workflow.start()
337
- result = await execute(wf)
338
- assert isinstance(result, Suspend)
339
-
340
- # Get HumanTask from database and verify suggested_data is stored
341
- human_task = (await session.exec(select(HumanTask))).one()
342
- assert human_task is not None
343
- assert human_task.name == "test_human_suggestions"
344
- assert human_task.suggested_data is not None
345
- assert human_task.suggested_data["approved"] is True
346
- assert human_task.suggested_data["approved_amount"] == 500.0
347
- assert human_task.suggested_data["notes"] == "Pre-approved amount"
348
-
349
- # Complete the human task
350
- await complete_human_task(
351
- human_task.id,
352
- {"approved": False, "approved_amount": 0.0, "notes": "Rejected after review"},
353
- )
354
-
355
- result = await execute(wf)
356
- assert result == "Rejected after review"
357
-
358
-
359
- async def test_deadline_calculation():
360
- """Test that deadlines are calculated correctly based on timeout."""
361
- # Create a human task with a deadline
362
- with patch("planar.human.human.utc_now") as mock_datetime:
363
- # Mock the current time
364
- now = datetime(2025, 1, 1, 12, 0, 0)
365
- mock_datetime.return_value = now
366
-
367
- # Calculate deadlines with different timeouts
368
- one_hour_timeout = Human(
369
- name="one_hour",
370
- title="One Hour Timeout",
371
- output_type=ExpenseDecision,
372
- timeout=Timeout(timedelta(hours=1)),
373
- )
374
-
375
- deadline = one_hour_timeout._calculate_deadline()
376
- assert deadline == datetime(2025, 1, 1, 13, 0, 0)
377
-
378
- # Test with no timeout
379
- no_timeout = Human(
380
- name="no_timeout",
381
- title="No Timeout",
382
- output_type=ExpenseDecision,
383
- )
384
-
385
- assert no_timeout._calculate_deadline() is None