planar-0.10.0-py3-none-any.whl → planar-0.11.0-py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- planar/app.py +18 -6
- planar/routers/info.py +79 -36
- planar/scaffold_templates/pyproject.toml.j2 +1 -1
- planar/testing/fixtures.py +7 -4
- {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/METADATA +9 -1
- {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/RECORD +8 -60
- planar/ai/test_agent_serialization.py +0 -229
- planar/ai/test_agent_tool_step_display.py +0 -78
- planar/data/test_dataset.py +0 -358
- planar/files/storage/test_azure_blob.py +0 -435
- planar/files/storage/test_local_directory.py +0 -162
- planar/files/storage/test_s3.py +0 -299
- planar/files/test_files.py +0 -282
- planar/human/test_human.py +0 -385
- planar/logging/test_formatter.py +0 -327
- planar/modeling/mixins/test_auditable.py +0 -97
- planar/modeling/mixins/test_timestamp.py +0 -134
- planar/modeling/mixins/test_uuid_primary_key.py +0 -52
- planar/routers/test_agents_router.py +0 -174
- planar/routers/test_dataset_router.py +0 -429
- planar/routers/test_files_router.py +0 -49
- planar/routers/test_object_config_router.py +0 -367
- planar/routers/test_routes_security.py +0 -168
- planar/routers/test_rule_router.py +0 -470
- planar/routers/test_workflow_router.py +0 -564
- planar/rules/test_data/account_dormancy_management.json +0 -223
- planar/rules/test_data/airline_loyalty_points_calculator.json +0 -262
- planar/rules/test_data/applicant_risk_assessment.json +0 -435
- planar/rules/test_data/booking_fraud_detection.json +0 -407
- planar/rules/test_data/cellular_data_rollover_system.json +0 -258
- planar/rules/test_data/clinical_trial_eligibility_screener.json +0 -437
- planar/rules/test_data/customer_lifetime_value.json +0 -143
- planar/rules/test_data/import_duties_calculator.json +0 -289
- planar/rules/test_data/insurance_prior_authorization.json +0 -443
- planar/rules/test_data/online_check_in_eligibility_system.json +0 -254
- planar/rules/test_data/order_consolidation_system.json +0 -375
- planar/rules/test_data/portfolio_risk_monitor.json +0 -471
- planar/rules/test_data/supply_chain_risk.json +0 -253
- planar/rules/test_data/warehouse_cross_docking.json +0 -237
- planar/rules/test_rules.py +0 -1494
- planar/security/tests/test_auth_middleware.py +0 -162
- planar/security/tests/test_authorization_context.py +0 -78
- planar/security/tests/test_cedar_basics.py +0 -41
- planar/security/tests/test_cedar_policies.py +0 -158
- planar/security/tests/test_jwt_principal_context.py +0 -179
- planar/test_app.py +0 -142
- planar/test_cli.py +0 -394
- planar/test_config.py +0 -515
- planar/test_object_config.py +0 -527
- planar/test_object_registry.py +0 -14
- planar/test_sqlalchemy.py +0 -193
- planar/test_utils.py +0 -105
- planar/testing/test_memory_storage.py +0 -143
- planar/workflows/test_concurrency_detection.py +0 -120
- planar/workflows/test_lock_timeout.py +0 -140
- planar/workflows/test_serialization.py +0 -1203
- planar/workflows/test_suspend_deserialization.py +0 -231
- planar/workflows/test_workflow.py +0 -2005
- {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/WHEEL +0 -0
- {planar-0.10.0.dist-info → planar-0.11.0.dist-info}/entry_points.txt +0 -0
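
Taken together, the file list shows that 0.11.0 is primarily a packaging cleanup: the bundled test modules (test_*.py) and the planar/rules/test_data JSON fixtures are dropped from the wheel, which accounts for the +8 -60 churn in RECORD. If you want to verify that a wheel build no longer ships test artifacts, a minimal sketch using only the Python standard library is shown below; the wheel path is a placeholder for a locally downloaded file, not something taken from this diff.

import zipfile

# Placeholder path: point this at a locally downloaded wheel.
wheel_path = "planar-0.11.0-py3-none-any.whl"

with zipfile.ZipFile(wheel_path) as wheel:
    # Flag anything that looks like a bundled test module or test fixture.
    leftovers = [
        name
        for name in wheel.namelist()
        if name.rsplit("/", 1)[-1].startswith("test_") or "/test_data/" in name
    ]

print(f"{len(leftovers)} test artifact(s) still bundled")
for name in sorted(leftovers):
    print(" ", name)

The deleted file below is planar/routers/test_workflow_router.py, the -564 entry in the list above.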
planar/routers/test_workflow_router.py
@@ -1,564 +0,0 @@
-import asyncio
-from uuid import UUID, uuid4
-
-import pytest
-from pydantic import BaseModel, Field
-from sqlalchemy.ext.asyncio import AsyncEngine
-from sqlmodel import select
-from sqlmodel.ext.asyncio.session import AsyncSession
-
-from examples.expense_approval_workflow.models import (
-    Expense,
-    ExpenseStatus,
-)
-from planar import PlanarApp, get_session, sqlite_config
-from planar.db import new_session
-from planar.files.models import PlanarFile, PlanarFileMetadata
-from planar.files.storage.base import Storage
-from planar.testing.planar_test_client import PlanarTestClient
-from planar.testing.workflow_observer import WorkflowObserver
-from planar.workflows import step, workflow
-from planar.workflows.models import (
-    StepStatus,
-    StepType,
-    Workflow,
-    WorkflowStatus,
-    WorkflowStep,
-)
-
-# ------ SETUP ------
-
-
-async def get_expense(expense_id: str) -> Expense:
-    session = get_session()
-    expense = (
-        await session.exec(select(Expense).where(Expense.id == UUID(expense_id)))
-    ).first()
-    if not expense:
-        raise ValueError(f"Expense {expense_id} not found")
-    return expense
-
-
-@workflow(name="test_expense_approval_workflow")
-async def expense_approval_workflow(expense_id: str):
-    """
-    Main workflow that orchestrates the expense approval process
-    """
-    await validate_expense(expense_id)
-
-    expense = await get_expense(expense_id)
-
-    return expense
-
-
-@step()
-async def validate_expense(expense_id: str):
-    expense = await get_expense(expense_id)
-
-    if expense.status != ExpenseStatus.SUBMITTED:
-        raise ValueError(f"Expense {expense_id} is not in SUBMITTED status")
-
-
-@step()
-async def dummy_step_1():
-    pass
-
-
-@step()
-async def dummy_step_2():
-    pass
-
-
-@step()
-async def dummy_step_3():
-    pass
-
-
-@step(display_name="failing_step")
-async def failing_step():
-    raise ValueError("This step is designed to fail")
-
-
-@workflow(name="successful_workflow_3_steps")
-async def successful_workflow_3_steps():
-    await dummy_step_1()
-    await dummy_step_2()
-    await dummy_step_3()
-
-
-@workflow(name="failing_workflow_3_steps")
-async def failing_workflow_3_steps():
-    await dummy_step_1()
-    await failing_step()
-    await dummy_step_3()
-
-
-class FileProcessingResult(BaseModel):
-    """Result of processing a text file."""
-
-    filename: str = Field(description="Original filename")
-    character_count: int = Field(description="Number of characters in the file")
-    content_preview: str = Field(description="Preview of the file content")
-    file_id: UUID = Field(description="ID of the processed file")
-
-
-@workflow(name="test_file_processing_workflow", is_interactive=False)
-async def file_processing_workflow(file: PlanarFile):
-    """
-    Workflow that processes a text file and returns basic information about it.
-    """
-    file_content = await file.get_content()
-    char_count = len(file_content)
-    preview = file_content[:100].decode("utf-8")
-
-    # Return structured result
-    return FileProcessingResult(
-        filename=file.filename,
-        character_count=char_count,
-        content_preview=preview,
-        file_id=file.id,
-    )
-
-
-# ------ TESTS ------
-
-
-@pytest.fixture(name="app")
-def app_fixture(tmp_db_path: str):
-    app = PlanarApp(
-        config=sqlite_config(tmp_db_path),
-        title="Test Workflow Router API",
-        description="API for testing workflow routers",
-    )
-    # Re-register workflows since ObjectRegistry gets reset before each test
-    app.register_workflow(expense_approval_workflow)
-    app.register_workflow(file_processing_workflow)
-    app.register_workflow(successful_workflow_3_steps)
-    app.register_workflow(failing_workflow_3_steps)
-    yield app
-
-
-@pytest.fixture
-async def planar_file(storage: Storage) -> PlanarFile:
-    """Create a PlanarFile instance for testing."""
-    # Store test content
-    test_data = b"This is a test file for the workflow router API test."
-    mime_type = "text/plain"
-
-    # Store the file and get a reference
-    storage_ref = await storage.put_bytes(test_data, mime_type=mime_type)
-
-    # Create and store the file metadata
-    session = get_session()
-    file_metadata = PlanarFileMetadata(
-        filename="router_test_file.txt",
-        content_type=mime_type,
-        size=len(test_data),
-        storage_ref=storage_ref,
-    )
-    session.add(file_metadata)
-    await session.commit()
-
-    # Return a PlanarFile reference (not the full metadata)
-    return PlanarFile(
-        id=file_metadata.id,
-        filename=file_metadata.filename,
-        content_type=file_metadata.content_type,
-        size=file_metadata.size,
-    )
-
-
-async def create_test_workflow_run(
-    engine: AsyncEngine,
-    workflow_name: str,
-    status: WorkflowStatus,
-    completed_steps: int = 0,
-    total_steps: int = 0,
-    error: dict | None = None,
-) -> Workflow:
-    """Helper to directly create a workflow run and its steps in the DB."""
-    async with new_session(engine) as session:
-        workflow = Workflow(
-            function_name=workflow_name,
-            status=status,
-            error=error,
-            args=[],
-            kwargs={},
-        )
-        session.add(workflow)
-
-        failed_steps = 0
-        if status == WorkflowStatus.FAILED:
-            failed_steps = 1
-
-        running_steps = total_steps - completed_steps - failed_steps
-
-        for i in range(completed_steps):
-            step = WorkflowStep(
-                workflow_id=workflow.id,
-                step_id=i + 1,
-                status=StepStatus.SUCCEEDED,
-                function_name=f"dummy_step_{i + 1}",
-                step_type=StepType.COMPUTE,
-                args=[],
-                kwargs={},
-            )
-            session.add(step)
-
-        for i in range(failed_steps):
-            step = WorkflowStep(
-                workflow_id=workflow.id,
-                step_id=completed_steps + i + 1,
-                status=StepStatus.FAILED,
-                function_name=f"dummy_step_{completed_steps + i + 1}",
-                step_type=StepType.COMPUTE,
-                args=[],
-                kwargs={},
-            )
-            session.add(step)
-
-        for i in range(running_steps):
-            step = WorkflowStep(
-                workflow_id=workflow.id,
-                step_id=completed_steps + failed_steps + i + 1,
-                status=StepStatus.RUNNING,
-                function_name=f"dummy_step_{completed_steps + failed_steps + i + 1}",
-                step_type=StepType.COMPUTE,
-                args=[],
-                kwargs={},
-            )
-            session.add(step)
-
-        await session.commit()
-        await session.refresh(workflow)
-        return workflow
-
-
-async def test_list_workflows(client: PlanarTestClient):
-    """
-    Test that the workflow management router correctly lists registered workflows.
-    """
-    # Call the workflow management endpoint to list workflows
-    response = await client.get("/planar/v1/workflows/")
-
-    # Verify the response status code
-    assert response.status_code == 200
-
-    # Parse the response data
-    data = response.json()
-
-    # Verify that two workflows are returned
-    assert data["total"] == 4
-    assert len(data["items"]) == 4
-
-    assert data["offset"] == 0
-    assert data["limit"] == 10
-
-    # Verify the expense workflow details
-    expense_workflow = next(
-        item
-        for item in data["items"]
-        if item["name"] == "test_expense_approval_workflow"
-    )
-    assert expense_workflow["fully_qualified_name"] == "test_expense_approval_workflow"
-    assert (
-        "Main workflow that orchestrates the expense approval process"
-        in expense_workflow["description"]
-    )
-
-    # Verify the file workflow details
-    file_workflow = next(
-        item
-        for item in data["items"]
-        if item["name"] == "test_file_processing_workflow"
-    )
-    assert file_workflow["fully_qualified_name"] == "test_file_processing_workflow"
-    assert "Workflow that processes a text file" in file_workflow["description"]
-
-    # Verify that the workflows have input and output schemas
-    assert "input_schema" in expense_workflow
-    assert "output_schema" in expense_workflow
-    assert "input_schema" in file_workflow
-    assert "output_schema" in file_workflow
-
-    # Verify that the file workflow input schema includes file parameter
-    assert "file" in file_workflow["input_schema"]["properties"]
-
-    # Verify that we propagated the `is_interactive` flag
-    assert file_workflow["is_interactive"] is False
-
-    # Verify run statistics are present
-    assert "total_runs" in expense_workflow
-    assert "run_statuses" in expense_workflow
-    assert "total_runs" in file_workflow
-    assert "run_statuses" in file_workflow
-
-
-async def test_list_workflow_runs_no_runs(client: PlanarTestClient):
-    """Test listing runs for a workflow that has not been run."""
-    response = await client.get(
-        "/planar/v1/workflows/test_expense_approval_workflow/runs"
-    )
-    assert response.status_code == 200
-    data = response.json()
-    assert data["total"] == 0
-    assert len(data["items"]) == 0
-
-
-async def test_list_workflow_runs_multiple_runs(
-    client: PlanarTestClient, tmp_db_engine: AsyncEngine
-):
-    """Test listing runs for a workflow with a mix of succeeded and failed runs."""
-    await asyncio.gather(
-        # Run 1: Successful
-        create_test_workflow_run(
-            tmp_db_engine,
-            workflow_name="test_expense_approval_workflow",
-            status=WorkflowStatus.SUCCEEDED,
-            completed_steps=1,
-            total_steps=1,
-        ),
-        create_test_workflow_run(
-            tmp_db_engine,
-            workflow_name="test_expense_approval_workflow",
-            status=WorkflowStatus.SUCCEEDED,
-            completed_steps=1,
-            total_steps=1,
-        ),
-        # Run 3: Failed
-        create_test_workflow_run(
-            tmp_db_engine,
-            workflow_name="test_expense_approval_workflow",
-            status=WorkflowStatus.FAILED,
-            completed_steps=0,
-            total_steps=1,
-            error={"type": "ValueError", "message": "Forced failure for test"},
-        ),
-    )
-
-    # List runs
-    response = await client.get(
-        "/planar/v1/workflows/test_expense_approval_workflow/runs"
-    )
-    assert response.status_code == 200
-    data = response.json()
-
-    assert data["total"] == 3
-    assert len(data["items"]) == 3
-
-    succeeded_runs = [r for r in data["items"] if r["status"] == "succeeded"]
-    failed_runs = [r for r in data["items"] if r["status"] == "failed"]
-
-    assert len(succeeded_runs) == 2
-    assert len(failed_runs) == 1
-
-    # Assert succeeded run details
-    assert succeeded_runs[0]["step_stats"]["completed"] == 1
-    assert succeeded_runs[1]["step_stats"]["completed"] == 1
-    assert succeeded_runs[0]["step_stats"]["failed"] == 0
-    assert succeeded_runs[1]["step_stats"]["failed"] == 0
-    assert succeeded_runs[0]["step_stats"]["running"] == 0
-    assert succeeded_runs[1]["step_stats"]["running"] == 0
-
-    # Assert failed run details
-    assert failed_runs[0]["step_stats"]["completed"] == 0
-    assert failed_runs[0]["step_stats"]["failed"] == 1
-    assert "ValueError" in failed_runs[0]["error"]["type"]
-
-
-async def test_get_workflow_run_succeeded(
-    client: PlanarTestClient, tmp_db_engine: AsyncEngine
-):
-    """Test getting a single succeeded workflow run."""
-    workflow = await create_test_workflow_run(
-        tmp_db_engine,
-        workflow_name="successful_workflow_3_steps",
-        status=WorkflowStatus.SUCCEEDED,
-        completed_steps=3,
-        total_steps=3,
-    )
-
-    run_resp = await client.get(
-        f"/planar/v1/workflows/successful_workflow_3_steps/runs/{workflow.id}"
-    )
-    assert run_resp.status_code == 200
-    run_data = run_resp.json()
-
-    assert run_data["id"] == str(workflow.id)
-    assert run_data["status"] == "succeeded"
-    assert run_data["step_stats"]["completed"] == 3
-    assert run_data["step_stats"]["failed"] == 0
-    assert run_data["step_stats"]["running"] == 0
-    assert run_data["error"] is None
-
-
-async def test_get_workflow_run_failed(
-    client: PlanarTestClient, tmp_db_engine: AsyncEngine
-):
-    """Test getting a single failed workflow run."""
-    workflow = await create_test_workflow_run(
-        tmp_db_engine,
-        workflow_name="failing_workflow_3_steps",
-        status=WorkflowStatus.FAILED,
-        completed_steps=1,
-        total_steps=2,  # 1 succeeded, 1 failed
-        error={
-            "type": "ValueError",
-            "message": "This step is designed to fail",
-        },
-    )
-
-    run_resp = await client.get(
-        f"/planar/v1/workflows/failing_workflow_3_steps/runs/{workflow.id}"
-    )
-    assert run_resp.status_code == 200
-    run_data = run_resp.json()
-
-    assert run_data["id"] == str(workflow.id)
-    assert run_data["status"] == "failed"
-    assert run_data["step_stats"]["completed"] == 1
-    assert run_data["step_stats"]["failed"] == 1
-    assert run_data["step_stats"]["running"] == 0
-    assert run_data["error"] is not None
-    assert "ValueError" in run_data["error"]["type"]
-    assert "This step is designed to fail" in run_data["error"]["message"]
-
-
-async def test_get_workflow_run_pending_with_running_step(
-    client: PlanarTestClient, tmp_db_engine: AsyncEngine
-):
-    """Test getting a pending workflow with completed and running steps."""
-    workflow = await create_test_workflow_run(
-        tmp_db_engine,
-        workflow_name="pending_workflow_with_running_steps",
-        status=WorkflowStatus.PENDING,
-        completed_steps=3,
-        total_steps=4,  # 3 completed, 1 running
-    )
-
-    run_resp = await client.get(
-        f"/planar/v1/workflows/pending_workflow_with_running_steps/runs/{workflow.id}"
-    )
-    assert run_resp.status_code == 200
-    run_data = run_resp.json()
-
-    assert run_data["id"] == str(workflow.id)
-    assert run_data["status"] == "pending"
-    assert run_data["step_stats"]["completed"] == 3
-    assert run_data["step_stats"]["running"] == 1
-    assert run_data["step_stats"]["failed"] == 0
-    assert run_data["error"] is None
-
-
-async def test_start_file_workflow(
-    client: PlanarTestClient,
-    planar_file: PlanarFile,
-    observer: WorkflowObserver,
-    session: AsyncSession,
-):
-    """Test starting a workflow with a PlanarFile through the API."""
-    # Prepare the request payload with the file reference
-    payload = {
-        "file": {
-            "id": str(planar_file.id),
-            "filename": planar_file.filename,
-            "content_type": planar_file.content_type,
-            "size": planar_file.size,
-        }
-    }
-
-    response = await client.post(
-        "/planar/v1/workflows/test_file_processing_workflow/start",
-        json=payload,
-    )
-
-    # Verify the response status code
-    assert response.status_code == 200
-
-    data = response.json()
-
-    assert "id" in data
-    workflow_id = data["id"]
-
-    await observer.wait("workflow-succeeded", workflow_id=workflow_id)
-
-    workflow = await session.get(Workflow, UUID(workflow_id))
-    await session.commit()
-    assert workflow
-
-    # Verify the workflow completed successfully
-    assert workflow.status == WorkflowStatus.SUCCEEDED
-
-    # Check the workflow result
-    result = workflow.result
-    assert result
-    assert result["filename"] == planar_file.filename
-    assert result["character_count"] == planar_file.size
-    assert "This is a test file" in result["content_preview"]
-    assert result["file_id"] == str(planar_file.id)
-
-
-async def test_get_compute_step(
-    client: PlanarTestClient, session: AsyncSession, observer: WorkflowObserver
-):
-    """Ensure compute steps can be retrieved without metadata."""
-
-    expense = Expense(
-        title="Test Expense",
-        amount=100.0,
-        description="test",
-        status=ExpenseStatus.SUBMITTED,
-        submitter_id=uuid4(),
-        category="misc",
-    )
-    session.add(expense)
-    await session.commit()
-
-    payload = {"expense_id": str(expense.id)}
-    resp = await client.post(
-        "/planar/v1/workflows/test_expense_approval_workflow/start",
-        json=payload,
-    )
-    assert resp.status_code == 200
-    wf_id = resp.json()["id"]
-
-    await observer.wait("workflow-succeeded", workflow_id=wf_id)
-
-    step = (
-        await session.exec(
-            select(WorkflowStep).where(WorkflowStep.workflow_id == UUID(wf_id))
-        )
-    ).first()
-    await session.commit()
-    assert step
-    assert step.step_type == StepType.COMPUTE
-
-    resp = await client.get(
-        f"/planar/v1/workflows/test_expense_approval_workflow/runs/{wf_id}/steps/{step.step_id}"
-    )
-    assert resp.status_code == 200
-    data = resp.json()
-    assert "meta" in data
-    assert data["meta"] is None
-
-
-async def test_list_interactive_workflow(app: PlanarApp, client: PlanarTestClient):
-    """
-    We propagate interactive workflows all the way to the `/workflows` endpoint.
-    """
-
-    # This is here rather than at the top bc it's not registered as part of the `app` fixture.
-    @workflow(name="interactive_workflow", is_interactive=True)
-    async def interactive_workflow():
-        pass
-
-    app.register_workflow(interactive_workflow)
-
-    response = await client.get("/planar/v1/workflows/")
-    assert response.status_code == 200
-
-    data = response.json()
-    expense_workflow = next(
-        item for item in data["items"] if item["name"] == "interactive_workflow"
-    )
-    assert expense_workflow["is_interactive"] is True