planar 0.9.3__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. planar/ai/agent.py +2 -1
  2. planar/ai/agent_base.py +24 -5
  3. planar/ai/state.py +17 -0
  4. planar/app.py +18 -1
  5. planar/data/connection.py +108 -0
  6. planar/data/dataset.py +11 -104
  7. planar/data/utils.py +89 -0
  8. planar/db/alembic/env.py +25 -1
  9. planar/files/storage/azure_blob.py +1 -1
  10. planar/registry_items.py +2 -0
  11. planar/routers/dataset_router.py +213 -0
  12. planar/routers/info.py +79 -36
  13. planar/routers/models.py +1 -0
  14. planar/routers/workflow.py +2 -0
  15. planar/scaffold_templates/pyproject.toml.j2 +1 -1
  16. planar/security/authorization.py +31 -3
  17. planar/security/default_policies.cedar +25 -0
  18. planar/testing/fixtures.py +34 -1
  19. planar/testing/planar_test_client.py +1 -1
  20. planar/workflows/decorators.py +2 -1
  21. planar/workflows/wrappers.py +1 -0
  22. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/METADATA +9 -1
  23. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/RECORD +25 -72
  24. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/WHEEL +1 -1
  25. planar/ai/test_agent_serialization.py +0 -229
  26. planar/ai/test_agent_tool_step_display.py +0 -78
  27. planar/data/test_dataset.py +0 -354
  28. planar/files/storage/test_azure_blob.py +0 -435
  29. planar/files/storage/test_local_directory.py +0 -162
  30. planar/files/storage/test_s3.py +0 -299
  31. planar/files/test_files.py +0 -282
  32. planar/human/test_human.py +0 -385
  33. planar/logging/test_formatter.py +0 -327
  34. planar/modeling/mixins/test_auditable.py +0 -97
  35. planar/modeling/mixins/test_timestamp.py +0 -134
  36. planar/modeling/mixins/test_uuid_primary_key.py +0 -52
  37. planar/routers/test_agents_router.py +0 -174
  38. planar/routers/test_files_router.py +0 -49
  39. planar/routers/test_object_config_router.py +0 -367
  40. planar/routers/test_routes_security.py +0 -168
  41. planar/routers/test_rule_router.py +0 -470
  42. planar/routers/test_workflow_router.py +0 -539
  43. planar/rules/test_data/account_dormancy_management.json +0 -223
  44. planar/rules/test_data/airline_loyalty_points_calculator.json +0 -262
  45. planar/rules/test_data/applicant_risk_assessment.json +0 -435
  46. planar/rules/test_data/booking_fraud_detection.json +0 -407
  47. planar/rules/test_data/cellular_data_rollover_system.json +0 -258
  48. planar/rules/test_data/clinical_trial_eligibility_screener.json +0 -437
  49. planar/rules/test_data/customer_lifetime_value.json +0 -143
  50. planar/rules/test_data/import_duties_calculator.json +0 -289
  51. planar/rules/test_data/insurance_prior_authorization.json +0 -443
  52. planar/rules/test_data/online_check_in_eligibility_system.json +0 -254
  53. planar/rules/test_data/order_consolidation_system.json +0 -375
  54. planar/rules/test_data/portfolio_risk_monitor.json +0 -471
  55. planar/rules/test_data/supply_chain_risk.json +0 -253
  56. planar/rules/test_data/warehouse_cross_docking.json +0 -237
  57. planar/rules/test_rules.py +0 -1494
  58. planar/security/tests/test_auth_middleware.py +0 -162
  59. planar/security/tests/test_authorization_context.py +0 -78
  60. planar/security/tests/test_cedar_basics.py +0 -41
  61. planar/security/tests/test_cedar_policies.py +0 -158
  62. planar/security/tests/test_jwt_principal_context.py +0 -179
  63. planar/test_app.py +0 -142
  64. planar/test_cli.py +0 -394
  65. planar/test_config.py +0 -515
  66. planar/test_object_config.py +0 -527
  67. planar/test_object_registry.py +0 -14
  68. planar/test_sqlalchemy.py +0 -193
  69. planar/test_utils.py +0 -105
  70. planar/testing/test_memory_storage.py +0 -143
  71. planar/workflows/test_concurrency_detection.py +0 -120
  72. planar/workflows/test_lock_timeout.py +0 -140
  73. planar/workflows/test_serialization.py +0 -1203
  74. planar/workflows/test_suspend_deserialization.py +0 -231
  75. planar/workflows/test_workflow.py +0 -2005
  76. {planar-0.9.3.dist-info → planar-0.11.0.dist-info}/entry_points.txt +0 -0
planar/routers/dataset_router.py ADDED
@@ -0,0 +1,213 @@
+ import io
+ from typing import AsyncGenerator
+
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ from fastapi import APIRouter, HTTPException, Query
+ from fastapi.responses import StreamingResponse
+ from ibis.common.exceptions import TableNotFound
+ from pydantic import BaseModel
+
+ from planar.data.exceptions import DatasetNotFoundError
+ from planar.data.utils import (
+     get_dataset,
+     get_dataset_metadata,
+     list_datasets,
+     list_schemas,
+ )
+ from planar.logging import get_logger
+ from planar.security.authorization import (
+     DatasetAction,
+     DatasetResource,
+     validate_authorization_for,
+ )
+
+ logger = get_logger(__name__)
+
+
+ class DatasetMetadata(BaseModel):
+     name: str
+     table_schema: dict
+     row_count: int
+
+
+ def create_dataset_router() -> APIRouter:
+     router = APIRouter(tags=["Planar Datasets"])
+
+     @router.get("/schemas", response_model=list[str])
+     async def get_schemas():
+         validate_authorization_for(
+             DatasetResource(), DatasetAction.DATASET_LIST_SCHEMAS
+         )
+         schemas = await list_schemas()
+         return schemas
+
+     @router.get("/metadata", response_model=list[DatasetMetadata])
+     async def list_planar_datasets(
+         limit: int = Query(100, ge=1, le=1000),
+         offset: int = Query(0, ge=0),
+         schema_name: str = Query("main"),
+     ):
+         validate_authorization_for(DatasetResource(), DatasetAction.DATASET_LIST)
+         datasets = await list_datasets(limit, offset)
+
+         response = []
+         for dataset in datasets:
+             metadata = await get_dataset_metadata(dataset.name, schema_name)
+
+             if not metadata:
+                 continue
+
+             schema = metadata["schema"]
+             row_count = metadata["row_count"]
+
+             response.append(
+                 DatasetMetadata(
+                     name=dataset.name,
+                     row_count=row_count,
+                     table_schema={
+                         field_name: str(field_type)
+                         for field_name, field_type in schema.items()
+                     },
+                 )
+             )
+
+         return response
+
+     @router.get("/metadata/{dataset_name}", response_model=DatasetMetadata)
+     async def get_planar_dataset(dataset_name: str, schema_name: str = "main"):
+         validate_authorization_for(
+             DatasetResource(dataset_name=dataset_name),
+             DatasetAction.DATASET_VIEW_DETAILS,
+         )
+         try:
+             metadata = await get_dataset_metadata(dataset_name, schema_name)
+
+             if not metadata:
+                 raise HTTPException(
+                     status_code=404, detail=f"Dataset {dataset_name} not found"
+                 )
+
+             schema = metadata["schema"]
+             row_count = metadata["row_count"]
+
+             return DatasetMetadata(
+                 name=dataset_name,
+                 row_count=row_count,
+                 table_schema={
+                     field_name: str(field_type)
+                     for field_name, field_type in schema.items()
+                 },
+             )
+         except (DatasetNotFoundError, TableNotFound):
+             raise HTTPException(
+                 status_code=404, detail=f"Dataset {dataset_name} not found"
+             )
+
+     @router.get(
+         "/content/{dataset_name}/arrow-stream", response_class=StreamingResponse
+     )
+     async def stream_dataset_content(
+         dataset_name: str,
+         batch_size: int = Query(100, ge=1, le=1000),
+         limit: int | None = Query(None, ge=1),
+     ):
+         validate_authorization_for(
+             DatasetResource(dataset_name=dataset_name),
+             DatasetAction.DATASET_STREAM_CONTENT,
+         )
+         try:
+             dataset = await get_dataset(dataset_name)
+
+             # Apply limit parameter if specified
+             table = await dataset.read(limit=limit)
+
+             schema = table.schema().to_pyarrow()
+
+             async def stream_content() -> AsyncGenerator[bytes, None]:
+                 sink = io.BytesIO()
+
+                 try:
+                     with pa.ipc.new_stream(sink, schema) as writer:
+                         yield sink.getvalue()  # yield the schema
+
+                         batch_count = 0
+                         for batch in table.to_pyarrow_batches(chunk_size=batch_size):
+                             # reset the sink to only stream
+                             # the current batch
+                             # we don't want to stream the schema or previous
+                             # batches again
+                             sink.seek(0)
+                             sink.truncate(0)
+
+                             writer.write_batch(batch)
+                             yield sink.getvalue()
+                             batch_count += 1
+
+                         # For empty datasets, ensure we have a complete stream
+                         if batch_count == 0:
+                             # Write an empty batch to ensure valid Arrow stream format
+                             empty_batch = pa.RecordBatch.from_arrays(
+                                 [pa.array([], type=field.type) for field in schema],
+                                 schema=schema,
+                             )
+                             sink.seek(0)
+                             sink.truncate(0)
+                             writer.write_batch(empty_batch)
+                             yield sink.getvalue()
+                 finally:
+                     # Explicit BytesIO cleanup for memory safety
+                     sink.close()
+
+             return StreamingResponse(
+                 stream_content(),
+                 media_type="application/vnd.apache.arrow.stream",
+                 headers={
+                     "Content-Disposition": f"attachment; filename={dataset_name}.arrow",
+                     "X-Batch-Size": str(batch_size),
+                     "X-Row-Limit": str(limit) if limit else "unlimited",
+                 },
+             )
+         except (DatasetNotFoundError, TableNotFound):
+             raise HTTPException(
+                 status_code=404, detail=f"Dataset {dataset_name} not found"
+             )
+
+     @router.get("/content/{dataset_name}/download")
+     async def download_dataset(dataset_name: str, schema_name: str = "main"):
+         validate_authorization_for(
+             DatasetResource(dataset_name=dataset_name),
+             DatasetAction.DATASET_DOWNLOAD,
+         )
+         try:
+             arrow_buffer = pa.BufferOutputStream()
+             dataset = await get_dataset(dataset_name, schema_name)
+
+             pyarrow_table = await dataset.to_pyarrow()
+
+             pq.write_table(pyarrow_table, arrow_buffer)
+
+             if arrow_buffer.tell() == 0:
+                 logger.warning(
+                     "Dataset is empty",
+                     dataset_name=dataset_name,
+                     schema_name=schema_name,
+                 )
+
+             buffer = arrow_buffer.getvalue()
+             parquet_bytes = buffer.to_pybytes()
+             bytes_io = io.BytesIO(parquet_bytes)
+
+             return StreamingResponse(
+                 bytes_io,
+                 media_type="application/x-parquet",
+                 headers={
+                     "Content-Disposition": f"attachment; filename={dataset_name}.parquet"
+                 },
+             )
+         except (DatasetNotFoundError, TableNotFound):
+             raise HTTPException(
+                 status_code=404, detail=f"Dataset {dataset_name} not found"
+             )
+
+     return router
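The new arrow-stream endpoint above emits an Arrow IPC stream: the schema message is yielded first, followed by one IPC message per record batch. A minimal client sketch, assuming `httpx` and a hypothetical `/planar/v1/datasets` mount prefix (only the route suffixes appear in this diff), could read the stream back into a table like this:

```python
import httpx
import pyarrow as pa


def fetch_dataset(base_url: str, dataset_name: str, limit: int | None = None) -> pa.Table:
    # batch_size is capped at 1000 by the endpoint's Query validator.
    params: dict[str, int] = {"batch_size": 500}
    if limit is not None:
        params["limit"] = limit

    with httpx.stream(
        "GET",
        f"{base_url}/content/{dataset_name}/arrow-stream",
        params=params,
        timeout=None,
    ) as response:
        response.raise_for_status()
        payload = b"".join(response.iter_bytes())

    # The payload is a self-contained Arrow IPC stream (schema + batches).
    reader = pa.ipc.open_stream(payload)
    return reader.read_all()


# Hypothetical usage; the prefix depends on where the app mounts the router.
# table = fetch_dataset("http://localhost:8000/planar/v1/datasets", "orders")
```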
planar/routers/info.py CHANGED
@@ -1,16 +1,34 @@
+ import importlib.metadata
+ from typing import Literal, TypedDict
+
  from fastapi import APIRouter, Depends
  from pydantic import BaseModel
- from sqlalchemy.ext.asyncio import AsyncSession
- from sqlmodel import col, distinct, func, select
+ from sqlmodel import col, func, select
+ from sqlmodel.ext.asyncio.session import AsyncSession

+ from planar.config import PlanarConfig, get_environment
+ from planar.data.config import DataConfig
+ from planar.files.storage.config import StorageConfig
  from planar.human.models import HumanTask, HumanTaskStatus
  from planar.logging import get_logger
- from planar.object_config import ConfigurableObjectType, ObjectConfiguration
+ from planar.object_registry import ObjectRegistry
  from planar.session import get_session
  from planar.workflows.models import Workflow, WorkflowStatus

  logger = get_logger(__name__)

+ StorageInfo = Literal["s3", "localdir", "azure_blob"]
+
+
+ class DatasetsInfo(BaseModel):
+     catalog: Literal["duckdb", "postgres", "sqlite"]
+     storage: StorageInfo
+
+
+ class SystemFeatures(BaseModel):
+     storage: StorageInfo | None = None
+     datasets: DatasetsInfo | None = None
+

  class SystemInfo(BaseModel):
      """Combined application information and system statistics"""
@@ -19,6 +37,11 @@ class SystemInfo(BaseModel):
      title: str
      description: str

+     version: str
+     environment: str
+
+     features: SystemFeatures
+
      # System stats
      total_workflow_runs: int = 0
      completed_runs: int = 0
@@ -27,7 +50,18 @@ class SystemInfo(BaseModel):
      active_agents: int = 0


- async def get_system_stats(session: AsyncSession = Depends(get_session)) -> dict:
+ class SystemStats(TypedDict):
+     total_workflow_runs: int
+     completed_runs: int
+     in_progress_runs: int
+     pending_human_tasks: int
+     active_agents: int
+
+
+ async def get_system_stats(
+     registry: ObjectRegistry,
+     session: AsyncSession = Depends(get_session),
+ ) -> SystemStats:
      """
      Get system-wide statistics directly from the database.

@@ -35,8 +69,10 @@ async def get_system_stats(session: AsyncSession = Depends(get_session)) -> dict
      rather than fetching all records and calculating in the application.
      """
      try:
+         agent_count = len(registry.get_agents())
+
          # Get workflow run counts
-         workflow_stats = await session.execute(
+         workflow_stats = await session.exec(
              select(
                  func.count().label("total_runs"),
                  func.count(col(Workflow.id))
@@ -47,42 +83,21 @@ async def get_system_stats(session: AsyncSession = Depends(get_session)) -> dict
                  .label("in_progress_runs"),
              ).select_from(Workflow)
          )
-         workflow_row = workflow_stats.one()
+         total_runs, completed_runs, in_progress_runs = workflow_stats.one()

          # Get pending human task count
-         human_task_query = await session.execute(
+         human_task_query = await session.exec(
              select(func.count())
              .select_from(HumanTask)
              .where(HumanTask.status == HumanTaskStatus.PENDING)
          )
-         pending_tasks = human_task_query.scalar() or 0
-
-         # Get agent count from the registry or count distinct agent configs
-         agent_count = 0
-         try:
-             # Count distinct agent names in the AgentConfig table
-             agent_query = await session.execute(
-                 select(
-                     func.count(distinct(ObjectConfiguration.object_name))
-                 ).select_from(
-                     select(ObjectConfiguration)
-                     .where(
-                         ObjectConfiguration.object_type == ConfigurableObjectType.AGENT
-                     )
-                     .subquery()
-                 )
-             )
-             agent_count = agent_query.scalar() or 0
-         except Exception:
-             logger.exception("error counting agents")
-             # Fallback to 0
-             agent_count = 0
+         pending_tasks = human_task_query.one()

          # Return stats dict
          return {
-             "total_workflow_runs": workflow_row.total_runs or 0,
-             "completed_runs": workflow_row.completed_runs or 0,
-             "in_progress_runs": workflow_row.in_progress_runs or 0,
+             "total_workflow_runs": total_runs,
+             "completed_runs": completed_runs,
+             "in_progress_runs": in_progress_runs,
              "pending_human_tasks": pending_tasks,
              "active_agents": agent_count,
          }
@@ -98,13 +113,31 @@ async def get_system_stats(session: AsyncSession = Depends(get_session)) -> dict
          }


- def create_info_router(title: str, description: str) -> APIRouter:
+ def get_app_version() -> str:
+     try:
+         return importlib.metadata.version("planar")
+     except importlib.metadata.PackageNotFoundError:
+         logger.warning("Planar package not found, returning development version")
+         return "development"
+
+
+ def get_storage_info(cfg: StorageConfig) -> StorageInfo:
+     return cfg.backend
+
+
+ def get_datasets_info(cfg: DataConfig) -> DatasetsInfo | None:
+     return DatasetsInfo(catalog=cfg.catalog.type, storage=get_storage_info(cfg.storage))
+
+
+ def create_info_router(
+     title: str, description: str, config: PlanarConfig, registry: ObjectRegistry
+ ) -> APIRouter:
      """
      Create a router for serving combined application information and system statistics.

      This router provides a single endpoint to retrieve the application's title,
      description, and system-wide statistics on workflow runs, human tasks,
-     and registered agents.
+     and registered agents, as well as the application's features and configuration.

      Args:
          title: The application title
@@ -125,7 +158,17 @@ def create_info_router(title: str, description: str) -> APIRouter:
          Returns:
              SystemInfo object containing app details and system stats
          """
-         stats = await get_system_stats(session)
-         return SystemInfo(title=title, description=description, **stats)
+         stats = await get_system_stats(registry, session)
+         return SystemInfo(
+             title=title,
+             description=description,
+             version=get_app_version(),
+             environment=get_environment(),
+             features=SystemFeatures(
+                 storage=get_storage_info(config.storage) if config.storage else None,
+                 datasets=get_datasets_info(config.data) if config.data else None,
+             ),
+             **stats,
+         )

      return router
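For orientation, the enriched `SystemInfo` response now carries the package version, the environment, and a `features` block alongside the existing counters. A hypothetical payload (all values illustrative; the endpoint path is defined elsewhere in the router) might look like:

```python
example_system_info = {
    "title": "My Planar App",
    "description": "Example deployment",
    "version": "0.11.0",
    "environment": "dev",  # whatever get_environment() reports
    "features": {
        "storage": "localdir",  # StorageInfo literal
        "datasets": {"catalog": "sqlite", "storage": "localdir"},  # DatasetsInfo
    },
    "total_workflow_runs": 42,
    "completed_runs": 40,
    "in_progress_runs": 2,
    "pending_human_tasks": 1,
    "active_agents": 3,
}
```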
planar/routers/models.py CHANGED
@@ -79,6 +79,7 @@ class WorkflowDefinition(BaseModel):
      total_runs: int
      run_statuses: WorkflowRunStatusCounts
      durations: DurationStats | None = None
+     is_interactive: bool


  class StepStats(BaseModel):
planar/routers/workflow.py CHANGED
@@ -217,6 +217,7 @@ def create_workflow_router(
                      }
                  ),
                  durations=duration_stats,
+                 is_interactive=workflow.is_interactive,
              )
          )

@@ -257,6 +258,7 @@ def create_workflow_router(
              **{status.value: count for status, count in run_statuses.items()}
          ),
          durations=duration_stats,
+         is_interactive=wf.is_interactive,
      )

      @router.get("/{workflow_name}/runs", response_model=WorkflowRunList)
@@ -3,7 +3,7 @@ name = "{{ name }}"
3
3
  version = "0.1.0"
4
4
  requires-python = ">=3.12"
5
5
  dependencies = [
6
- "planar>=0.9.0",
6
+ "planar[data]>=0.10.0",
7
7
  ]
8
8
 
9
9
  [[tool.uv.index]]
planar/security/authorization.py CHANGED
@@ -87,12 +87,23 @@ class RuleAction(str, Enum):
      RULE_SIMULATE = "Rule::Simulate"


+ class DatasetAction(str, Enum):
+     """Actions that can be performed on datasets."""
+
+     DATASET_LIST_SCHEMAS = "Dataset::ListSchemas"
+     DATASET_LIST = "Dataset::List"
+     DATASET_VIEW_DETAILS = "Dataset::ViewDetails"
+     DATASET_STREAM_CONTENT = "Dataset::StreamContent"
+     DATASET_DOWNLOAD = "Dataset::Download"
+
+
  class ResourceType(str, Enum):
      PRINCIPAL = "Principal"
      WORKFLOW = "Workflow"
      ENTITY = "Entity"
      AGENT = "Agent"
      Rule = "Rule"
+     DATASET = "Dataset"


  class EntityIdentifier(TypedDict):
@@ -129,7 +140,12 @@ class RuleResource:
      rule_name: str | None = None


- ResourceDescriptor = AgentResource | WorkflowResource | RuleResource
+ @dataclass(frozen=True, slots=True)
+ class DatasetResource:
+     dataset_name: str | None = None
+
+
+ ResourceDescriptor = AgentResource | WorkflowResource | RuleResource | DatasetResource


  class CedarEntity(BaseModel):
@@ -209,6 +225,15 @@ class CedarEntity(BaseModel):
              resource_attributes={"rule_name": rule_name},
          )

+     @staticmethod
+     def from_dataset(dataset_name: str | None) -> "CedarEntity":
+         """Create a CedarEntity instance from dataset data"""
+         return CedarEntity(
+             resource_type=ResourceType.DATASET,
+             resource_key="dataset_name",
+             resource_attributes={"dataset_name": dataset_name},
+         )
+

  class PolicyService:
      """Service for managing and evaluating Authorization policies."""
@@ -272,7 +297,7 @@ class PolicyService:
      def is_allowed(
          self,
          principal: CedarEntity,
-         action: str | WorkflowAction | AgentAction | RuleAction,
+         action: str | WorkflowAction | AgentAction | RuleAction | DatasetAction,
          resource: CedarEntity,
      ) -> bool:
          """Check if the principal is permitted to perform the action on the resource.
@@ -294,6 +319,7 @@ class PolicyService:
              isinstance(action, WorkflowAction)
              or isinstance(action, AgentAction)
              or isinstance(action, RuleAction)
+             or isinstance(action, DatasetAction)
          ):
              action = f'Action::"{action.value}"'
          else:
@@ -346,7 +372,7 @@ class PolicyService:

  def validate_authorization_for(
      resource_descriptor: ResourceDescriptor,
-     action: WorkflowAction | AgentAction | RuleAction,
+     action: WorkflowAction | AgentAction | RuleAction | DatasetAction,
  ):
      authz_service = get_policy_service()

@@ -363,6 +389,8 @@ def validate_authorization_for(
              entity = CedarEntity.from_agent(resource_descriptor.id)
          case RuleAction() if isinstance(resource_descriptor, RuleResource):
              entity = CedarEntity.from_rule(resource_descriptor.rule_name)
+         case DatasetAction() if isinstance(resource_descriptor, DatasetResource):
+             entity = CedarEntity.from_dataset(resource_descriptor.dataset_name)
          case _:
              raise ValueError(
                  f"Invalid resource descriptor {type(resource_descriptor).__name__} for action {action}"
planar/security/default_policies.cedar CHANGED
@@ -74,4 +74,29 @@ permit (
      principal,
      action == Action::"Rule::Simulate",
      resource
+ );
+ permit (
+     principal,
+     action == Action::"Dataset::ListSchemas",
+     resource
+ );
+ permit (
+     principal,
+     action == Action::"Dataset::List",
+     resource
+ );
+ permit (
+     principal,
+     action == Action::"Dataset::ViewDetails",
+     resource
+ );
+ permit (
+     principal,
+     action == Action::"Dataset::StreamContent",
+     resource
+ );
+ permit (
+     principal,
+     action == Action::"Dataset::Download",
+     resource
  );
planar/testing/fixtures.py CHANGED
@@ -16,6 +16,8 @@ Usage in external projects:

  Available fixtures:
  - storage: In-memory file storage for tests
+ - data_config: Test data configuration with SQLite catalog and local storage
+ - app_with_data: PlanarApp instance with data configuration
  - tmp_db_url: Parametrized database URL (SQLite/PostgreSQL)
  - session: Database session
  - client: Planar test client
@@ -33,8 +35,11 @@ from pathlib import Path

  import pytest

- from planar.config import load_config
+ from planar.app import PlanarApp
+ from planar.config import load_config, load_environment_aware_config
+ from planar.data.config import DataConfig, SQLiteCatalogConfig
  from planar.db import DatabaseManager, new_session
+ from planar.files.storage.config import LocalDirectoryConfig
  from planar.files.storage.context import set_storage
  from planar.logging import set_context_metadata
  from planar.object_registry import ObjectRegistry
@@ -114,6 +119,34 @@ async def storage():
      yield storage


+ @pytest.fixture()
+ def data_config(tmp_path):
+     """Create a test data configuration."""
+     data_dir = tmp_path / "data"
+     data_dir.mkdir(exist_ok=True)
+
+     catalog_path = data_dir / "test.sqlite"
+     storage_path = data_dir / "ducklake_files"
+     storage_path.mkdir(exist_ok=True)
+
+     return DataConfig(
+         catalog=SQLiteCatalogConfig(type="sqlite", path=str(catalog_path)),
+         storage=LocalDirectoryConfig(backend="localdir", directory=str(storage_path)),
+     )
+
+
+ @pytest.fixture(name="app_with_data")
+ def app_with_data_fixture(data_config):
+     """Create a PlanarApp with data configuration."""
+     config = load_environment_aware_config()
+
+     config.data = data_config
+
+     app = PlanarApp(config=config)
+
+     return app
+
+
  @pytest.fixture()
  def tmp_sqlite_url(tmp_db_path: str):
      return f"sqlite+aiosqlite:///{tmp_db_path}"
planar/testing/planar_test_client.py CHANGED
@@ -53,5 +53,5 @@ async def wait_all_event_loop_tasks():
              break
          try:
              await asyncio.gather(*other_tasks)
-         except asyncio.CancelledError:
+         except (asyncio.CancelledError, Exception):
              pass
planar/workflows/decorators.py CHANGED
@@ -78,7 +78,7 @@ def step(
      return decorator


- def workflow(*, name: str | None = None):
+ def workflow(*, name: str | None = None, is_interactive: bool = False):
      """
      Decorator to define a workflow.

@@ -177,6 +177,7 @@ def workflow(*, name: str | None = None):
          start_step=start_workflow_step,
          wait_for_completion=wait_for_completion,
          wrapped_fn=run_workflow,
+         is_interactive=is_interactive,
      )

      return wf_wrapper
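The `is_interactive` flag is captured at decoration time, carried on the returned `WorkflowWrapper`, and surfaced as `is_interactive` on the workflow router's `WorkflowDefinition` responses. A hedged usage sketch (the workflow name and body are illustrative only):

```python
from planar.workflows.decorators import workflow


@workflow(name="expense-approval", is_interactive=True)
async def expense_approval(amount: float) -> str:
    # Body elided; the flag itself does not change how the workflow runs here,
    # it is simply recorded on the wrapper and exposed to the API layer.
    ...


# The wrapper produced by the decorator now exposes the flag:
assert expense_approval.is_interactive is True
```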
planar/workflows/wrappers.py CHANGED
@@ -33,6 +33,7 @@ class WorkflowWrapper(Wrapper[P, T, U, R]):
      start: Callable[P, Coroutine[T, U, Workflow]]
      start_step: Callable[P, Coroutine[T, U, UUID]]
      wait_for_completion: Callable[[UUID], Coroutine[T, U, R]]
+     is_interactive: bool


  @dataclass(kw_only=True)
{planar-0.9.3.dist-info → planar-0.11.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: planar
- Version: 0.9.3
+ Version: 0.11.0
  Summary: Add your description here
  License-Expression: LicenseRef-Proprietary
  Requires-Dist: aiofiles>=24.1.0
@@ -296,6 +296,14 @@ To disable SQLite testing:
  PLANAR_TEST_SQLITE=0 uv run pytest
  ```

+ ### Test coverage
+
+ We use [pytest-cov](https://pypi.org/project/pytest-cov/) to measure test coverage. To generate a simple coverage report use the following command:
+
+ ```bash
+ uv run pytest --cov=planar
+ ```
+
  ### Pre-commit hooks

  We use [pre-commit](https://pre-commit.com/) to manage pre-commit hooks. To install the pre-commit hooks, run the following command: