planar 0.5.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- planar/_version.py +1 -1
- planar/ai/agent.py +155 -283
- planar/ai/agent_base.py +170 -0
- planar/ai/agent_utils.py +7 -0
- planar/ai/pydantic_ai.py +638 -0
- planar/ai/test_agent_serialization.py +1 -1
- planar/app.py +64 -20
- planar/cli.py +39 -27
- planar/config.py +45 -36
- planar/db/db.py +2 -1
- planar/files/storage/azure_blob.py +343 -0
- planar/files/storage/base.py +7 -0
- planar/files/storage/config.py +70 -7
- planar/files/storage/s3.py +6 -6
- planar/files/storage/test_azure_blob.py +435 -0
- planar/logging/formatter.py +17 -4
- planar/logging/test_formatter.py +327 -0
- planar/registry_items.py +2 -1
- planar/routers/agents_router.py +3 -1
- planar/routers/files.py +11 -2
- planar/routers/models.py +14 -1
- planar/routers/test_agents_router.py +1 -1
- planar/routers/test_files_router.py +49 -0
- planar/routers/test_routes_security.py +5 -7
- planar/routers/test_workflow_router.py +270 -3
- planar/routers/workflow.py +95 -36
- planar/rules/models.py +36 -39
- planar/rules/test_data/account_dormancy_management.json +223 -0
- planar/rules/test_data/airline_loyalty_points_calculator.json +262 -0
- planar/rules/test_data/applicant_risk_assessment.json +435 -0
- planar/rules/test_data/booking_fraud_detection.json +407 -0
- planar/rules/test_data/cellular_data_rollover_system.json +258 -0
- planar/rules/test_data/clinical_trial_eligibility_screener.json +437 -0
- planar/rules/test_data/customer_lifetime_value.json +143 -0
- planar/rules/test_data/import_duties_calculator.json +289 -0
- planar/rules/test_data/insurance_prior_authorization.json +443 -0
- planar/rules/test_data/online_check_in_eligibility_system.json +254 -0
- planar/rules/test_data/order_consolidation_system.json +375 -0
- planar/rules/test_data/portfolio_risk_monitor.json +471 -0
- planar/rules/test_data/supply_chain_risk.json +253 -0
- planar/rules/test_data/warehouse_cross_docking.json +237 -0
- planar/rules/test_rules.py +750 -6
- planar/scaffold_templates/planar.dev.yaml.j2 +6 -6
- planar/scaffold_templates/planar.prod.yaml.j2 +9 -5
- planar/scaffold_templates/pyproject.toml.j2 +1 -1
- planar/security/auth_context.py +21 -0
- planar/security/{jwt_middleware.py → auth_middleware.py} +70 -17
- planar/security/authorization.py +9 -15
- planar/security/tests/test_auth_middleware.py +162 -0
- planar/sse/proxy.py +4 -9
- planar/test_app.py +92 -1
- planar/test_cli.py +81 -59
- planar/test_config.py +17 -14
- planar/testing/fixtures.py +325 -0
- planar/testing/planar_test_client.py +5 -2
- planar/utils.py +41 -1
- planar/workflows/execution.py +1 -1
- planar/workflows/orchestrator.py +5 -0
- planar/workflows/serialization.py +12 -6
- planar/workflows/step_core.py +3 -1
- planar/workflows/test_serialization.py +9 -1
- {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/METADATA +30 -5
- planar-0.8.0.dist-info/RECORD +166 -0
- planar/.__init__.py.un~ +0 -0
- planar/._version.py.un~ +0 -0
- planar/.app.py.un~ +0 -0
- planar/.cli.py.un~ +0 -0
- planar/.config.py.un~ +0 -0
- planar/.context.py.un~ +0 -0
- planar/.db.py.un~ +0 -0
- planar/.di.py.un~ +0 -0
- planar/.engine.py.un~ +0 -0
- planar/.files.py.un~ +0 -0
- planar/.log_context.py.un~ +0 -0
- planar/.log_metadata.py.un~ +0 -0
- planar/.logging.py.un~ +0 -0
- planar/.object_registry.py.un~ +0 -0
- planar/.otel.py.un~ +0 -0
- planar/.server.py.un~ +0 -0
- planar/.session.py.un~ +0 -0
- planar/.sqlalchemy.py.un~ +0 -0
- planar/.task_local.py.un~ +0 -0
- planar/.test_app.py.un~ +0 -0
- planar/.test_config.py.un~ +0 -0
- planar/.test_object_config.py.un~ +0 -0
- planar/.test_sqlalchemy.py.un~ +0 -0
- planar/.test_utils.py.un~ +0 -0
- planar/.util.py.un~ +0 -0
- planar/.utils.py.un~ +0 -0
- planar/ai/.__init__.py.un~ +0 -0
- planar/ai/._models.py.un~ +0 -0
- planar/ai/.agent.py.un~ +0 -0
- planar/ai/.agent_utils.py.un~ +0 -0
- planar/ai/.events.py.un~ +0 -0
- planar/ai/.files.py.un~ +0 -0
- planar/ai/.models.py.un~ +0 -0
- planar/ai/.providers.py.un~ +0 -0
- planar/ai/.pydantic_ai.py.un~ +0 -0
- planar/ai/.pydantic_ai_agent.py.un~ +0 -0
- planar/ai/.pydantic_ai_provider.py.un~ +0 -0
- planar/ai/.step.py.un~ +0 -0
- planar/ai/.test_agent.py.un~ +0 -0
- planar/ai/.test_agent_serialization.py.un~ +0 -0
- planar/ai/.test_providers.py.un~ +0 -0
- planar/ai/.utils.py.un~ +0 -0
- planar/ai/providers.py +0 -1088
- planar/ai/test_agent.py +0 -1298
- planar/ai/test_providers.py +0 -463
- planar/db/.db.py.un~ +0 -0
- planar/files/.config.py.un~ +0 -0
- planar/files/.local.py.un~ +0 -0
- planar/files/.local_filesystem.py.un~ +0 -0
- planar/files/.model.py.un~ +0 -0
- planar/files/.models.py.un~ +0 -0
- planar/files/.s3.py.un~ +0 -0
- planar/files/.storage.py.un~ +0 -0
- planar/files/.test_files.py.un~ +0 -0
- planar/files/storage/.__init__.py.un~ +0 -0
- planar/files/storage/.base.py.un~ +0 -0
- planar/files/storage/.config.py.un~ +0 -0
- planar/files/storage/.context.py.un~ +0 -0
- planar/files/storage/.local_directory.py.un~ +0 -0
- planar/files/storage/.test_local_directory.py.un~ +0 -0
- planar/files/storage/.test_s3.py.un~ +0 -0
- planar/human/.human.py.un~ +0 -0
- planar/human/.test_human.py.un~ +0 -0
- planar/logging/.__init__.py.un~ +0 -0
- planar/logging/.attributes.py.un~ +0 -0
- planar/logging/.formatter.py.un~ +0 -0
- planar/logging/.logger.py.un~ +0 -0
- planar/logging/.otel.py.un~ +0 -0
- planar/logging/.tracer.py.un~ +0 -0
- planar/modeling/.mixin.py.un~ +0 -0
- planar/modeling/.storage.py.un~ +0 -0
- planar/modeling/orm/.planar_base_model.py.un~ +0 -0
- planar/object_config/.object_config.py.un~ +0 -0
- planar/routers/.__init__.py.un~ +0 -0
- planar/routers/.agents_router.py.un~ +0 -0
- planar/routers/.crud.py.un~ +0 -0
- planar/routers/.decision.py.un~ +0 -0
- planar/routers/.event.py.un~ +0 -0
- planar/routers/.file_attachment.py.un~ +0 -0
- planar/routers/.files.py.un~ +0 -0
- planar/routers/.files_router.py.un~ +0 -0
- planar/routers/.human.py.un~ +0 -0
- planar/routers/.info.py.un~ +0 -0
- planar/routers/.models.py.un~ +0 -0
- planar/routers/.object_config_router.py.un~ +0 -0
- planar/routers/.rule.py.un~ +0 -0
- planar/routers/.test_object_config_router.py.un~ +0 -0
- planar/routers/.test_workflow_router.py.un~ +0 -0
- planar/routers/.workflow.py.un~ +0 -0
- planar/rules/.decorator.py.un~ +0 -0
- planar/rules/.runner.py.un~ +0 -0
- planar/rules/.test_rules.py.un~ +0 -0
- planar/security/.jwt_middleware.py.un~ +0 -0
- planar/sse/.constants.py.un~ +0 -0
- planar/sse/.example.html.un~ +0 -0
- planar/sse/.hub.py.un~ +0 -0
- planar/sse/.model.py.un~ +0 -0
- planar/sse/.proxy.py.un~ +0 -0
- planar/testing/.client.py.un~ +0 -0
- planar/testing/.memory_storage.py.un~ +0 -0
- planar/testing/.planar_test_client.py.un~ +0 -0
- planar/testing/.predictable_tracer.py.un~ +0 -0
- planar/testing/.synchronizable_tracer.py.un~ +0 -0
- planar/testing/.test_memory_storage.py.un~ +0 -0
- planar/testing/.workflow_observer.py.un~ +0 -0
- planar/workflows/.__init__.py.un~ +0 -0
- planar/workflows/.builtin_steps.py.un~ +0 -0
- planar/workflows/.concurrency_tracing.py.un~ +0 -0
- planar/workflows/.context.py.un~ +0 -0
- planar/workflows/.contrib.py.un~ +0 -0
- planar/workflows/.decorators.py.un~ +0 -0
- planar/workflows/.durable_test.py.un~ +0 -0
- planar/workflows/.errors.py.un~ +0 -0
- planar/workflows/.events.py.un~ +0 -0
- planar/workflows/.exceptions.py.un~ +0 -0
- planar/workflows/.execution.py.un~ +0 -0
- planar/workflows/.human.py.un~ +0 -0
- planar/workflows/.lock.py.un~ +0 -0
- planar/workflows/.misc.py.un~ +0 -0
- planar/workflows/.model.py.un~ +0 -0
- planar/workflows/.models.py.un~ +0 -0
- planar/workflows/.notifications.py.un~ +0 -0
- planar/workflows/.orchestrator.py.un~ +0 -0
- planar/workflows/.runtime.py.un~ +0 -0
- planar/workflows/.serialization.py.un~ +0 -0
- planar/workflows/.step.py.un~ +0 -0
- planar/workflows/.step_core.py.un~ +0 -0
- planar/workflows/.sub_workflow_runner.py.un~ +0 -0
- planar/workflows/.sub_workflow_scheduler.py.un~ +0 -0
- planar/workflows/.test_concurrency.py.un~ +0 -0
- planar/workflows/.test_concurrency_detection.py.un~ +0 -0
- planar/workflows/.test_human.py.un~ +0 -0
- planar/workflows/.test_lock_timeout.py.un~ +0 -0
- planar/workflows/.test_orchestrator.py.un~ +0 -0
- planar/workflows/.test_race_conditions.py.un~ +0 -0
- planar/workflows/.test_serialization.py.un~ +0 -0
- planar/workflows/.test_suspend_deserialization.py.un~ +0 -0
- planar/workflows/.test_workflow.py.un~ +0 -0
- planar/workflows/.tracing.py.un~ +0 -0
- planar/workflows/.types.py.un~ +0 -0
- planar/workflows/.util.py.un~ +0 -0
- planar/workflows/.utils.py.un~ +0 -0
- planar/workflows/.workflow.py.un~ +0 -0
- planar/workflows/.workflow_wrapper.py.un~ +0 -0
- planar/workflows/.wrappers.py.un~ +0 -0
- planar-0.5.0.dist-info/RECORD +0 -289
- {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/WHEEL +0 -0
- {planar-0.5.0.dist-info → planar-0.8.0.dist-info}/entry_points.txt +0 -0
planar/logging/test_formatter.py
ADDED
@@ -0,0 +1,327 @@
+import json
+import logging
+import re
+from datetime import datetime
+from decimal import Decimal
+from uuid import UUID, uuid4
+
+from pydantic import BaseModel
+
+from planar.logging.formatter import StructuredFormatter, dictionary_print, json_print
+
+
+class SampleModel(BaseModel):
+    name: str
+    value: int
+
+
+class TestJsonPrint:
+    def test_json_print_simple_values(self):
+        """Test json_print with simple values"""
+        assert json_print("test") == '"test"'
+        assert json_print(42) == "42"
+        assert json_print(True) == "true"
+        assert json_print(None) == "null"
+
+    def test_json_print_dict(self):
+        """Test json_print with dictionary"""
+        data = {"key": "value", "number": 42}
+        result = json_print(data)
+        assert json.loads(result) == data
+
+    def test_json_print_list_without_colors(self):
+        """Test json_print with list without colors - should produce valid JSON"""
+        data = ["item1", "item2", {"nested": "value"}]
+        result = json_print(data, use_colors=False)
+        # Should be valid JSON
+        parsed = json.loads(result)
+        assert parsed == data
+
+    def test_json_print_list_with_colors(self):
+        """Test json_print with list with colors - should produce valid JSON with ANSI codes"""
+        data = ["item1", "item2", {"nested": "value"}]
+        result = json_print(data, use_colors=True)
+        # Should contain ANSI escape codes but when stripped should be valid JSON
+        # Remove ANSI codes to check JSON validity
+        import re
+
+        ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+        clean_result = ansi_escape.sub("", result)
+        parsed = json.loads(clean_result)
+        assert parsed == data
+        # Should contain color codes
+        assert "\x1b[" in result
+
+    def test_json_print_nested_structures(self):
+        """Test json_print with deeply nested structures"""
+        data = {
+            "messages": [
+                {"content": "hello", "role": "user"},
+                {"content": "world", "role": "assistant"},
+            ],
+            "tools": [
+                {"name": "tool1", "params": {"key": "value"}},
+                {"name": "tool2", "params": {"num": 42}},
+            ],
+        }
+        result = json_print(data, use_colors=False)
+        parsed = json.loads(result)
+        assert parsed == data
+
+    def test_json_print_pydantic_model(self):
+        """Test json_print with Pydantic models"""
+        model = SampleModel(name="test", value=42)
+        result = json_print(model, use_colors=False)
+        parsed = json.loads(result)
+        assert parsed == {"name": "test", "value": 42}
+
+    def test_json_print_custom_objects(self):
+        """Test json_print with custom objects that need string conversion"""
+
+        class CustomObject:
+            def __str__(self):
+                return "custom_object"
+
+        data = {"obj": CustomObject()}
+        result = json_print(data, use_colors=False)
+        parsed = json.loads(result)
+        assert parsed == {"obj": "custom_object"}
+
+    def test_json_print_no_ansi_in_escaped_strings(self):
+        """Test that ANSI codes don't get escaped in JSON strings"""
+        data = ["message1", "message2", {"key": "value"}]
+        result = json_print(data, use_colors=False)
+        # Should not contain escaped ANSI codes like \u001b
+        assert "\\u001b" not in result
+        # Should be valid JSON
+        parsed = json.loads(result)
+        assert parsed == data
+
+    def test_json_print_complex_types(self):
+        """Test json_print with datetime, uuid, and decimal types"""
+        test_datetime = datetime(2023, 12, 25, 10, 30, 45)
+        test_uuid = uuid4()
+        test_decimal = Decimal("123.45")
+
+        # Test complex data structure with these types
+        data = {
+            "timestamp": test_datetime,
+            "id": test_uuid,
+            "amount": test_decimal,
+            "nested": {
+                "dates": [test_datetime, datetime(2024, 1, 1)],
+                "ids": [test_uuid, uuid4()],
+                "values": [test_decimal, Decimal("67.89")],
+            },
+        }
+
+        # Test without colors
+        result = json_print(data, use_colors=False)
+
+        # Should be valid JSON
+        parsed = json.loads(result)
+
+        # All complex types should be converted to strings
+        assert isinstance(parsed["timestamp"], str)
+        assert isinstance(parsed["id"], str)
+        assert isinstance(parsed["amount"], str)
+        assert isinstance(parsed["nested"]["dates"][0], str)
+        assert isinstance(parsed["nested"]["ids"][0], str)
+        assert isinstance(parsed["nested"]["values"][0], str)
+
+        # Verify string representations contain expected content
+        assert "2023-12-25" in parsed["timestamp"]
+        assert str(test_uuid) == parsed["id"]
+        assert "123.45" in parsed["amount"]
+
+        # Test with colors - should also work and produce valid JSON when stripped
+        result_colored = json_print(data, use_colors=True)
+        assert "\x1b[" in result_colored  # Should contain ANSI codes
+
+        ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+        clean_result = ansi_escape.sub("", result_colored)
+        parsed_colored = json.loads(clean_result)
+        assert parsed_colored == parsed  # Should be same as non-colored version
+
+    def test_json_print_with_base_model(self):
+        """Test json_print with BaseModel using complex data"""
+
+        class TestModel(BaseModel):
+            name: str
+            date: datetime
+            uuid_val: UUID
+            decimal_val: Decimal
+
+        model = TestModel(
+            name="test",
+            date=datetime(2023, 12, 25, 10, 30, 45),
+            uuid_val=uuid4(),
+            decimal_val=Decimal("123.45"),
+        )
+        result = json_print(model, use_colors=False)
+        parsed = json.loads(result)
+        assert parsed == {
+            "name": "test",
+            "date": "2023-12-25T10:30:45",
+            "uuid_val": str(model.uuid_val),
+            "decimal_val": "123.45",
+        }
+
+
+class TestDictionaryPrint:
+    def test_dictionary_print_simple(self):
+        """Test dictionary_print with simple values"""
+        data = {"key": "value", "number": 42}
+        result = dictionary_print(data, use_colors=False)
+        assert 'key="value"' in result
+        assert "number=42" in result
+
+    def test_dictionary_print_with_lists(self):
+        """Test dictionary_print with lists"""
+        data = {"items": ["a", "b", "c"], "count": 3}
+        result = dictionary_print(data, use_colors=False)
+        assert 'items=["a","b","c"]' in result or 'items=["a", "b", "c"]' in result
+        assert "count=3" in result
+
+    def test_dictionary_print_with_colors(self):
+        """Test dictionary_print with colors enabled"""
+        data = {"key": "value"}
+        result = dictionary_print(data, use_colors=True)
+        # Should contain ANSI codes
+        assert "\x1b[" in result
+        # Should still contain the key-value pair
+        assert "key=" in result
+
+
+class TestStructuredFormatter:
+    def test_structured_formatter_with_extra_attrs(self):
+        """Test StructuredFormatter with extra attributes"""
+        formatter = StructuredFormatter(use_colors=False)
+
+        record = logging.LogRecord(
+            name="test.logger",
+            level=logging.INFO,
+            pathname="test.py",
+            lineno=1,
+            msg="test message",
+            args=(),
+            exc_info=None,
+        )
+
+        # Add extra attributes (simulating what PlanarLogger does)
+        record.__dict__.update(
+            {
+                "$workflow_id": "test-workflow",
+                "$step_id": 42,
+                "$messages": ["msg1", "msg2"],
+            }
+        )
+
+        result = formatter.format(record)
+        assert "workflow_id=" in result
+        assert "step_id=42" in result
+        assert "messages=" in result
+        # Should not contain escaped ANSI codes
+        assert "\\u001b" not in result
+
+    def test_structured_formatter_with_colors(self):
+        """Test StructuredFormatter with colors enabled"""
+        formatter = StructuredFormatter(use_colors=True)
+
+        record = logging.LogRecord(
+            name="test.logger",
+            level=logging.INFO,
+            pathname="test.py",
+            lineno=1,
+            msg="test message",
+            args=(),
+            exc_info=None,
+        )
+
+        result = formatter.format(record)
+        # Should contain ANSI color codes
+        assert "\x1b[" in result
+
+    def test_structured_formatter_complex_data(self):
+        """Test StructuredFormatter with complex nested data like the original issue"""
+        formatter = StructuredFormatter(use_colors=True)
+
+        record = logging.LogRecord(
+            name="planar.ai.test_agent",
+            level=logging.INFO,
+            pathname="test_agent.py",
+            lineno=188,
+            msg="patched_complete",
+            args=(),
+            exc_info=None,
+        )
+
+        # Simulate the complex data from the original issue
+        record.__dict__.update(
+            {
+                "$messages": [
+                    {"content": "Use tools to solve the problem"},
+                    {"content": "Problem: complex problem", "files": []},
+                    {
+                        "content": None,
+                        "tool_calls": [
+                            {
+                                "id": "call_1",
+                                "name": "tool1",
+                                "arguments": {"param": "test_param"},
+                            }
+                        ],
+                    },
+                    {"content": "Tool 1 result: test_param", "tool_call_id": "call_1"},
+                ],
+                "$tools": [
+                    {
+                        "name": "tool1",
+                        "description": "Test tool 1",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {"param": {"type": "string"}},
+                        },
+                    },
+                    {
+                        "name": "tool2",
+                        "description": "Test tool 2",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {"num": {"type": "integer"}},
+                        },
+                    },
+                ],
+                "$workflow_id": "test-workflow-id",
+                "$step_id": 4,
+            }
+        )
+
+        result = formatter.format(record)
+
+        # Should contain the message
+        assert "patched_complete" in result
+        assert "planar.ai.test_agent" in result
+
+        # Should contain the extra attributes
+        assert "messages=" in result
+        assert "tools=" in result
+        assert "workflow_id=" in result
+        assert "step_id=" in result
+
+        # Most importantly: should NOT contain escaped ANSI codes
+        assert "\\u001b" not in result
+
+        # Should contain actual ANSI codes (for colors)
+        assert "\x1b[" in result
+
+        # Verify the fix - the data should be properly formatted JSON with colors
+        # Extract the JSON parts and verify they're valid when ANSI codes are stripped
+        import re
+
+        ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+        clean_result = ansi_escape.sub("", result)
+
+        # The messages should be valid JSON when extracted
+        assert '"content": "Use tools to solve the problem"' in clean_result
+        assert '"content": "Problem: complex problem"' in clean_result
planar/registry_items.py
CHANGED
@@ -54,7 +54,8 @@ class RegisteredWorkflow:
         return RegisteredWorkflow(
             obj=workflow,
             name=workflow.function_name,
-            description=workflow.__doc__
+            description=workflow.__doc__
+            or "No description provided for this workflow.",
             input_schema=generate_json_schema_for_input_parameters(
                 workflow.original_fn
             ),
planar/routers/agents_router.py
CHANGED
@@ -165,7 +165,9 @@ def create_agent_router(object_registry: ObjectRegistry) -> APIRouter:
            else AgentSimulationData
        )
        parsed_data = data_model.model_validate(request_copy.model_dump())
-
+        agent.event_emitter = emitter
+        await agent(parsed_data.input_value)
+        agent.event_emitter = None
        logger.debug(
            "background task finished for agent simulation",
            agent_name=agent_name,
planar/routers/files.py
CHANGED
@@ -1,3 +1,4 @@
+import mimetypes
 import uuid
 from uuid import UUID

@@ -14,6 +15,9 @@ logger = get_logger(__name__)

 router = APIRouter(tags=["Files"])

+# Add Parquet MIME type
+mimetypes.add_type("application/x-parquet", ".parquet")
+

 @router.post("/upload", response_model=list[PlanarFile])
 async def upload_files(files: list[UploadFile] = File(...)):
@@ -37,16 +41,21 @@ async def upload_files(files: list[UploadFile] = File(...)):
         await current_file.seek(0)

         try:
+            guessed_type, _ = mimetypes.guess_type(file.filename or "")
+            final_content_type = (
+                guessed_type or file.content_type or "application/octet-stream"
+            )
+
             # Store the file content using the storage backend
             storage_ref = await storage.put(
-                stream=file_stream_generator(file), mime_type=
+                stream=file_stream_generator(file), mime_type=final_content_type
             )

             # Create the metadata record in the database
             planar_file = PlanarFileMetadata(
                 filename=file.filename
                 or str(uuid.uuid4()),  # Use filename or default to random UUID
-                content_type=
+                content_type=final_content_type,
                 size=file.size
                 if file.size is not None
                 else -1,  # Store size if available
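Aside: the upload hunks above amount to a small content-type fallback built on the standard-library mimetypes module. The sketch below restates that logic in isolation under stated assumptions; resolve_content_type is a hypothetical helper used here for illustration, not a function in the package.

import mimetypes

# Register the Parquet type once, mirroring what the route module now does at import time.
mimetypes.add_type("application/x-parquet", ".parquet")


def resolve_content_type(filename: str | None, declared_type: str | None) -> str:
    """Prefer the extension-based guess, then the client-declared type, then a generic default."""
    guessed_type, _ = mimetypes.guess_type(filename or "")
    return guessed_type or declared_type or "application/octet-stream"


# A browser that uploads a .parquet file as a generic octet-stream still gets
# the metadata stored as application/x-parquet, which is what the new test checks.
assert resolve_content_type("report.parquet", "application/octet-stream") == "application/x-parquet"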
planar/routers/models.py
CHANGED
@@ -81,6 +81,12 @@ class WorkflowDefinition(BaseModel):
     durations: DurationStats | None = None


+class StepStats(BaseModel):
+    completed: int = 0
+    failed: int = 0
+    running: int = 0
+
+
 class WorkflowRun(BaseModel):
     id: UUID
     status: WorkflowStatus
@@ -90,6 +96,13 @@ class WorkflowRun(BaseModel):
     error: Dict[str, Any] | None = None
     created_at: datetime
     updated_at: datetime
+    step_stats: StepStats
+
+
+class StepRunError(BaseModel):
+    type: str
+    message: str
+    traceback: str | None = None


 class WorkflowStepInfo(BaseModel):
@@ -108,7 +121,7 @@ class WorkflowStepInfo(BaseModel):
     args: List[Any] | None = None
     kwargs: Dict[str, Any] | None = None
     result: Any | None = None
-    error:
+    error: StepRunError | None = None
     retry_count: int
     created_at: datetime
     updated_at: datetime
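For reference, the new response models introduced above compose as plain Pydantic models: a run carries aggregate step counts and a step's error becomes structured instead of free-form. The snippet below is only an illustrative construction using the fields visible in the hunks, not code from the package.

from pydantic import BaseModel


class StepStats(BaseModel):
    completed: int = 0
    failed: int = 0
    running: int = 0


class StepRunError(BaseModel):
    type: str
    message: str
    traceback: str | None = None


# Example payloads a client might now see on workflow-run and step endpoints.
stats = StepStats(completed=3, failed=1, running=0)
error = StepRunError(type="ValueError", message="bad input", traceback=None)
print(stats.model_dump(), error.model_dump())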
planar/routers/test_files_router.py
ADDED
@@ -0,0 +1,49 @@
+import io
+from uuid import UUID
+
+import pytest
+from sqlmodel.ext.asyncio.session import AsyncSession
+
+from planar import PlanarApp, sqlite_config
+from planar.files.models import PlanarFileMetadata
+from planar.testing.planar_test_client import PlanarTestClient
+
+
+@pytest.fixture(name="app")
+def app_fixture():
+    return PlanarApp(
+        config=sqlite_config(":memory:"),
+        title="Test app for files router",
+        description="Testing files endpoints",
+    )
+
+
+async def test_upload_parquet_sets_content_type(
+    client: PlanarTestClient, session: AsyncSession
+):
+    """Uploading a .parquet file should persist application/x-parquet in metadata."""
+
+    # Prepare a small in-memory payload and intentionally send an octet-stream
+    # to simulate browsers that don't know parquet. The route should override
+    # this using mimetypes.guess_type.
+    filename = "test_data.parquet"
+    payload = b"PAR1"  # content doesn't matter for MIME guessing by filename
+
+    files = {
+        "files": (filename, io.BytesIO(payload), "application/octet-stream"),
+    }
+
+    resp = await client.post("/planar/v1/file/upload", files=files)
+    assert resp.status_code == 200
+
+    body = resp.json()
+    assert isinstance(body, list) and len(body) == 1
+    file_item = body[0]
+    assert file_item["filename"] == filename
+
+    # Verify the database record has the correct MIME type
+    file_id = UUID(file_item["id"])
+    meta = await session.get(PlanarFileMetadata, file_id)
+
+    assert meta is not None
+    assert meta.content_type == "application/x-parquet"
planar/routers/test_routes_security.py
CHANGED
@@ -3,7 +3,7 @@ from http import HTTPStatus
 import pytest

 from planar import PlanarApp, sqlite_config
-from planar.config import AuthzConfig
+from planar.config import AuthzConfig, SecurityConfig
 from planar.security.auth_context import Principal, clear_principal, set_principal
 from planar.testing.planar_test_client import PlanarTestClient
 from planar.workflows import workflow
@@ -21,8 +21,6 @@ async def simple_test_workflow(test_id: str) -> str:
 @pytest.fixture(name="app_with_no_authz")
 def create_app_no_authz():
     config = sqlite_config("test_authz_router.db")
-    config.jwt = None
-    config.authz = None

     return PlanarApp(
         config=config,
@@ -34,8 +32,7 @@ def create_app_no_authz():
 @pytest.fixture(name="app_with_default_authz")
 def create_app_with_authz():
     config = sqlite_config("test_authz_router.db")
-    config.
-    config.authz = AuthzConfig(enabled=True, policy_file=None)
+    config.security = SecurityConfig(authz=AuthzConfig(enabled=True, policy_file=None))

     return PlanarApp(
         config=config,
@@ -66,8 +63,9 @@ def restrictive_policy_file(tmp_path):
 @pytest.fixture(name="app_with_restricted_authz")
 def create_app_with_restricted_authz(restrictive_policy_file):
     config = sqlite_config("test_authz_router.db")
-    config.
-
+    config.security = SecurityConfig(
+        authz=AuthzConfig(enabled=True, policy_file=restrictive_policy_file)
+    )

     return PlanarApp(
         config=config,