planar 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (289) hide show
  1. planar/.__init__.py.un~ +0 -0
  2. planar/._version.py.un~ +0 -0
  3. planar/.app.py.un~ +0 -0
  4. planar/.cli.py.un~ +0 -0
  5. planar/.config.py.un~ +0 -0
  6. planar/.context.py.un~ +0 -0
  7. planar/.db.py.un~ +0 -0
  8. planar/.di.py.un~ +0 -0
  9. planar/.engine.py.un~ +0 -0
  10. planar/.files.py.un~ +0 -0
  11. planar/.log_context.py.un~ +0 -0
  12. planar/.log_metadata.py.un~ +0 -0
  13. planar/.logging.py.un~ +0 -0
  14. planar/.object_registry.py.un~ +0 -0
  15. planar/.otel.py.un~ +0 -0
  16. planar/.server.py.un~ +0 -0
  17. planar/.session.py.un~ +0 -0
  18. planar/.sqlalchemy.py.un~ +0 -0
  19. planar/.task_local.py.un~ +0 -0
  20. planar/.test_app.py.un~ +0 -0
  21. planar/.test_config.py.un~ +0 -0
  22. planar/.test_object_config.py.un~ +0 -0
  23. planar/.test_sqlalchemy.py.un~ +0 -0
  24. planar/.test_utils.py.un~ +0 -0
  25. planar/.util.py.un~ +0 -0
  26. planar/.utils.py.un~ +0 -0
  27. planar/__init__.py +26 -0
  28. planar/_version.py +1 -0
  29. planar/ai/.__init__.py.un~ +0 -0
  30. planar/ai/._models.py.un~ +0 -0
  31. planar/ai/.agent.py.un~ +0 -0
  32. planar/ai/.agent_utils.py.un~ +0 -0
  33. planar/ai/.events.py.un~ +0 -0
  34. planar/ai/.files.py.un~ +0 -0
  35. planar/ai/.models.py.un~ +0 -0
  36. planar/ai/.providers.py.un~ +0 -0
  37. planar/ai/.pydantic_ai.py.un~ +0 -0
  38. planar/ai/.pydantic_ai_agent.py.un~ +0 -0
  39. planar/ai/.pydantic_ai_provider.py.un~ +0 -0
  40. planar/ai/.step.py.un~ +0 -0
  41. planar/ai/.test_agent.py.un~ +0 -0
  42. planar/ai/.test_agent_serialization.py.un~ +0 -0
  43. planar/ai/.test_providers.py.un~ +0 -0
  44. planar/ai/.utils.py.un~ +0 -0
  45. planar/ai/__init__.py +15 -0
  46. planar/ai/agent.py +457 -0
  47. planar/ai/agent_utils.py +205 -0
  48. planar/ai/models.py +140 -0
  49. planar/ai/providers.py +1088 -0
  50. planar/ai/test_agent.py +1298 -0
  51. planar/ai/test_agent_serialization.py +229 -0
  52. planar/ai/test_providers.py +463 -0
  53. planar/ai/utils.py +102 -0
  54. planar/app.py +494 -0
  55. planar/cli.py +282 -0
  56. planar/config.py +544 -0
  57. planar/db/.db.py.un~ +0 -0
  58. planar/db/__init__.py +17 -0
  59. planar/db/alembic/env.py +136 -0
  60. planar/db/alembic/script.py.mako +28 -0
  61. planar/db/alembic/versions/3476068c153c_initial_system_tables_migration.py +339 -0
  62. planar/db/alembic.ini +128 -0
  63. planar/db/db.py +318 -0
  64. planar/files/.config.py.un~ +0 -0
  65. planar/files/.local.py.un~ +0 -0
  66. planar/files/.local_filesystem.py.un~ +0 -0
  67. planar/files/.model.py.un~ +0 -0
  68. planar/files/.models.py.un~ +0 -0
  69. planar/files/.s3.py.un~ +0 -0
  70. planar/files/.storage.py.un~ +0 -0
  71. planar/files/.test_files.py.un~ +0 -0
  72. planar/files/__init__.py +2 -0
  73. planar/files/models.py +162 -0
  74. planar/files/storage/.__init__.py.un~ +0 -0
  75. planar/files/storage/.base.py.un~ +0 -0
  76. planar/files/storage/.config.py.un~ +0 -0
  77. planar/files/storage/.context.py.un~ +0 -0
  78. planar/files/storage/.local_directory.py.un~ +0 -0
  79. planar/files/storage/.test_local_directory.py.un~ +0 -0
  80. planar/files/storage/.test_s3.py.un~ +0 -0
  81. planar/files/storage/base.py +61 -0
  82. planar/files/storage/config.py +44 -0
  83. planar/files/storage/context.py +15 -0
  84. planar/files/storage/local_directory.py +188 -0
  85. planar/files/storage/s3.py +220 -0
  86. planar/files/storage/test_local_directory.py +162 -0
  87. planar/files/storage/test_s3.py +299 -0
  88. planar/files/test_files.py +283 -0
  89. planar/human/.human.py.un~ +0 -0
  90. planar/human/.test_human.py.un~ +0 -0
  91. planar/human/__init__.py +2 -0
  92. planar/human/human.py +458 -0
  93. planar/human/models.py +80 -0
  94. planar/human/test_human.py +385 -0
  95. planar/logging/.__init__.py.un~ +0 -0
  96. planar/logging/.attributes.py.un~ +0 -0
  97. planar/logging/.formatter.py.un~ +0 -0
  98. planar/logging/.logger.py.un~ +0 -0
  99. planar/logging/.otel.py.un~ +0 -0
  100. planar/logging/.tracer.py.un~ +0 -0
  101. planar/logging/__init__.py +10 -0
  102. planar/logging/attributes.py +54 -0
  103. planar/logging/context.py +14 -0
  104. planar/logging/formatter.py +113 -0
  105. planar/logging/logger.py +114 -0
  106. planar/logging/otel.py +51 -0
  107. planar/modeling/.mixin.py.un~ +0 -0
  108. planar/modeling/.storage.py.un~ +0 -0
  109. planar/modeling/__init__.py +0 -0
  110. planar/modeling/field_helpers.py +59 -0
  111. planar/modeling/json_schema_generator.py +94 -0
  112. planar/modeling/mixins/__init__.py +10 -0
  113. planar/modeling/mixins/auditable.py +52 -0
  114. planar/modeling/mixins/test_auditable.py +97 -0
  115. planar/modeling/mixins/test_timestamp.py +134 -0
  116. planar/modeling/mixins/test_uuid_primary_key.py +52 -0
  117. planar/modeling/mixins/timestamp.py +53 -0
  118. planar/modeling/mixins/uuid_primary_key.py +19 -0
  119. planar/modeling/orm/.planar_base_model.py.un~ +0 -0
  120. planar/modeling/orm/__init__.py +18 -0
  121. planar/modeling/orm/planar_base_entity.py +29 -0
  122. planar/modeling/orm/query_filter_builder.py +122 -0
  123. planar/modeling/orm/reexports.py +15 -0
  124. planar/object_config/.object_config.py.un~ +0 -0
  125. planar/object_config/__init__.py +11 -0
  126. planar/object_config/models.py +114 -0
  127. planar/object_config/object_config.py +378 -0
  128. planar/object_registry.py +100 -0
  129. planar/registry_items.py +65 -0
  130. planar/routers/.__init__.py.un~ +0 -0
  131. planar/routers/.agents_router.py.un~ +0 -0
  132. planar/routers/.crud.py.un~ +0 -0
  133. planar/routers/.decision.py.un~ +0 -0
  134. planar/routers/.event.py.un~ +0 -0
  135. planar/routers/.file_attachment.py.un~ +0 -0
  136. planar/routers/.files.py.un~ +0 -0
  137. planar/routers/.files_router.py.un~ +0 -0
  138. planar/routers/.human.py.un~ +0 -0
  139. planar/routers/.info.py.un~ +0 -0
  140. planar/routers/.models.py.un~ +0 -0
  141. planar/routers/.object_config_router.py.un~ +0 -0
  142. planar/routers/.rule.py.un~ +0 -0
  143. planar/routers/.test_object_config_router.py.un~ +0 -0
  144. planar/routers/.test_workflow_router.py.un~ +0 -0
  145. planar/routers/.workflow.py.un~ +0 -0
  146. planar/routers/__init__.py +13 -0
  147. planar/routers/agents_router.py +197 -0
  148. planar/routers/entity_router.py +143 -0
  149. planar/routers/event.py +91 -0
  150. planar/routers/files.py +142 -0
  151. planar/routers/human.py +151 -0
  152. planar/routers/info.py +131 -0
  153. planar/routers/models.py +170 -0
  154. planar/routers/object_config_router.py +133 -0
  155. planar/routers/rule.py +108 -0
  156. planar/routers/test_agents_router.py +174 -0
  157. planar/routers/test_object_config_router.py +367 -0
  158. planar/routers/test_routes_security.py +169 -0
  159. planar/routers/test_rule_router.py +470 -0
  160. planar/routers/test_workflow_router.py +274 -0
  161. planar/routers/workflow.py +468 -0
  162. planar/rules/.decorator.py.un~ +0 -0
  163. planar/rules/.runner.py.un~ +0 -0
  164. planar/rules/.test_rules.py.un~ +0 -0
  165. planar/rules/__init__.py +23 -0
  166. planar/rules/decorator.py +184 -0
  167. planar/rules/models.py +355 -0
  168. planar/rules/rule_configuration.py +191 -0
  169. planar/rules/runner.py +64 -0
  170. planar/rules/test_rules.py +750 -0
  171. planar/scaffold_templates/app/__init__.py.j2 +0 -0
  172. planar/scaffold_templates/app/db/entities.py.j2 +11 -0
  173. planar/scaffold_templates/app/flows/process_invoice.py.j2 +67 -0
  174. planar/scaffold_templates/main.py.j2 +13 -0
  175. planar/scaffold_templates/planar.dev.yaml.j2 +34 -0
  176. planar/scaffold_templates/planar.prod.yaml.j2 +28 -0
  177. planar/scaffold_templates/pyproject.toml.j2 +10 -0
  178. planar/security/.jwt_middleware.py.un~ +0 -0
  179. planar/security/auth_context.py +148 -0
  180. planar/security/authorization.py +388 -0
  181. planar/security/default_policies.cedar +77 -0
  182. planar/security/jwt_middleware.py +116 -0
  183. planar/security/security_context.py +18 -0
  184. planar/security/tests/test_authorization_context.py +78 -0
  185. planar/security/tests/test_cedar_basics.py +41 -0
  186. planar/security/tests/test_cedar_policies.py +158 -0
  187. planar/security/tests/test_jwt_principal_context.py +179 -0
  188. planar/session.py +40 -0
  189. planar/sse/.constants.py.un~ +0 -0
  190. planar/sse/.example.html.un~ +0 -0
  191. planar/sse/.hub.py.un~ +0 -0
  192. planar/sse/.model.py.un~ +0 -0
  193. planar/sse/.proxy.py.un~ +0 -0
  194. planar/sse/constants.py +1 -0
  195. planar/sse/example.html +126 -0
  196. planar/sse/hub.py +216 -0
  197. planar/sse/model.py +8 -0
  198. planar/sse/proxy.py +257 -0
  199. planar/task_local.py +37 -0
  200. planar/test_app.py +51 -0
  201. planar/test_cli.py +372 -0
  202. planar/test_config.py +512 -0
  203. planar/test_object_config.py +527 -0
  204. planar/test_object_registry.py +14 -0
  205. planar/test_sqlalchemy.py +158 -0
  206. planar/test_utils.py +105 -0
  207. planar/testing/.client.py.un~ +0 -0
  208. planar/testing/.memory_storage.py.un~ +0 -0
  209. planar/testing/.planar_test_client.py.un~ +0 -0
  210. planar/testing/.predictable_tracer.py.un~ +0 -0
  211. planar/testing/.synchronizable_tracer.py.un~ +0 -0
  212. planar/testing/.test_memory_storage.py.un~ +0 -0
  213. planar/testing/.workflow_observer.py.un~ +0 -0
  214. planar/testing/__init__.py +0 -0
  215. planar/testing/memory_storage.py +78 -0
  216. planar/testing/planar_test_client.py +54 -0
  217. planar/testing/synchronizable_tracer.py +153 -0
  218. planar/testing/test_memory_storage.py +143 -0
  219. planar/testing/workflow_observer.py +73 -0
  220. planar/utils.py +70 -0
  221. planar/workflows/.__init__.py.un~ +0 -0
  222. planar/workflows/.builtin_steps.py.un~ +0 -0
  223. planar/workflows/.concurrency_tracing.py.un~ +0 -0
  224. planar/workflows/.context.py.un~ +0 -0
  225. planar/workflows/.contrib.py.un~ +0 -0
  226. planar/workflows/.decorators.py.un~ +0 -0
  227. planar/workflows/.durable_test.py.un~ +0 -0
  228. planar/workflows/.errors.py.un~ +0 -0
  229. planar/workflows/.events.py.un~ +0 -0
  230. planar/workflows/.exceptions.py.un~ +0 -0
  231. planar/workflows/.execution.py.un~ +0 -0
  232. planar/workflows/.human.py.un~ +0 -0
  233. planar/workflows/.lock.py.un~ +0 -0
  234. planar/workflows/.misc.py.un~ +0 -0
  235. planar/workflows/.model.py.un~ +0 -0
  236. planar/workflows/.models.py.un~ +0 -0
  237. planar/workflows/.notifications.py.un~ +0 -0
  238. planar/workflows/.orchestrator.py.un~ +0 -0
  239. planar/workflows/.runtime.py.un~ +0 -0
  240. planar/workflows/.serialization.py.un~ +0 -0
  241. planar/workflows/.step.py.un~ +0 -0
  242. planar/workflows/.step_core.py.un~ +0 -0
  243. planar/workflows/.sub_workflow_runner.py.un~ +0 -0
  244. planar/workflows/.sub_workflow_scheduler.py.un~ +0 -0
  245. planar/workflows/.test_concurrency.py.un~ +0 -0
  246. planar/workflows/.test_concurrency_detection.py.un~ +0 -0
  247. planar/workflows/.test_human.py.un~ +0 -0
  248. planar/workflows/.test_lock_timeout.py.un~ +0 -0
  249. planar/workflows/.test_orchestrator.py.un~ +0 -0
  250. planar/workflows/.test_race_conditions.py.un~ +0 -0
  251. planar/workflows/.test_serialization.py.un~ +0 -0
  252. planar/workflows/.test_suspend_deserialization.py.un~ +0 -0
  253. planar/workflows/.test_workflow.py.un~ +0 -0
  254. planar/workflows/.tracing.py.un~ +0 -0
  255. planar/workflows/.types.py.un~ +0 -0
  256. planar/workflows/.util.py.un~ +0 -0
  257. planar/workflows/.utils.py.un~ +0 -0
  258. planar/workflows/.workflow.py.un~ +0 -0
  259. planar/workflows/.workflow_wrapper.py.un~ +0 -0
  260. planar/workflows/.wrappers.py.un~ +0 -0
  261. planar/workflows/__init__.py +42 -0
  262. planar/workflows/context.py +44 -0
  263. planar/workflows/contrib.py +190 -0
  264. planar/workflows/decorators.py +217 -0
  265. planar/workflows/events.py +185 -0
  266. planar/workflows/exceptions.py +34 -0
  267. planar/workflows/execution.py +198 -0
  268. planar/workflows/lock.py +229 -0
  269. planar/workflows/misc.py +5 -0
  270. planar/workflows/models.py +154 -0
  271. planar/workflows/notifications.py +96 -0
  272. planar/workflows/orchestrator.py +383 -0
  273. planar/workflows/query.py +256 -0
  274. planar/workflows/serialization.py +409 -0
  275. planar/workflows/step_core.py +373 -0
  276. planar/workflows/step_metadata.py +357 -0
  277. planar/workflows/step_testing_utils.py +86 -0
  278. planar/workflows/sub_workflow_runner.py +191 -0
  279. planar/workflows/test_concurrency_detection.py +120 -0
  280. planar/workflows/test_lock_timeout.py +140 -0
  281. planar/workflows/test_serialization.py +1195 -0
  282. planar/workflows/test_suspend_deserialization.py +231 -0
  283. planar/workflows/test_workflow.py +1967 -0
  284. planar/workflows/tracing.py +106 -0
  285. planar/workflows/wrappers.py +41 -0
  286. planar-0.5.0.dist-info/METADATA +285 -0
  287. planar-0.5.0.dist-info/RECORD +289 -0
  288. planar-0.5.0.dist-info/WHEEL +4 -0
  289. planar-0.5.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,229 @@
1
+ """
2
+ Tests for agent serialization functionality.
3
+
4
+ This module tests the serialization of agents including configuration
5
+ management and schema validation warnings.
6
+ """
7
+
8
+ import pytest
9
+ from pydantic import BaseModel
10
+ from sqlmodel.ext.asyncio.session import AsyncSession
11
+
12
+ from planar.ai.agent import Agent
13
+ from planar.ai.agent_utils import AgentConfig, agent_configuration
14
+ from planar.ai.models import AgentSerializeable
15
+ from planar.ai.utils import serialize_agent
16
+ from planar.object_config.object_config import ObjectConfigurationBase
17
+
18
+
19
class InputModelForTest(BaseModel):
    """Input payload used by the serialization test agents."""

    # One text field plus one numeric field — enough to exercise
    # JSON-schema generation for the agent's input type.
    text: str
    value: int
24
+
25
+
26
class OutputModelForTest(BaseModel):
    """Output payload used by the serialization test agents."""

    # A string result and a float score, mirroring the input model's
    # two-field shape on the output side.
    result: str
    score: float
31
+
32
+
33
@pytest.fixture
def test_agent():
    """Build an Agent wired with input/output models for serialization tests."""
    agent = Agent(
        name="test_serialization_agent",
        system_prompt="Test system prompt",
        user_prompt="Test user prompt: {input}",
        model="openai:gpt-4o",
        max_turns=3,
        input_type=InputModelForTest,
        output_type=OutputModelForTest,
    )
    return agent
45
+
46
+
47
@pytest.fixture
def test_agent_with_tools():
    """Build an Agent carrying a single async tool."""

    async def test_tool(param: str) -> str:
        """A test tool."""
        return f"Processed: {param}"

    # Tool name and docstring above are asserted verbatim by the
    # serialization tests, so they must stay exactly as written.
    agent = Agent(
        name="test_agent_with_tools",
        system_prompt="System with tools",
        user_prompt="User: {input}",
        model="anthropic:claude-3-sonnet",
        max_turns=5,
        tools=[test_tool],
    )
    return agent
63
+
64
+
65
async def test_serialize_agent_basic(session: AsyncSession, test_agent):
    """Test basic agent serialization without any configurations."""
    result = await serialize_agent(test_agent)

    # Top-level shape checks.
    assert isinstance(result, AgentSerializeable)
    assert result.name == "test_serialization_agent"
    assert result.input_schema is not None
    assert result.output_schema is not None
    assert result.tool_definitions == []

    # The default config (version 0) is always present, even with
    # no explicitly written configurations.
    assert hasattr(result, "configs")
    assert len(result.configs) == 1

    base = result.configs[-1]
    assert isinstance(base, ObjectConfigurationBase)
    assert base.version == 0
    # The default config mirrors the agent's own settings.
    for attr in ("system_prompt", "user_prompt", "max_turns", "model_parameters"):
        assert getattr(base.data, attr) == getattr(test_agent, attr)
    assert base.data.model == str(test_agent.model)

    # The legacy "overwrites" field must be gone from the serialized form.
    assert not hasattr(result, "overwrites")
94
+
95
+
96
async def test_serialize_agent_with_configs(session: AsyncSession, test_agent):
    """Test agent serialization with multiple configurations."""
    overrides = [
        AgentConfig(
            system_prompt="Override system 1",
            user_prompt="Override user 1: {input}",
            model="openai:gpt-4o",
            max_turns=2,
            model_parameters={"temperature": 0.7},
        ),
        AgentConfig(
            system_prompt="Override system 2",
            user_prompt="Override user 2: {input}",
            model="anthropic:claude-3-opus",
            max_turns=4,
            model_parameters={"temperature": 0.9},
        ),
    ]
    # Persist both overrides, in order, so version 1 then version 2 exist.
    for cfg in overrides:
        await agent_configuration.write_config(test_agent.name, cfg)

    result = await serialize_agent(test_agent)

    # Two written configs plus the implicit default.
    assert len(result.configs) == 3
    assert all(isinstance(c, ObjectConfigurationBase) for c in result.configs)

    # The default config sits last and mirrors the agent's own settings.
    base = result.configs[-1]
    assert base.version == 0
    assert base.data.system_prompt == test_agent.system_prompt
    assert base.data.user_prompt == test_agent.user_prompt
    assert base.data.model == str(test_agent.model)
    assert base.data.max_turns == test_agent.max_turns
    assert base.data.model_parameters == test_agent.model_parameters

    # Written configs are ordered newest-first (descending version).
    assert result.configs[0].version == 2
    assert result.configs[1].version == 1

    newest = result.configs[0]
    assert newest.data.system_prompt == "Override system 2"
    assert newest.data.user_prompt == "Override user 2: {input}"
    assert newest.data.model == "anthropic:claude-3-opus"
    assert newest.data.max_turns == 4

    previous = result.configs[1]
    assert previous.data.system_prompt == "Override system 1"
    assert previous.data.user_prompt == "Override user 1: {input}"
153
+
154
+
155
async def test_serialize_agent_with_tools(session: AsyncSession, test_agent_with_tools):
    """Test serialization of agent with tools."""
    result = await serialize_agent(test_agent_with_tools)

    # Exactly one tool was registered on the fixture agent; its name,
    # docstring-derived description, and parameter schema must survive
    # serialization.
    assert len(result.tool_definitions) == 1
    definition = result.tool_definitions[0]
    assert definition["name"] == "test_tool"
    assert definition["description"] == "A test tool."
    assert "parameters" in definition
167
+
168
+
169
async def test_serialize_agent_no_duplicate_fields(session: AsyncSession, test_agent):
    """Test that AgentSerializeable doesn't duplicate fields from AgentConfig."""
    written = AgentConfig(
        system_prompt="Config system",
        user_prompt="Config user: {input}",
        model="openai:gpt-3.5-turbo",
        max_turns=1,
        model_parameters={},
    )
    await agent_configuration.write_config(test_agent.name, written)

    result = await serialize_agent(test_agent)

    # system_prompt / user_prompt / model / max_turns must not appear as
    # top-level attributes — they live only inside the configs.
    for moved in ("system_prompt", "user_prompt", "model", "max_turns"):
        assert not hasattr(result, moved)

    # The written config (newest-first, index 0) carries those values instead.
    latest = result.configs[0].data
    assert latest.system_prompt == "Config system"
    assert latest.user_prompt == "Config user: {input}"
    assert latest.model == "openai:gpt-3.5-turbo"
    assert latest.max_turns == 1
198
+
199
+
200
async def test_agent_serializable_structure():
    """Test the structure of AgentSerializeable model."""
    declared = set(AgentSerializeable.model_fields.keys())

    # Fields the serialized form must expose.
    required = {
        "name",
        "input_schema",
        "output_schema",
        "tool_definitions",
        "configs",
        "built_in_vars",
    }
    assert required <= declared

    # Fields that were moved into configs (or removed outright) and must
    # no longer be declared on the model itself.
    relocated = {"system_prompt", "user_prompt", "model", "max_turns", "overwrites"}
    assert not relocated & declared
219
+
220
+
221
async def test_configs_field_type():
    """Test that configs field has the correct type annotation."""
    field_info = AgentSerializeable.model_fields["configs"]

    # The annotation should be list[ObjectConfigurationBase[AgentConfig]].
    # Generics are awkward to compare directly, so inspect the string form.
    annotation_repr = str(field_info.annotation)
    assert "ObjectConfigurationBase" in annotation_repr
    assert "AgentConfig" in annotation_repr
@@ -0,0 +1,463 @@
1
+ import json
2
+ from unittest.mock import AsyncMock, Mock, patch
3
+ from uuid import UUID
4
+
5
+ import pytest
6
+ from pydantic import BaseModel, SecretStr
7
+
8
+ from planar.ai.models import (
9
+ AssistantMessage,
10
+ Base64Content,
11
+ FileIdContent,
12
+ FileMap,
13
+ ModelMessage,
14
+ SystemMessage,
15
+ ToolCall,
16
+ ToolMessage,
17
+ ToolResponse,
18
+ UserMessage,
19
+ )
20
+ from planar.ai.providers import Anthropic, ModelSpec, OpenAI, OpenAIProvider
21
+ from planar.config import (
22
+ AIProvidersConfig,
23
+ AppConfig,
24
+ OpenAIConfig,
25
+ PlanarConfig,
26
+ SQLiteConfig,
27
+ )
28
+ from planar.files.models import PlanarFile
29
+ from planar.session import config_var
30
+
31
+
32
class DummyOutput(BaseModel):
    """Simple structured-output payload used across the provider tests."""

    value: str
    score: int
35
+
36
+
37
+ class DummyGenericOutput[T: BaseModel](BaseModel):
38
+ value: T
39
+
40
+
41
+ # Mock classes for OpenAI client
42
class MockResponse:
    """Minimal stand-in for an OpenAI chat-completion response object."""

    def __init__(
        self, content="Test response", tool_calls=None, structured_output=None
    ):
        # A structured (parsed) payload takes precedence over plain text.
        body = structured_output if structured_output is not None else content
        # Mirror the real response shape: response.choices[0].message.{content,tool_calls}
        self.choices = [Mock(message=Mock(content=body, tool_calls=tool_calls))]
52
+
53
+
54
class MockCompletions:
    """Fake `chat.completions` endpoint that records the kwargs it receives."""

    def __init__(self):
        # Filled in by create(); lets tests inspect exactly what was sent.
        self.captured_kwargs = None

    async def create(self, **kwargs):
        # Remember the provider's request, then hand back a canned reply.
        self.captured_kwargs = kwargs
        return MockResponse()
61
+
62
+
63
class MockBetaCompletions:
    """Fake `beta.chat.completions` endpoint supporting structured parsing."""

    def __init__(self):
        # Filled in by parse(); lets tests inspect exactly what was sent.
        self.captured_kwargs = None

    async def parse(self, response_format=None, **kwargs):
        """Handle structured output parsing"""
        # Capture kwargs plus the response_format under one key for assertions.
        self.captured_kwargs = {**kwargs, "response_format": response_format}
        if response_format and hasattr(response_format, "model_validate"):
            if response_format == DummyGenericOutput[DummyOutput]:
                # Known generic case gets a fully-typed nested payload.
                payload = DummyGenericOutput[DummyOutput](
                    value=DummyOutput(value="test value", score=95)
                )
            else:
                # Any other model gets generic placeholder values.
                payload = response_format.model_validate(
                    {"value": "test", "score": 100}
                )
            return MockResponse(structured_output=payload)
        # No (usable) response_format: behave like a plain completion.
        return MockResponse()
86
+
87
+
88
class MockChat:
    """Container mirroring the OpenAI client's `chat` namespace."""

    def __init__(self):
        # Expose a completions endpoint that records call kwargs.
        self.completions = MockCompletions()
91
+
92
+
93
class MockBetaChat:
    """Container mirroring the OpenAI client's `beta.chat` namespace."""

    def __init__(self):
        # Structured-output parsing endpoint used by the beta API surface.
        self.completions = MockBetaCompletions()
96
+
97
+
98
class MockBeta:
    """Container mirroring the OpenAI client's `beta` namespace."""

    def __init__(self):
        self.chat = MockBetaChat()
101
+
102
+
103
class MockClient:
    """Top-level fake OpenAI async client (accepts and ignores constructor kwargs)."""

    def __init__(self, **kwargs):
        self.chat = MockChat()
        self.beta = MockBeta()
107
+
108
+
109
@pytest.fixture(name="mock_openai_client")
def mock_openai_client_fixture(monkeypatch):
    """Set up a mock OpenAI client for testing."""
    client = MockClient()
    # Any AsyncOpenAI construction inside the code under test now yields
    # our fake, regardless of the kwargs it is called with.
    monkeypatch.setattr("openai.AsyncOpenAI", lambda **kwargs: client)
    return client
115
+
116
+
117
@pytest.fixture(name="fake_config")
def fake_config_fixture():
    """Install a minimal PlanarConfig in the session context for a test.

    Uses the real config classes (not mocks) so type compatibility with
    production code paths is preserved. Yields the config and always
    restores the previous context-var value afterwards.
    """
    openai_config = OpenAIConfig(
        api_key=SecretStr("mock_key"),
        base_url="https://api.openai.com/v1",
        organization=None,
    )

    ai_providers = AIProvidersConfig(openai=openai_config)

    # Minimal valid PlanarConfig: one in-memory SQLite connection.
    mock_config = PlanarConfig(
        db_connections={"app": SQLiteConfig(path=":memory:")},
        app=AppConfig(db_connection="app"),
        ai_providers=ai_providers,
    )

    # Set the config in the context variable for the duration of the test.
    token = config_var.set(mock_config)
    try:
        yield mock_config
    finally:
        # Reset even when the consuming test raises; otherwise the override
        # would leak into subsequent tests sharing this context.
        config_var.reset(token)
143
+
144
+
145
+ class TestOpenAIProvider:
146
+ """Test suite for the OpenAIProvider implementation."""
147
+
148
+ def test_format_tool_response(self):
149
+ """Test that tool responses are correctly formatted."""
150
+ # Test with all fields
151
+ response1 = ToolResponse(tool_call_id="call_123", content="Test result")
152
+ message1 = OpenAIProvider.format_tool_response(response1)
153
+
154
+ assert isinstance(message1, ToolMessage)
155
+ assert message1.tool_call_id == "call_123"
156
+ assert message1.content == "Test result"
157
+
158
+ # Test with missing ID (should generate default)
159
+ response2 = ToolResponse(content="Another result")
160
+ message2 = OpenAIProvider.format_tool_response(response2)
161
+
162
+ assert isinstance(message2, ToolMessage)
163
+ assert message2.tool_call_id == "call_1" # Default ID
164
+ assert message2.content == "Another result"
165
+
166
+ def test_format_messages(self):
167
+ """Test that messages are correctly formatted for the OpenAI API."""
168
+ # Create a list of different message types
169
+ messages: list[ModelMessage] = [
170
+ SystemMessage(content="You are a helpful assistant"),
171
+ UserMessage(
172
+ content="Hello",
173
+ files=[
174
+ PlanarFile(
175
+ id=UUID("11111111-1111-1111-1111-111111111111"),
176
+ filename="test_image.jpg",
177
+ content_type="image/jpeg",
178
+ size=1024,
179
+ ),
180
+ PlanarFile(
181
+ id=UUID("22222222-2222-2222-2222-222222222222"),
182
+ filename="test_doc.pdf",
183
+ content_type="application/pdf",
184
+ size=2048,
185
+ ),
186
+ ],
187
+ ),
188
+ AssistantMessage(content="How can I help?"),
189
+ ToolMessage(tool_call_id="call_1", content="Tool result"),
190
+ AssistantMessage(
191
+ content=None,
192
+ tool_calls=[
193
+ ToolCall(
194
+ id="call_2",
195
+ name="test_tool",
196
+ arguments={"param1": "value1"},
197
+ )
198
+ ],
199
+ ),
200
+ ]
201
+
202
+ file_map = FileMap(
203
+ mapping={
204
+ "11111111-1111-1111-1111-111111111111": Base64Content(
205
+ content_type="image/jpeg", content="fake content"
206
+ ),
207
+ "22222222-2222-2222-2222-222222222222": FileIdContent(
208
+ content="file-123"
209
+ ),
210
+ }
211
+ )
212
+ # Format the messages
213
+ formatted = OpenAIProvider.prepare_messages(messages, file_map)
214
+
215
+ # Check the results
216
+ assert len(formatted) == 5
217
+
218
+ # Check system message
219
+ assert formatted[0] == {
220
+ "role": "system",
221
+ "content": "You are a helpful assistant",
222
+ }
223
+
224
+ # Check user message - note that content is now a list with text item
225
+ assert formatted[1]["role"] == "user"
226
+ assert isinstance(formatted[1]["content"], list)
227
+ assert len(formatted[1]["content"]) == 3
228
+ assert formatted[1]["content"] == [
229
+ {
230
+ "image_url": {"url": "data:image/jpeg;base64,fake content"},
231
+ "type": "image_url",
232
+ },
233
+ {"file": {"file_id": "file-123"}, "type": "file"},
234
+ {"text": "Hello", "type": "text"},
235
+ ]
236
+
237
+ # Check assistant message
238
+ assert formatted[2] == {"role": "assistant", "content": "How can I help?"}
239
+
240
+ # Check tool message
241
+ assert formatted[3] == {
242
+ "role": "tool",
243
+ "tool_call_id": "call_1",
244
+ "content": "Tool result",
245
+ }
246
+
247
+ # Check assistant message with tool calls
248
+ assert formatted[4]["role"] == "assistant"
249
+ assert formatted[4]["content"] is None
250
+ assert len(formatted[4]["tool_calls"]) == 1
251
+ assert formatted[4]["tool_calls"][0]["id"] == "call_2"
252
+ assert formatted[4]["tool_calls"][0]["type"] == "function"
253
+ assert formatted[4]["tool_calls"][0]["function"]["name"] == "test_tool"
254
+ # Verify JSON arguments
255
+ tool_args = json.loads(formatted[4]["tool_calls"][0]["function"]["arguments"])
256
+ assert tool_args == {"param1": "value1"}
257
+
258
+ def test_tool_call_with_missing_id(self):
259
+ """Test that tool calls without IDs get auto-generated IDs."""
260
+ # Create a message with tool calls that have no IDs
261
+ message = AssistantMessage(
262
+ content=None,
263
+ tool_calls=[
264
+ ToolCall(
265
+ name="tool1",
266
+ arguments={"arg1": "val1"},
267
+ ),
268
+ ToolCall(
269
+ name="tool2",
270
+ arguments={"arg2": "val2"},
271
+ ),
272
+ ],
273
+ )
274
+
275
+ # Format the message
276
+ formatted = OpenAIProvider.prepare_messages([message])
277
+
278
+ # Check that IDs were auto-generated
279
+ assert len(formatted) == 1
280
+ assert formatted[0]["role"] == "assistant"
281
+ assert len(formatted[0]["tool_calls"]) == 2
282
+ assert formatted[0]["tool_calls"][0]["id"] == "call_1"
283
+ assert formatted[0]["tool_calls"][1]["id"] == "call_2"
284
+
285
+ def test_model_spec_handling(self):
286
+ """Test that ModelSpec is correctly initialized and parameters are handled."""
287
+ # Create a model spec with parameters
288
+ spec = ModelSpec(
289
+ model_id="gpt-4.1", parameters={"temperature": 0.7, "top_p": 0.95}
290
+ )
291
+
292
+ # Check values
293
+ assert spec.model_id == "gpt-4.1"
294
+ assert spec.parameters == {"temperature": 0.7, "top_p": 0.95}
295
+
296
+ # Test updating parameters
297
+ spec.parameters["temperature"] = 0.5
298
+ assert spec.parameters["temperature"] == 0.5
299
+
300
+ def test_model_str_and_repr(self):
301
+ """Test that Model can be converted to a string and repr."""
302
+ spec = OpenAI.gpt_4_1
303
+ assert str(spec) == "OpenAI:gpt-4.1"
304
+ assert repr(spec) == "OpenAI:gpt-4.1"
305
+
306
+ spec = OpenAI.gpt_4_turbo
307
+ assert str(spec) == "OpenAI:gpt-4-turbo"
308
+ assert repr(spec) == "OpenAI:gpt-4-turbo"
309
+
310
+ spec = Anthropic.claude_3_haiku
311
+ assert str(spec) == "Anthropic:claude-3-haiku"
312
+ assert repr(spec) == "Anthropic:claude-3-haiku"
313
+
314
+ spec = Anthropic.claude_sonnet_4_20250514
315
+ assert str(spec) == "Anthropic:claude-sonnet-4-20250514"
316
+ assert repr(spec) == "Anthropic:claude-sonnet-4-20250514"
317
+
318
+ spec = Anthropic.claude_opus_4_20250514
319
+ assert str(spec) == "Anthropic:claude-opus-4-20250514"
320
+ assert repr(spec) == "Anthropic:claude-opus-4-20250514"
321
+
322
+ spec = Anthropic.claude_sonnet_4
323
+ assert str(spec) == "Anthropic:claude-sonnet-4"
324
+ assert repr(spec) == "Anthropic:claude-sonnet-4"
325
+
326
+ spec = Anthropic.claude_opus_4
327
+ assert str(spec) == "Anthropic:claude-opus-4"
328
+ assert repr(spec) == "Anthropic:claude-opus-4"
329
+
330
+ async def test_planar_files(self, fake_config, mock_openai_client):
331
+ """Test that PlanarFile objects are correctly handled and formatted."""
332
+ # Create PlanarFile test objects
333
+ image_file = PlanarFile(
334
+ id=UUID("11111111-1111-1111-1111-111111111111"),
335
+ filename="test_image.jpg",
336
+ content_type="image/jpeg",
337
+ size=1024,
338
+ )
339
+
340
+ pdf_file = PlanarFile(
341
+ id=UUID("22222222-2222-2222-2222-222222222222"),
342
+ filename="test_doc.pdf",
343
+ content_type="application/pdf",
344
+ size=2048,
345
+ )
346
+
347
+ messages = [
348
+ SystemMessage(content="You are a helpful assistant"),
349
+ UserMessage(content="Describe this file", files=[image_file]),
350
+ ]
351
+
352
+ # Configure mock to return a specific response
353
+ file_response = "This is a file description"
354
+ mock_openai_client = Mock()
355
+ mock_openai_client.chat.completions.create = AsyncMock(
356
+ return_value=MockResponse(content=file_response)
357
+ )
358
+ mock_openai_client.files = Mock()
359
+ mock_openai_client.files.create = AsyncMock(return_value=Mock(id="file-123"))
360
+ mock_openai_client.beta = Mock()
361
+ mock_openai_client.beta.chat = Mock()
362
+ mock_openai_client.beta.chat.completions = Mock()
363
+
364
+ # Replace the original mock with our configured one
365
+ with (
366
+ patch(
367
+ "planar.files.models.PlanarFile.get_content",
368
+ AsyncMock(return_value=b"fake content"),
369
+ ),
370
+ patch(
371
+ "planar.files.models.PlanarFile.get_metadata",
372
+ AsyncMock(return_value=None),
373
+ ),
374
+ pytest.MonkeyPatch().context() as m,
375
+ ):
376
+ m.setattr("openai.AsyncOpenAI", lambda **kwargs: mock_openai_client)
377
+
378
+ # Test with a single image file
379
+ result = await OpenAIProvider.complete(
380
+ model_spec=ModelSpec(model_id="gpt-4.1"),
381
+ messages=messages,
382
+ )
383
+
384
+ # Verify the returned value
385
+ assert result.content == file_response
386
+ assert result.tool_calls is None
387
+
388
+ # Test with multiple files
389
+ # Create a new message with multiple files
390
+ messages[-1] = UserMessage(
391
+ content="Describe these files", files=[image_file, pdf_file]
392
+ )
393
+
394
+ multiple_file_response = "This describes multiple files"
395
+ mock_openai_client.chat.completions.create = AsyncMock(
396
+ return_value=MockResponse(content=multiple_file_response)
397
+ )
398
+
399
+ # Make the API call with multiple files
400
+ result = await OpenAIProvider.complete(
401
+ model_spec=ModelSpec(model_id="gpt-4.1"),
402
+ messages=messages,
403
+ )
404
+
405
+ # Verify the returned value
406
+ assert result.content == multiple_file_response
407
+ assert result.tool_calls is None
408
+
409
+ # Test with both files and structured output
410
+ class FileOutput(BaseModel):
411
+ description: str
412
+
413
+ structured_file_result = FileOutput(
414
+ description="A PDF document",
415
+ )
416
+
417
+ mock_openai_client.beta.chat.completions.parse = AsyncMock(
418
+ return_value=MockResponse(structured_output=structured_file_result)
419
+ )
420
+
421
+ # Make the API call with file and structured output
422
+ result = await OpenAIProvider.complete(
423
+ model_spec=ModelSpec(model_id="gpt-4.1"),
424
+ messages=messages,
425
+ output_type=FileOutput,
426
+ )
427
+
428
+ # Verify the structured output with file
429
+ assert isinstance(result.content, FileOutput)
430
+ assert result.content.description == "A PDF document"
431
+
432
+ async def test_structured_output(self, fake_config, mock_openai_client):
433
+ """Test that structured output is correctly handled."""
434
+ # Create test messages
435
+ messages = [
436
+ SystemMessage(content="You are a helpful assistant"),
437
+ UserMessage(content="Analyze this data"),
438
+ ]
439
+
440
+ # Test structured output with DummyOutput model
441
+ result = await OpenAIProvider.complete(
442
+ model_spec=ModelSpec(model_id="gpt-4.1"),
443
+ messages=messages,
444
+ output_type=DummyGenericOutput[DummyOutput],
445
+ )
446
+
447
+ # Verify the completion method used
448
+ assert mock_openai_client.beta.chat.completions.captured_kwargs is not None
449
+ captured_kwargs = mock_openai_client.beta.chat.completions.captured_kwargs
450
+
451
+ # Verify the output is of the correct type
452
+ assert isinstance(result.content, DummyGenericOutput)
453
+ assert result.content.value == DummyOutput(value="test value", score=95)
454
+ assert result.tool_calls is None
455
+
456
+ # Verify the response_format parameter was correctly set
457
+ assert "response_format" in captured_kwargs
458
+ assert captured_kwargs["response_format"] == DummyGenericOutput[DummyOutput]
459
+ # Verify we're sanitizing the name correctly as OpenAI expects
460
+ assert (
461
+ captured_kwargs["response_format"].__name__
462
+ == "DummyGenericOutput_DummyOutput_"
463
+ )