planar 0.7.0-py3-none-any.whl → 0.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
planar/ai/agent_base.py ADDED
@@ -0,0 +1,170 @@
+from __future__ import annotations
+
+import abc
+from dataclasses import dataclass, field
+from typing import (
+    Any,
+    Callable,
+    Coroutine,
+    Type,
+    cast,
+    overload,
+)
+
+from pydantic import BaseModel
+
+from planar.ai.agent_utils import AgentEventEmitter
+from planar.ai.models import (
+    AgentConfig,
+    AgentRunResult,
+)
+from planar.logging import get_logger
+from planar.modeling.field_helpers import JsonSchema
+from planar.utils import P, R, T, U
+from planar.workflows import as_step
+from planar.workflows.models import StepType
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class AgentBase[
+    # TODO: add `= str` default when we upgrade to 3.13
+    TInput: BaseModel | str,
+    TOutput: BaseModel | str,
+](abc.ABC):
+    """An LLM-powered agent that can be called directly within workflows."""
+
+    name: str
+    system_prompt: str
+    output_type: Type[TOutput] | None = None
+    input_type: Type[TInput] | None = None
+    user_prompt: str = ""
+    tools: list[Callable] = field(default_factory=list)
+    max_turns: int = 2
+    model_parameters: dict[str, Any] = field(default_factory=dict)
+    event_emitter: AgentEventEmitter | None = None
+    durable: bool = True
+
+    # TODO: move here to serialize to frontend
+    #
+    # built_in_vars: Dict[str, str] = field(default_factory=lambda: {
+    #     "datetime_now": datetime.datetime.now().isoformat(),
+    #     "date_today": datetime.date.today().isoformat(),
+    # })
+
+    def __post_init__(self):
+        if self.input_type:
+            if (
+                not issubclass(self.input_type, BaseModel)
+                and self.input_type is not str
+            ):
+                raise ValueError(
+                    "input_type must be 'str' or a subclass of a Pydantic model"
+                )
+        if self.max_turns < 1:
+            raise ValueError("Max_turns must be greater than or equal to 1.")
+        if self.tools and self.max_turns <= 1:
+            raise ValueError(
+                "For tool calling to work, max_turns must be greater than 1."
+            )
+
+    def input_schema(self) -> JsonSchema | None:
+        if self.input_type is None:
+            return None
+        if self.input_type is str:
+            return None
+        assert issubclass(self.input_type, BaseModel), (
+            "input_type must be a subclass of BaseModel or str"
+        )
+        return self.input_type.model_json_schema()
+
+    def output_schema(self) -> JsonSchema | None:
+        if self.output_type is None:
+            return None
+        if self.output_type is str:
+            return None
+        assert issubclass(self.output_type, BaseModel), (
+            "output_type must be a subclass of BaseModel or str"
+        )
+        return self.output_type.model_json_schema()
+
+    @overload
+    async def __call__(
+        self: "AgentBase[TInput, str]",
+        input_value: TInput,
+    ) -> AgentRunResult[str]: ...
+
+    @overload
+    async def __call__(
+        self: "AgentBase[TInput, TOutput]",
+        input_value: TInput,
+    ) -> AgentRunResult[TOutput]: ...
+
+    def as_step_if_durable(
+        self,
+        func: Callable[P, Coroutine[T, U, R]],
+        step_type: StepType,
+        display_name: str | None = None,
+        return_type: Type[R] | None = None,
+    ) -> Callable[P, Coroutine[T, U, R]]:
+        if not self.durable:
+            return func
+        return as_step(
+            func,
+            step_type=step_type,
+            display_name=display_name or self.name,
+            return_type=return_type,
+        )
+
+    async def __call__(
+        self,
+        input_value: TInput,
+    ) -> AgentRunResult[Any]:
+        if self.input_type is not None and not isinstance(input_value, self.input_type):
+            raise ValueError(
+                f"Input value must be of type {self.input_type}, but got {type(input_value)}"
+            )
+        elif not isinstance(input_value, (str, BaseModel)):
+            # Should not happen based on type constraints, but just in case
+            # user does not have type checking enabled
+            raise ValueError(
+                "Input value must be a string or a Pydantic model if input_type is not provided"
+            )
+
+        if self.output_type is None:
+            run_step = self.as_step_if_durable(
+                self.run_step,
+                step_type=StepType.AGENT,
+                display_name=self.name,
+                return_type=AgentRunResult[str],
+            )
+        else:
+            run_step = self.as_step_if_durable(
+                self.run_step,
+                step_type=StepType.AGENT,
+                display_name=self.name,
+                return_type=AgentRunResult[self.output_type],
+            )
+
+        result = await run_step(input_value=input_value)
+        # Cast the result to ensure type compatibility
+        return cast(AgentRunResult[TOutput], result)
+
+    @abc.abstractmethod
+    async def run_step(
+        self,
+        input_value: TInput,
+    ) -> AgentRunResult[TOutput]: ...
+
+    @abc.abstractmethod
+    def get_model_str(self) -> str: ...
+
+    def to_config(self) -> AgentConfig:
+        return AgentConfig(
+            system_prompt=self.system_prompt,
+            user_prompt=self.user_prompt,
+            model=self.get_model_str(),
+            max_turns=self.max_turns,
+            model_parameters=self.model_parameters,
+        )
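
For orientation, the sketch below shows how a concrete subclass of the new AgentBase might satisfy its abstract interface (run_step and get_model_str) and then be awaited directly. EchoAgent, its model field, and the AgentRunResult(output=...) construction are illustrative assumptions, not code from this release.

```python
from dataclasses import dataclass

from planar.ai.agent_base import AgentBase
from planar.ai.models import AgentRunResult


@dataclass
class EchoAgent(AgentBase[str, str]):
    # Hypothetical extra field; real agents would carry provider/model configuration.
    model: str = "openai:gpt-4o"

    async def run_step(self, input_value: str) -> AgentRunResult[str]:
        # A real implementation would call the configured model here; this
        # assumes AgentRunResult exposes an `output` field.
        return AgentRunResult(output=input_value)

    def get_model_str(self) -> str:
        return self.model


# Usage, inside an async workflow context:
#     agent = EchoAgent(name="echo", system_prompt="Echo the input.", durable=False)
#     result = await agent("hello")  # AgentRunResult[str]
```

With durable=True (the default), __call__ wraps run_step via as_step_if_durable so the agent call is recorded as a workflow step; with durable=False it runs as a plain coroutine.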
planar/ai/agent_utils.py CHANGED
@@ -26,6 +26,13 @@ from planar.workflows import step
 logger = get_logger(__name__)
 
 
+class ModelSpec(BaseModel):
+    """Pydantic model for AI model specifications."""
+
+    model_id: str
+    parameters: dict[str, Any] = {}
+
+
 class AgentEventType(str, Enum):
     """Valid event types that can be emitted by an Agent."""
 
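
The new ModelSpec is a plain Pydantic model, so constructing one is straightforward; the model id and parameter values below are placeholders for illustration, not defaults shipped in the package.

```python
from planar.ai.agent_utils import ModelSpec

# Placeholder values for illustration only.
spec = ModelSpec(model_id="openai:gpt-4o", parameters={"temperature": 0.2})
print(spec.model_dump())
# {'model_id': 'openai:gpt-4o', 'parameters': {'temperature': 0.2}}
```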
planar/ai/pydantic_ai.py CHANGED
@@ -1,5 +1,6 @@
 import base64
 import json
+import os
 import re
 import textwrap
 from typing import Any, Literal, Protocol, Type, cast
@@ -28,12 +29,13 @@ from pydantic_ai.messages import (
     UserContent,
     UserPromptPart,
 )
-from pydantic_ai.models import Model, ModelRequestParameters
+from pydantic_ai.models import KnownModelName, Model, ModelRequestParameters
 from pydantic_ai.settings import ModelSettings
 from pydantic_ai.tools import ToolDefinition
 from pydantic_core import ErrorDetails
 
 from planar.ai import models as m
+from planar.files.models import PlanarFile
 from planar.logging import get_logger
 from planar.utils import partition
 
@@ -67,7 +69,65 @@ def format_validation_errors(errors: list[ErrorDetails], function: bool) -> str:
     return "\n".join(lines)
 
 
-async def build_file_map(messages: list[m.ModelMessage]) -> m.FileMap:
+async def openai_try_upload_file(
+    model: KnownModelName | Model, file: PlanarFile
+) -> m.FileIdContent | None:
+    # Currently pydanticAI doesn't support passing file_ids, but leaving the
+    # implementation here for when they add support.
+    return None
+
+    if file.content_type != "application/pdf":
+        # old implementation only does this for pdf files, so keep the behavior for now
+        return None
+
+    if isinstance(model, str) and not model.startswith("openai:"):
+        # not using openai provider
+        return None
+
+    try:
+        # make this code work with openai as optional dependency
+        from pydantic_ai.models.openai import OpenAIModel
+    except ImportError:
+        return None
+
+    if os.getenv("OPENAI_BASE_URL", None) is not None:
+        # cannot use OpenAI file upload if using a custom base url
+        return None
+
+    if (
+        isinstance(model, OpenAIModel)
+        and model.client.base_url.host != "api.openai.com"
+    ):
+        # same as above
+        return None
+
+    logger.debug("uploading pdf file to openai", filename=file.filename)
+
+    # use a separate AsyncClient instance since the model might be provided as a string
+    from openai import AsyncClient
+
+    client = AsyncClient()
+
+    # upload the file to the provider
+    openai_file = await client.files.create(
+        file=(
+            file.filename,
+            await file.get_content(),
+            file.content_type,
+        ),
+        purpose="user_data",
+    )
+    logger.info(
+        "uploaded pdf file to openai",
+        filename=file.filename,
+        openai_file_id=openai_file.id,
+    )
+    return m.FileIdContent(content=openai_file.id)
+
+
+async def build_file_map(
+    model: KnownModelName | Model, messages: list[m.ModelMessage]
+) -> m.FileMap:
     logger.debug("building file map", num_messages=len(messages))
     file_dict = {}
 
@@ -86,6 +146,12 @@ async def build_file_map(messages: list[m.ModelMessage]) -> m.FileMap:
             content_type=file.content_type,
         )
 
+        file_content_id = await openai_try_upload_file(model, file)
+        # TODO: add more `try_upload_file` implementations for other providers that support
+        if file_content_id is not None:
+            file_dict[str(file.id)] = file_content_id
+            continue
+
         # For now we are not using uploaded files with Gemini, so convert all to base64
         if file.content_type.startswith(
             ("image/", "audio/", "video/", "application/pdf")
@@ -107,7 +173,9 @@ async def build_file_map(messages: list[m.ModelMessage]) -> m.FileMap:
     return m.FileMap(mapping=file_dict)
 
 
-async def prepare_messages(messages: list[m.ModelMessage]) -> list[Any]:
+async def prepare_messages(
+    model: KnownModelName | Model, messages: list[m.ModelMessage]
+) -> list[Any]:
     """Prepare messages from Planar representations into the format expected by PydanticAI.
 
     Args:
@@ -118,7 +186,7 @@ async def prepare_messages(messages: list[m.ModelMessage]) -> list[Any]:
         List of messages in PydanticAI format
     """
     pydantic_messages: list[ModelMessage] = []
-    file_map = await build_file_map(messages)
+    file_map = await build_file_map(model, messages)
 
     def append_request_part(part: ModelRequestPart):
         last = (
@@ -315,7 +383,7 @@ class ModelRunResponse[TOutput: BaseModel | str](BaseModel):
 
 
 async def model_run[TOutput: BaseModel | str](
-    model: Model | str,
+    model: Model | KnownModelName,
     max_extra_turns: int,
     model_settings: dict[str, Any] | None = None,
     messages: list[m.ModelMessage] = [],
@@ -354,7 +422,7 @@ async def model_run[TOutput: BaseModel | str](
         if event_handler:
             event_handler.emit(event_type, content)
 
-    history = await prepare_messages(messages=messages)
+    history = await prepare_messages(model, messages=messages)
 
     if structured_output:
         if supports_native_structured_output:
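
Taken together, these pydantic_ai.py changes thread the model (a pydantic-ai Model instance or a KnownModelName string) from model_run down through prepare_messages into build_file_map, so provider-specific file handling such as openai_try_upload_file can inspect which provider is in use. A minimal sketch of the updated call shapes, with placeholder arguments that are not taken from the diff:

```python
from planar.ai import models as m
from planar.ai.pydantic_ai import build_file_map, prepare_messages


async def example() -> None:
    # Placeholder model name and empty message list, purely for illustration.
    model = "openai:gpt-4o"
    messages: list[m.ModelMessage] = []

    file_map = await build_file_map(model, messages)   # model is now the first argument
    history = await prepare_messages(model, messages)  # same model threaded through
    print(file_map, history)


# e.g. asyncio.run(example())
```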
@@ -56,7 +56,7 @@ def test_agent_with_tools():
         name="test_agent_with_tools",
         system_prompt="System with tools",
         user_prompt="User: {input}",
-        model="anthropic:claude-3-sonnet",
+        model="anthropic:claude-3-5-sonnet-latest",
         max_turns=5,
         tools=[test_tool],
     )
@@ -42,7 +42,7 @@ def app_fixture():
         name="agent_with_tools",
         system_prompt="System with tools",
         user_prompt="User: {input}",
-        model="anthropic:claude-3-sonnet",
+        model="anthropic:claude-3-5-sonnet-latest",
         max_turns=5,
         tools=[test_tool],
     )
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: planar
-Version: 0.7.0
+Version: 0.8.0
 Summary: Add your description here
 License-Expression: LicenseRef-Proprietary
 Requires-Python: >=3.12
@@ -1,5 +1,5 @@
 planar/__init__.py,sha256=FAYRGjuJOH2Y_XYFA0-BrRFjuKdPzIShNbaYwJbtu6A,499
-planar/_version.py,sha256=3acS1SXu-uxuFh9KAe5eEzbrP7hTsqs8yTGli2vtXKk,18
+planar/_version.py,sha256=dZWiMFvyJ9WmUBmP_1Yn0Ze7otmWd3MrPYqf8mefKr4,18
 planar/app.py,sha256=SKIpNY1K6NFF4_20kDxT40BIfJotiyhjNPwjU7P8_Ek,18695
 planar/cli.py,sha256=2ObR5XkLGbdbnDqp5mrBzDVhSacHCNsVNSHnXkrMQzQ,9593
 planar/config.py,sha256=NHNrvJl8h1QkUHUxZ96Th_v1TSxoxf8u0C4CA1VBr2k,17557
@@ -16,15 +16,12 @@ planar/test_sqlalchemy.py,sha256=F0aKqm5tStQj_Mqjh50kiLX4r7kphBFDOUDu_Iw7S3s,557
 planar/test_utils.py,sha256=gKenXotj36SN_bb3bQpYPfD8t06IjnGBQqEgWpujHcA,3086
 planar/utils.py,sha256=v7q9AJyWgQWl9VPSN_0qxw3rBvYe-_Pb_KcwqSsjOFU,3103
 planar/ai/__init__.py,sha256=ABOKvqQOLlVJkptcvXcuLjVZZWEsK8h-1RyFGK7kib8,231
-planar/ai/agent.py,sha256=ROLNF8iLxuceOHKynw34lqlwz2IWbzgtgFVCacHu7Iw,17728
-planar/ai/agent_utils.py,sha256=uwU5w5qXcCNO7CPOINSP5kMZMy2bRaVTSLPQvGiwPLI,5827
+planar/ai/agent.py,sha256=kcrFLQzIU7QPGNjgl_7irP1jJOReHs8EYz8-_H_TV0A,11929
+planar/ai/agent_base.py,sha256=X4Gl4PmxyxD_HLnVyxK7fnhKgmudznrGwcv5nMzAXWs,5349
+planar/ai/agent_utils.py,sha256=r7WGGMs5fCo5MlLX5vPlA9ry0mMzpSuf4Ro6Gnvp2TQ,5966
 planar/ai/models.py,sha256=Rxvt00kCaQzbU59LcYDegK7kV8qYTRVH3YhU3ufuJCY,3532
-planar/ai/providers.py,sha256=eXVDx-8manGKXVN0b5TDLWI2QfTAtnjLMitneQ21LhI,41400
-planar/ai/pydantic_ai.py,sha256=OtN0ZPp-66JVqgzXyRQuovhvMbScgerV6pv2yUvd6BA,21266
-planar/ai/pydantic_ai_agent.py,sha256=iNJQIiJHKm0u029BTubyTPedNUoUrAMZrSWUM1MjZVA,11951
-planar/ai/test_agent.py,sha256=B_eHcycNRQ_43-H_mTXsRyyvTYUCqu8ickPCTkYcXcE,44429
-planar/ai/test_agent_serialization.py,sha256=LZlgk147A8OM5DPuSLjEjVy16N4vdaAQvXv_8OJDSK4,8064
-planar/ai/test_providers.py,sha256=JEaIr0ex-02ij8Tl5X4vAfsAbp7nV2I0Wj0wIu6MBRQ,16282
+planar/ai/pydantic_ai.py,sha256=IAVavvbqp7KIUpwFQwWf5hXtsxn2yMxIjl2NjeL3EsE,23423
+planar/ai/test_agent_serialization.py,sha256=zYLIxhYdFhOZzBrEBoQNyYLyNcNxWwaMTkjt_ARTkZk,8073
 planar/ai/utils.py,sha256=WVBW0TGaoKytC4bNd_a9lXrBf5QsDRut4GBcA53U2Ww,3116
 planar/db/__init__.py,sha256=SNgB6unQ1f1E9dB9O-KrsPsYM17KLsgOW1u0ajqs57I,318
 planar/db/alembic.ini,sha256=8G9IWbmF61Vwp1BXbkNOXTTgCEUMBQhOK_e-nnpnSYY,4309
@@ -82,7 +79,7 @@ planar/routers/info.py,sha256=HQa-mumw4zitG61V9isJlZ3cMr8pEwlB54Ct_LrpJDo,4473
 planar/routers/models.py,sha256=RwXjXpJw2uyluM4Fjc34UA0Jm7J95cUjbmTTarD_P9k,4669
 planar/routers/object_config_router.py,sha256=zA8-gGBQp1-Gm3uCC4WJ6nLicFwt4CsCqCYLFp1lRN8,4802
 planar/routers/rule.py,sha256=d6giUwYRKzxQFPeoWbe8Ylp2Cxd71_uK8yoS9NrOOBg,3563
-planar/routers/test_agents_router.py,sha256=ZOc18sp5v8fWjjaNKV3BmWQzufo8LUtIOzLBUkHSFlg,6073
+planar/routers/test_agents_router.py,sha256=d_d_lZT5zuSxNY2MEu51SmgLRGNZ3yCpGUooAXLpEaY,6082
 planar/routers/test_files_router.py,sha256=_uYpRJkxSxyjFJAG7aj3letx25iDSkaOgZDTRHfU8TU,1559
 planar/routers/test_object_config_router.py,sha256=HBOsQZXccPuWOLCPxEsduSd93loswUsbSk3eTM6KHRc,11389
 planar/routers/test_routes_security.py,sha256=DsyEbpaNsLGTE_aWhs_KyePJxw2qTBviP2Tn-GZj3T8,5508
@@ -163,7 +160,7 @@ planar/workflows/test_suspend_deserialization.py,sha256=ddw2jToSJ-ebQ0RfT7KWTRMC
 planar/workflows/test_workflow.py,sha256=KArm9m44IBXKY9j4v_O74MAweFN6jEb7tVRomziaeFU,64011
 planar/workflows/tracing.py,sha256=E7E_kj2VBQisDqrllviIshbvOmB9QcEeRwMapunqio4,2732
 planar/workflows/wrappers.py,sha256=KON6RGg1D6yStboNbuMEeTXRpPTEa8S6Elh1tOnMAlM,1149
-planar-0.7.0.dist-info/METADATA,sha256=k2JmBunmIPOgko0N6hsfbD0YdbXCY7dujm9F2pNosQw,12020
-planar-0.7.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-planar-0.7.0.dist-info/entry_points.txt,sha256=ZtFgrZ0eeoVmhLA51ESipK0nHg2t_prjW0Cm8WhpP54,95
-planar-0.7.0.dist-info/RECORD,,
+planar-0.8.0.dist-info/METADATA,sha256=D4RMLkoOUDNlZgbuEjMrnTjV4ga6FHJ3ZtxeC2uUe6A,12020
+planar-0.8.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+planar-0.8.0.dist-info/entry_points.txt,sha256=ZtFgrZ0eeoVmhLA51ESipK0nHg2t_prjW0Cm8WhpP54,95
+planar-0.8.0.dist-info/RECORD,,