grasp_agents 0.2.11__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. grasp_agents/__init__.py +15 -14
  2. grasp_agents/cloud_llm.py +118 -131
  3. grasp_agents/comm_processor.py +201 -0
  4. grasp_agents/generics_utils.py +15 -7
  5. grasp_agents/llm.py +60 -31
  6. grasp_agents/llm_agent.py +229 -273
  7. grasp_agents/llm_agent_memory.py +58 -0
  8. grasp_agents/llm_policy_executor.py +482 -0
  9. grasp_agents/memory.py +20 -134
  10. grasp_agents/message_history.py +140 -0
  11. grasp_agents/openai/__init__.py +54 -36
  12. grasp_agents/openai/completion_chunk_converters.py +78 -0
  13. grasp_agents/openai/completion_converters.py +53 -30
  14. grasp_agents/openai/content_converters.py +13 -14
  15. grasp_agents/openai/converters.py +44 -68
  16. grasp_agents/openai/message_converters.py +58 -72
  17. grasp_agents/openai/openai_llm.py +101 -42
  18. grasp_agents/openai/tool_converters.py +24 -19
  19. grasp_agents/packet.py +24 -0
  20. grasp_agents/packet_pool.py +91 -0
  21. grasp_agents/printer.py +29 -15
  22. grasp_agents/processor.py +194 -0
  23. grasp_agents/prompt_builder.py +175 -192
  24. grasp_agents/run_context.py +20 -37
  25. grasp_agents/typing/completion.py +58 -12
  26. grasp_agents/typing/completion_chunk.py +173 -0
  27. grasp_agents/typing/converters.py +8 -12
  28. grasp_agents/typing/events.py +86 -0
  29. grasp_agents/typing/io.py +4 -13
  30. grasp_agents/typing/message.py +12 -50
  31. grasp_agents/typing/tool.py +52 -26
  32. grasp_agents/usage_tracker.py +6 -6
  33. grasp_agents/utils.py +3 -3
  34. grasp_agents/workflow/looped_workflow.py +132 -0
  35. grasp_agents/workflow/parallel_processor.py +95 -0
  36. grasp_agents/workflow/sequential_workflow.py +66 -0
  37. grasp_agents/workflow/workflow_processor.py +78 -0
  38. {grasp_agents-0.2.11.dist-info → grasp_agents-0.3.1.dist-info}/METADATA +41 -50
  39. grasp_agents-0.3.1.dist-info/RECORD +51 -0
  40. grasp_agents/agent_message.py +0 -27
  41. grasp_agents/agent_message_pool.py +0 -92
  42. grasp_agents/base_agent.py +0 -51
  43. grasp_agents/comm_agent.py +0 -217
  44. grasp_agents/llm_agent_state.py +0 -79
  45. grasp_agents/tool_orchestrator.py +0 -203
  46. grasp_agents/workflow/looped_agent.py +0 -134
  47. grasp_agents/workflow/sequential_agent.py +0 -72
  48. grasp_agents/workflow/workflow_agent.py +0 -88
  49. grasp_agents-0.2.11.dist-info/RECORD +0 -46
  50. {grasp_agents-0.2.11.dist-info → grasp_agents-0.3.1.dist-info}/WHEEL +0 -0
  51. {grasp_agents-0.2.11.dist-info → grasp_agents-0.3.1.dist-info}/licenses/LICENSE.md +0 -0
@@ -148,12 +148,20 @@ class AutoInstanceAttributesMixin:
148
148
  return resolved
149
149
 
150
150
  def _set_resolved_generic_instance_attributes(self) -> None:
151
- for name, typ in getattr(
151
+ attr_names = self._generic_arg_to_instance_attr_map.values()
152
+ resolved_attr_types = getattr(
152
153
  self.__class__, "_resolved_instance_attr_types", {}
153
- ).items():
154
- _typ = None if typ is type(None) else typ
155
- pyd_private = getattr(self, "__pydantic_private__", {})
156
- if name in pyd_private:
157
- pyd_private[name] = _typ
154
+ )
155
+ pyd_private = getattr(self, "__pydantic_private__", {})
156
+
157
+ for attr_name in attr_names:
158
+ if attr_name in resolved_attr_types:
159
+ attr_type = resolved_attr_types[attr_name]
160
+ # attr_type = None if _attr_type is type(None) else _attr_type
161
+ else:
162
+ attr_type = Any
163
+
164
+ if attr_name in pyd_private:
165
+ pyd_private[attr_name] = attr_type
158
166
  else:
159
- setattr(self, name, _typ)
167
+ setattr(self, attr_name, attr_type)
grasp_agents/llm.py CHANGED
@@ -7,31 +7,37 @@ from uuid import uuid4
7
7
  from pydantic import BaseModel, TypeAdapter
8
8
  from typing_extensions import TypedDict
9
9
 
10
- from .memory import MessageHistory
11
- from .typing.completion import Completion, CompletionChunk
10
+ from grasp_agents.utils import validate_obj_from_json_or_py_string
11
+
12
+ from .message_history import MessageHistory
13
+ from .typing.completion import Completion
12
14
  from .typing.converters import Converters
13
- from .typing.message import AssistantMessage, Conversation
15
+ from .typing.events import CompletionChunkEvent, CompletionEvent
16
+ from .typing.message import Messages
14
17
  from .typing.tool import BaseTool, ToolChoice
15
18
 
16
19
  logger = logging.getLogger(__name__)
17
20
 
18
21
 
19
- class LLMSettings(TypedDict):
20
- pass
22
+ class LLMSettings(TypedDict, total=False):
23
+ max_completion_tokens: int | None
24
+ temperature: float | None
25
+ top_p: float | None
26
+ seed: int | None
21
27
 
22
28
 
23
- SettingsT = TypeVar("SettingsT", bound=LLMSettings, covariant=True) # noqa: PLC0105
24
- ConvertT = TypeVar("ConvertT", bound=Converters, covariant=True) # noqa: PLC0105
29
+ SettingsT_co = TypeVar("SettingsT_co", bound=LLMSettings, covariant=True)
30
+ ConvertT_co = TypeVar("ConvertT_co", bound=Converters, covariant=True)
25
31
 
26
32
 
27
- class LLM(ABC, Generic[SettingsT, ConvertT]):
33
+ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
28
34
  @abstractmethod
29
35
  def __init__(
30
36
  self,
31
- converters: ConvertT,
37
+ converters: ConvertT_co,
32
38
  model_name: str | None = None,
33
39
  model_id: str | None = None,
34
- llm_settings: SettingsT | None = None,
40
+ llm_settings: SettingsT_co | None = None,
35
41
  tools: list[BaseTool[BaseModel, Any, Any]] | None = None,
36
42
  response_format: type | Mapping[str, type] | None = None,
37
43
  **kwargs: Any,
@@ -42,20 +48,18 @@ class LLM(ABC, Generic[SettingsT, ConvertT]):
42
48
  self._model_id = model_id or str(uuid4())[:8]
43
49
  self._model_name = model_name
44
50
  self._tools = {t.name: t for t in tools} if tools else None
45
- self._llm_settings: SettingsT = llm_settings or cast("SettingsT", {})
51
+ self._llm_settings: SettingsT_co = llm_settings or cast("SettingsT_co", {})
46
52
 
47
53
  self._response_format = response_format
48
- self._response_format_pyd: (
49
- TypeAdapter[Any] | Mapping[str, TypeAdapter[Any]] | None
50
- )
54
+ self._response_format_adapter: TypeAdapter[Any] | Mapping[str, TypeAdapter[Any]]
51
55
  if isinstance(response_format, type):
52
- self._response_format_pyd = TypeAdapter(response_format)
56
+ self._response_format_adapter = TypeAdapter(response_format)
53
57
  elif isinstance(response_format, Mapping):
54
- self._response_format_pyd = {
58
+ self._response_format_adapter = {
55
59
  k: TypeAdapter(v) for k, v in response_format.items()
56
60
  }
57
61
  else:
58
- self._response_format_pyd = None
62
+ self._response_format_adapter = TypeAdapter(Any)
59
63
 
60
64
  @property
61
65
  def model_id(self) -> str:
@@ -66,7 +70,7 @@ class LLM(ABC, Generic[SettingsT, ConvertT]):
66
70
  return self._model_name
67
71
 
68
72
  @property
69
- def llm_settings(self) -> SettingsT:
73
+ def llm_settings(self) -> SettingsT_co:
70
74
  return self._llm_settings
71
75
 
72
76
  @property
@@ -82,7 +86,9 @@ class LLM(ABC, Generic[SettingsT, ConvertT]):
82
86
  self._tools = {t.name: t for t in tools} if tools else None
83
87
 
84
88
  @response_format.setter
85
- def response_format(self, response_format: type | None) -> None:
89
+ def response_format(
90
+ self, response_format: type | Mapping[str, type] | None
91
+ ) -> None:
86
92
  self._response_format = response_format
87
93
 
88
94
  def __repr__(self) -> str:
@@ -91,32 +97,55 @@ class LLM(ABC, Generic[SettingsT, ConvertT]):
91
97
  f"model_name={self._model_name})"
92
98
  )
93
99
 
100
+ def _validate_completion(self, completion: Completion) -> None:
101
+ for message in completion.messages:
102
+ if not message.tool_calls:
103
+ validate_obj_from_json_or_py_string(
104
+ message.content or "",
105
+ adapter=self._response_format_adapter,
106
+ from_substring=True,
107
+ )
108
+
109
+ def _validate_tool_calls(self, completion: Completion) -> None:
110
+ for message in completion.messages:
111
+ if message.tool_calls:
112
+ for tool_call in message.tool_calls:
113
+ tool_name = tool_call.tool_name
114
+ tool_arguments = tool_call.tool_arguments
115
+
116
+ available_tool_names = list(self.tools) if self.tools else []
117
+ if tool_name not in available_tool_names or not self.tools:
118
+ raise ValueError(
119
+ f"Tool '{tool_name}' is not available in the LLM tools "
120
+ f"(available: {available_tool_names}"
121
+ )
122
+ tool = self.tools[tool_name]
123
+ validate_obj_from_json_or_py_string(
124
+ tool_arguments, adapter=TypeAdapter(tool.in_type)
125
+ )
126
+
94
127
  @abstractmethod
95
128
  async def generate_completion(
96
129
  self,
97
- conversation: Conversation,
130
+ conversation: Messages,
98
131
  *,
99
132
  tool_choice: ToolChoice | None = None,
100
- **kwargs: Any,
133
+ n_choices: int | None = None,
101
134
  ) -> Completion:
102
135
  pass
103
136
 
104
137
  @abstractmethod
105
138
  async def generate_completion_stream(
106
139
  self,
107
- conversation: Conversation,
140
+ conversation: Messages,
108
141
  *,
109
142
  tool_choice: ToolChoice | None = None,
110
- **kwargs: Any,
111
- ) -> AsyncIterator[CompletionChunk]:
143
+ n_choices: int | None = None,
144
+ ) -> AsyncIterator[CompletionChunkEvent | CompletionEvent]:
112
145
  pass
113
146
 
114
147
  @abstractmethod
115
- async def generate_message_batch(
116
- self,
117
- message_history: MessageHistory,
118
- *,
119
- tool_choice: ToolChoice | None = None,
120
- **kwargs: Any,
121
- ) -> Sequence[AssistantMessage]:
148
+ async def generate_completion_batch(
149
+ self, message_history: MessageHistory, *, tool_choice: ToolChoice | None = None
150
+ ) -> Sequence[Completion]:
122
151
  pass