grasp_agents 0.3.10__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grasp_agents/cloud_llm.py +70 -77
- grasp_agents/comm_processor.py +21 -11
- grasp_agents/errors.py +34 -0
- grasp_agents/http_client.py +7 -5
- grasp_agents/llm.py +3 -9
- grasp_agents/llm_agent.py +92 -103
- grasp_agents/llm_agent_memory.py +36 -27
- grasp_agents/llm_policy_executor.py +66 -63
- grasp_agents/memory.py +3 -1
- grasp_agents/openai/completion_chunk_converters.py +4 -3
- grasp_agents/openai/openai_llm.py +14 -20
- grasp_agents/openai/tool_converters.py +0 -1
- grasp_agents/packet_pool.py +1 -1
- grasp_agents/printer.py +6 -6
- grasp_agents/processor.py +182 -48
- grasp_agents/prompt_builder.py +41 -55
- grasp_agents/run_context.py +1 -5
- grasp_agents/typing/completion_chunk.py +10 -5
- grasp_agents/typing/content.py +2 -2
- grasp_agents/typing/io.py +4 -4
- grasp_agents/typing/message.py +3 -6
- grasp_agents/typing/tool.py +5 -23
- grasp_agents/usage_tracker.py +2 -4
- grasp_agents/utils.py +37 -15
- grasp_agents/workflow/looped_workflow.py +14 -9
- grasp_agents/workflow/sequential_workflow.py +11 -6
- grasp_agents/workflow/workflow_processor.py +30 -13
- {grasp_agents-0.3.10.dist-info → grasp_agents-0.4.0.dist-info}/METADATA +2 -1
- grasp_agents-0.4.0.dist-info/RECORD +50 -0
- grasp_agents/message_history.py +0 -140
- grasp_agents/workflow/parallel_processor.py +0 -95
- grasp_agents-0.3.10.dist-info/RECORD +0 -51
- {grasp_agents-0.3.10.dist-info → grasp_agents-0.4.0.dist-info}/WHEEL +0 -0
- {grasp_agents-0.3.10.dist-info → grasp_agents-0.4.0.dist-info}/licenses/LICENSE.md +0 -0
grasp_agents/message_history.py
DELETED
@@ -1,140 +0,0 @@
-import logging
-from collections.abc import Iterator, Sequence
-from copy import deepcopy
-
-from .typing.io import LLMPrompt
-from .typing.message import Message, Messages, SystemMessage
-
-logger = logging.getLogger(__name__)
-
-
-class MessageHistory:
-    def __init__(self, sys_prompt: LLMPrompt | None = None) -> None:
-        self._sys_prompt = sys_prompt
-        self._conversations: list[Messages]
-        self.reset()
-
-    @property
-    def sys_prompt(self) -> LLMPrompt | None:
-        return self._sys_prompt
-
-    def add_message_batch(self, message_batch: Sequence[Message]) -> None:
-        """
-        Adds a batch of messages to the current batched conversations.
-        This method verifies that the size of the input message batch matches
-        the expected batch size (self.batch_size).
-        If there is a mismatch, the method adjusts by duplicating either
-        the message or the conversation as necessary:
-
-        - If the message batch contains exactly one message and
-          self.batch_size > 1, the single message is duplicated to match
-          the batch size.
-        - If the message batch contains multiple messages but
-          self.batch_size == 1, the entire conversation is duplicated to
-          accommodate each message in the batch.
-        - If the message batch size does not match self.batch_size and none of
-          the above adjustments apply, a ValueError is raised.
-
-        Afterwards, each message in the batch is appended to its corresponding
-        conversation in the batched conversations.
-
-        Args:
-            message_batch: A sequence of Message objects
-                representing the batch of messages to be added. Must align with
-                or be adjusted to match the current batch size.
-
-        Raises:
-            ValueError: If the message batch size does not match the current
-                batch size and cannot be automatically adjusted.
-
-        """
-        message_batch_size = len(message_batch)
-
-        if message_batch_size == 1 and self.batch_size > 1:
-            logger.info(
-                "Message batch size is 1, current batch size is "
-                f"{self.batch_size}: duplicating the message to match the "
-                "current batch size"
-            )
-            message_batch = self._duplicate_message_to_current_batch_size(message_batch)
-            message_batch_size = self.batch_size
-        elif message_batch_size > 1 and self.batch_size == 1:
-            logger.info(
-                f"Message batch size is {len(message_batch)}, current batch "
-                "size is 1: duplicating the conversation to match the message "
-                "batch size"
-            )
-            self._duplicate_conversation_to_target_batch_size(message_batch_size)
-        elif message_batch_size != self.batch_size:
-            raise ValueError(
-                f"Message batch size {message_batch_size} does not match "
-                f"current batch size {self.batch_size}"
-            )
-
-        for batch_id in range(message_batch_size):
-            self._conversations[batch_id].append(message_batch[batch_id])
-
-    def add_message_batches(self, message_batches: Sequence[Sequence[Message]]) -> None:
-        for message_batch in message_batches:
-            self.add_message_batch(message_batch)
-
-    def add_message(self, message: Message) -> None:
-        for conversation in self._conversations:
-            conversation.append(message)
-
-    def add_message_list(self, message_list: Sequence[Message]) -> None:
-        for message in message_list:
-            self.add_message(message)
-
-    def __len__(self) -> int:
-        return len(self._conversations[0])
-
-    def __repr__(self) -> str:
-        return f"{self.__class__.__name__}(len={len(self)}; bs={self.batch_size})"
-
-    def __getitem__(self, idx: int) -> tuple[Message, ...]:
-        return tuple(conversation[idx] for conversation in self._conversations)
-
-    def __iter__(self) -> Iterator[tuple[Message, ...]]:
-        for idx in range(len(self)):
-            yield tuple(conversation[idx] for conversation in self._conversations)
-
-    def _duplicate_message_to_current_batch_size(
-        self, message_batch: Sequence[Message]
-    ) -> Sequence[Message]:
-        assert len(message_batch) == 1, (
-            "Message batch size must be 1 to duplicate to current batch size"
-        )
-
-        return [deepcopy(message_batch[0]) for _ in range(self.batch_size)]
-
-    def _duplicate_conversation_to_target_batch_size(
-        self, target_batch_size: int
-    ) -> None:
-        assert self.batch_size == 1, "Batch size must be 1 to duplicate conversation"
-        self._conversations = [
-            deepcopy(self._conversations[0]) for _ in range(target_batch_size)
-        ]
-
-    @property
-    def conversations(self) -> list[Messages]:
-        return self._conversations
-
-    @property
-    def batch_size(self) -> int:
-        return len(self._conversations)
-
-    def reset(
-        self, sys_prompt: LLMPrompt | None = None, *, batch_size: int = 1
-    ) -> None:
-        if sys_prompt is not None:
-            self._sys_prompt = sys_prompt
-
-        conv: Messages = []
-        if self._sys_prompt is not None:
-            conv.append(SystemMessage(content=self._sys_prompt))
-
-        self._conversations = [deepcopy(conv) for _ in range(batch_size)]
-
-    def erase(self) -> None:
-        self._conversations = [[]]
grasp_agents/workflow/parallel_processor.py
DELETED
@@ -1,95 +0,0 @@
-import asyncio
-from collections.abc import Sequence
-from copy import deepcopy
-from typing import Any, Generic, cast
-
-from ..comm_processor import CommProcessor
-from ..packet import Packet
-from ..packet_pool import PacketPool
-from ..processor import Processor
-from ..run_context import CtxT
-from ..typing.io import InT_contra, OutT_co, ProcName
-
-
-class ParallelCommProcessor(
-    CommProcessor[InT_contra, OutT_co, Any, CtxT],
-    Generic[InT_contra, OutT_co, CtxT],
-):
-    def __init__(
-        self,
-        name: ProcName,
-        processor_type: type[Processor[InT_contra, OutT_co, Any, CtxT]],
-        packet_pool: PacketPool[CtxT] | None = None,
-        recipients: Sequence[ProcName] | None = None,
-        **subproc_init_kwargs: Any,
-    ) -> None:
-        self._processor_type = processor_type
-        self._subproc_init_kwargs = deepcopy(subproc_init_kwargs)
-
-        # NOTE: If the processor is an LLMAgent, the parallel subprocessors will share
-        # the same LLM and tools instances. Make sure their state is managed correctly.
-
-        super().__init__(name=name, packet_pool=packet_pool, recipients=recipients)
-
-    @property
-    def processor_type(self) -> type[Processor[InT_contra, OutT_co, Any, CtxT]]:
-        return self._processor_type
-
-    async def _run_subprocessor(
-        self, in_args: InT_contra, name_suffix: str, **subproc_run_kwargs: Any
-    ) -> Packet[OutT_co]:
-        subproc_name = f"{self.name}_{name_suffix}"
-        subproc = self._processor_type(name=subproc_name, **self._subproc_init_kwargs)
-
-        return await subproc.run(in_args=in_args, **subproc_run_kwargs)
-
-    def _validate_par_inputs(
-        self,
-        chat_inputs: Any | None,
-        in_packet: Packet[InT_contra] | None,
-        in_args: InT_contra | Sequence[InT_contra] | None,
-    ) -> Sequence[InT_contra]:
-        if chat_inputs is not None:
-            raise ValueError(
-                "chat_inputs are not supported in ParallelCommProcessor. "
-                "Use in_packet or in_args."
-            )
-        if in_packet is not None:
-            if not in_packet.payloads:
-                raise ValueError(
-                    "ParallelCommProcessor requires at least one input payload in "
-                    "in_packet."
-                )
-            return in_packet.payloads
-        if in_args is not None:
-            if not isinstance(in_args, Sequence) or not in_args:
-                raise ValueError("in_args must be a non-empty sequence of input data.")
-            return cast("Sequence[InT_contra]", in_args)
-        raise ValueError(
-            "ParallelCommProcessor requires either in_packet or in_args to be provided."
-        )
-
-    async def run(
-        self,
-        chat_inputs: Any | None = None,
-        *,
-        in_packet: Packet[InT_contra] | None = None,
-        in_args: InT_contra | Sequence[InT_contra] | None = None,
-        **subproc_run_kwargs: Any,
-    ) -> Packet[OutT_co]:
-        par_inputs = self._validate_par_inputs(
-            chat_inputs=chat_inputs, in_packet=in_packet, in_args=in_args
-        )
-        tasks = [
-            self._run_subprocessor(
-                in_args=inp, name_suffix=str(n), **subproc_run_kwargs
-            )
-            for n, inp in enumerate(par_inputs)
-        ]
-        out_packets = await asyncio.gather(*tasks)
-
-        return Packet(
-            payloads=[out_packet.payloads[0] for out_packet in out_packets],
-            sender=self.name,
-            recipients=(self.recipients or []),
-        )
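The removed ParallelCommProcessor fanned a single run out across one freshly named subprocessor per input payload and gathered the results into one packet. The core asyncio pattern, reduced to a self-contained sketch (FakePacket, run_one, and run_parallel are illustrative stand-ins, not part of the grasp_agents API):

import asyncio
from dataclasses import dataclass
from typing import Any

@dataclass
class FakePacket:  # stand-in for grasp_agents' Packet
    payloads: list[Any]
    sender: str

async def run_one(name: str, inp: Any) -> FakePacket:
    # One subprocessor with its own name suffix handles a single input.
    await asyncio.sleep(0)  # placeholder for real LLM / tool work
    return FakePacket(payloads=[f"{name} processed {inp!r}"], sender=name)

async def run_parallel(base_name: str, inputs: list[Any]) -> FakePacket:
    # One task per input, mirroring how the removed run() built its task list.
    tasks = [run_one(f"{base_name}_{n}", inp) for n, inp in enumerate(inputs)]
    out_packets = await asyncio.gather(*tasks)
    # Merge the first payload of every subprocessor result into a single packet.
    return FakePacket(
        payloads=[p.payloads[0] for p in out_packets],
        sender=base_name,
    )

print(asyncio.run(run_parallel("proc", ["a", "b", "c"])).payloads)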
grasp_agents-0.3.10.dist-info/RECORD
DELETED
@@ -1,51 +0,0 @@
-grasp_agents/__init__.py,sha256=CIsyUasb9HBC3M4olg6ATAwKXtVNmmtpyGJrt7hpZW4,947
-grasp_agents/cloud_llm.py,sha256=S3yl-bKhAyThs5drHS_hmkmWa_mTlaC-guTX84v6KXM,13798
-grasp_agents/comm_processor.py,sha256=w1Ix33K_1NALh1eLOwCpQbW5MDZDuP4eXOwEwPCBDJw,6790
-grasp_agents/costs_dict.yaml,sha256=2MFNWtkv5W5WSCcv1Cj13B1iQLVv5Ot9pS_KW2Gu2DA,2510
-grasp_agents/generics_utils.py,sha256=5Pw3I9dlnKC2VGqYKC4ZZUO3Z_vTNT-NPFovNfPkl6I,6542
-grasp_agents/grasp_logging.py,sha256=H1GYhXdQvVkmauFDZ-KDwvVmPQHZUUm9sRqX_ObK2xI,1111
-grasp_agents/http_client.py,sha256=KZva2MjJjuI5ohUeU8RdTAImUnQYaqBrV2jDH8smbJw,738
-grasp_agents/llm.py,sha256=vVjELab9l0mK9bjO-IJebmmXUyUiws2fNjbXVQQNRvs,5392
-grasp_agents/llm_agent.py,sha256=EM_1h1O8Gvs5RbQiEV3KT52ADpPDSpvToBLRn0MRUu0,14471
-grasp_agents/llm_agent_memory.py,sha256=kK-eB-8jMeVeWQDIye9xixdmqEi1a3vqFtOQfX64JF0,1770
-grasp_agents/llm_policy_executor.py,sha256=mm0WGmRPRMTnvXj9bSKkvPflYhgj7GTmP7kxPXRIloA,17633
-grasp_agents/memory.py,sha256=gPkVIIF6dI_xXzarIAw9kSEnSJcfW_teUsWA2JAih94,671
-grasp_agents/message_history.py,sha256=-ZNy3C1z0yQeahjqR0oIoWDMySJ7vPS19jdutibW7OE,5408
-grasp_agents/packet.py,sha256=PZ1EpclniAoLk7z4ieZbWzgYH3JSRgnlTe_WfbJYG_4,707
-grasp_agents/packet_pool.py,sha256=9umHbi5FwuUYYhhodSR-Z-fRR6OYiZyYEzq5d4nZFK4,3036
-grasp_agents/printer.py,sha256=M-gkLrZMg0ll9T1MRmKOQbp65niFB5ZiZbm-tUT_EYw,5464
-grasp_agents/processor.py,sha256=Atd_iTQNd3Nudb4mHqt3a5AQ31QUgpbrt1Fhn1sgpSk,6708
-grasp_agents/prompt_builder.py,sha256=URMT4X8sNhdMPnkJK0YAF0i2xqBpK2-WlBlkmpVGe_Y,8455
-grasp_agents/run_context.py,sha256=9CidWWCKJ8umlhkRGtg_P3JQsRpH0K3_vhUjgVon4Wk,1597
-grasp_agents/usage_tracker.py,sha256=WzBCqfscVH0YaWcz2wL5wyuHRZt4wwEaxjDcqggVjc8,3760
-grasp_agents/utils.py,sha256=WNA5b0IkAsth0XKHEjgTY3PIHdw5L3vEsYPkyDGw8Mw,4741
-grasp_agents/openai/__init__.py,sha256=wpTeew6EjhM6esHCKrEKUpwq0kygMN2QQDxYtmbRG8Y,4201
-grasp_agents/openai/completion_chunk_converters.py,sha256=i-1SvIWhRKtL0K8p5pb3jjACSnyHuJHTCoZovEERpxs,2628
-grasp_agents/openai/completion_converters.py,sha256=vzPEkUOX4l2hobKxZjEk_dyWfzeYesO0DlvWvNVb-Sg,2656
-grasp_agents/openai/content_converters.py,sha256=r1D5uci5x7sbDyl0XN27y-l_jVigCauJruvSdZSnZcc,2510
-grasp_agents/openai/converters.py,sha256=ncscVyPnPMMbyxAfFX3U73lnr_BZU-I89HA5Ld8BuxI,4691
-grasp_agents/openai/message_converters.py,sha256=_fG4vI42rBzoajuC5iYgnUBalg8cQ1ckSt8xFBOuWVY,4111
-grasp_agents/openai/openai_llm.py,sha256=Hm2S2yyrR3cRvH20YHRegoTEYaetbx_USxI8CXIhfro,8091
-grasp_agents/openai/tool_converters.py,sha256=d_7edJbnUhfSs2-F4J20D71UjVJWIslsXwMyac7-v2Q,1246
-grasp_agents/rate_limiting/__init__.py,sha256=KRgtF_E7R3YfA2cpYcFcZ7wycV0pWVJ0xRQC7YhiIEQ,158
-grasp_agents/rate_limiting/rate_limiter_chunked.py,sha256=BPgkUXvhmZhTpZs2T6uujNFuxH_kYHiISuf6_-eNhUc,5544
-grasp_agents/rate_limiting/types.py,sha256=PbnNhEAcYedQdIpPJWud8HUVcxa_xZS2RDZu4c5jr40,1003
-grasp_agents/rate_limiting/utils.py,sha256=oEDWDNHYMUdxOOG49PlAJochkZq8nnVBCo6JxPc1iSo,2007
-grasp_agents/typing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-grasp_agents/typing/completion.py,sha256=KrvdFZqOcw5GePdMhDoFuwCkMJ86nqRdz9mT9s2lFv4,2445
-grasp_agents/typing/completion_chunk.py,sha256=b1d2a_8h21Uoif_FCS05XDMKap2nZWYV9G4goLKkoL0,5866
-grasp_agents/typing/content.py,sha256=VdnHdW8PHDCtX_ffvcwQz-7ypPUQNSGqHa3txFE_72Y,3676
-grasp_agents/typing/converters.py,sha256=kHlocHQS8QnduZOzNPbj3aRD8JpvJd53oudYqWdOxKE,2978
-grasp_agents/typing/events.py,sha256=QbrvXnDmXFr9_cSsdqL9f35eQLOfZ2O0h3a6yCRtKwY,2625
-grasp_agents/typing/io.py,sha256=_9G_pfEc41mHCLZ5lJ4N4qn7I-QDSOrdK8EFnQrSP18,315
-grasp_agents/typing/message.py,sha256=jTdN6-wVftfzplRjAz9zEWCpx_wPOBgcaMl0fcnsioU,2415
-grasp_agents/typing/tool.py,sha256=AfC6dsFiNAhM80yI9eh1Qm5U5WzJXsTHogXK-Zn4_cE,2495
-grasp_agents/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-grasp_agents/workflow/looped_workflow.py,sha256=QqXclXYxsW6C8Rxkf3dRaMHi-DfCvCbjvFZr4nBl1u0,4299
-grasp_agents/workflow/parallel_processor.py,sha256=Xyzs2UR_mRe2GFgzzadHOhqgMu3rFjd3GUjvmZimt_k,3505
-grasp_agents/workflow/sequential_workflow.py,sha256=Pl7jl9ZVDu-rC5UMfympEaQN8iG3kZurVF5eIPG62XA,2130
-grasp_agents/workflow/workflow_processor.py,sha256=2-iaDIlgNXgj-ClGbiE3fYfSv-N_qRC49Gf_dF6M_40,2640
-grasp_agents-0.3.10.dist-info/METADATA,sha256=0TRQcYhukNlCH9iB7iaXL6wQRGbQV3PN_Ttf2-S-1mc,6807
-grasp_agents-0.3.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-grasp_agents-0.3.10.dist-info/licenses/LICENSE.md,sha256=-nNNdWqGB8gJ2O-peFQ2Irshv5tW5pHKyTcYkwvH7CE,1201
-grasp_agents-0.3.10.dist-info/RECORD,,
{grasp_agents-0.3.10.dist-info → grasp_agents-0.4.0.dist-info}/WHEEL
File without changes

{grasp_agents-0.3.10.dist-info → grasp_agents-0.4.0.dist-info}/licenses/LICENSE.md
File without changes