ommlds 0.0.0.dev451__py3-none-any.whl → 0.0.0.dev452__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of ommlds has been flagged as possibly problematic; details are available on the registry page.
- ommlds/.omlish-manifests.json +11 -11
- ommlds/backends/anthropic/protocol/_marshal.py +1 -1
- ommlds/backends/openai/protocol/_common.py +18 -0
- ommlds/backends/openai/protocol/_marshal.py +2 -1
- ommlds/backends/openai/protocol/chatcompletion/chunk.py +4 -0
- ommlds/backends/openai/protocol/chatcompletion/contentpart.py +15 -7
- ommlds/backends/openai/protocol/chatcompletion/message.py +10 -0
- ommlds/backends/openai/protocol/chatcompletion/request.py +25 -7
- ommlds/backends/openai/protocol/chatcompletion/response.py +10 -0
- ommlds/backends/openai/protocol/chatcompletion/responseformat.py +6 -0
- ommlds/backends/openai/protocol/chatcompletion/tokenlogprob.py +4 -0
- ommlds/backends/openai/protocol/completionusage.py +5 -0
- ommlds/cli/sessions/chat/code.py +22 -17
- ommlds/cli/sessions/chat/inject.py +4 -4
- ommlds/cli/sessions/chat/interactive.py +2 -1
- ommlds/cli/sessions/chat/printing.py +2 -2
- ommlds/cli/sessions/chat/prompt.py +28 -27
- ommlds/cli/sessions/chat/tools.py +12 -12
- ommlds/minichain/__init__.py +20 -8
- ommlds/minichain/backends/impls/anthropic/chat.py +27 -23
- ommlds/minichain/backends/impls/anthropic/names.py +3 -3
- ommlds/minichain/backends/impls/anthropic/stream.py +7 -7
- ommlds/minichain/backends/impls/google/chat.py +30 -32
- ommlds/minichain/backends/impls/google/stream.py +8 -4
- ommlds/minichain/backends/impls/llamacpp/chat.py +23 -17
- ommlds/minichain/backends/impls/llamacpp/format.py +4 -2
- ommlds/minichain/backends/impls/llamacpp/stream.py +6 -6
- ommlds/minichain/backends/impls/mistral.py +1 -1
- ommlds/minichain/backends/impls/mlx/chat.py +1 -1
- ommlds/minichain/backends/impls/openai/chat.py +6 -3
- ommlds/minichain/backends/impls/openai/format.py +80 -61
- ommlds/minichain/backends/impls/openai/format2.py +210 -0
- ommlds/minichain/backends/impls/openai/stream.py +9 -6
- ommlds/minichain/backends/impls/tinygrad/chat.py +10 -5
- ommlds/minichain/backends/impls/transformers/transformers.py +20 -16
- ommlds/minichain/chat/_marshal.py +15 -8
- ommlds/minichain/chat/choices/adapters.py +3 -3
- ommlds/minichain/chat/choices/types.py +2 -2
- ommlds/minichain/chat/history.py +1 -1
- ommlds/minichain/chat/messages.py +55 -19
- ommlds/minichain/chat/services.py +2 -2
- ommlds/minichain/chat/stream/_marshal.py +16 -0
- ommlds/minichain/chat/stream/adapters.py +39 -28
- ommlds/minichain/chat/stream/services.py +2 -2
- ommlds/minichain/chat/stream/types.py +20 -13
- ommlds/minichain/chat/tools/execution.py +8 -7
- ommlds/minichain/chat/tools/ids.py +9 -15
- ommlds/minichain/chat/tools/parsing.py +17 -26
- ommlds/minichain/chat/transforms/base.py +29 -38
- ommlds/minichain/chat/transforms/metadata.py +30 -4
- ommlds/minichain/chat/transforms/services.py +5 -7
- ommlds/minichain/tools/jsonschema.py +5 -6
- ommlds/minichain/tools/types.py +24 -1
- ommlds/server/server.py +1 -1
- ommlds/tools/git.py +18 -2
- {ommlds-0.0.0.dev451.dist-info → ommlds-0.0.0.dev452.dist-info}/METADATA +3 -3
- {ommlds-0.0.0.dev451.dist-info → ommlds-0.0.0.dev452.dist-info}/RECORD +61 -58
- {ommlds-0.0.0.dev451.dist-info → ommlds-0.0.0.dev452.dist-info}/WHEEL +0 -0
- {ommlds-0.0.0.dev451.dist-info → ommlds-0.0.0.dev452.dist-info}/entry_points.txt +0 -0
- {ommlds-0.0.0.dev451.dist-info → ommlds-0.0.0.dev452.dist-info}/licenses/LICENSE +0 -0
- {ommlds-0.0.0.dev451.dist-info → ommlds-0.0.0.dev452.dist-info}/top_level.txt +0 -0
ommlds/cli/sessions/chat/prompt.py
CHANGED

@@ -10,7 +10,7 @@ from .base import ChatOptions
 from .base import ChatSession
 from .printing import ChatSessionPrinter
 from .state import ChatStateManager
-from .tools import ToolExecRequestExecutor
+from .tools import ToolUseExecutor


 ##

@@ -42,7 +42,7 @@ class PromptChatSession(ChatSession['PromptChatSession.Config']):
             chat_options: ChatOptions | None = None,
            printer: ChatSessionPrinter,
             backend_catalog: mc.BackendCatalog,
-            tool_exec_request_executor: ToolExecRequestExecutor,
+            tool_exec_request_executor: ToolUseExecutor,
     ) -> None:
         super().__init__(config)

@@ -83,11 +83,10 @@ class PromptChatSession(ChatSession['PromptChatSession.Config']):
         lst: list[str] = []
         async for o in st_resp:
             if o:
-
-                if
-                print(check.isinstance(
-                lst.append(check.isinstance(
-            check.none(m.tool_exec_requests)
+                c = check.isinstance(check.single(check.single(o.choices).deltas), mc.ContentAiChoiceDelta).c
+                if c is not None:
+                    print(check.isinstance(c, str), end='', flush=True)
+                    lst.append(check.isinstance(c, str))
         print()

         resp_m = mc.AiMessage(''.join(lst))

@@ -118,33 +117,35 @@ class PromptChatSession(ChatSession['PromptChatSession.Config']):
             (self._chat_options or []),
         ))

-        resp_m
-
+        for resp_m in response.v[0].ms:
+            new_chat.append(resp_m)

-
-
+            if isinstance(resp_m, mc.AiMessage):
+                self._printer.print(resp_m)

-
+            elif isinstance(resp_m, mc.ToolUseMessage):
+                tr: mc.ToolUse = resp_m.tu

-
-
+                # FIXME: lol
+                from ....minichain.lib.fs.context import FsContext

-
-
-
-
+                trm = await self._tool_exec_request_executor.execute_tool_use(
+                    tr,
+                    FsContext(root_dir=os.getcwd()),
+                )

-
-
+                print(trm.tur.c)
+                new_chat.append(trm)

-
-
-
-
+                response = await mdl.invoke(mc.ChatChoicesRequest(
+                    [*state.chat, *new_chat],
+                    (self._chat_options or []),
+                ))

-
-
+                resp_m = check.isinstance(check.single(response.v[0].ms), mc.AiMessage)
+                new_chat.append(resp_m)

-
+            else:
+                raise TypeError(resp_m)

         self._state_manager.extend_chat(new_chat)
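The hunks above replace the old single-`AiMessage`-per-choice shape with a list of messages per choice, walked via `response.v[0].ms`. As a rough illustration (not code from the package), dispatch over the new shape might look like the following; the `mc` alias for `ommlds.minichain` is assumed, and `handle_tool_use` is a hypothetical stand-in:

```python
# Minimal sketch of the new per-choice dispatch, assuming `response.v[0].ms`
# yields the first choice's messages as in the hunks above.
from ommlds import minichain as mc  # assumed alias, as used by the CLI code


async def consume_first_choice(response, handle_tool_use) -> None:
    for m in response.v[0].ms:
        if isinstance(m, mc.AiMessage):
            print(m.c)  # plain assistant text
        elif isinstance(m, mc.ToolUseMessage):
            await handle_tool_use(m.tu)  # m.tu is a mc.ToolUse (id / name / args)
        else:
            raise TypeError(m)
```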
ommlds/cli/sessions/chat/tools.py
CHANGED

@@ -23,7 +23,7 @@ class ToolExecutionConfirmation(lang.Abstract):
     @abc.abstractmethod
     def confirm_tool_execution_or_raise(
             self,
-            tr: mc.ToolExecRequest,
+            tr: mc.ToolUse,
             tce: mc.ToolCatalogEntry,
     ) -> ta.Awaitable[None]:
         raise NotImplementedError

@@ -32,7 +32,7 @@ class ToolExecutionConfirmation(lang.Abstract):
 class NopToolExecutionConfirmation(ToolExecutionConfirmation):
     async def confirm_tool_execution_or_raise(
             self,
-            tr: mc.ToolExecRequest,
+            tr: mc.ToolUse,
             tce: mc.ToolCatalogEntry,
     ) -> None:
         pass

@@ -41,7 +41,7 @@ class NopToolExecutionConfirmation(ToolExecutionConfirmation):
 class AskingToolExecutionConfirmation(ToolExecutionConfirmation):
     async def confirm_tool_execution_or_raise(
             self,
-            tr: mc.ToolExecRequest,
+            tr: mc.ToolUse,
             tce: mc.ToolCatalogEntry,
     ) -> None:
         tr_dct = dict(

@@ -59,17 +59,17 @@ class AskingToolExecutionConfirmation(ToolExecutionConfirmation):
 ##


-class ToolExecRequestExecutor(lang.Abstract):
+class ToolUseExecutor(lang.Abstract):
     @abc.abstractmethod
-    def
+    def execute_tool_use(
             self,
-            tr: mc.ToolExecRequest,
+            tr: mc.ToolUse,
             *ctx_items: ta.Any,
-    ) -> ta.Awaitable[mc.ToolExecResultMessage]:
+    ) -> ta.Awaitable[mc.ToolUseResultMessage]:
         raise NotImplementedError


-class ToolExecRequestExecutorImpl(ToolExecRequestExecutor):
+class ToolUseExecutorImpl(ToolUseExecutor):
     def __init__(
             self,
             *,

@@ -81,16 +81,16 @@ class ToolExecRequestExecutorImpl(ToolExecRequestExecutor):
         self._catalog = catalog
         self._confirmation = confirmation

-    async def
+    async def execute_tool_use(
             self,
-            tr: mc.ToolExecRequest,
+            tr: mc.ToolUse,
             *ctx_items: ta.Any,
-    ) -> mc.ToolExecResultMessage:
+    ) -> mc.ToolUseResultMessage:
         tce = self._catalog.by_name[check.non_empty_str(tr.name)]

         await self._confirmation.confirm_tool_execution_or_raise(tr, tce)

-        return await mc.
+        return await mc.execute_tool_use(
             mc.ToolContext(
                 tr,
                 *ctx_items,
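The executor abstraction is renamed from `ToolExecRequestExecutor` to `ToolUseExecutor`: it now takes a `mc.ToolUse` and resolves to a `mc.ToolUseResultMessage`. A minimal caller sketch, assuming an already-wired `ToolUseExecutorImpl` (catalog and confirmation construction omitted) and the attribute names shown in the diff:

```python
# Sketch only: drive the renamed executor for a single tool use.
# `executor` is assumed to be a fully constructed ToolUseExecutorImpl.
from ommlds import minichain as mc  # assumed alias, as used by the CLI code


async def run_one_tool(executor, tu):
    trm = await executor.execute_tool_use(tu)  # -> mc.ToolUseResultMessage
    print(trm.tur.c)  # the ToolUseResult's content, as the prompt session prints it
    return trm
```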
ommlds/minichain/__init__.py
CHANGED

@@ -107,14 +107,16 @@ with _lang.auto_proxy_init(
         ChatChoicesStreamOutput,
         ChatChoicesStreamOutputs,

-        ToolExecRequestDelta,
-        AiMessageDelta,
         AiChoiceDelta,
+        ContentAiChoiceDelta,
+        ToolUseAiChoiceDelta,
+
         AiChoiceDeltas,
+        AiChoicesDeltas,
     )

     from .chat.tools.execution import (  # noqa
-
+        execute_tool_use,
     )

     from .chat.transforms.base import (  # noqa

@@ -122,7 +124,6 @@ with _lang.auto_proxy_init(
         CompositeMessageTransform,
         FnMessageTransform,
         TypeFilteredMessageTransform,
-        fn_message_transform,

         ChatTransform,
         CompositeChatTransform,

@@ -153,12 +154,22 @@ with _lang.auto_proxy_init(
     )

     from .chat.messages import (  # noqa
-        AiMessage,
-        Chat,
         Message,
+        Chat,
+
+        AnyUserMessage,
+        UserChat,
+        check_user_chat,
+
+        AnyAiMessage,
+        AiChat,
+        check_ai_chat,
+
         SystemMessage,
-        ToolExecResultMessage,
         UserMessage,
+        AiMessage,
+        ToolUseMessage,
+        ToolUseResultMessage,
     )

     from .chat.services import (  # noqa

@@ -407,7 +418,8 @@ with _lang.auto_proxy_init(

         ToolSpec,

-
+        ToolUse,
+        ToolUseResult,
     )

     ##
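The export changes show the new message taxonomy: the `ToolExecRequest*` family is gone, and tool use is modeled as `ToolUse` / `ToolUseMessage` / `ToolUseResult` / `ToolUseResultMessage` alongside the existing message types. An illustrative transcript under the new taxonomy; constructor shapes follow their uses elsewhere in this diff (`AiMessage(text)`, `ToolUseMessage(ToolUse(...))`), and `Chat` is assumed to be a plain sequence of messages:

```python
# Illustrative only: building a chat under the new message types.
from ommlds import minichain as mc  # assumed alias

chat = [
    mc.SystemMessage('You are terse.'),
    mc.UserMessage('List the files changed in this release.'),
    mc.ToolUseMessage(mc.ToolUse(id='tu_1', name='git_diff', args={'rev': 'HEAD~1'})),
]
```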
ommlds/minichain/backends/impls/anthropic/chat.py
CHANGED

@@ -18,16 +18,18 @@ from ....chat.choices.services import ChatChoicesResponse
 from ....chat.choices.services import static_check_is_chat_choices_service
 from ....chat.choices.types import AiChoice
 from ....chat.messages import AiMessage
+from ....chat.messages import AnyAiMessage
 from ....chat.messages import Message
 from ....chat.messages import SystemMessage
-from ....chat.messages import ToolExecResultMessage
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.messages import UserMessage
 from ....chat.tools.types import Tool
 from ....content.prepare import prepare_content_str
 from ....models.configs import ModelName
 from ....standard import ApiKey
 from ....tools.jsonschema import build_tool_spec_params_json_schema
-from ....tools.types import ToolExecRequest
+from ....tools.types import ToolUse
 from .names import MODEL_NAMES

@@ -46,6 +48,7 @@ class AnthropicChatChoicesService:
         SystemMessage: 'system',
         UserMessage: 'user',
         AiMessage: 'assistant',
+        ToolUseMessage: 'assistant',
     }

     def __init__(

@@ -83,12 +86,12 @@ class AnthropicChatChoicesService:
                     raise Exception('Only supports one system message and must be first')
                 system = [pt.Text(check.not_none(self._get_msg_content(m)))]

-            elif isinstance(m, ToolExecResultMessage):
+            elif isinstance(m, ToolUseResultMessage):
                 messages.append(pt.Message(
                     role='user',
                     content=[pt.ToolResult(
-                        tool_use_id=check.not_none(m.id),
-                        content=json.dumps_compact(msh.marshal(m.c)) if not isinstance(m.c, str) else m.c,
+                        tool_use_id=check.not_none(m.tur.id),
+                        content=json.dumps_compact(msh.marshal(m.tur.c)) if not isinstance(m.tur.c, str) else m.tur.c,
                     )],
                 ))

@@ -97,18 +100,22 @@ class AnthropicChatChoicesService:
                 #     role=self.ROLES_MAP[type(m)],  # noqa
                 #     content=[pt.Text(check.isinstance(self._get_msg_content(m), str))],
                 # ))
-                a_tus: list[pt.ToolUse] = []
-                for tr in m.tool_exec_requests or []:
-                    a_tus.append(pt.ToolUse(
-                        id=check.not_none(tr.id),
-                        name=check.not_none(tr.name),
-                        input=tr.args,
-                    ))
                 messages.append(pt.Message(
                     role='assistant',
                     content=[
                         *([pt.Text(check.isinstance(m.c, str))] if m.c is not None else []),
-
+                    ],
+                ))
+
+            elif isinstance(m, ToolUseMessage):
+                messages.append(pt.Message(
+                    role='assistant',
+                    content=[
+                        pt.ToolUse(
+                            id=check.not_none(m.tu.id),
+                            name=check.not_none(m.tu.name),
+                            input=m.tu.args,
+                        ),
                     ],
                 ))

@@ -150,24 +157,21 @@ class AnthropicChatChoicesService:

         response = json.loads(check.not_none(raw_response.data).decode('utf-8'))

-
-        ters: list[ToolExecRequest] = []
+        out: list[AnyAiMessage] = []
         for c in response['content']:
             if c['type'] == 'text':
-
-
+                out.append(AiMessage(
+                    check.not_none(c['text']),
+                ))
             elif c['type'] == 'tool_use':
-
+                out.append(ToolUseMessage(ToolUse(
                     id=c['id'],
                     name=c['name'],
                     args=c['input'],
-                ))
+                )))
             else:
                 raise TypeError(c['type'])

         return ChatChoicesResponse([
-            AiChoice(
-                resp_c,
-                tool_exec_requests=ters if ters else None,
-            )),
+            AiChoice(out),
         ])
ommlds/minichain/backends/impls/anthropic/names.py
CHANGED

@@ -18,9 +18,9 @@ MODEL_NAMES = ModelNameCollection(
     'claude-opus-4-1': 'claude-opus-4-1-20250805',
     'claude-opus': 'claude-opus-4-1',

-    'claude-sonnet-4-20250514': None,
-    'claude-sonnet-4': 'claude-sonnet-4-20250514',
-    'claude-sonnet': 'claude-sonnet-4',
+    'claude-sonnet-4-5-20250929': None,
+    'claude-sonnet-4-5': 'claude-sonnet-4-5-20250929',
+    'claude-sonnet': 'claude-sonnet-4-5',

     'claude-3-5-haiku-latest': None,
     'claude-haiku-3-5-latest': 'claude-3-5-haiku-latest',
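The table maps aliases to more specific names, with `None` marking a terminal, canonical model ID. Assuming those semantics (this is not `ModelNameCollection`'s actual API), resolution could be a simple chase:

```python
# Assumed semantics of the alias table above: follow alias entries until one
# maps to None, i.e. until a canonical model name is reached.
def resolve(names: dict, name: str) -> str:
    while (target := names.get(name)) is not None:
        name = target
    return name

# resolve(names, 'claude-sonnet') would now yield 'claude-sonnet-4-5-20250929'.
```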
ommlds/minichain/backends/impls/anthropic/stream.py
CHANGED

@@ -15,9 +15,9 @@ from ....chat.messages import SystemMessage
 from ....chat.stream.services import ChatChoicesStreamRequest
 from ....chat.stream.services import ChatChoicesStreamResponse
 from ....chat.stream.services import static_check_is_chat_choices_stream_service
-from ....chat.stream.types import AiChoiceDelta
 from ....chat.stream.types import AiChoiceDeltas
-from ....chat.stream.types import
+from ....chat.stream.types import AiChoicesDeltas
+from ....chat.stream.types import ContentAiChoiceDelta
 from ....configs import Config
 from ....resources import UseResources
 from ....standard import ApiKey

@@ -86,7 +86,7 @@ class AnthropicChatChoicesStreamService:
         http_client = rs.enter_context(http.client())
         http_response = rs.enter_context(http_client.stream_request(http_request))

-        async def inner(sink: StreamResponseSink[
+        async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
             msg_start: AnthropicSseDecoderEvents.MessageStart | None = None
             cbk_start: AnthropicSseDecoderEvents.ContentBlockStart | None = None
             msg_stop: AnthropicSseDecoderEvents.MessageStop | None = None

@@ -124,18 +124,18 @@ class AnthropicChatChoicesStreamService:
                         check.none(cbk_start)
                         cbk_start = ae
                         if isinstance(ae.content_block, AnthropicSseDecoderEvents.ContentBlockStart.Text):  # noqa
-                            await sink.emit([
+                            await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(
                                 ae.content_block.text,
-                            ))])
+                            )])]))
                         else:
                             raise TypeError(ae.content_block)

                     case AnthropicSseDecoderEvents.ContentBlockDelta():
                         check.not_none(cbk_start)
                         if isinstance(ae.delta, AnthropicSseDecoderEvents.ContentBlockDelta.TextDelta):
-                            await sink.emit([
+                            await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(
                                 ae.delta.text,
-                            ))])
+                            )])]))
                         else:
                             raise TypeError(ae.delta)
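Stream emissions are now wrapped three layers deep: an `AiChoicesDeltas` holds one `AiChoiceDeltas` per choice, which holds `ContentAiChoiceDelta` items. A consumer sketch mirroring the prompt-session hunk earlier in this diff (`.choices`, `.deltas`, and `.c` are the attribute names it uses; single-choice, single-delta chunks are assumed):

```python
# Sketch of unwrapping the new delta nesting while printing a stream.
from omlish import check


async def print_stream(st_resp) -> None:
    async for o in st_resp:
        if not o:
            continue
        d = check.single(check.single(o.choices).deltas)  # one choice, one delta
        if d.c is not None:
            print(d.c, end='', flush=True)
    print()
```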
ommlds/minichain/backends/impls/google/chat.py
CHANGED

@@ -15,15 +15,16 @@ from ....chat.choices.services import ChatChoicesResponse
 from ....chat.choices.services import static_check_is_chat_choices_service
 from ....chat.choices.types import AiChoice
 from ....chat.messages import AiMessage
+from ....chat.messages import AnyAiMessage
 from ....chat.messages import Message
 from ....chat.messages import SystemMessage
-from ....chat.messages import ToolExecResultMessage
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.messages import UserMessage
 from ....chat.tools.types import Tool
-from ....content.types import Content
 from ....models.configs import ModelName
 from ....standard import ApiKey
-from ....tools.types import ToolExecRequest
+from ....tools.types import ToolUse
 from .names import MODEL_NAMES
 from .tools import build_tool_spec_schema

@@ -61,6 +62,7 @@ class GoogleChatChoicesService:
     ROLES_MAP: ta.ClassVar[ta.Mapping[type[Message], str]] = {
         UserMessage: 'user',
         AiMessage: 'model',
+        ToolUseMessage: 'model',
     }

     async def invoke(

@@ -81,19 +83,19 @@ class GoogleChatChoicesService:
                 )],
             )

-            elif isinstance(m, ToolExecResultMessage):
+            elif isinstance(m, ToolUseResultMessage):
                 tr_resp_val: pt.Value
-                if m.c is None:
+                if m.tur.c is None:
                     tr_resp_val = pt.NullValue()  # type: ignore[unreachable]
-                elif isinstance(m.c, str):
-                    tr_resp_val = pt.StringValue(m.c)
+                elif isinstance(m.tur.c, str):
+                    tr_resp_val = pt.StringValue(m.tur.c)
                 else:
-                    raise TypeError(m.c)
+                    raise TypeError(m.tur.c)
                 g_contents.append(pt.Content(
                     parts=[pt.Part(
                         function_response=pt.FunctionResponse(
-                            id=m.id,
-                            name=m.name,
+                            id=m.tur.id,
+                            name=m.tur.name,
                             response={
                                 'value': tr_resp_val,
                             },

@@ -102,21 +104,22 @@ class GoogleChatChoicesService:
                 ))

             elif isinstance(m, AiMessage):
-
-
-                ai_parts.append(pt.Part(
+                g_contents.append(pt.Content(
+                    parts=[pt.Part(
                         text=check.not_none(self._get_msg_content(m)),
-                )
-
-
+                    )],
+                    role='model',
+                ))
+
+            elif isinstance(m, ToolUseMessage):
+                g_contents.append(pt.Content(
+                    parts=[pt.Part(
                         function_call=pt.FunctionCall(
-                            id=
-                            name=
-                            args=
+                            id=m.tu.id,
+                            name=m.tu.name,
+                            args=m.tu.args,
                         ),
-                )
-                g_contents.append(pt.Content(
-                    parts=ai_parts,
+                    )],
                     role='model',
                 ))

@@ -159,23 +162,18 @@ class GoogleChatChoicesService:

         ai_choices: list[AiChoice] = []
         for c in g_resp.candidates or []:
-
-            ters: list[ToolExecRequest] = []
+            out: list[AnyAiMessage] = []
             for g_resp_part in check.not_none(check.not_none(c.content).parts):
                 if (g_txt := g_resp_part.text) is not None:
-
-                    ai_c = g_txt
+                    out.append(AiMessage(g_txt))
                 elif (g_fc := g_resp_part.function_call) is not None:
-
+                    out.append(ToolUseMessage(ToolUse(
                         id=g_fc.id,
                         name=g_fc.name,
                         args=g_fc.args or {},
-                    ))
+                    )))
                 else:
                     raise TypeError(g_resp_part)
-            ai_choices.append(AiChoice(
-                c=ai_c,
-                tool_exec_requests=ters if ters else None,
-            )))
+            ai_choices.append(AiChoice(out))

         return ChatChoicesResponse(ai_choices)
ommlds/minichain/backends/impls/google/stream.py
CHANGED

@@ -19,9 +19,9 @@ from ....chat.messages import UserMessage
 from ....chat.stream.services import ChatChoicesStreamRequest
 from ....chat.stream.services import ChatChoicesStreamResponse
 from ....chat.stream.services import static_check_is_chat_choices_stream_service
-from ....chat.stream.types import AiChoiceDelta
 from ....chat.stream.types import AiChoiceDeltas
-from ....chat.stream.types import
+from ....chat.stream.types import AiChoicesDeltas
+from ....chat.stream.types import ContentAiChoiceDelta
 from ....models.configs import ModelName
 from ....resources import UseResources
 from ....standard import ApiKey

@@ -101,7 +101,7 @@ class GoogleChatChoicesStreamService:
         http_client = rs.enter_context(http.client())
         http_response = rs.enter_context(http_client.stream_request(http_request))

-        async def inner(sink: StreamResponseSink[
+        async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
             db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])
             while True:
                 # FIXME: read1 not on response stream protocol

@@ -117,7 +117,11 @@ class GoogleChatChoicesStreamService:
                     gcr = msh.unmarshal(json.loads(l[6:]), pt.GenerateContentResponse)  # noqa
                     cnd = check.single(check.not_none(gcr.candidates))
                     for p in check.not_none(cnd.content).parts or []:
-                        await sink.emit([
+                        await sink.emit(AiChoicesDeltas([
+                            AiChoiceDeltas([
+                                ContentAiChoiceDelta(check.not_none(p.text)),
+                            ]),
+                        ]))

                 if not b:
                     return []
ommlds/minichain/backends/impls/llamacpp/chat.py
CHANGED

@@ -15,7 +15,8 @@ from ....chat.choices.services import static_check_is_chat_choices_service
 from ....chat.choices.types import AiChoice
 from ....chat.choices.types import ChatChoicesOptions
 from ....chat.messages import AiMessage
-from ....chat.messages import ToolExecResultMessage
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.tools.types import Tool
 from ....configs import Config
 from ....llms.types import MaxTokens

@@ -100,29 +101,34 @@ class LlamacppChatChoicesService:

         ims: list = []
         for rm in request.v:
-            if isinstance(rm, ToolExecResultMessage):
+            if isinstance(rm, ToolUseResultMessage):
                 ims.append(dict(
                     role='tool',
-                    **(dict(id=rm.id) if rm.id is not None else {}),
-                    name=rm.name,
-                    content=check.isinstance(rm.c, str),
+                    **(dict(id=rm.tur.id) if rm.tur.id is not None else {}),
+                    name=rm.tur.name,
+                    content=check.isinstance(rm.tur.c, str),
                 ))
+
             elif isinstance(rm, AiMessage):
-                tcs: list[dict] = []
-                for ter in rm.tool_exec_requests or []:
-                    tcs.append(dict(
-                        id=check.not_none(ter.id),
-                        type='function',
-                        function=dict(
-                            name=ter.name,
-                            arguments=check.isinstance(ter.raw_args, str),
-                        ),
-                    ))
                 ims.append(dict(
                     role=ROLES_MAP[type(rm)],
                     **(dict(content=mc) if (mc := get_msg_content(rm)) is not None else {}),
-                    **(dict(tool_calls=tcs) if tcs else {}),
                 ))
+
+            elif isinstance(rm, ToolUseMessage):
+                ims.append(dict(
+                    role=ROLES_MAP[type(rm)],
+                    content='',
+                    tool_calls=[dict(
+                        id=check.not_none(rm.tu.id),
+                        type='function',
+                        function=dict(
+                            name=rm.tu.name,
+                            arguments=check.isinstance(rm.tu.raw_args, str),
+                        ),
+                    )],
+                ))
+
             else:
                 ims.append(dict(
                     role=ROLES_MAP[type(rm)],

@@ -137,6 +143,6 @@ class LlamacppChatChoicesService:
         out: list[AiChoice] = []
         for c in ta.cast(ta.Any, output)['choices']:
             m = c['message']
-            out.append(AiChoice(AiMessage(m['content'])))
+            out.append(AiChoice([AiMessage(m['content'])]))

         return ChatChoicesResponse(out)
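Per the middle hunk above, a `ToolUseMessage` now becomes its own OpenAI-style `tool_calls` entry in the llama.cpp input list, rather than riding along on an `AiMessage`. The resulting dict looks like this (field values here are illustrative):

```python
# Shape of one ToolUseMessage after translation, per the hunk above.
im = {
    'role': 'assistant',
    'content': '',
    'tool_calls': [{
        'id': 'call_1',
        'type': 'function',
        'function': {
            'name': 'git_diff',
            'arguments': '{"rev": "HEAD~1"}',  # raw JSON string (tu.raw_args)
        },
    }],
}
```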
ommlds/minichain/backends/impls/llamacpp/format.py
CHANGED

@@ -5,7 +5,8 @@ from omlish import check
 from ....chat.messages import AiMessage
 from ....chat.messages import Message
 from ....chat.messages import SystemMessage
-from ....chat.messages import ToolExecResultMessage
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.messages import UserMessage


@@ -16,7 +17,8 @@ ROLES_MAP: ta.Mapping[type[Message], str] = {
     SystemMessage: 'system',
     UserMessage: 'user',
     AiMessage: 'assistant',
-
+    ToolUseMessage: 'assistant',
+    ToolUseResultMessage: 'tool',
 }
ommlds/minichain/backends/impls/llamacpp/stream.py
CHANGED

@@ -13,9 +13,9 @@ from ....chat.choices.services import ChatChoicesOutputs
 from ....chat.stream.services import ChatChoicesStreamRequest
 from ....chat.stream.services import ChatChoicesStreamResponse
 from ....chat.stream.services import static_check_is_chat_choices_stream_service
-from ....chat.stream.types import AiChoiceDelta
 from ....chat.stream.types import AiChoiceDeltas
-from ....chat.stream.types import
+from ....chat.stream.types import AiChoicesDeltas
+from ....chat.stream.types import ContentAiChoiceDelta
 from ....configs import Config
 from ....models.configs import ModelPath
 from ....resources import UseResources

@@ -75,10 +75,10 @@ class LlamacppChatChoicesStreamService(lang.ExitStacked):

         rs.enter_context(lang.defer(close_output))

-        async def inner(sink: StreamResponseSink[
+        async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
             for chunk in output:
                 check.state(chunk['object'] == 'chat.completion.chunk')
-                l: list[
+                l: list[AiChoiceDeltas] = []
                 for choice in chunk['choices']:
                     # FIXME: check role is assistant
                     # FIXME: stop reason

@@ -86,8 +86,8 @@ class LlamacppChatChoicesStreamService(lang.ExitStacked):
                         continue
                     if not (content := delta.get('content', '')):
                         continue
-                    l.append(
-                await sink.emit(l)
+                    l.append(AiChoiceDeltas([ContentAiChoiceDelta(content)]))
+                await sink.emit(AiChoicesDeltas(l))
                 return None

         return await new_stream_response(rs, inner)