ommlds 0.0.0.dev450__py3-none-any.whl → 0.0.0.dev452__py3-none-any.whl
This diff compares the published contents of two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
- ommlds/.omlish-manifests.json +12 -12
- ommlds/backends/anthropic/protocol/_marshal.py +2 -2
- ommlds/backends/anthropic/protocol/sse/_marshal.py +1 -1
- ommlds/backends/anthropic/protocol/sse/assemble.py +1 -1
- ommlds/backends/anthropic/protocol/types.py +30 -9
- ommlds/backends/google/protocol/_marshal.py +1 -1
- ommlds/backends/openai/protocol/_common.py +18 -0
- ommlds/backends/openai/protocol/_marshal.py +3 -2
- ommlds/backends/openai/protocol/chatcompletion/chunk.py +4 -0
- ommlds/backends/openai/protocol/chatcompletion/contentpart.py +15 -7
- ommlds/backends/openai/protocol/chatcompletion/message.py +10 -0
- ommlds/backends/openai/protocol/chatcompletion/request.py +25 -7
- ommlds/backends/openai/protocol/chatcompletion/response.py +10 -0
- ommlds/backends/openai/protocol/chatcompletion/responseformat.py +6 -0
- ommlds/backends/openai/protocol/chatcompletion/tokenlogprob.py +4 -0
- ommlds/backends/openai/protocol/completionusage.py +5 -0
- ommlds/cli/main.py +2 -2
- ommlds/cli/sessions/chat/code.py +34 -19
- ommlds/cli/sessions/chat/inject.py +4 -4
- ommlds/cli/sessions/chat/interactive.py +2 -1
- ommlds/cli/sessions/chat/printing.py +6 -2
- ommlds/cli/sessions/chat/prompt.py +28 -27
- ommlds/cli/sessions/chat/tools.py +13 -14
- ommlds/cli/tools/config.py +1 -1
- ommlds/cli/tools/inject.py +4 -1
- ommlds/minichain/__init__.py +32 -8
- ommlds/minichain/_marshal.py +39 -0
- ommlds/minichain/backends/impls/anthropic/chat.py +82 -10
- ommlds/minichain/backends/impls/anthropic/names.py +3 -3
- ommlds/minichain/backends/impls/anthropic/stream.py +7 -7
- ommlds/minichain/backends/impls/google/chat.py +48 -22
- ommlds/minichain/backends/impls/google/stream.py +8 -4
- ommlds/minichain/backends/impls/llamacpp/chat.py +23 -17
- ommlds/minichain/backends/impls/llamacpp/format.py +4 -2
- ommlds/minichain/backends/impls/llamacpp/stream.py +6 -6
- ommlds/minichain/backends/impls/mistral.py +1 -1
- ommlds/minichain/backends/impls/mlx/chat.py +1 -1
- ommlds/minichain/backends/impls/openai/chat.py +6 -3
- ommlds/minichain/backends/impls/openai/format.py +80 -61
- ommlds/minichain/backends/impls/openai/format2.py +210 -0
- ommlds/minichain/backends/impls/openai/stream.py +9 -6
- ommlds/minichain/backends/impls/tinygrad/chat.py +10 -5
- ommlds/minichain/backends/impls/transformers/transformers.py +20 -16
- ommlds/minichain/chat/_marshal.py +16 -9
- ommlds/minichain/chat/choices/adapters.py +3 -3
- ommlds/minichain/chat/choices/types.py +2 -2
- ommlds/minichain/chat/history.py +1 -1
- ommlds/minichain/chat/messages.py +55 -19
- ommlds/minichain/chat/services.py +2 -2
- ommlds/minichain/chat/stream/_marshal.py +16 -0
- ommlds/minichain/chat/stream/adapters.py +39 -28
- ommlds/minichain/chat/stream/services.py +2 -2
- ommlds/minichain/chat/stream/types.py +20 -13
- ommlds/minichain/chat/tools/execution.py +8 -7
- ommlds/minichain/chat/tools/ids.py +9 -15
- ommlds/minichain/chat/tools/parsing.py +17 -26
- ommlds/minichain/chat/transforms/base.py +29 -38
- ommlds/minichain/chat/transforms/metadata.py +30 -4
- ommlds/minichain/chat/transforms/services.py +5 -7
- ommlds/minichain/content/_marshal.py +24 -3
- ommlds/minichain/content/json.py +13 -0
- ommlds/minichain/content/materialize.py +13 -20
- ommlds/minichain/content/prepare.py +4 -0
- ommlds/minichain/json.py +20 -0
- ommlds/minichain/lib/fs/context.py +15 -1
- ommlds/minichain/lib/fs/errors.py +6 -0
- ommlds/minichain/lib/fs/tools/edit.py +104 -0
- ommlds/minichain/lib/fs/tools/ls.py +2 -2
- ommlds/minichain/lib/fs/tools/read.py +2 -2
- ommlds/minichain/lib/fs/tools/recursivels/execution.py +2 -2
- ommlds/minichain/lib/todo/context.py +29 -2
- ommlds/minichain/lib/todo/tools/read.py +11 -6
- ommlds/minichain/lib/todo/tools/write.py +73 -13
- ommlds/minichain/lib/todo/types.py +6 -1
- ommlds/minichain/llms/_marshal.py +1 -1
- ommlds/minichain/services/_marshal.py +1 -1
- ommlds/minichain/tools/_marshal.py +1 -1
- ommlds/minichain/tools/execution/catalog.py +2 -1
- ommlds/minichain/tools/execution/executors.py +8 -3
- ommlds/minichain/tools/execution/reflect.py +43 -5
- ommlds/minichain/tools/fns.py +46 -9
- ommlds/minichain/tools/jsonschema.py +5 -6
- ommlds/minichain/tools/reflect.py +2 -2
- ommlds/minichain/tools/types.py +24 -1
- ommlds/minichain/vectors/_marshal.py +1 -1
- ommlds/server/server.py +1 -1
- ommlds/tools/git.py +18 -2
- ommlds/tools/ocr.py +7 -1
- {ommlds-0.0.0.dev450.dist-info → ommlds-0.0.0.dev452.dist-info}/METADATA +3 -3
- {ommlds-0.0.0.dev450.dist-info → ommlds-0.0.0.dev452.dist-info}/RECORD +94 -89
- {ommlds-0.0.0.dev450.dist-info → ommlds-0.0.0.dev452.dist-info}/WHEEL +0 -0
- {ommlds-0.0.0.dev450.dist-info → ommlds-0.0.0.dev452.dist-info}/entry_points.txt +0 -0
- {ommlds-0.0.0.dev450.dist-info → ommlds-0.0.0.dev452.dist-info}/licenses/LICENSE +0 -0
- {ommlds-0.0.0.dev450.dist-info → ommlds-0.0.0.dev452.dist-info}/top_level.txt +0 -0

ommlds/minichain/backends/impls/anthropic/chat.py

@@ -1,4 +1,5 @@
 """
+https://docs.claude.com/en/api/messages
 https://github.com/anthropics/anthropic-sdk-python/tree/cd80d46f7a223a5493565d155da31b898a4c6ee5/src/anthropic/types
 https://github.com/anthropics/anthropic-sdk-python/blob/cd80d46f7a223a5493565d155da31b898a4c6ee5/src/anthropic/resources/completions.py#L53
 https://github.com/anthropics/anthropic-sdk-python/blob/cd80d46f7a223a5493565d155da31b898a4c6ee5/src/anthropic/resources/messages.py#L70
@@ -6,21 +7,29 @@ https://github.com/anthropics/anthropic-sdk-python/blob/cd80d46f7a223a5493565d15
 import typing as ta
 
 from omlish import check
-from omlish import
+from omlish import marshal as msh
 from omlish import typedvalues as tv
 from omlish.formats import json
 from omlish.http import all as http
 
+from .....backends.anthropic.protocol import types as pt
 from ....chat.choices.services import ChatChoicesRequest
 from ....chat.choices.services import ChatChoicesResponse
 from ....chat.choices.services import static_check_is_chat_choices_service
 from ....chat.choices.types import AiChoice
 from ....chat.messages import AiMessage
+from ....chat.messages import AnyAiMessage
 from ....chat.messages import Message
 from ....chat.messages import SystemMessage
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.messages import UserMessage
+from ....chat.tools.types import Tool
+from ....content.prepare import prepare_content_str
 from ....models.configs import ModelName
 from ....standard import ApiKey
+from ....tools.jsonschema import build_tool_spec_params_json_schema
+from ....tools.types import ToolUse
 from .names import MODEL_NAMES
 
 
@@ -39,6 +48,7 @@ class AnthropicChatChoicesService:
         SystemMessage: 'system',
         UserMessage: 'user',
         AiMessage: 'assistant',
+        ToolUseMessage: 'assistant',
     }
 
     def __init__(
@@ -68,26 +78,73 @@ class AnthropicChatChoicesService:
             *,
             max_tokens: int = 4096,  # FIXME: ChatOption
     ) -> ChatChoicesResponse:
-        messages = []
-        system:
+        messages: list[pt.Message] = []
+        system: list[pt.Content] | None = None
         for i, m in enumerate(request.v):
             if isinstance(m, SystemMessage):
                 if i != 0 or system is not None:
                     raise Exception('Only supports one system message and must be first')
-                system = self._get_msg_content(m)
+                system = [pt.Text(check.not_none(self._get_msg_content(m)))]
+
+            elif isinstance(m, ToolUseResultMessage):
+                messages.append(pt.Message(
+                    role='user',
+                    content=[pt.ToolResult(
+                        tool_use_id=check.not_none(m.tur.id),
+                        content=json.dumps_compact(msh.marshal(m.tur.c)) if not isinstance(m.tur.c, str) else m.tur.c,
+                    )],
+                ))
+
+            elif isinstance(m, AiMessage):
+                # messages.append(pt.Message(
+                #     role=self.ROLES_MAP[type(m)],  # noqa
+                #     content=[pt.Text(check.isinstance(self._get_msg_content(m), str))],
+                # ))
+                messages.append(pt.Message(
+                    role='assistant',
+                    content=[
+                        *([pt.Text(check.isinstance(m.c, str))] if m.c is not None else []),
+                    ],
+                ))
+
+            elif isinstance(m, ToolUseMessage):
+                messages.append(pt.Message(
+                    role='assistant',
+                    content=[
+                        pt.ToolUse(
+                            id=check.not_none(m.tu.id),
+                            name=check.not_none(m.tu.name),
+                            input=m.tu.args,
+                        ),
+                    ],
+                ))
+
             else:
-                messages.append(
-                    role=self.ROLES_MAP[type(m)],  #
-                    content=check.isinstance(self._get_msg_content(m), str),
+                messages.append(pt.Message(
+                    role=self.ROLES_MAP[type(m)],  # type: ignore[arg-type]
+                    content=[pt.Text(check.isinstance(self._get_msg_content(m), str))],
                 ))
 
-
+        tools: list[pt.ToolSpec] = []
+        with tv.TypedValues(*request.options).consume() as oc:
+            t: Tool
+            for t in oc.pop(Tool, []):
+                tools.append(pt.ToolSpec(
+                    name=check.not_none(t.spec.name),
+                    description=prepare_content_str(t.spec.desc),
+                    input_schema=build_tool_spec_params_json_schema(t.spec),
+                ))
+
+        a_req = pt.MessagesRequest(
             model=MODEL_NAMES.resolve(self._model_name.v),
-
+            system=system,
             messages=messages,
+            tools=tools or None,
             max_tokens=max_tokens,
         )
 
+        raw_request = msh.marshal(a_req)
+
         raw_response = http.request(
             'https://api.anthropic.com/v1/messages',
             headers={
@@ -100,6 +157,21 @@ class AnthropicChatChoicesService:
 
         response = json.loads(check.not_none(raw_response.data).decode('utf-8'))
 
+        out: list[AnyAiMessage] = []
+        for c in response['content']:
+            if c['type'] == 'text':
+                out.append(AiMessage(
+                    check.not_none(c['text']),
+                ))
+            elif c['type'] == 'tool_use':
+                out.append(ToolUseMessage(ToolUse(
+                    id=c['id'],
+                    name=c['name'],
+                    args=c['input'],
+                )))
+            else:
+                raise TypeError(c['type'])
+
         return ChatChoicesResponse([
-            AiChoice(
+            AiChoice(out),
         ])
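
Net effect of the chat.py changes: tool calls and tool results now round-trip through the typed `pt` protocol objects, which `msh.marshal` turns into the Messages API JSON body. As a rough sketch (values invented; field names per the public Anthropic Messages API, which the `pt` types mirror), the marshaled request for one tool-use round trip looks like:

```python
# Illustrative only -- invented values; shape per the public Anthropic
# Messages API that pt.MessagesRequest is modeled on.
a_req_json = {
    'model': 'claude-sonnet-4-5-20250929',
    'system': [{'type': 'text', 'text': 'You are terse.'}],
    'messages': [
        {'role': 'user', 'content': [{'type': 'text', 'text': 'What is 2 + 2?'}]},
        {'role': 'assistant', 'content': [
            {'type': 'tool_use', 'id': 'toolu_01', 'name': 'add', 'input': {'a': 2, 'b': 2}},
        ]},
        {'role': 'user', 'content': [
            {'type': 'tool_result', 'tool_use_id': 'toolu_01', 'content': '4'},
        ]},
    ],
    'tools': [{
        'name': 'add',
        'description': 'Add two numbers.',
        'input_schema': {
            'type': 'object',
            'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}},
        },
    }],
    'max_tokens': 4096,
}
```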

ommlds/minichain/backends/impls/anthropic/names.py

@@ -18,9 +18,9 @@ MODEL_NAMES = ModelNameCollection(
     'claude-opus-4-1': 'claude-opus-4-1-20250805',
     'claude-opus': 'claude-opus-4-1',
 
-    'claude-sonnet-4-
-    'claude-sonnet-4': 'claude-sonnet-4-
-    'claude-sonnet': 'claude-sonnet-4',
+    'claude-sonnet-4-5-20250929': None,
+    'claude-sonnet-4-5': 'claude-sonnet-4-5-20250929',
+    'claude-sonnet': 'claude-sonnet-4-5',
 
     'claude-3-5-haiku-latest': None,
     'claude-haiku-3-5-latest': 'claude-3-5-haiku-latest',
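
The sonnet aliases move from claude-sonnet-4 to claude-sonnet-4-5. Assuming `ModelNameCollection.resolve` follows alias chains until it reaches a key mapped to `None` (the concrete dated id) -- an assumption, since the collection's semantics aren't shown in this diff -- the lookups behave like:

```python
# Assumed resolve() semantics: follow aliases until a key maps to None.
MODEL_NAMES.resolve('claude-sonnet')      # -> 'claude-sonnet-4-5-20250929'
MODEL_NAMES.resolve('claude-sonnet-4-5')  # -> 'claude-sonnet-4-5-20250929'
MODEL_NAMES.resolve('claude-opus')        # -> 'claude-opus-4-1-20250805'
```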

ommlds/minichain/backends/impls/anthropic/stream.py

@@ -15,9 +15,9 @@ from ....chat.messages import SystemMessage
 from ....chat.stream.services import ChatChoicesStreamRequest
 from ....chat.stream.services import ChatChoicesStreamResponse
 from ....chat.stream.services import static_check_is_chat_choices_stream_service
-from ....chat.stream.types import AiChoiceDelta
 from ....chat.stream.types import AiChoiceDeltas
-from ....chat.stream.types import
+from ....chat.stream.types import AiChoicesDeltas
+from ....chat.stream.types import ContentAiChoiceDelta
 from ....configs import Config
 from ....resources import UseResources
 from ....standard import ApiKey
@@ -86,7 +86,7 @@ class AnthropicChatChoicesStreamService:
         http_client = rs.enter_context(http.client())
         http_response = rs.enter_context(http_client.stream_request(http_request))
 
-        async def inner(sink: StreamResponseSink[
+        async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
             msg_start: AnthropicSseDecoderEvents.MessageStart | None = None
             cbk_start: AnthropicSseDecoderEvents.ContentBlockStart | None = None
             msg_stop: AnthropicSseDecoderEvents.MessageStop | None = None
@@ -124,18 +124,18 @@
                         check.none(cbk_start)
                         cbk_start = ae
                         if isinstance(ae.content_block, AnthropicSseDecoderEvents.ContentBlockStart.Text):  # noqa
-                            await sink.emit([
+                            await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(
                                 ae.content_block.text,
-                            ))])
+                            )])]))
                         else:
                             raise TypeError(ae.content_block)
 
                     case AnthropicSseDecoderEvents.ContentBlockDelta():
                         check.not_none(cbk_start)
                         if isinstance(ae.delta, AnthropicSseDecoderEvents.ContentBlockDelta.TextDelta):
-                            await sink.emit([
+                            await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(
                                 ae.delta.text,
-                            ))])
+                            )])]))
                         else:
                             raise TypeError(ae.delta)
 
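
Both emit sites now build the same three-level value instead of a bare list. A minimal sketch of the nesting (the helper is hypothetical; the types are the ones imported above):

```python
async def emit_text(sink: StreamResponseSink[AiChoicesDeltas], text: str) -> None:
    # Hypothetical helper. Outer AiChoicesDeltas: all choices touched by one
    # stream event; inner AiChoiceDeltas: the deltas for a single choice;
    # innermost: a typed delta, here a text content delta.
    await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(text)])]))
```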

ommlds/minichain/backends/impls/google/chat.py

@@ -15,14 +15,16 @@ from ....chat.choices.services import ChatChoicesResponse
 from ....chat.choices.services import static_check_is_chat_choices_service
 from ....chat.choices.types import AiChoice
 from ....chat.messages import AiMessage
+from ....chat.messages import AnyAiMessage
 from ....chat.messages import Message
 from ....chat.messages import SystemMessage
-from ....chat.messages import
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.messages import UserMessage
 from ....chat.tools.types import Tool
 from ....models.configs import ModelName
 from ....standard import ApiKey
-from ....tools.types import
+from ....tools.types import ToolUse
 from .names import MODEL_NAMES
 from .tools import build_tool_spec_schema
 
@@ -59,7 +61,8 @@ class GoogleChatChoicesService:
 
     ROLES_MAP: ta.ClassVar[ta.Mapping[type[Message], str]] = {
         UserMessage: 'user',
-        AiMessage: '
+        AiMessage: 'model',
+        ToolUseMessage: 'model',
     }
 
     async def invoke(
@@ -79,25 +82,47 @@
                         text=check.not_none(self._get_msg_content(m)),
                     )],
                 )
-
+
+            elif isinstance(m, ToolUseResultMessage):
                 tr_resp_val: pt.Value
-                if m.c is None:
+                if m.tur.c is None:
                     tr_resp_val = pt.NullValue()  # type: ignore[unreachable]
-                elif isinstance(m.c, str):
-                    tr_resp_val = pt.StringValue(m.c)
+                elif isinstance(m.tur.c, str):
+                    tr_resp_val = pt.StringValue(m.tur.c)
                 else:
-                    raise TypeError(m.c)
+                    raise TypeError(m.tur.c)
                 g_contents.append(pt.Content(
                     parts=[pt.Part(
                         function_response=pt.FunctionResponse(
-                            id=m.id,
-                            name=m.name,
+                            id=m.tur.id,
+                            name=m.tur.name,
                             response={
                                 'value': tr_resp_val,
                             },
                         ),
                     )],
                 ))
+
+            elif isinstance(m, AiMessage):
+                g_contents.append(pt.Content(
+                    parts=[pt.Part(
+                        text=check.not_none(self._get_msg_content(m)),
+                    )],
+                    role='model',
+                ))
+
+            elif isinstance(m, ToolUseMessage):
+                g_contents.append(pt.Content(
+                    parts=[pt.Part(
+                        function_call=pt.FunctionCall(
+                            id=m.tu.id,
+                            name=m.tu.name,
+                            args=m.tu.args,
+                        ),
+                    )],
+                    role='model',
+                ))
+
             else:
                 g_contents.append(pt.Content(
                     parts=[pt.Part(
@@ -137,17 +162,18 @@ class GoogleChatChoicesService:
 
         ai_choices: list[AiChoice] = []
         for c in g_resp.candidates or []:
-
-
-
-
-
-
-
-
-
-
-
-
+            out: list[AnyAiMessage] = []
+            for g_resp_part in check.not_none(check.not_none(c.content).parts):
+                if (g_txt := g_resp_part.text) is not None:
+                    out.append(AiMessage(g_txt))
+                elif (g_fc := g_resp_part.function_call) is not None:
+                    out.append(ToolUseMessage(ToolUse(
+                        id=g_fc.id,
+                        name=g_fc.name,
+                        args=g_fc.args or {},
+                    )))
+                else:
+                    raise TypeError(g_resp_part)
+            ai_choices.append(AiChoice(out))
 
         return ChatChoicesResponse(ai_choices)
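
The `pt.Content`/`pt.Part` objects here are modeled on the public Gemini generateContent schema, so the new branches roughly correspond to request JSON like the following (values invented; note the tool-result `Content` is built without an explicit role in the diff):

```python
# Illustrative only -- invented values; field names per the public Gemini API.
g_contents_json = [
    {'role': 'user', 'parts': [{'text': 'What is 2 + 2?'}]},
    {'role': 'model', 'parts': [{'functionCall': {'id': 'fc_01', 'name': 'add', 'args': {'a': 2, 'b': 2}}}]},
    {'parts': [{'functionResponse': {'id': 'fc_01', 'name': 'add', 'response': {'value': '4'}}}]},
]
```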

ommlds/minichain/backends/impls/google/stream.py

@@ -19,9 +19,9 @@ from ....chat.messages import UserMessage
 from ....chat.stream.services import ChatChoicesStreamRequest
 from ....chat.stream.services import ChatChoicesStreamResponse
 from ....chat.stream.services import static_check_is_chat_choices_stream_service
-from ....chat.stream.types import AiChoiceDelta
 from ....chat.stream.types import AiChoiceDeltas
-from ....chat.stream.types import
+from ....chat.stream.types import AiChoicesDeltas
+from ....chat.stream.types import ContentAiChoiceDelta
 from ....models.configs import ModelName
 from ....resources import UseResources
 from ....standard import ApiKey
@@ -101,7 +101,7 @@ class GoogleChatChoicesStreamService:
         http_client = rs.enter_context(http.client())
         http_response = rs.enter_context(http_client.stream_request(http_request))
 
-        async def inner(sink: StreamResponseSink[
+        async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
             db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])
             while True:
                 # FIXME: read1 not on response stream protocol
@@ -117,7 +117,11 @@
                     gcr = msh.unmarshal(json.loads(l[6:]), pt.GenerateContentResponse)  # noqa
                     cnd = check.single(check.not_none(gcr.candidates))
                     for p in check.not_none(cnd.content).parts or []:
-                        await sink.emit([
+                        await sink.emit(AiChoicesDeltas([
+                            AiChoiceDeltas([
+                                ContentAiChoiceDelta(check.not_none(p.text)),
+                            ]),
+                        ]))
 
                 if not b:
                     return []
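
The `l[6:]` slice above strips the 6-byte `data: ` prefix of a server-sent-events data line before JSON-decoding it. A self-contained sketch of that framing step (standalone, stdlib only):

```python
import json


def parse_sse_data_line(line: bytes) -> dict | None:
    # SSE data lines look like b'data: {...}'; anything else (blank
    # keep-alives, comments) yields None here.
    if not line.startswith(b'data: '):
        return None
    return json.loads(line[6:])
```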

ommlds/minichain/backends/impls/llamacpp/chat.py

@@ -15,7 +15,8 @@ from ....chat.choices.services import static_check_is_chat_choices_service
 from ....chat.choices.types import AiChoice
 from ....chat.choices.types import ChatChoicesOptions
 from ....chat.messages import AiMessage
-from ....chat.messages import
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.tools.types import Tool
 from ....configs import Config
 from ....llms.types import MaxTokens
@@ -100,29 +101,34 @@ class LlamacppChatChoicesService:
 
         ims: list = []
         for rm in request.v:
-            if isinstance(rm,
+            if isinstance(rm, ToolUseResultMessage):
                 ims.append(dict(
                     role='tool',
-                    **(dict(id=rm.id) if rm.id is not None else {}),
-                    name=rm.name,
-                    content=check.isinstance(rm.c, str),
+                    **(dict(id=rm.tur.id) if rm.tur.id is not None else {}),
+                    name=rm.tur.name,
+                    content=check.isinstance(rm.tur.c, str),
                 ))
+
             elif isinstance(rm, AiMessage):
-                tcs: list[dict] = []
-                for ter in rm.tool_exec_requests or []:
-                    tcs.append(dict(
-                        id=check.not_none(ter.id),
-                        type='function',
-                        function=dict(
-                            name=ter.name,
-                            arguments=check.isinstance(ter.raw_args, str),
-                        ),
-                    ))
                 ims.append(dict(
                     role=ROLES_MAP[type(rm)],
                     **(dict(content=mc) if (mc := get_msg_content(rm)) is not None else {}),
-                    **(dict(tool_calls=tcs) if tcs else {}),
                 ))
+
+            elif isinstance(rm, ToolUseMessage):
+                ims.append(dict(
+                    role=ROLES_MAP[type(rm)],
+                    content='',
+                    tool_calls=[dict(
+                        id=check.not_none(rm.tu.id),
+                        type='function',
+                        function=dict(
+                            name=rm.tu.name,
+                            arguments=check.isinstance(rm.tu.raw_args, str),
+                        ),
+                    )],
+                ))
+
             else:
                 ims.append(dict(
                     role=ROLES_MAP[type(rm)],
@@ -137,6 +143,6 @@
         out: list[AiChoice] = []
         for c in ta.cast(ta.Any, output)['choices']:
             m = c['message']
-            out.append(AiChoice(AiMessage(m['content'])))
+            out.append(AiChoice([AiMessage(m['content'])]))
 
         return ChatChoicesResponse(out)
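
llama.cpp consumes OpenAI-style chat message dicts, so the new `ToolUseMessage` branch emits the familiar `tool_calls` shape. One such input message, with invented values:

```python
# Illustrative only -- invented id/name/arguments.
im = dict(
    role='assistant',
    content='',
    tool_calls=[dict(
        id='call_01',
        type='function',
        function=dict(
            name='add',
            arguments='{"a": 2, "b": 2}',  # raw JSON string, not a parsed dict
        ),
    )],
)
```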

ommlds/minichain/backends/impls/llamacpp/format.py

@@ -5,7 +5,8 @@ from omlish import check
 from ....chat.messages import AiMessage
 from ....chat.messages import Message
 from ....chat.messages import SystemMessage
-from ....chat.messages import
+from ....chat.messages import ToolUseMessage
+from ....chat.messages import ToolUseResultMessage
 from ....chat.messages import UserMessage
 
 
@@ -16,7 +17,8 @@ ROLES_MAP: ta.Mapping[type[Message], str] = {
     SystemMessage: 'system',
     UserMessage: 'user',
     AiMessage: 'assistant',
-
+    ToolUseMessage: 'assistant',
+    ToolUseResultMessage: 'tool',
 }
 
 

ommlds/minichain/backends/impls/llamacpp/stream.py

@@ -13,9 +13,9 @@ from ....chat.choices.services import ChatChoicesOutputs
 from ....chat.stream.services import ChatChoicesStreamRequest
 from ....chat.stream.services import ChatChoicesStreamResponse
 from ....chat.stream.services import static_check_is_chat_choices_stream_service
-from ....chat.stream.types import AiChoiceDelta
 from ....chat.stream.types import AiChoiceDeltas
-from ....chat.stream.types import
+from ....chat.stream.types import AiChoicesDeltas
+from ....chat.stream.types import ContentAiChoiceDelta
 from ....configs import Config
 from ....models.configs import ModelPath
 from ....resources import UseResources
@@ -75,10 +75,10 @@ class LlamacppChatChoicesStreamService(lang.ExitStacked):
 
         rs.enter_context(lang.defer(close_output))
 
-        async def inner(sink: StreamResponseSink[
+        async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
             for chunk in output:
                 check.state(chunk['object'] == 'chat.completion.chunk')
-                l: list[
+                l: list[AiChoiceDeltas] = []
                 for choice in chunk['choices']:
                     # FIXME: check role is assistant
                     # FIXME: stop reason
@@ -86,8 +86,8 @@
                         continue
                     if not (content := delta.get('content', '')):
                         continue
-                    l.append(
-                await sink.emit(l)
+                    l.append(AiChoiceDeltas([ContentAiChoiceDelta(content)]))
+                await sink.emit(AiChoicesDeltas(l))
             return None
 
         return await new_stream_response(rs, inner)

ommlds/minichain/backends/impls/mistral.py

@@ -90,6 +90,6 @@ class MistralChatChoicesService:
         resp_dct = json.loads(check.not_none(resp.data).decode('utf-8'))
 
         return ChatChoicesResponse([
-            AiChoice(AiMessage(c['message']['content']))
+            AiChoice([AiMessage(c['message']['content'])])
             for c in resp_dct['choices']
         ])
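
This one-line change is the same shift seen in the backends above: an `AiChoice` now wraps a list of AI-side messages rather than a single `AiMessage`, so one choice can carry text and tool-use messages together. Illustratively (invented values):

```python
choice = AiChoice([
    AiMessage('Let me add those.'),
    ToolUseMessage(ToolUse(id='tu_01', name='add', args={'a': 2, 'b': 2})),
])
```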

ommlds/minichain/backends/impls/openai/chat.py

@@ -14,17 +14,20 @@ TODO:
 import typing as ta
 
 from omlish import check
+from omlish import marshal as msh
 from omlish import typedvalues as tv
 from omlish.formats import json
 from omlish.http import all as http
 
+from .....backends.openai import protocol as pt
 from ....chat.choices.services import ChatChoicesRequest
 from ....chat.choices.services import ChatChoicesResponse
 from ....chat.choices.services import static_check_is_chat_choices_service
 from ....models.configs import ModelName
 from ....standard import ApiKey
 from ....standard import DefaultOptions
-from .
+from .format2 import OpenaiChatRequestHandler
+from .format2 import build_mc_choices_response
 from .names import MODEL_NAMES
 
 
@@ -63,7 +66,7 @@ class OpenaiChatChoicesService:
             ),
         )
 
-        raw_request = rh.
+        raw_request = msh.marshal(rh.oai_request())
 
         http_response = http.request(
             'https://api.openai.com/v1/chat/completions',
@@ -76,4 +79,4 @@
 
         raw_response = json.loads(check.not_none(http_response.data).decode('utf-8'))
 
-        return
+        return build_mc_choices_response(msh.unmarshal(raw_response, pt.ChatCompletionResponse))