ommlds 0.0.0.dev436__py3-none-any.whl → 0.0.0.dev480__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ommlds/.omlish-manifests.json +332 -35
- ommlds/__about__.py +15 -9
- ommlds/_hacks/__init__.py +4 -0
- ommlds/_hacks/funcs.py +110 -0
- ommlds/_hacks/names.py +158 -0
- ommlds/_hacks/params.py +73 -0
- ommlds/_hacks/patches.py +0 -3
- ommlds/backends/anthropic/protocol/_marshal.py +2 -2
- ommlds/backends/anthropic/protocol/sse/_marshal.py +1 -1
- ommlds/backends/anthropic/protocol/sse/assemble.py +23 -7
- ommlds/backends/anthropic/protocol/sse/events.py +13 -0
- ommlds/backends/anthropic/protocol/types.py +30 -9
- ommlds/backends/google/protocol/__init__.py +3 -0
- ommlds/backends/google/protocol/_marshal.py +16 -0
- ommlds/backends/google/protocol/types.py +626 -0
- ommlds/backends/groq/_marshal.py +23 -0
- ommlds/backends/groq/protocol.py +249 -0
- ommlds/backends/mlx/generation.py +1 -1
- ommlds/backends/mlx/loading.py +58 -1
- ommlds/backends/ollama/__init__.py +0 -0
- ommlds/backends/ollama/protocol.py +170 -0
- ommlds/backends/openai/protocol/__init__.py +9 -28
- ommlds/backends/openai/protocol/_common.py +18 -0
- ommlds/backends/openai/protocol/_marshal.py +27 -0
- ommlds/backends/openai/protocol/chatcompletion/chunk.py +58 -31
- ommlds/backends/openai/protocol/chatcompletion/contentpart.py +49 -44
- ommlds/backends/openai/protocol/chatcompletion/message.py +55 -43
- ommlds/backends/openai/protocol/chatcompletion/request.py +114 -66
- ommlds/backends/openai/protocol/chatcompletion/response.py +71 -45
- ommlds/backends/openai/protocol/chatcompletion/responseformat.py +27 -20
- ommlds/backends/openai/protocol/chatcompletion/tokenlogprob.py +16 -7
- ommlds/backends/openai/protocol/completionusage.py +24 -15
- ommlds/backends/tavily/__init__.py +0 -0
- ommlds/backends/tavily/protocol.py +301 -0
- ommlds/backends/tinygrad/models/llama3/__init__.py +22 -14
- ommlds/backends/transformers/__init__.py +0 -0
- ommlds/backends/transformers/filecache.py +109 -0
- ommlds/backends/transformers/streamers.py +73 -0
- ommlds/cli/asyncs.py +30 -0
- ommlds/cli/backends/catalog.py +93 -0
- ommlds/cli/backends/configs.py +9 -0
- ommlds/cli/backends/inject.py +31 -36
- ommlds/cli/backends/injection.py +16 -0
- ommlds/cli/backends/types.py +46 -0
- ommlds/cli/content/__init__.py +0 -0
- ommlds/cli/content/messages.py +34 -0
- ommlds/cli/content/strings.py +42 -0
- ommlds/cli/inject.py +15 -32
- ommlds/cli/inputs/__init__.py +0 -0
- ommlds/cli/inputs/asyncs.py +32 -0
- ommlds/cli/inputs/sync.py +75 -0
- ommlds/cli/main.py +270 -110
- ommlds/cli/rendering/__init__.py +0 -0
- ommlds/cli/rendering/configs.py +9 -0
- ommlds/cli/rendering/inject.py +31 -0
- ommlds/cli/rendering/markdown.py +52 -0
- ommlds/cli/rendering/raw.py +73 -0
- ommlds/cli/rendering/types.py +21 -0
- ommlds/cli/secrets.py +21 -0
- ommlds/cli/sessions/base.py +1 -1
- ommlds/cli/sessions/chat/chat/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/ai/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/ai/configs.py +11 -0
- ommlds/cli/sessions/chat/chat/ai/inject.py +74 -0
- ommlds/cli/sessions/chat/chat/ai/injection.py +14 -0
- ommlds/cli/sessions/chat/chat/ai/rendering.py +70 -0
- ommlds/cli/sessions/chat/chat/ai/services.py +79 -0
- ommlds/cli/sessions/chat/chat/ai/tools.py +44 -0
- ommlds/cli/sessions/chat/chat/ai/types.py +28 -0
- ommlds/cli/sessions/chat/chat/state/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/state/configs.py +11 -0
- ommlds/cli/sessions/chat/chat/state/inject.py +36 -0
- ommlds/cli/sessions/chat/chat/state/inmemory.py +33 -0
- ommlds/cli/sessions/chat/chat/state/storage.py +52 -0
- ommlds/cli/sessions/chat/chat/state/types.py +38 -0
- ommlds/cli/sessions/chat/chat/user/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/user/configs.py +17 -0
- ommlds/cli/sessions/chat/chat/user/inject.py +62 -0
- ommlds/cli/sessions/chat/chat/user/interactive.py +31 -0
- ommlds/cli/sessions/chat/chat/user/oneshot.py +25 -0
- ommlds/cli/sessions/chat/chat/user/types.py +15 -0
- ommlds/cli/sessions/chat/configs.py +27 -0
- ommlds/cli/sessions/chat/driver.py +43 -0
- ommlds/cli/sessions/chat/inject.py +33 -65
- ommlds/cli/sessions/chat/phases/__init__.py +0 -0
- ommlds/cli/sessions/chat/phases/inject.py +27 -0
- ommlds/cli/sessions/chat/phases/injection.py +14 -0
- ommlds/cli/sessions/chat/phases/manager.py +29 -0
- ommlds/cli/sessions/chat/phases/types.py +29 -0
- ommlds/cli/sessions/chat/session.py +27 -0
- ommlds/cli/sessions/chat/tools/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/configs.py +22 -0
- ommlds/cli/sessions/chat/tools/confirmation.py +46 -0
- ommlds/cli/sessions/chat/tools/execution.py +66 -0
- ommlds/cli/sessions/chat/tools/fs/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/fs/configs.py +12 -0
- ommlds/cli/sessions/chat/tools/fs/inject.py +35 -0
- ommlds/cli/sessions/chat/tools/inject.py +88 -0
- ommlds/cli/sessions/chat/tools/injection.py +44 -0
- ommlds/cli/sessions/chat/tools/rendering.py +58 -0
- ommlds/cli/sessions/chat/tools/todo/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/todo/configs.py +12 -0
- ommlds/cli/sessions/chat/tools/todo/inject.py +31 -0
- ommlds/cli/sessions/chat/tools/weather/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/weather/configs.py +12 -0
- ommlds/cli/sessions/chat/tools/weather/inject.py +22 -0
- ommlds/cli/{tools/weather.py → sessions/chat/tools/weather/tools.py} +1 -1
- ommlds/cli/sessions/completion/configs.py +21 -0
- ommlds/cli/sessions/completion/inject.py +42 -0
- ommlds/cli/sessions/completion/session.py +35 -0
- ommlds/cli/sessions/embedding/configs.py +21 -0
- ommlds/cli/sessions/embedding/inject.py +42 -0
- ommlds/cli/sessions/embedding/session.py +33 -0
- ommlds/cli/sessions/inject.py +28 -11
- ommlds/cli/state/__init__.py +0 -0
- ommlds/cli/state/inject.py +28 -0
- ommlds/cli/{state.py → state/storage.py} +41 -24
- ommlds/minichain/__init__.py +84 -24
- ommlds/minichain/_marshal.py +49 -9
- ommlds/minichain/_typedvalues.py +2 -4
- ommlds/minichain/backends/catalogs/base.py +20 -1
- ommlds/minichain/backends/catalogs/simple.py +2 -2
- ommlds/minichain/backends/catalogs/strings.py +10 -8
- ommlds/minichain/backends/impls/anthropic/chat.py +65 -27
- ommlds/minichain/backends/impls/anthropic/names.py +10 -8
- ommlds/minichain/backends/impls/anthropic/protocol.py +109 -0
- ommlds/minichain/backends/impls/anthropic/stream.py +111 -43
- ommlds/minichain/backends/impls/duckduckgo/search.py +1 -1
- ommlds/minichain/backends/impls/dummy/__init__.py +0 -0
- ommlds/minichain/backends/impls/dummy/chat.py +69 -0
- ommlds/minichain/backends/impls/google/chat.py +114 -22
- ommlds/minichain/backends/impls/google/search.py +7 -2
- ommlds/minichain/backends/impls/google/stream.py +219 -0
- ommlds/minichain/backends/impls/google/tools.py +149 -0
- ommlds/minichain/backends/impls/groq/__init__.py +0 -0
- ommlds/minichain/backends/impls/groq/chat.py +75 -0
- ommlds/minichain/backends/impls/groq/names.py +48 -0
- ommlds/minichain/backends/impls/groq/protocol.py +143 -0
- ommlds/minichain/backends/impls/groq/stream.py +125 -0
- ommlds/minichain/backends/impls/llamacpp/chat.py +33 -18
- ommlds/minichain/backends/impls/llamacpp/completion.py +1 -1
- ommlds/minichain/backends/impls/llamacpp/format.py +4 -2
- ommlds/minichain/backends/impls/llamacpp/stream.py +37 -20
- ommlds/minichain/backends/impls/mistral.py +20 -5
- ommlds/minichain/backends/impls/mlx/chat.py +96 -22
- ommlds/minichain/backends/impls/ollama/__init__.py +0 -0
- ommlds/minichain/backends/impls/ollama/chat.py +199 -0
- ommlds/minichain/backends/impls/openai/chat.py +18 -8
- ommlds/minichain/backends/impls/openai/completion.py +10 -3
- ommlds/minichain/backends/impls/openai/embedding.py +10 -3
- ommlds/minichain/backends/impls/openai/format.py +131 -106
- ommlds/minichain/backends/impls/openai/names.py +31 -5
- ommlds/minichain/backends/impls/openai/stream.py +43 -25
- ommlds/minichain/backends/impls/tavily.py +66 -0
- ommlds/minichain/backends/impls/tinygrad/chat.py +23 -16
- ommlds/minichain/backends/impls/transformers/sentence.py +1 -1
- ommlds/minichain/backends/impls/transformers/tokens.py +1 -1
- ommlds/minichain/backends/impls/transformers/transformers.py +155 -34
- ommlds/minichain/backends/strings/parsing.py +1 -1
- ommlds/minichain/backends/strings/resolving.py +4 -1
- ommlds/minichain/chat/_marshal.py +16 -9
- ommlds/minichain/chat/choices/adapters.py +4 -4
- ommlds/minichain/chat/choices/services.py +1 -1
- ommlds/minichain/chat/choices/stream/__init__.py +0 -0
- ommlds/minichain/chat/choices/stream/adapters.py +35 -0
- ommlds/minichain/chat/choices/stream/joining.py +31 -0
- ommlds/minichain/chat/choices/stream/services.py +45 -0
- ommlds/minichain/chat/choices/stream/types.py +43 -0
- ommlds/minichain/chat/choices/types.py +2 -2
- ommlds/minichain/chat/history.py +3 -3
- ommlds/minichain/chat/messages.py +55 -19
- ommlds/minichain/chat/services.py +3 -3
- ommlds/minichain/chat/stream/_marshal.py +16 -0
- ommlds/minichain/chat/stream/joining.py +85 -0
- ommlds/minichain/chat/stream/services.py +15 -21
- ommlds/minichain/chat/stream/types.py +32 -19
- ommlds/minichain/chat/tools/execution.py +8 -7
- ommlds/minichain/chat/tools/ids.py +9 -15
- ommlds/minichain/chat/tools/parsing.py +17 -26
- ommlds/minichain/chat/transforms/base.py +29 -38
- ommlds/minichain/chat/transforms/metadata.py +30 -4
- ommlds/minichain/chat/transforms/services.py +9 -11
- ommlds/minichain/content/_marshal.py +44 -20
- ommlds/minichain/content/json.py +13 -0
- ommlds/minichain/content/materialize.py +14 -21
- ommlds/minichain/content/prepare.py +4 -0
- ommlds/minichain/content/transforms/interleave.py +1 -1
- ommlds/minichain/content/transforms/squeeze.py +1 -1
- ommlds/minichain/content/transforms/stringify.py +1 -1
- ommlds/minichain/json.py +20 -0
- ommlds/minichain/lib/code/__init__.py +0 -0
- ommlds/minichain/lib/code/prompts.py +6 -0
- ommlds/minichain/lib/fs/binfiles.py +108 -0
- ommlds/minichain/lib/fs/context.py +126 -0
- ommlds/minichain/lib/fs/errors.py +101 -0
- ommlds/minichain/lib/fs/suggestions.py +36 -0
- ommlds/minichain/lib/fs/tools/__init__.py +0 -0
- ommlds/minichain/lib/fs/tools/edit.py +104 -0
- ommlds/minichain/lib/fs/tools/ls.py +38 -0
- ommlds/minichain/lib/fs/tools/read.py +115 -0
- ommlds/minichain/lib/fs/tools/recursivels/__init__.py +0 -0
- ommlds/minichain/lib/fs/tools/recursivels/execution.py +40 -0
- ommlds/minichain/lib/todo/__init__.py +0 -0
- ommlds/minichain/lib/todo/context.py +54 -0
- ommlds/minichain/lib/todo/tools/__init__.py +0 -0
- ommlds/minichain/lib/todo/tools/read.py +44 -0
- ommlds/minichain/lib/todo/tools/write.py +335 -0
- ommlds/minichain/lib/todo/types.py +60 -0
- ommlds/minichain/llms/_marshal.py +25 -17
- ommlds/minichain/llms/types.py +4 -0
- ommlds/minichain/registries/globals.py +18 -4
- ommlds/minichain/resources.py +66 -43
- ommlds/minichain/search.py +1 -1
- ommlds/minichain/services/_marshal.py +46 -39
- ommlds/minichain/services/facades.py +3 -3
- ommlds/minichain/services/services.py +1 -1
- ommlds/minichain/standard.py +8 -0
- ommlds/minichain/stream/services.py +152 -38
- ommlds/minichain/stream/wrap.py +22 -24
- ommlds/minichain/tools/_marshal.py +1 -1
- ommlds/minichain/tools/execution/catalog.py +2 -1
- ommlds/minichain/tools/execution/context.py +34 -14
- ommlds/minichain/tools/execution/errors.py +15 -0
- ommlds/minichain/tools/execution/executors.py +8 -3
- ommlds/minichain/tools/execution/reflect.py +40 -5
- ommlds/minichain/tools/fns.py +46 -9
- ommlds/minichain/tools/jsonschema.py +14 -5
- ommlds/minichain/tools/reflect.py +54 -18
- ommlds/minichain/tools/types.py +33 -1
- ommlds/minichain/utils.py +27 -0
- ommlds/minichain/vectors/_marshal.py +11 -10
- ommlds/nanochat/LICENSE +21 -0
- ommlds/nanochat/__init__.py +0 -0
- ommlds/nanochat/rustbpe/LICENSE +21 -0
- ommlds/nanochat/tokenizers.py +406 -0
- ommlds/server/server.py +3 -3
- ommlds/specs/__init__.py +0 -0
- ommlds/specs/mcp/__init__.py +0 -0
- ommlds/specs/mcp/_marshal.py +23 -0
- ommlds/specs/mcp/protocol.py +266 -0
- ommlds/tools/git.py +27 -10
- ommlds/tools/ocr.py +8 -9
- ommlds/wiki/analyze.py +2 -2
- ommlds/wiki/text/mfh.py +1 -5
- ommlds/wiki/text/wtp.py +1 -3
- ommlds/wiki/utils/xml.py +5 -5
- {ommlds-0.0.0.dev436.dist-info → ommlds-0.0.0.dev480.dist-info}/METADATA +24 -21
- ommlds-0.0.0.dev480.dist-info/RECORD +427 -0
- ommlds/cli/backends/standard.py +0 -20
- ommlds/cli/sessions/chat/base.py +0 -42
- ommlds/cli/sessions/chat/interactive.py +0 -73
- ommlds/cli/sessions/chat/printing.py +0 -96
- ommlds/cli/sessions/chat/prompt.py +0 -143
- ommlds/cli/sessions/chat/state.py +0 -109
- ommlds/cli/sessions/chat/tools.py +0 -91
- ommlds/cli/sessions/completion/completion.py +0 -44
- ommlds/cli/sessions/embedding/embedding.py +0 -42
- ommlds/cli/tools/config.py +0 -13
- ommlds/cli/tools/inject.py +0 -64
- ommlds/minichain/chat/stream/adapters.py +0 -69
- ommlds/minichain/lib/fs/ls/execution.py +0 -32
- ommlds-0.0.0.dev436.dist-info/RECORD +0 -303
- /ommlds/{cli/tools → backends/google}/__init__.py +0 -0
- /ommlds/{minichain/lib/fs/ls → backends/groq}/__init__.py +0 -0
- /ommlds/{huggingface.py → backends/huggingface.py} +0 -0
- /ommlds/minichain/lib/fs/{ls → tools/recursivels}/rendering.py +0 -0
- /ommlds/minichain/lib/fs/{ls → tools/recursivels}/running.py +0 -0
- {ommlds-0.0.0.dev436.dist-info → ommlds-0.0.0.dev480.dist-info}/WHEEL +0 -0
- {ommlds-0.0.0.dev436.dist-info → ommlds-0.0.0.dev480.dist-info}/entry_points.txt +0 -0
- {ommlds-0.0.0.dev436.dist-info → ommlds-0.0.0.dev480.dist-info}/licenses/LICENSE +0 -0
- {ommlds-0.0.0.dev436.dist-info → ommlds-0.0.0.dev480.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
"""
|
|
2
|
+
https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
|
|
3
|
+
"""
|
|
4
|
+
import typing as ta
|
|
5
|
+
|
|
6
|
+
from omlish import check
|
|
7
|
+
from omlish import marshal as msh
|
|
8
|
+
from omlish import typedvalues as tv
|
|
9
|
+
from omlish.formats import json
|
|
10
|
+
from omlish.http import all as http
|
|
11
|
+
from omlish.io.buffers import DelimitingBuffer
|
|
12
|
+
|
|
13
|
+
from .....backends.google.protocol import types as pt
|
|
14
|
+
from ....chat.choices.stream.services import ChatChoicesStreamRequest
|
|
15
|
+
from ....chat.choices.stream.services import ChatChoicesStreamResponse
|
|
16
|
+
from ....chat.choices.stream.services import static_check_is_chat_choices_stream_service
|
|
17
|
+
from ....chat.choices.stream.types import AiChoiceDeltas
|
|
18
|
+
from ....chat.choices.stream.types import AiChoicesDeltas
|
|
19
|
+
from ....chat.choices.types import ChatChoicesOutputs
|
|
20
|
+
from ....chat.messages import AiMessage
|
|
21
|
+
from ....chat.messages import Message
|
|
22
|
+
from ....chat.messages import SystemMessage
|
|
23
|
+
from ....chat.messages import ToolUseMessage
|
|
24
|
+
from ....chat.messages import ToolUseResultMessage
|
|
25
|
+
from ....chat.messages import UserMessage
|
|
26
|
+
from ....chat.stream.types import ContentAiDelta
|
|
27
|
+
from ....chat.stream.types import ToolUseAiDelta
|
|
28
|
+
from ....chat.tools.types import Tool
|
|
29
|
+
from ....models.configs import ModelName
|
|
30
|
+
from ....resources import UseResources
|
|
31
|
+
from ....standard import ApiKey
|
|
32
|
+
from ....stream.services import StreamResponseSink
|
|
33
|
+
from ....stream.services import new_stream_response
|
|
34
|
+
from .names import MODEL_NAMES
|
|
35
|
+
from .tools import build_tool_spec_schema
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
##
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
#     name='google',
#     type='ChatChoicesStreamService',
# )
@static_check_is_chat_choices_stream_service
class GoogleChatChoicesStreamService:
    """
    Streaming chat-choices service backed by the Google generative-language REST API
    (``:streamGenerateContent?alt=sse``). The SSE byte stream is split line-by-line and each
    ``data:`` payload is unmarshalled into a ``pt.GenerateContentResponse`` and re-emitted as
    ``AiChoicesDeltas`` through the stream-response sink.
    """

    DEFAULT_MODEL_NAME: ta.ClassVar[ModelName] = ModelName(check.not_none(MODEL_NAMES.default))

    def __init__(
            self,
            *configs: ApiKey | ModelName,
            http_client: http.AsyncHttpClient | None = None,
    ) -> None:
        super().__init__()

        self._http_client = http_client

        # Consume typed-value configs: model name (with class default) and api key
        # (falling back to the GEMINI_API_KEY environment variable).
        with tv.consume(*configs) as cc:
            self._model_name = cc.pop(self.DEFAULT_MODEL_NAME)
            self._api_key = ApiKey.pop_secret(cc, env='GEMINI_API_KEY')

    def _make_str_content(
            self,
            s: str | None,
            *,
            role: pt.ContentRole | None = None,
    ) -> pt.Content | None:
        """Wrap a plain string into a single-part ``pt.Content``, or return None when *s* is None."""

        if s is None:
            return None

        return pt.Content(
            parts=[pt.Part(
                text=check.not_none(s),
            )],
            role=role,
        )

    def _make_msg_content(self, m: Message) -> pt.Content:
        """Translate one minichain ``Message`` into the Google protocol's ``pt.Content``."""

        if isinstance(m, (AiMessage, SystemMessage, UserMessage)):
            # Plain text messages: the content must already be a str.
            return check.not_none(self._make_str_content(
                check.isinstance(m.c, str),
                role=self.ROLES_MAP[type(m)],
            ))

        elif isinstance(m, ToolUseResultMessage):
            # Tool results go back as a function_response part, with the result value
            # wrapped under a 'value' key. Only None and str results are supported.
            tr_resp_val: pt.Value
            if m.tur.c is None:
                tr_resp_val = pt.NullValue()  # type: ignore[unreachable]
            elif isinstance(m.tur.c, str):
                tr_resp_val = pt.StringValue(m.tur.c)
            else:
                raise TypeError(m.tur.c)
            return pt.Content(
                parts=[pt.Part(
                    function_response=pt.FunctionResponse(
                        id=m.tur.id,
                        name=m.tur.name,
                        response={
                            'value': tr_resp_val,
                        },
                    ),
                )],
            )

        elif isinstance(m, ToolUseMessage):
            # A prior model-side tool call is replayed as a function_call part with role 'model'.
            return pt.Content(
                parts=[pt.Part(
                    function_call=pt.FunctionCall(
                        id=m.tu.id,
                        name=m.tu.name,
                        args=m.tu.args,
                    ),
                )],
                role='model',
            )

        else:
            raise TypeError(m)

    BASE_URL: ta.ClassVar[str] = 'https://generativelanguage.googleapis.com/v1beta/models'

    # System messages carry no role here — a leading SystemMessage is routed into the
    # request's system_instruction instead (see invoke()).
    ROLES_MAP: ta.ClassVar[ta.Mapping[type[Message], pt.ContentRole | None]] = {  # noqa
        SystemMessage: None,
        UserMessage: 'user',
        AiMessage: 'model',
    }

    # NOTE(review): presumably -1 means "read whatever is available" for read1 — confirm
    # against the http stream implementation.
    READ_CHUNK_SIZE: ta.ClassVar[int] = -1

    async def invoke(
            self,
            request: ChatChoicesStreamRequest,
    ) -> ChatChoicesStreamResponse:
        key = check.not_none(self._api_key).reveal()

        msgs = list(request.v)

        # A leading SystemMessage becomes the request's system_instruction.
        system_inst: pt.Content | None = None
        if msgs and isinstance(m0 := msgs[0], SystemMessage):
            system_inst = self._make_msg_content(m0)
            msgs.pop(0)

        # Collect any Tool options into protocol tool declarations.
        g_tools: list[pt.Tool] = []
        with tv.TypedValues(*request.options).consume() as oc:
            t: Tool
            for t in oc.pop(Tool, []):
                g_tools.append(pt.Tool(
                    function_declarations=[build_tool_spec_schema(t.spec)],
                ))

        g_req = pt.GenerateContentRequest(
            contents=[
                self._make_msg_content(m)
                for m in msgs
            ],
            tools=g_tools or None,
            system_instruction=system_inst,
        )

        req_dct = msh.marshal(g_req)

        model_name = MODEL_NAMES.resolve(self._model_name.v)

        http_request = http.HttpRequest(
            f'{self.BASE_URL.rstrip("/")}/{model_name}:streamGenerateContent?alt=sse&key={key}',
            headers={'Content-Type': 'application/json'},
            data=json.dumps_compact(req_dct).encode('utf-8'),
            method='POST',
        )

        async with UseResources.or_new(request.options) as rs:
            http_client = await rs.enter_async_context(http.manage_async_client(self._http_client))
            http_response = await rs.enter_async_context(await http_client.stream_request(http_request))

            async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
                # Split the SSE byte stream on any newline convention.
                db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])
                while True:
                    b = await http_response.stream.read1(self.READ_CHUNK_SIZE)
                    for bl in db.feed(b):
                        if isinstance(bl, DelimitingBuffer.Incomplete):
                            # FIXME: handle
                            return []

                        l = bl.decode('utf-8')
                        if not l:
                            continue

                        if l.startswith('data: '):
                            gcr = msh.unmarshal(json.loads(l[6:]), pt.GenerateContentResponse)  # noqa
                            cnd = check.single(check.not_none(gcr.candidates))

                            for p in check.not_none(cnd.content).parts or []:
                                if (txt := p.text) is not None:
                                    # A part is either text or a function call, never both.
                                    check.none(p.function_call)
                                    await sink.emit(AiChoicesDeltas([
                                        AiChoiceDeltas([
                                            ContentAiDelta(check.not_none(txt)),
                                        ]),
                                    ]))

                                elif (fc := p.function_call) is not None:
                                    check.none(p.text)
                                    await sink.emit(AiChoicesDeltas([
                                        AiChoiceDeltas([
                                            ToolUseAiDelta(
                                                id=fc.id,
                                                name=fc.name,
                                                args=fc.args,
                                            ),
                                        ]),
                                    ]))

                                else:
                                    raise ValueError(p)

                    # EOF: an empty read terminates the stream.
                    if not b:
                        return []

            return await new_stream_response(rs, inner)
|
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
"""
|
|
2
|
+
https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
|
|
3
|
+
"""
|
|
4
|
+
import typing as ta
|
|
5
|
+
|
|
6
|
+
from omlish import check
|
|
7
|
+
from omlish import dataclasses as dc
|
|
8
|
+
|
|
9
|
+
from .....backends.google.protocol import types as pt
|
|
10
|
+
from ....content.prepare import ContentStrPreparer
|
|
11
|
+
from ....content.prepare import default_content_str_preparer
|
|
12
|
+
from ....tools.types import EnumToolDtype
|
|
13
|
+
from ....tools.types import MappingToolDtype
|
|
14
|
+
from ....tools.types import NullableToolDtype
|
|
15
|
+
from ....tools.types import ObjectToolDtype
|
|
16
|
+
from ....tools.types import PrimitiveToolDtype
|
|
17
|
+
from ....tools.types import SequenceToolDtype
|
|
18
|
+
from ....tools.types import ToolDtype
|
|
19
|
+
from ....tools.types import ToolSpec
|
|
20
|
+
from ....tools.types import TupleToolDtype
|
|
21
|
+
from ....tools.types import UnionToolDtype
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
##
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _shallow_dc_asdict_not_none(o: ta.Any) -> dict[str, ta.Any]:
    """Shallow-convert dataclass *o* to a dict, dropping keys whose value is None."""

    out: dict[str, ta.Any] = {}
    for key, value in dc.shallow_asdict(o).items():
        if value is not None:
            out[key] = value
    return out
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# Maps minichain primitive dtype names to the Google protocol's upper-case ``pt.Type``
# values. Object schemas are not listed here — they are built structurally by
# ``ToolSchemaRenderer.render_type`` for ``ObjectToolDtype``.
PT_TYPE_BY_PRIMITIVE_TYPE: ta.Mapping[str, pt.Type] = {
    'string': 'STRING',
    'number': 'NUMBER',
    'integer': 'INTEGER',
    'boolean': 'BOOLEAN',
    'array': 'ARRAY',
    'null': 'NULL',
}
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class ToolSchemaRenderer:
    """
    Renders minichain tool dtypes and ``ToolSpec``s into Google-protocol ``pt.Schema`` /
    ``pt.FunctionDeclaration`` objects.
    """

    def __init__(
            self,
            *,
            content_str_preparer: ContentStrPreparer | None = None,
    ) -> None:
        super().__init__()

        # Fall back to the default preparer for rendering description strings.
        if content_str_preparer is None:
            content_str_preparer = default_content_str_preparer()
        self._content_str_preparer = content_str_preparer

    def render_type(self, t: ToolDtype) -> pt.Schema:
        """Recursively convert a ``ToolDtype`` into a ``pt.Schema``; raises TypeError on unknown dtypes."""

        if isinstance(t, PrimitiveToolDtype):
            return pt.Schema(type=PT_TYPE_BY_PRIMITIVE_TYPE[t.type])

        if isinstance(t, UnionToolDtype):
            return pt.Schema(
                any_of=[self.render_type(a) for a in t.args],
            )

        if isinstance(t, NullableToolDtype):
            # Re-render the inner type and overlay nullable=True on its fields.
            return pt.Schema(**{
                **_shallow_dc_asdict_not_none(self.render_type(t.type)),
                **dict(nullable=True),
            })

        if isinstance(t, SequenceToolDtype):
            return pt.Schema(
                type='ARRAY',
                items=self.render_type(t.element),
            )

        if isinstance(t, MappingToolDtype):
            # FIXME: t.key
            # return {
            #     'type': 'object',
            #     'additionalProperties': self.render_type(t.value),
            # }
            raise NotImplementedError

        if isinstance(t, TupleToolDtype):
            # return {
            #     'type': 'array',
            #     'prefixItems': [self.render_type(e) for e in t.elements],
            # }
            raise NotImplementedError

        if isinstance(t, EnumToolDtype):
            # Render the underlying type, then attach the enum values.
            return pt.Schema(**{
                **_shallow_dc_asdict_not_none(self.render_type(t.type)),
                **dict(enum=list(t.values)),
            })

        if isinstance(t, ObjectToolDtype):
            return pt.Schema(
                type='OBJECT',
                properties={
                    k: self.render_type(v)
                    for k, v in t.fields.items()
                },
            )

        raise TypeError(t)

    def render_tool_params(self, ts: ToolSpec) -> pt.Schema:
        """Build the OBJECT-typed parameters schema for *ts* (properties + required names)."""

        pr_dct: dict[str, pt.Schema] | None = None
        req_lst: list[str] | None = None
        if ts.params is not None:
            pr_dct = {}
            req_lst = []
            for p in ts.params or []:
                pr_dct[check.non_empty_str(p.name)] = pt.Schema(**{
                    **(dict(description=self._content_str_preparer.prepare_str(p.desc)) if p.desc is not None else {}),
                    **(_shallow_dc_asdict_not_none(self.render_type(p.type)) if p.type is not None else {}),
                })
                if p.required:
                    req_lst.append(check.non_empty_str(p.name))

        return pt.Schema(
            type='OBJECT',
            **(dict(properties=pr_dct) if pr_dct is not None else {}),  # type: ignore[arg-type]
            **(dict(required=req_lst) if req_lst is not None else {}),  # type: ignore[arg-type]
        )

    def render_tool(self, ts: ToolSpec) -> pt.FunctionDeclaration:
        """Build the full function declaration for *ts*, including return-type schema when present."""

        ret_dct = {
            **(dict(description=self._content_str_preparer.prepare_str(ts.returns_desc)) if ts.returns_desc is not None else {}),  # noqa
            **(_shallow_dc_asdict_not_none(self.render_type(ts.returns_type)) if ts.returns_type is not None else {}),
        }

        return pt.FunctionDeclaration(
            name=check.non_empty_str(ts.name),
            description=self._content_str_preparer.prepare_str(ts.desc) if ts.desc is not None else None,  # type: ignore[arg-type] # noqa
            behavior='BLOCKING',
            parameters=self.render_tool_params(ts) if ts.params else None,
            response=(pt.Schema(**ret_dct) if ret_dct else None),
        )
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
##
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def build_tool_spec_schema(ts: ToolSpec) -> pt.FunctionDeclaration:
    """Render *ts* as a Google-protocol function declaration using a default renderer."""

    return ToolSchemaRenderer().render_tool(ts)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def build_tool_spec_params_schema(ts: ToolSpec) -> pt.Schema:
    """Render only the parameters schema of *ts* using a default renderer."""

    return ToolSchemaRenderer().render_tool_params(ts)
|
|
File without changes
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import typing as ta
|
|
2
|
+
|
|
3
|
+
from omlish import check
|
|
4
|
+
from omlish import marshal as msh
|
|
5
|
+
from omlish import typedvalues as tv
|
|
6
|
+
from omlish.formats import json
|
|
7
|
+
from omlish.http import all as http
|
|
8
|
+
|
|
9
|
+
from .....backends.groq import protocol as pt
|
|
10
|
+
from ....chat.choices.services import ChatChoicesRequest
|
|
11
|
+
from ....chat.choices.services import ChatChoicesResponse
|
|
12
|
+
from ....chat.choices.services import static_check_is_chat_choices_service
|
|
13
|
+
from ....chat.tools.types import Tool
|
|
14
|
+
from ....models.configs import ModelName
|
|
15
|
+
from ....standard import ApiKey
|
|
16
|
+
from ....standard import DefaultOptions
|
|
17
|
+
from .names import MODEL_NAMES
|
|
18
|
+
from .protocol import build_gq_request_messages
|
|
19
|
+
from .protocol import build_gq_request_tool
|
|
20
|
+
from .protocol import build_mc_choices_response
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
##
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
#     name='groq',
#     type='ChatChoicesService',
# )
@static_check_is_chat_choices_service
class GroqChatChoicesService:
    """
    Non-streaming chat-choices service backed by the Groq OpenAI-compatible chat-completions
    endpoint. Requests/responses are translated via the ``build_gq_*`` / ``build_mc_*``
    protocol helpers.
    """

    DEFAULT_MODEL_NAME: ta.ClassVar[ModelName] = ModelName(check.not_none(MODEL_NAMES.default))

    def __init__(
            self,
            *configs: ApiKey | ModelName | DefaultOptions,
            http_client: http.AsyncHttpClient | None = None,
    ) -> None:
        super().__init__()

        self._http_client = http_client

        # Consume typed-value configs: model name (with class default), api key
        # (GROQ_API_KEY env fallback), and any default options.
        with tv.consume(*configs) as cc:
            self._model_name = cc.pop(self.DEFAULT_MODEL_NAME)
            self._api_key = ApiKey.pop_secret(cc, env='GROQ_API_KEY')
            # NOTE(review): popped here but not visibly applied in invoke() — confirm intent.
            self._default_options: tv.TypedValues = DefaultOptions.pop(cc)

    async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
        # Collect any Tool options into protocol tool declarations.
        tools: list[pt.ChatCompletionRequest.Tool] = []
        with tv.TypedValues(*request.options).consume() as oc:
            t: Tool
            for t in oc.pop(Tool, []):
                tools.append(build_gq_request_tool(t))

        gq_request = pt.ChatCompletionRequest(
            messages=build_gq_request_messages(request.v),
            model=MODEL_NAMES.resolve(self._model_name.v),
            tools=tools or None,
        )

        raw_request = msh.marshal(gq_request)

        http_response = await http.async_request(
            'https://api.groq.com/openai/v1/chat/completions',
            headers={
                http.consts.HEADER_CONTENT_TYPE: http.consts.CONTENT_TYPE_JSON,
                http.consts.HEADER_AUTH: http.consts.format_bearer_auth_header(check.not_none(self._api_key).reveal()),
            },
            data=json.dumps(raw_request).encode('utf-8'),
            client=self._http_client,
        )

        raw_response = json.loads(check.not_none(http_response.data).decode('utf-8'))

        return build_mc_choices_response(msh.unmarshal(raw_response, pt.ChatCompletionResponse))
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""
|
|
2
|
+
https://console.groq.com/docs/models
|
|
3
|
+
|
|
4
|
+
curl -X GET "https://api.groq.com/openai/v1/models" \
|
|
5
|
+
-H "Authorization: Bearer $GROQ_API_KEY" \
|
|
6
|
+
-H "Content-Type: application/json"
|
|
7
|
+
|
|
8
|
+
"compound-beta",
|
|
9
|
+
"compound-beta-mini",
|
|
10
|
+
"gemma2-9b-it",
|
|
11
|
+
"llama-3.1-8b-instant",
|
|
12
|
+
"llama-3.3-70b-versatile",
|
|
13
|
+
"meta-llama/llama-4-maverick-17b-128e-instruct",
|
|
14
|
+
"meta-llama/llama-4-scout-17b-16e-instruct",
|
|
15
|
+
"meta-llama/llama-guard-4-12b",
|
|
16
|
+
"moonshotai/kimi-k2-instruct",
|
|
17
|
+
"openai/gpt-oss-120b",
|
|
18
|
+
"openai/gpt-oss-20b",
|
|
19
|
+
"qwen/qwen3-32b",
|
|
20
|
+
"""
|
|
21
|
+
from ....models.names import ModelNameCollection
|
|
22
|
+
from ...strings.manifests import BackendStringsManifest
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
##
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# Model-name registry for the Groq backend (see module docstring for the
# upstream model list).  Aliases map a short name to a fully-qualified one;
# a None value appears to mark the key itself as already canonical —
# TODO confirm against ModelNameCollection semantics.
MODEL_NAMES = ModelNameCollection(
    default='gpt-oss-120b',
    aliases={
        'gpt-oss-120b': 'openai/gpt-oss-120b',
        'openai/gpt-oss-120b': None,

        'gpt-oss-20b': 'openai/gpt-oss-20b',
        'openai/gpt-oss-20b': None,
    },
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# Registers the 'groq' backend string with the manifest machinery, advertising
# the listed service kinds and this module's model-name registry.
# @omlish-manifest
_BACKEND_STRINGS_MANIFEST = BackendStringsManifest(
    [
        'ChatChoicesService',
        'ChatChoicesStreamService',
    ],
    'groq',
    model_names=MODEL_NAMES,
)
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
import itertools
|
|
2
|
+
|
|
3
|
+
from omlish import check
|
|
4
|
+
from omlish.formats import json
|
|
5
|
+
|
|
6
|
+
from .....backends.groq import protocol as pt
|
|
7
|
+
from ....chat.choices.services import ChatChoicesResponse
|
|
8
|
+
from ....chat.choices.stream.types import AiChoiceDeltas
|
|
9
|
+
from ....chat.choices.types import AiChoice
|
|
10
|
+
from ....chat.messages import AiMessage
|
|
11
|
+
from ....chat.messages import AnyAiMessage
|
|
12
|
+
from ....chat.messages import Chat
|
|
13
|
+
from ....chat.messages import SystemMessage
|
|
14
|
+
from ....chat.messages import ToolUseMessage
|
|
15
|
+
from ....chat.messages import ToolUseResultMessage
|
|
16
|
+
from ....chat.messages import UserMessage
|
|
17
|
+
from ....chat.stream.types import AiDelta
|
|
18
|
+
from ....chat.stream.types import ContentAiDelta
|
|
19
|
+
from ....chat.stream.types import ToolUseAiDelta
|
|
20
|
+
from ....chat.tools.types import Tool
|
|
21
|
+
from ....content.prepare import prepare_content_str
|
|
22
|
+
from ....tools.jsonschema import build_tool_spec_params_json_schema
|
|
23
|
+
from ....tools.types import ToolUse
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
##
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def build_gq_request_messages(chat: Chat) -> list[pt.ChatCompletionRequest.Message]:
    """Convert a chat transcript into the Groq request message list.

    Consecutive AI-side messages (AiMessage / ToolUseMessage) are coalesced so
    that each emitted AssistantMessage carries the tool calls that followed it
    inline, as the pt.ChatCompletionRequest.AssistantMessage schema requires.

    Raises:
        TypeError: on a message type this adapter does not handle.
    """
    gq_msgs: list[pt.ChatCompletionRequest.Message] = []

    # Split the chat into alternating runs of AI-side vs. non-AI-side messages.
    for _, g in itertools.groupby(chat, lambda mc_m: isinstance(mc_m, AnyAiMessage)):
        mc_msgs = list(g)

        if isinstance(mc_msgs[0], AnyAiMessage):
            # Pair each AiMessage with the ToolUseMessages immediately following it.
            tups: list[tuple[AiMessage | None, list[ToolUseMessage]]] = []
            for mc_msg in mc_msgs:
                if isinstance(mc_msg, AiMessage):
                    tups.append((mc_msg, []))

                elif isinstance(mc_msg, ToolUseMessage):
                    if not tups:
                        # Tool use with no preceding AiMessage in this run:
                        # emit a content-less assistant message to hold it.
                        tups.append((None, []))
                    tups[-1][1].append(mc_msg)

                else:
                    raise TypeError(mc_msg)

            for mc_ai_msg, mc_tu_msgs in tups:
                gq_msgs.append(pt.ChatCompletionRequest.AssistantMessage(
                    content=check.isinstance(mc_ai_msg.c, str) if mc_ai_msg is not None else None,
                    tool_calls=[
                        pt.ChatCompletionRequest.AssistantMessage.ToolCall(
                            function=pt.ChatCompletionRequest.AssistantMessage.ToolCall.Function(
                                name=mc_tu_msg.tu.name,
                                arguments=check.not_none(mc_tu_msg.tu.raw_args),
                            ),
                            id=check.not_none(mc_tu_msg.tu.id),
                        )
                        for mc_tu_msg in mc_tu_msgs
                    ] if mc_tu_msgs else None,
                ))

        else:
            # Non-AI messages translate one-to-one.
            for mc_msg in mc_msgs:
                if isinstance(mc_msg, SystemMessage):
                    gq_msgs.append(pt.ChatCompletionRequest.SystemMessage(
                        content=check.isinstance(mc_msg.c, str),
                    ))

                elif isinstance(mc_msg, UserMessage):
                    gq_msgs.append(pt.ChatCompletionRequest.UserMessage(
                        content=check.isinstance(mc_msg.c, str),
                    ))

                elif isinstance(mc_msg, ToolUseResultMessage):
                    gq_msgs.append(pt.ChatCompletionRequest.ToolMessage(
                        tool_call_id=check.not_none(mc_msg.tur.id),
                        content=check.isinstance(mc_msg.tur.c, str),
                    ))

                else:
                    raise TypeError(mc_msg)

    return gq_msgs
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def build_gq_request_tool(t: Tool) -> pt.ChatCompletionRequest.Tool:
    """Translate a Tool spec into the Groq request tool schema."""

    spec = t.spec

    # Name is mandatory on the wire; description and parameters are derived
    # from the spec's content and JSON-schema builders.
    fn = pt.ChatCompletionRequest.Tool.Function(
        name=check.not_none(spec.name),
        description=prepare_content_str(spec.desc),
        parameters=build_tool_spec_params_json_schema(spec),
    )

    return pt.ChatCompletionRequest.Tool(function=fn)
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def build_mc_choices_response(gq_resp: pt.ChatCompletionResponse) -> ChatChoicesResponse:
    """Adapt a Groq chat-completion response into a ChatChoicesResponse.

    Each Groq choice becomes an AiChoice whose message list holds an optional
    text AiMessage followed by one ToolUseMessage per tool call.
    """

    choices: list[AiChoice] = []

    for gq_choice in gq_resp.choices:
        gq_msg = gq_choice.message

        msgs: list[AnyAiMessage] = []

        # Plain text content, when present, becomes a single AiMessage.
        if gq_msg.content is not None:
            msgs.append(AiMessage(check.isinstance(gq_msg.content, str)))

        # Keep the raw argument string alongside the parsed form; absent
        # arguments parse as an empty object.
        for gq_tc in gq_msg.tool_calls or []:
            msgs.append(ToolUseMessage(ToolUse(
                id=gq_tc.id,
                name=gq_tc.function.name,
                args=json.loads(gq_tc.function.arguments or '{}'),
                raw_args=gq_tc.function.arguments,
            )))

        choices.append(AiChoice(msgs))

    return ChatChoicesResponse(choices)
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def build_mc_ai_choice_deltas(delta: pt.ChatCompletionChunk.Choice.Delta) -> AiChoiceDeltas:
    """Adapt one Groq streaming chunk delta into AiChoiceDeltas.

    Assistant (or role-less) deltas yield content and tool-use deltas.
    Deltas on other roles are tolerated only on the 'analysis' or
    'commentary' channel, where they are dropped.

    Raises:
        ValueError: on a delta with an unrecognized role/channel combination.
    """

    if delta.role not in (None, 'assistant'):
        # Reasoning-channel deltas from other roles are silently discarded.
        if delta.channel in ('analysis', 'commentary'):
            return AiChoiceDeltas([])
        raise ValueError(delta)

    out: list[AiDelta] = []

    if delta.content is not None:
        out.append(ContentAiDelta(delta.content))

    for tc in delta.tool_calls or []:
        fn = check.not_none(tc.function)
        out.append(ToolUseAiDelta(
            id=tc.id,
            name=check.not_none(fn.name),
            args=json.loads(fn.arguments or '{}'),
        ))

    return AiChoiceDeltas(out)
|