ommlds 0.0.0.dev456__py3-none-any.whl → 0.0.0.dev485__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ommlds/.omlish-manifests.json +314 -33
- ommlds/__about__.py +15 -9
- ommlds/_hacks/__init__.py +4 -0
- ommlds/_hacks/funcs.py +110 -0
- ommlds/_hacks/names.py +158 -0
- ommlds/_hacks/params.py +73 -0
- ommlds/_hacks/patches.py +0 -3
- ommlds/backends/anthropic/protocol/__init__.py +13 -1
- ommlds/backends/anthropic/protocol/_dataclasses.py +1625 -0
- ommlds/backends/anthropic/protocol/sse/assemble.py +22 -6
- ommlds/backends/anthropic/protocol/sse/events.py +13 -0
- ommlds/backends/google/protocol/__init__.py +13 -0
- ommlds/backends/google/protocol/_dataclasses.py +5997 -0
- ommlds/backends/google/protocol/types.py +5 -1
- ommlds/backends/groq/__init__.py +7 -0
- ommlds/backends/groq/_dataclasses.py +3901 -0
- ommlds/backends/groq/_marshal.py +23 -0
- ommlds/backends/groq/protocol.py +249 -0
- ommlds/backends/llamacpp/logging.py +4 -1
- ommlds/backends/mlx/caching.py +7 -3
- ommlds/backends/mlx/cli.py +10 -7
- ommlds/backends/mlx/generation.py +18 -16
- ommlds/backends/mlx/limits.py +10 -6
- ommlds/backends/mlx/loading.py +65 -5
- ommlds/backends/ollama/__init__.py +7 -0
- ommlds/backends/ollama/_dataclasses.py +3458 -0
- ommlds/backends/ollama/protocol.py +170 -0
- ommlds/backends/openai/protocol/__init__.py +15 -1
- ommlds/backends/openai/protocol/_dataclasses.py +7708 -0
- ommlds/backends/tavily/__init__.py +7 -0
- ommlds/backends/tavily/_dataclasses.py +1734 -0
- ommlds/backends/tavily/protocol.py +301 -0
- ommlds/backends/tinygrad/models/llama3/__init__.py +22 -14
- ommlds/backends/transformers/__init__.py +14 -0
- ommlds/backends/transformers/filecache.py +109 -0
- ommlds/backends/transformers/streamers.py +73 -0
- ommlds/cli/__init__.py +7 -0
- ommlds/cli/_dataclasses.py +2562 -0
- ommlds/cli/asyncs.py +30 -0
- ommlds/cli/backends/catalog.py +93 -0
- ommlds/cli/backends/configs.py +9 -0
- ommlds/cli/backends/inject.py +31 -36
- ommlds/cli/backends/injection.py +16 -0
- ommlds/cli/backends/types.py +46 -0
- ommlds/cli/content/messages.py +34 -0
- ommlds/cli/content/strings.py +42 -0
- ommlds/cli/inject.py +15 -32
- ommlds/cli/inputs/__init__.py +0 -0
- ommlds/cli/inputs/asyncs.py +32 -0
- ommlds/cli/inputs/sync.py +75 -0
- ommlds/cli/main.py +267 -128
- ommlds/cli/rendering/__init__.py +0 -0
- ommlds/cli/rendering/configs.py +9 -0
- ommlds/cli/rendering/inject.py +31 -0
- ommlds/cli/rendering/markdown.py +52 -0
- ommlds/cli/rendering/raw.py +73 -0
- ommlds/cli/rendering/types.py +21 -0
- ommlds/cli/secrets.py +21 -0
- ommlds/cli/sessions/base.py +1 -1
- ommlds/cli/sessions/chat/chat/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/ai/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/ai/configs.py +11 -0
- ommlds/cli/sessions/chat/chat/ai/inject.py +74 -0
- ommlds/cli/sessions/chat/chat/ai/injection.py +14 -0
- ommlds/cli/sessions/chat/chat/ai/rendering.py +70 -0
- ommlds/cli/sessions/chat/chat/ai/services.py +79 -0
- ommlds/cli/sessions/chat/chat/ai/tools.py +44 -0
- ommlds/cli/sessions/chat/chat/ai/types.py +28 -0
- ommlds/cli/sessions/chat/chat/state/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/state/configs.py +11 -0
- ommlds/cli/sessions/chat/chat/state/inject.py +36 -0
- ommlds/cli/sessions/chat/chat/state/inmemory.py +33 -0
- ommlds/cli/sessions/chat/chat/state/storage.py +52 -0
- ommlds/cli/sessions/chat/chat/state/types.py +38 -0
- ommlds/cli/sessions/chat/chat/user/__init__.py +0 -0
- ommlds/cli/sessions/chat/chat/user/configs.py +17 -0
- ommlds/cli/sessions/chat/chat/user/inject.py +62 -0
- ommlds/cli/sessions/chat/chat/user/interactive.py +31 -0
- ommlds/cli/sessions/chat/chat/user/oneshot.py +25 -0
- ommlds/cli/sessions/chat/chat/user/types.py +15 -0
- ommlds/cli/sessions/chat/configs.py +27 -0
- ommlds/cli/sessions/chat/driver.py +43 -0
- ommlds/cli/sessions/chat/inject.py +33 -65
- ommlds/cli/sessions/chat/phases/__init__.py +0 -0
- ommlds/cli/sessions/chat/phases/inject.py +27 -0
- ommlds/cli/sessions/chat/phases/injection.py +14 -0
- ommlds/cli/sessions/chat/phases/manager.py +29 -0
- ommlds/cli/sessions/chat/phases/types.py +29 -0
- ommlds/cli/sessions/chat/session.py +27 -0
- ommlds/cli/sessions/chat/tools/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/configs.py +22 -0
- ommlds/cli/sessions/chat/tools/confirmation.py +46 -0
- ommlds/cli/sessions/chat/tools/execution.py +66 -0
- ommlds/cli/sessions/chat/tools/fs/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/fs/configs.py +12 -0
- ommlds/cli/sessions/chat/tools/fs/inject.py +35 -0
- ommlds/cli/sessions/chat/tools/inject.py +88 -0
- ommlds/cli/sessions/chat/tools/injection.py +44 -0
- ommlds/cli/sessions/chat/tools/rendering.py +58 -0
- ommlds/cli/sessions/chat/tools/todo/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/todo/configs.py +12 -0
- ommlds/cli/sessions/chat/tools/todo/inject.py +31 -0
- ommlds/cli/sessions/chat/tools/weather/__init__.py +0 -0
- ommlds/cli/sessions/chat/tools/weather/configs.py +12 -0
- ommlds/cli/sessions/chat/tools/weather/inject.py +22 -0
- ommlds/cli/{tools/weather.py → sessions/chat/tools/weather/tools.py} +1 -1
- ommlds/cli/sessions/completion/configs.py +21 -0
- ommlds/cli/sessions/completion/inject.py +42 -0
- ommlds/cli/sessions/completion/session.py +35 -0
- ommlds/cli/sessions/embedding/configs.py +21 -0
- ommlds/cli/sessions/embedding/inject.py +42 -0
- ommlds/cli/sessions/embedding/session.py +33 -0
- ommlds/cli/sessions/inject.py +28 -11
- ommlds/cli/state/__init__.py +0 -0
- ommlds/cli/state/inject.py +28 -0
- ommlds/cli/{state.py → state/storage.py} +41 -24
- ommlds/minichain/__init__.py +46 -17
- ommlds/minichain/_dataclasses.py +15401 -0
- ommlds/minichain/backends/catalogs/base.py +20 -1
- ommlds/minichain/backends/catalogs/simple.py +2 -2
- ommlds/minichain/backends/catalogs/strings.py +10 -8
- ommlds/minichain/backends/impls/anthropic/chat.py +31 -65
- ommlds/minichain/backends/impls/anthropic/names.py +3 -4
- ommlds/minichain/backends/impls/anthropic/protocol.py +109 -0
- ommlds/minichain/backends/impls/anthropic/stream.py +53 -31
- ommlds/minichain/backends/impls/duckduckgo/search.py +5 -1
- ommlds/minichain/backends/impls/dummy/__init__.py +0 -0
- ommlds/minichain/backends/impls/dummy/chat.py +69 -0
- ommlds/minichain/backends/impls/google/chat.py +9 -2
- ommlds/minichain/backends/impls/google/search.py +6 -1
- ommlds/minichain/backends/impls/google/stream.py +122 -32
- ommlds/minichain/backends/impls/groq/__init__.py +0 -0
- ommlds/minichain/backends/impls/groq/chat.py +75 -0
- ommlds/minichain/backends/impls/groq/names.py +48 -0
- ommlds/minichain/backends/impls/groq/protocol.py +143 -0
- ommlds/minichain/backends/impls/groq/stream.py +125 -0
- ommlds/minichain/backends/impls/huggingface/repos.py +1 -5
- ommlds/minichain/backends/impls/llamacpp/chat.py +15 -3
- ommlds/minichain/backends/impls/llamacpp/completion.py +7 -3
- ommlds/minichain/backends/impls/llamacpp/stream.py +38 -19
- ommlds/minichain/backends/impls/mistral.py +9 -2
- ommlds/minichain/backends/impls/mlx/chat.py +100 -23
- ommlds/minichain/backends/impls/ollama/__init__.py +0 -0
- ommlds/minichain/backends/impls/ollama/chat.py +199 -0
- ommlds/minichain/backends/impls/openai/chat.py +14 -7
- ommlds/minichain/backends/impls/openai/completion.py +9 -2
- ommlds/minichain/backends/impls/openai/embedding.py +9 -2
- ommlds/minichain/backends/impls/openai/format.py +115 -109
- ommlds/minichain/backends/impls/openai/names.py +31 -5
- ommlds/minichain/backends/impls/openai/stream.py +33 -27
- ommlds/minichain/backends/impls/sentencepiece/tokens.py +9 -6
- ommlds/minichain/backends/impls/tavily.py +66 -0
- ommlds/minichain/backends/impls/tinygrad/chat.py +17 -14
- ommlds/minichain/backends/impls/tokenizers/tokens.py +9 -6
- ommlds/minichain/backends/impls/transformers/sentence.py +5 -2
- ommlds/minichain/backends/impls/transformers/tokens.py +10 -7
- ommlds/minichain/backends/impls/transformers/transformers.py +139 -20
- ommlds/minichain/backends/strings/parsing.py +1 -1
- ommlds/minichain/backends/strings/resolving.py +4 -1
- ommlds/minichain/chat/choices/stream/__init__.py +0 -0
- ommlds/minichain/chat/choices/stream/adapters.py +35 -0
- ommlds/minichain/chat/choices/stream/joining.py +31 -0
- ommlds/minichain/chat/choices/stream/services.py +45 -0
- ommlds/minichain/chat/choices/stream/types.py +43 -0
- ommlds/minichain/chat/stream/_marshal.py +4 -4
- ommlds/minichain/chat/stream/joining.py +85 -0
- ommlds/minichain/chat/stream/services.py +15 -15
- ommlds/minichain/chat/stream/types.py +24 -18
- ommlds/minichain/llms/types.py +4 -0
- ommlds/minichain/registries/globals.py +18 -4
- ommlds/minichain/resources.py +28 -3
- ommlds/minichain/search.py +1 -1
- ommlds/minichain/standard.py +8 -0
- ommlds/minichain/stream/services.py +19 -16
- ommlds/minichain/tools/reflect.py +5 -1
- ommlds/nanochat/LICENSE +21 -0
- ommlds/nanochat/__init__.py +0 -0
- ommlds/nanochat/rustbpe/LICENSE +21 -0
- ommlds/nanochat/tokenizers.py +406 -0
- ommlds/specs/__init__.py +0 -0
- ommlds/specs/mcp/__init__.py +0 -0
- ommlds/specs/mcp/_marshal.py +23 -0
- ommlds/specs/mcp/clients.py +146 -0
- ommlds/specs/mcp/protocol.py +371 -0
- ommlds/tools/git.py +13 -6
- ommlds/tools/ocr.py +1 -8
- ommlds/wiki/analyze.py +2 -2
- ommlds/wiki/text/mfh.py +1 -5
- ommlds/wiki/text/wtp.py +1 -3
- ommlds/wiki/utils/xml.py +5 -5
- {ommlds-0.0.0.dev456.dist-info → ommlds-0.0.0.dev485.dist-info}/METADATA +22 -19
- {ommlds-0.0.0.dev456.dist-info → ommlds-0.0.0.dev485.dist-info}/RECORD +198 -95
- ommlds/cli/backends/standard.py +0 -20
- ommlds/cli/sessions/chat/base.py +0 -42
- ommlds/cli/sessions/chat/code.py +0 -129
- ommlds/cli/sessions/chat/interactive.py +0 -71
- ommlds/cli/sessions/chat/printing.py +0 -97
- ommlds/cli/sessions/chat/prompt.py +0 -151
- ommlds/cli/sessions/chat/state.py +0 -110
- ommlds/cli/sessions/chat/tools.py +0 -100
- ommlds/cli/sessions/completion/completion.py +0 -44
- ommlds/cli/sessions/embedding/embedding.py +0 -42
- ommlds/cli/tools/config.py +0 -14
- ommlds/cli/tools/inject.py +0 -75
- ommlds/minichain/backends/impls/openai/format2.py +0 -210
- ommlds/minichain/chat/stream/adapters.py +0 -80
- /ommlds/{huggingface.py → backends/huggingface.py} +0 -0
- /ommlds/cli/{tools → content}/__init__.py +0 -0
- {ommlds-0.0.0.dev456.dist-info → ommlds-0.0.0.dev485.dist-info}/WHEEL +0 -0
- {ommlds-0.0.0.dev456.dist-info → ommlds-0.0.0.dev485.dist-info}/entry_points.txt +0 -0
- {ommlds-0.0.0.dev456.dist-info → ommlds-0.0.0.dev485.dist-info}/licenses/LICENSE +0 -0
- {ommlds-0.0.0.dev456.dist-info → ommlds-0.0.0.dev485.dist-info}/top_level.txt +0 -0
|
@@ -82,12 +82,16 @@ class CseSearchService:
|
|
|
82
82
|
self,
|
|
83
83
|
cse_id: str | None = None,
|
|
84
84
|
cse_api_key: str | None = None,
|
|
85
|
+
*,
|
|
86
|
+
http_client: http.AsyncHttpClient | None = None,
|
|
85
87
|
) -> None:
|
|
86
88
|
super().__init__()
|
|
87
89
|
|
|
88
90
|
self._cse_id = cse_id
|
|
89
91
|
self._cse_api_key = cse_api_key
|
|
90
92
|
|
|
93
|
+
self._http_client = http_client
|
|
94
|
+
|
|
91
95
|
async def invoke(
|
|
92
96
|
self,
|
|
93
97
|
request: SearchRequest,
|
|
@@ -97,8 +101,9 @@ class CseSearchService:
|
|
|
97
101
|
cx=check.non_empty_str(self._cse_id),
|
|
98
102
|
q=request.v,
|
|
99
103
|
))
|
|
100
|
-
resp = http.
|
|
104
|
+
resp = await http.async_request(
|
|
101
105
|
f'https://www.googleapis.com/customsearch/v1?{qs}',
|
|
106
|
+
client=self._http_client,
|
|
102
107
|
)
|
|
103
108
|
out = check.not_none(resp.data)
|
|
104
109
|
|
|
@@ -11,23 +11,28 @@ from omlish.http import all as http
|
|
|
11
11
|
from omlish.io.buffers import DelimitingBuffer
|
|
12
12
|
|
|
13
13
|
from .....backends.google.protocol import types as pt
|
|
14
|
+
from ....chat.choices.stream.services import ChatChoicesStreamRequest
|
|
15
|
+
from ....chat.choices.stream.services import ChatChoicesStreamResponse
|
|
16
|
+
from ....chat.choices.stream.services import static_check_is_chat_choices_stream_service
|
|
17
|
+
from ....chat.choices.stream.types import AiChoiceDeltas
|
|
18
|
+
from ....chat.choices.stream.types import AiChoicesDeltas
|
|
14
19
|
from ....chat.choices.types import ChatChoicesOutputs
|
|
15
20
|
from ....chat.messages import AiMessage
|
|
16
21
|
from ....chat.messages import Message
|
|
17
22
|
from ....chat.messages import SystemMessage
|
|
23
|
+
from ....chat.messages import ToolUseMessage
|
|
24
|
+
from ....chat.messages import ToolUseResultMessage
|
|
18
25
|
from ....chat.messages import UserMessage
|
|
19
|
-
from ....chat.stream.
|
|
20
|
-
from ....chat.stream.
|
|
21
|
-
from ....chat.
|
|
22
|
-
from ....chat.stream.types import AiChoiceDeltas
|
|
23
|
-
from ....chat.stream.types import AiChoicesDeltas
|
|
24
|
-
from ....chat.stream.types import ContentAiChoiceDelta
|
|
26
|
+
from ....chat.stream.types import ContentAiDelta
|
|
27
|
+
from ....chat.stream.types import ToolUseAiDelta
|
|
28
|
+
from ....chat.tools.types import Tool
|
|
25
29
|
from ....models.configs import ModelName
|
|
26
30
|
from ....resources import UseResources
|
|
27
31
|
from ....standard import ApiKey
|
|
28
32
|
from ....stream.services import StreamResponseSink
|
|
29
33
|
from ....stream.services import new_stream_response
|
|
30
34
|
from .names import MODEL_NAMES
|
|
35
|
+
from .tools import build_tool_spec_schema
|
|
31
36
|
|
|
32
37
|
|
|
33
38
|
##
|
|
@@ -41,32 +46,86 @@ from .names import MODEL_NAMES
|
|
|
41
46
|
class GoogleChatChoicesStreamService:
|
|
42
47
|
DEFAULT_MODEL_NAME: ta.ClassVar[ModelName] = ModelName(check.not_none(MODEL_NAMES.default))
|
|
43
48
|
|
|
44
|
-
def __init__(
|
|
49
|
+
def __init__(
|
|
50
|
+
self,
|
|
51
|
+
*configs: ApiKey | ModelName,
|
|
52
|
+
http_client: http.AsyncHttpClient | None = None,
|
|
53
|
+
) -> None:
|
|
45
54
|
super().__init__()
|
|
46
55
|
|
|
56
|
+
self._http_client = http_client
|
|
57
|
+
|
|
47
58
|
with tv.consume(*configs) as cc:
|
|
48
59
|
self._model_name = cc.pop(self.DEFAULT_MODEL_NAME)
|
|
49
60
|
self._api_key = ApiKey.pop_secret(cc, env='GEMINI_API_KEY')
|
|
50
61
|
|
|
51
|
-
def
|
|
52
|
-
|
|
53
|
-
|
|
62
|
+
def _make_str_content(
|
|
63
|
+
self,
|
|
64
|
+
s: str | None,
|
|
65
|
+
*,
|
|
66
|
+
role: pt.ContentRole | None = None,
|
|
67
|
+
) -> pt.Content | None:
|
|
68
|
+
if s is None:
|
|
69
|
+
return None
|
|
70
|
+
|
|
71
|
+
return pt.Content(
|
|
72
|
+
parts=[pt.Part(
|
|
73
|
+
text=check.not_none(s),
|
|
74
|
+
)],
|
|
75
|
+
role=role,
|
|
76
|
+
)
|
|
54
77
|
|
|
55
|
-
|
|
56
|
-
|
|
78
|
+
def _make_msg_content(self, m: Message) -> pt.Content:
|
|
79
|
+
if isinstance(m, (AiMessage, SystemMessage, UserMessage)):
|
|
80
|
+
return check.not_none(self._make_str_content(
|
|
81
|
+
check.isinstance(m.c, str),
|
|
82
|
+
role=self.ROLES_MAP[type(m)],
|
|
83
|
+
))
|
|
84
|
+
|
|
85
|
+
elif isinstance(m, ToolUseResultMessage):
|
|
86
|
+
tr_resp_val: pt.Value
|
|
87
|
+
if m.tur.c is None:
|
|
88
|
+
tr_resp_val = pt.NullValue() # type: ignore[unreachable]
|
|
89
|
+
elif isinstance(m.tur.c, str):
|
|
90
|
+
tr_resp_val = pt.StringValue(m.tur.c)
|
|
91
|
+
else:
|
|
92
|
+
raise TypeError(m.tur.c)
|
|
93
|
+
return pt.Content(
|
|
94
|
+
parts=[pt.Part(
|
|
95
|
+
function_response=pt.FunctionResponse(
|
|
96
|
+
id=m.tur.id,
|
|
97
|
+
name=m.tur.name,
|
|
98
|
+
response={
|
|
99
|
+
'value': tr_resp_val,
|
|
100
|
+
},
|
|
101
|
+
),
|
|
102
|
+
)],
|
|
103
|
+
)
|
|
104
|
+
|
|
105
|
+
elif isinstance(m, ToolUseMessage):
|
|
106
|
+
return pt.Content(
|
|
107
|
+
parts=[pt.Part(
|
|
108
|
+
function_call=pt.FunctionCall(
|
|
109
|
+
id=m.tu.id,
|
|
110
|
+
name=m.tu.name,
|
|
111
|
+
args=m.tu.args,
|
|
112
|
+
),
|
|
113
|
+
)],
|
|
114
|
+
role='model',
|
|
115
|
+
)
|
|
57
116
|
|
|
58
117
|
else:
|
|
59
118
|
raise TypeError(m)
|
|
60
119
|
|
|
61
120
|
BASE_URL: ta.ClassVar[str] = 'https://generativelanguage.googleapis.com/v1beta/models'
|
|
62
121
|
|
|
63
|
-
ROLES_MAP: ta.ClassVar[ta.Mapping[type[Message],
|
|
64
|
-
SystemMessage:
|
|
122
|
+
ROLES_MAP: ta.ClassVar[ta.Mapping[type[Message], pt.ContentRole | None]] = { # noqa
|
|
123
|
+
SystemMessage: None,
|
|
65
124
|
UserMessage: 'user',
|
|
66
|
-
AiMessage: '
|
|
125
|
+
AiMessage: 'model',
|
|
67
126
|
}
|
|
68
127
|
|
|
69
|
-
READ_CHUNK_SIZE =
|
|
128
|
+
READ_CHUNK_SIZE: ta.ClassVar[int] = -1
|
|
70
129
|
|
|
71
130
|
async def invoke(
|
|
72
131
|
self,
|
|
@@ -74,16 +133,28 @@ class GoogleChatChoicesStreamService:
|
|
|
74
133
|
) -> ChatChoicesStreamResponse:
|
|
75
134
|
key = check.not_none(self._api_key).reveal()
|
|
76
135
|
|
|
136
|
+
msgs = list(request.v)
|
|
137
|
+
|
|
138
|
+
system_inst: pt.Content | None = None
|
|
139
|
+
if msgs and isinstance(m0 := msgs[0], SystemMessage):
|
|
140
|
+
system_inst = self._make_msg_content(m0)
|
|
141
|
+
msgs.pop(0)
|
|
142
|
+
|
|
143
|
+
g_tools: list[pt.Tool] = []
|
|
144
|
+
with tv.TypedValues(*request.options).consume() as oc:
|
|
145
|
+
t: Tool
|
|
146
|
+
for t in oc.pop(Tool, []):
|
|
147
|
+
g_tools.append(pt.Tool(
|
|
148
|
+
function_declarations=[build_tool_spec_schema(t.spec)],
|
|
149
|
+
))
|
|
150
|
+
|
|
77
151
|
g_req = pt.GenerateContentRequest(
|
|
78
152
|
contents=[
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
text=check.not_none(self._get_msg_content(m)),
|
|
82
|
-
)],
|
|
83
|
-
role=self.ROLES_MAP[type(m)], # type: ignore[arg-type]
|
|
84
|
-
)
|
|
85
|
-
for m in request.v
|
|
153
|
+
self._make_msg_content(m)
|
|
154
|
+
for m in msgs
|
|
86
155
|
],
|
|
156
|
+
tools=g_tools or None,
|
|
157
|
+
system_instruction=system_inst,
|
|
87
158
|
)
|
|
88
159
|
|
|
89
160
|
req_dct = msh.marshal(g_req)
|
|
@@ -98,30 +169,49 @@ class GoogleChatChoicesStreamService:
|
|
|
98
169
|
)
|
|
99
170
|
|
|
100
171
|
async with UseResources.or_new(request.options) as rs:
|
|
101
|
-
http_client = rs.
|
|
102
|
-
http_response = rs.
|
|
172
|
+
http_client = await rs.enter_async_context(http.manage_async_client(self._http_client))
|
|
173
|
+
http_response = await rs.enter_async_context(await http_client.stream_request(http_request))
|
|
103
174
|
|
|
104
175
|
async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
|
|
105
176
|
db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])
|
|
106
177
|
while True:
|
|
107
|
-
|
|
108
|
-
b = http_response.stream.read1(self.READ_CHUNK_SIZE) # type: ignore[attr-defined]
|
|
178
|
+
b = await http_response.stream.read1(self.READ_CHUNK_SIZE)
|
|
109
179
|
for bl in db.feed(b):
|
|
110
180
|
if isinstance(bl, DelimitingBuffer.Incomplete):
|
|
111
181
|
# FIXME: handle
|
|
112
182
|
return []
|
|
183
|
+
|
|
113
184
|
l = bl.decode('utf-8')
|
|
114
185
|
if not l:
|
|
115
186
|
continue
|
|
187
|
+
|
|
116
188
|
if l.startswith('data: '):
|
|
117
189
|
gcr = msh.unmarshal(json.loads(l[6:]), pt.GenerateContentResponse) # noqa
|
|
118
190
|
cnd = check.single(check.not_none(gcr.candidates))
|
|
191
|
+
|
|
119
192
|
for p in check.not_none(cnd.content).parts or []:
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
193
|
+
if (txt := p.text) is not None:
|
|
194
|
+
check.none(p.function_call)
|
|
195
|
+
await sink.emit(AiChoicesDeltas([
|
|
196
|
+
AiChoiceDeltas([
|
|
197
|
+
ContentAiDelta(check.not_none(txt)),
|
|
198
|
+
]),
|
|
199
|
+
]))
|
|
200
|
+
|
|
201
|
+
elif (fc := p.function_call) is not None:
|
|
202
|
+
check.none(p.text)
|
|
203
|
+
await sink.emit(AiChoicesDeltas([
|
|
204
|
+
AiChoiceDeltas([
|
|
205
|
+
ToolUseAiDelta(
|
|
206
|
+
id=fc.id,
|
|
207
|
+
name=fc.name,
|
|
208
|
+
args=fc.args,
|
|
209
|
+
),
|
|
210
|
+
]),
|
|
211
|
+
]))
|
|
212
|
+
|
|
213
|
+
else:
|
|
214
|
+
raise ValueError(p)
|
|
125
215
|
|
|
126
216
|
if not b:
|
|
127
217
|
return []
|
|
File without changes
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import typing as ta
|
|
2
|
+
|
|
3
|
+
from omlish import check
|
|
4
|
+
from omlish import marshal as msh
|
|
5
|
+
from omlish import typedvalues as tv
|
|
6
|
+
from omlish.formats import json
|
|
7
|
+
from omlish.http import all as http
|
|
8
|
+
|
|
9
|
+
from .....backends.groq import protocol as pt
|
|
10
|
+
from ....chat.choices.services import ChatChoicesRequest
|
|
11
|
+
from ....chat.choices.services import ChatChoicesResponse
|
|
12
|
+
from ....chat.choices.services import static_check_is_chat_choices_service
|
|
13
|
+
from ....chat.tools.types import Tool
|
|
14
|
+
from ....models.configs import ModelName
|
|
15
|
+
from ....standard import ApiKey
|
|
16
|
+
from ....standard import DefaultOptions
|
|
17
|
+
from .names import MODEL_NAMES
|
|
18
|
+
from .protocol import build_gq_request_messages
|
|
19
|
+
from .protocol import build_gq_request_tool
|
|
20
|
+
from .protocol import build_mc_choices_response
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
##
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
|
|
27
|
+
# name='groq',
|
|
28
|
+
# type='ChatChoicesService',
|
|
29
|
+
# )
|
|
30
|
+
@static_check_is_chat_choices_service
|
|
31
|
+
class GroqChatChoicesService:
|
|
32
|
+
DEFAULT_MODEL_NAME: ta.ClassVar[ModelName] = ModelName(check.not_none(MODEL_NAMES.default))
|
|
33
|
+
|
|
34
|
+
def __init__(
|
|
35
|
+
self,
|
|
36
|
+
*configs: ApiKey | ModelName | DefaultOptions,
|
|
37
|
+
http_client: http.AsyncHttpClient | None = None,
|
|
38
|
+
) -> None:
|
|
39
|
+
super().__init__()
|
|
40
|
+
|
|
41
|
+
self._http_client = http_client
|
|
42
|
+
|
|
43
|
+
with tv.consume(*configs) as cc:
|
|
44
|
+
self._model_name = cc.pop(self.DEFAULT_MODEL_NAME)
|
|
45
|
+
self._api_key = ApiKey.pop_secret(cc, env='GROQ_API_KEY')
|
|
46
|
+
self._default_options: tv.TypedValues = DefaultOptions.pop(cc)
|
|
47
|
+
|
|
48
|
+
async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
|
|
49
|
+
tools: list[pt.ChatCompletionRequest.Tool] = []
|
|
50
|
+
with tv.TypedValues(*request.options).consume() as oc:
|
|
51
|
+
t: Tool
|
|
52
|
+
for t in oc.pop(Tool, []):
|
|
53
|
+
tools.append(build_gq_request_tool(t))
|
|
54
|
+
|
|
55
|
+
gq_request = pt.ChatCompletionRequest(
|
|
56
|
+
messages=build_gq_request_messages(request.v),
|
|
57
|
+
model=MODEL_NAMES.resolve(self._model_name.v),
|
|
58
|
+
tools=tools or None,
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
raw_request = msh.marshal(gq_request)
|
|
62
|
+
|
|
63
|
+
http_response = await http.async_request(
|
|
64
|
+
'https://api.groq.com/openai/v1/chat/completions',
|
|
65
|
+
headers={
|
|
66
|
+
http.consts.HEADER_CONTENT_TYPE: http.consts.CONTENT_TYPE_JSON,
|
|
67
|
+
http.consts.HEADER_AUTH: http.consts.format_bearer_auth_header(check.not_none(self._api_key).reveal()),
|
|
68
|
+
},
|
|
69
|
+
data=json.dumps(raw_request).encode('utf-8'),
|
|
70
|
+
client=self._http_client,
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
raw_response = json.loads(check.not_none(http_response.data).decode('utf-8'))
|
|
74
|
+
|
|
75
|
+
return build_mc_choices_response(msh.unmarshal(raw_response, pt.ChatCompletionResponse))
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""
|
|
2
|
+
https://console.groq.com/docs/models
|
|
3
|
+
|
|
4
|
+
curl -X GET "https://api.groq.com/openai/v1/models" \
|
|
5
|
+
-H "Authorization: Bearer $GROQ_API_KEY" \
|
|
6
|
+
-H "Content-Type: application/json"
|
|
7
|
+
|
|
8
|
+
"compound-beta",
|
|
9
|
+
"compound-beta-mini",
|
|
10
|
+
"gemma2-9b-it",
|
|
11
|
+
"llama-3.1-8b-instant",
|
|
12
|
+
"llama-3.3-70b-versatile",
|
|
13
|
+
"meta-llama/llama-4-maverick-17b-128e-instruct",
|
|
14
|
+
"meta-llama/llama-4-scout-17b-16e-instruct",
|
|
15
|
+
"meta-llama/llama-guard-4-12b",
|
|
16
|
+
"moonshotai/kimi-k2-instruct",
|
|
17
|
+
"openai/gpt-oss-120b",
|
|
18
|
+
"openai/gpt-oss-20b",
|
|
19
|
+
"qwen/qwen3-32b",
|
|
20
|
+
"""
|
|
21
|
+
from ....models.names import ModelNameCollection
|
|
22
|
+
from ...strings.manifests import BackendStringsManifest
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
##
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
MODEL_NAMES = ModelNameCollection(
|
|
29
|
+
default='gpt-oss-120b',
|
|
30
|
+
aliases={
|
|
31
|
+
'gpt-oss-120b': 'openai/gpt-oss-120b',
|
|
32
|
+
'openai/gpt-oss-120b': None,
|
|
33
|
+
|
|
34
|
+
'gpt-oss-20b': 'openai/gpt-oss-20b',
|
|
35
|
+
'openai/gpt-oss-20b': None,
|
|
36
|
+
},
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# @omlish-manifest
|
|
41
|
+
_BACKEND_STRINGS_MANIFEST = BackendStringsManifest(
|
|
42
|
+
[
|
|
43
|
+
'ChatChoicesService',
|
|
44
|
+
'ChatChoicesStreamService',
|
|
45
|
+
],
|
|
46
|
+
'groq',
|
|
47
|
+
model_names=MODEL_NAMES,
|
|
48
|
+
)
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
import itertools
|
|
2
|
+
|
|
3
|
+
from omlish import check
|
|
4
|
+
from omlish.formats import json
|
|
5
|
+
|
|
6
|
+
from .....backends.groq import protocol as pt
|
|
7
|
+
from ....chat.choices.services import ChatChoicesResponse
|
|
8
|
+
from ....chat.choices.stream.types import AiChoiceDeltas
|
|
9
|
+
from ....chat.choices.types import AiChoice
|
|
10
|
+
from ....chat.messages import AiMessage
|
|
11
|
+
from ....chat.messages import AnyAiMessage
|
|
12
|
+
from ....chat.messages import Chat
|
|
13
|
+
from ....chat.messages import SystemMessage
|
|
14
|
+
from ....chat.messages import ToolUseMessage
|
|
15
|
+
from ....chat.messages import ToolUseResultMessage
|
|
16
|
+
from ....chat.messages import UserMessage
|
|
17
|
+
from ....chat.stream.types import AiDelta
|
|
18
|
+
from ....chat.stream.types import ContentAiDelta
|
|
19
|
+
from ....chat.stream.types import ToolUseAiDelta
|
|
20
|
+
from ....chat.tools.types import Tool
|
|
21
|
+
from ....content.prepare import prepare_content_str
|
|
22
|
+
from ....tools.jsonschema import build_tool_spec_params_json_schema
|
|
23
|
+
from ....tools.types import ToolUse
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
##
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def build_gq_request_messages(chat: Chat) -> list[pt.ChatCompletionRequest.Message]:
|
|
30
|
+
gq_msgs: list[pt.ChatCompletionRequest.Message] = []
|
|
31
|
+
|
|
32
|
+
for _, g in itertools.groupby(chat, lambda mc_m: isinstance(mc_m, AnyAiMessage)):
|
|
33
|
+
mc_msgs = list(g)
|
|
34
|
+
|
|
35
|
+
if isinstance(mc_msgs[0], AnyAiMessage):
|
|
36
|
+
tups: list[tuple[AiMessage | None, list[ToolUseMessage]]] = []
|
|
37
|
+
for mc_msg in mc_msgs:
|
|
38
|
+
if isinstance(mc_msg, AiMessage):
|
|
39
|
+
tups.append((mc_msg, []))
|
|
40
|
+
|
|
41
|
+
elif isinstance(mc_msg, ToolUseMessage):
|
|
42
|
+
if not tups:
|
|
43
|
+
tups.append((None, []))
|
|
44
|
+
tups[-1][1].append(mc_msg)
|
|
45
|
+
|
|
46
|
+
else:
|
|
47
|
+
raise TypeError(mc_msg)
|
|
48
|
+
|
|
49
|
+
for mc_ai_msg, mc_tu_msgs in tups:
|
|
50
|
+
gq_msgs.append(pt.ChatCompletionRequest.AssistantMessage(
|
|
51
|
+
content=check.isinstance(mc_ai_msg.c, str) if mc_ai_msg is not None else None,
|
|
52
|
+
tool_calls=[
|
|
53
|
+
pt.ChatCompletionRequest.AssistantMessage.ToolCall(
|
|
54
|
+
function=pt.ChatCompletionRequest.AssistantMessage.ToolCall.Function(
|
|
55
|
+
name=mc_tu_msg.tu.name,
|
|
56
|
+
arguments=check.not_none(mc_tu_msg.tu.raw_args),
|
|
57
|
+
),
|
|
58
|
+
id=check.not_none(mc_tu_msg.tu.id),
|
|
59
|
+
)
|
|
60
|
+
for mc_tu_msg in mc_tu_msgs
|
|
61
|
+
] if mc_tu_msgs else None,
|
|
62
|
+
))
|
|
63
|
+
|
|
64
|
+
else:
|
|
65
|
+
for mc_msg in mc_msgs:
|
|
66
|
+
if isinstance(mc_msg, SystemMessage):
|
|
67
|
+
gq_msgs.append(pt.ChatCompletionRequest.SystemMessage(
|
|
68
|
+
content=check.isinstance(mc_msg.c, str),
|
|
69
|
+
))
|
|
70
|
+
|
|
71
|
+
elif isinstance(mc_msg, UserMessage):
|
|
72
|
+
gq_msgs.append(pt.ChatCompletionRequest.UserMessage(
|
|
73
|
+
content=check.isinstance(mc_msg.c, str),
|
|
74
|
+
))
|
|
75
|
+
|
|
76
|
+
elif isinstance(mc_msg, ToolUseResultMessage):
|
|
77
|
+
gq_msgs.append(pt.ChatCompletionRequest.ToolMessage(
|
|
78
|
+
tool_call_id=check.not_none(mc_msg.tur.id),
|
|
79
|
+
content=check.isinstance(mc_msg.tur.c, str),
|
|
80
|
+
))
|
|
81
|
+
|
|
82
|
+
else:
|
|
83
|
+
raise TypeError(mc_msg)
|
|
84
|
+
|
|
85
|
+
return gq_msgs
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def build_gq_request_tool(t: Tool) -> pt.ChatCompletionRequest.Tool:
|
|
89
|
+
return pt.ChatCompletionRequest.Tool(
|
|
90
|
+
function=pt.ChatCompletionRequest.Tool.Function(
|
|
91
|
+
name=check.not_none(t.spec.name),
|
|
92
|
+
description=prepare_content_str(t.spec.desc),
|
|
93
|
+
parameters=build_tool_spec_params_json_schema(t.spec),
|
|
94
|
+
),
|
|
95
|
+
)
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def build_mc_choices_response(gq_resp: pt.ChatCompletionResponse) -> ChatChoicesResponse:
|
|
99
|
+
def build_choice(gq_choice: pt.ChatCompletionResponse.Choice) -> AiChoice:
|
|
100
|
+
gq_msg = gq_choice.message
|
|
101
|
+
|
|
102
|
+
lst: list[AnyAiMessage] = []
|
|
103
|
+
|
|
104
|
+
if gq_msg.content is not None:
|
|
105
|
+
lst.append(AiMessage(
|
|
106
|
+
check.isinstance(gq_msg.content, str),
|
|
107
|
+
))
|
|
108
|
+
|
|
109
|
+
for gq_tc in gq_msg.tool_calls or []:
|
|
110
|
+
lst.append(ToolUseMessage(ToolUse(
|
|
111
|
+
id=gq_tc.id,
|
|
112
|
+
name=gq_tc.function.name,
|
|
113
|
+
args=json.loads(gq_tc.function.arguments or '{}'),
|
|
114
|
+
raw_args=gq_tc.function.arguments,
|
|
115
|
+
)))
|
|
116
|
+
|
|
117
|
+
return AiChoice(lst)
|
|
118
|
+
|
|
119
|
+
return ChatChoicesResponse(list(map(build_choice, gq_resp.choices)))
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def build_mc_ai_choice_deltas(delta: pt.ChatCompletionChunk.Choice.Delta) -> AiChoiceDeltas:
|
|
123
|
+
if delta.role in (None, 'assistant'):
|
|
124
|
+
lst: list[AiDelta] = []
|
|
125
|
+
|
|
126
|
+
if delta.content is not None:
|
|
127
|
+
lst.append(ContentAiDelta(delta.content))
|
|
128
|
+
|
|
129
|
+
for tc in delta.tool_calls or []:
|
|
130
|
+
tc_fn = check.not_none(tc.function)
|
|
131
|
+
lst.append(ToolUseAiDelta(
|
|
132
|
+
id=tc.id,
|
|
133
|
+
name=check.not_none(tc_fn.name),
|
|
134
|
+
args=json.loads(tc_fn.arguments or '{}'),
|
|
135
|
+
))
|
|
136
|
+
|
|
137
|
+
return AiChoiceDeltas(lst)
|
|
138
|
+
|
|
139
|
+
elif delta.channel in ('analysis', 'commentary'):
|
|
140
|
+
return AiChoiceDeltas([])
|
|
141
|
+
|
|
142
|
+
else:
|
|
143
|
+
raise ValueError(delta)
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
import typing as ta
|
|
2
|
+
|
|
3
|
+
from omlish import check
|
|
4
|
+
from omlish import marshal as msh
|
|
5
|
+
from omlish import typedvalues as tv
|
|
6
|
+
from omlish.formats import json
|
|
7
|
+
from omlish.http import all as http
|
|
8
|
+
from omlish.http import sse
|
|
9
|
+
from omlish.io.buffers import DelimitingBuffer
|
|
10
|
+
|
|
11
|
+
from .....backends.groq import protocol as pt
|
|
12
|
+
from ....chat.choices.services import ChatChoicesOutputs
|
|
13
|
+
from ....chat.choices.stream.services import ChatChoicesStreamRequest
|
|
14
|
+
from ....chat.choices.stream.services import ChatChoicesStreamResponse
|
|
15
|
+
from ....chat.choices.stream.services import static_check_is_chat_choices_stream_service
|
|
16
|
+
from ....chat.choices.stream.types import AiChoicesDeltas
|
|
17
|
+
from ....chat.tools.types import Tool
|
|
18
|
+
from ....configs import Config
|
|
19
|
+
from ....resources import UseResources
|
|
20
|
+
from ....standard import ApiKey
|
|
21
|
+
from ....stream.services import StreamResponseSink
|
|
22
|
+
from ....stream.services import new_stream_response
|
|
23
|
+
from .chat import GroqChatChoicesService
|
|
24
|
+
from .names import MODEL_NAMES
|
|
25
|
+
from .protocol import build_gq_request_messages
|
|
26
|
+
from .protocol import build_gq_request_tool
|
|
27
|
+
from .protocol import build_mc_ai_choice_deltas
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
##
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
#     name='groq',
#     type='ChatChoicesStreamService',
# )
@static_check_is_chat_choices_stream_service
class GroqChatChoicesStreamService:
    """
    Streaming chat-choices service backed by Groq's OpenAI-compatible chat completions endpoint.

    Marshals the request to Groq's wire protocol, POSTs it with `stream=True`, and decodes the
    resulting server-sent-event (SSE) stream into `AiChoicesDeltas` emitted through a stream sink.
    """

    def __init__(
            self,
            *configs: Config,
            http_client: http.AsyncHttpClient | None = None,
    ) -> None:
        """
        Args:
            *configs: Typed-value configs; consumed for the default model name and the API key
                (the latter falling back to the GROQ_API_KEY environment variable).
            http_client: Optional externally-managed async HTTP client; when None, a client is
                created and managed per-invocation via `http.manage_async_client`.
        """
        super().__init__()

        self._http_client = http_client

        # `tv.consume` raises if any config is left unconsumed, so unknown configs are rejected here.
        with tv.consume(*configs) as cc:
            self._model_name = cc.pop(GroqChatChoicesService.DEFAULT_MODEL_NAME)
            self._api_key = ApiKey.pop_secret(cc, env='GROQ_API_KEY')

    # Chunk size passed to `stream.read1` below; -1 presumably means "whatever is available" —
    # NOTE(review): confirm against the omlish http stream contract.
    READ_CHUNK_SIZE: ta.ClassVar[int] = -1

    async def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
        """
        Issue a streaming chat completion request and return a stream response.

        Builds the Groq `ChatCompletionRequest` (including any `Tool` options on the request),
        opens the HTTP stream inside a resource scope, and returns a stream response whose
        inner coroutine parses SSE lines into `AiChoicesDeltas` and emits them on the sink.
        """
        # Collect tool declarations from the request's typed-value options.
        tools: list[pt.ChatCompletionRequest.Tool] = []
        with tv.TypedValues(*request.options).consume() as oc:
            t: Tool
            for t in oc.pop(Tool, []):
                tools.append(build_gq_request_tool(t))

        gq_request = pt.ChatCompletionRequest(
            messages=build_gq_request_messages(request.v),
            model=MODEL_NAMES.resolve(self._model_name.v),
            tools=tools or None,  # omit the field entirely when no tools were given
            stream=True,
        )

        raw_request = msh.marshal(gq_request)

        http_request = http.HttpRequest(
            'https://api.groq.com/openai/v1/chat/completions',
            headers={
                http.consts.HEADER_CONTENT_TYPE: http.consts.CONTENT_TYPE_JSON,
                http.consts.HEADER_AUTH: http.consts.format_bearer_auth_header(check.not_none(self._api_key).reveal()),
            },
            data=json.dumps(raw_request).encode('utf-8'),
        )

        # Resources (client, response stream) are owned by `rs`, which outlives this call as part
        # of the returned stream response.
        async with UseResources.or_new(request.options) as rs:
            http_client = await rs.enter_async_context(http.manage_async_client(self._http_client))
            http_response = await rs.enter_async_context(await http_client.stream_request(http_request))

            async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs]:
                # Split the byte stream on any line ending, then feed complete lines to the SSE decoder.
                db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])
                sd = sse.SseDecoder()
                while True:
                    b = await http_response.stream.read1(self.READ_CHUNK_SIZE)
                    for l in db.feed(b):
                        if isinstance(l, DelimitingBuffer.Incomplete):
                            # FIXME: handle
                            return []

                        # FIXME: https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming
                        for so in sd.process_line(l):
                            if isinstance(so, sse.SseEvent) and so.type == b'message':
                                ss = so.data.decode('utf-8')
                                if ss == '[DONE]':
                                    # OpenAI-style end-of-stream sentinel.
                                    return []

                                sj = json.loads(ss)  # ChatCompletionChunk

                                check.state(sj['object'] == 'chat.completion.chunk')

                                ccc = msh.unmarshal(sj, pt.ChatCompletionChunk)

                                # FIXME: stop reason
                                if not ccc.choices:
                                    continue

                                # Either all choices have finished or none have; mixed states are invalid.
                                if any(choice.finish_reason for choice in ccc.choices):
                                    check.state(all(choice.finish_reason for choice in ccc.choices))
                                    # NOTE(review): this only breaks the innermost `for so` loop —
                                    # the outer read loop continues; confirm that is intended.
                                    break

                                await sink.emit(AiChoicesDeltas([
                                    build_mc_ai_choice_deltas(choice.delta)
                                    for choice in ccc.choices
                                ]))

                    if not b:
                        # EOF: empty read means the server closed the stream.
                        return []

                # raw_response = json.loads(check.not_none(http_response.data).decode('utf-8'))
                # return rh.build_response(raw_response)

            return await new_stream_response(rs, inner)
|
|
@@ -3,8 +3,6 @@ TODO:
|
|
|
3
3
|
- local-only check first
|
|
4
4
|
- cat ~/.cache/.../models/.../refs/main -> c5bfd839cd4cda0e5a39a97e00218d9c56e468af
|
|
5
5
|
"""
|
|
6
|
-
import typing as ta
|
|
7
|
-
|
|
8
6
|
from omlish import lang
|
|
9
7
|
|
|
10
8
|
from ....models.configs import ModelRepo
|
|
@@ -12,10 +10,8 @@ from ....models.repos.resolving import ModelRepoResolver
|
|
|
12
10
|
from ....models.repos.resolving import ResolvedModelRepo
|
|
13
11
|
|
|
14
12
|
|
|
15
|
-
|
|
13
|
+
with lang.auto_proxy_import(globals()):
|
|
16
14
|
import huggingface_hub as hf
|
|
17
|
-
else:
|
|
18
|
-
hf = lang.proxy_import('huggingface_hub')
|
|
19
15
|
|
|
20
16
|
|
|
21
17
|
##
|