ommlds 0.0.0.dev426__py3-none-any.whl → 0.0.0.dev485__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (295) hide show
  1. ommlds/.omlish-manifests.json +336 -39
  2. ommlds/__about__.py +16 -10
  3. ommlds/_hacks/__init__.py +4 -0
  4. ommlds/_hacks/funcs.py +110 -0
  5. ommlds/_hacks/names.py +158 -0
  6. ommlds/_hacks/params.py +73 -0
  7. ommlds/_hacks/patches.py +0 -3
  8. ommlds/backends/anthropic/protocol/__init__.py +13 -1
  9. ommlds/backends/anthropic/protocol/_dataclasses.py +1625 -0
  10. ommlds/backends/anthropic/protocol/_marshal.py +2 -2
  11. ommlds/backends/anthropic/protocol/sse/_marshal.py +1 -1
  12. ommlds/backends/anthropic/protocol/sse/assemble.py +23 -7
  13. ommlds/backends/anthropic/protocol/sse/events.py +13 -0
  14. ommlds/backends/anthropic/protocol/types.py +40 -8
  15. ommlds/backends/google/protocol/__init__.py +16 -0
  16. ommlds/backends/google/protocol/_dataclasses.py +5997 -0
  17. ommlds/backends/google/protocol/_marshal.py +16 -0
  18. ommlds/backends/google/protocol/types.py +626 -0
  19. ommlds/backends/groq/__init__.py +7 -0
  20. ommlds/backends/groq/_dataclasses.py +3901 -0
  21. ommlds/backends/groq/_marshal.py +23 -0
  22. ommlds/backends/groq/protocol.py +249 -0
  23. ommlds/backends/llamacpp/logging.py +4 -1
  24. ommlds/backends/mlx/caching.py +7 -3
  25. ommlds/backends/mlx/cli.py +10 -7
  26. ommlds/backends/mlx/generation.py +19 -17
  27. ommlds/backends/mlx/limits.py +10 -6
  28. ommlds/backends/mlx/loading.py +65 -5
  29. ommlds/backends/ollama/__init__.py +7 -0
  30. ommlds/backends/ollama/_dataclasses.py +3458 -0
  31. ommlds/backends/ollama/protocol.py +170 -0
  32. ommlds/backends/openai/protocol/__init__.py +24 -29
  33. ommlds/backends/openai/protocol/_common.py +18 -0
  34. ommlds/backends/openai/protocol/_dataclasses.py +7708 -0
  35. ommlds/backends/openai/protocol/_marshal.py +27 -0
  36. ommlds/backends/openai/protocol/chatcompletion/chunk.py +58 -31
  37. ommlds/backends/openai/protocol/chatcompletion/contentpart.py +49 -44
  38. ommlds/backends/openai/protocol/chatcompletion/message.py +55 -43
  39. ommlds/backends/openai/protocol/chatcompletion/request.py +114 -66
  40. ommlds/backends/openai/protocol/chatcompletion/response.py +71 -45
  41. ommlds/backends/openai/protocol/chatcompletion/responseformat.py +27 -20
  42. ommlds/backends/openai/protocol/chatcompletion/tokenlogprob.py +16 -7
  43. ommlds/backends/openai/protocol/completionusage.py +24 -15
  44. ommlds/backends/tavily/__init__.py +7 -0
  45. ommlds/backends/tavily/_dataclasses.py +1734 -0
  46. ommlds/backends/tavily/protocol.py +301 -0
  47. ommlds/backends/tinygrad/models/llama3/__init__.py +22 -14
  48. ommlds/backends/transformers/__init__.py +14 -0
  49. ommlds/backends/transformers/filecache.py +109 -0
  50. ommlds/backends/transformers/streamers.py +73 -0
  51. ommlds/cli/__init__.py +7 -0
  52. ommlds/cli/_dataclasses.py +2562 -0
  53. ommlds/cli/asyncs.py +30 -0
  54. ommlds/cli/backends/catalog.py +93 -0
  55. ommlds/cli/backends/configs.py +9 -0
  56. ommlds/cli/backends/inject.py +31 -36
  57. ommlds/cli/backends/injection.py +16 -0
  58. ommlds/cli/backends/types.py +46 -0
  59. ommlds/cli/content/messages.py +34 -0
  60. ommlds/cli/content/strings.py +42 -0
  61. ommlds/cli/inject.py +17 -32
  62. ommlds/cli/inputs/__init__.py +0 -0
  63. ommlds/cli/inputs/asyncs.py +32 -0
  64. ommlds/cli/inputs/sync.py +75 -0
  65. ommlds/cli/main.py +270 -110
  66. ommlds/cli/rendering/__init__.py +0 -0
  67. ommlds/cli/rendering/configs.py +9 -0
  68. ommlds/cli/rendering/inject.py +31 -0
  69. ommlds/cli/rendering/markdown.py +52 -0
  70. ommlds/cli/rendering/raw.py +73 -0
  71. ommlds/cli/rendering/types.py +21 -0
  72. ommlds/cli/secrets.py +21 -0
  73. ommlds/cli/sessions/base.py +1 -1
  74. ommlds/cli/sessions/chat/chat/__init__.py +0 -0
  75. ommlds/cli/sessions/chat/chat/ai/__init__.py +0 -0
  76. ommlds/cli/sessions/chat/chat/ai/configs.py +11 -0
  77. ommlds/cli/sessions/chat/chat/ai/inject.py +74 -0
  78. ommlds/cli/sessions/chat/chat/ai/injection.py +14 -0
  79. ommlds/cli/sessions/chat/chat/ai/rendering.py +70 -0
  80. ommlds/cli/sessions/chat/chat/ai/services.py +79 -0
  81. ommlds/cli/sessions/chat/chat/ai/tools.py +44 -0
  82. ommlds/cli/sessions/chat/chat/ai/types.py +28 -0
  83. ommlds/cli/sessions/chat/chat/state/__init__.py +0 -0
  84. ommlds/cli/sessions/chat/chat/state/configs.py +11 -0
  85. ommlds/cli/sessions/chat/chat/state/inject.py +36 -0
  86. ommlds/cli/sessions/chat/chat/state/inmemory.py +33 -0
  87. ommlds/cli/sessions/chat/chat/state/storage.py +52 -0
  88. ommlds/cli/sessions/chat/chat/state/types.py +38 -0
  89. ommlds/cli/sessions/chat/chat/user/__init__.py +0 -0
  90. ommlds/cli/sessions/chat/chat/user/configs.py +17 -0
  91. ommlds/cli/sessions/chat/chat/user/inject.py +62 -0
  92. ommlds/cli/sessions/chat/chat/user/interactive.py +31 -0
  93. ommlds/cli/sessions/chat/chat/user/oneshot.py +25 -0
  94. ommlds/cli/sessions/chat/chat/user/types.py +15 -0
  95. ommlds/cli/sessions/chat/configs.py +27 -0
  96. ommlds/cli/sessions/chat/driver.py +43 -0
  97. ommlds/cli/sessions/chat/inject.py +33 -65
  98. ommlds/cli/sessions/chat/phases/__init__.py +0 -0
  99. ommlds/cli/sessions/chat/phases/inject.py +27 -0
  100. ommlds/cli/sessions/chat/phases/injection.py +14 -0
  101. ommlds/cli/sessions/chat/phases/manager.py +29 -0
  102. ommlds/cli/sessions/chat/phases/types.py +29 -0
  103. ommlds/cli/sessions/chat/session.py +27 -0
  104. ommlds/cli/sessions/chat/tools/__init__.py +0 -0
  105. ommlds/cli/sessions/chat/tools/configs.py +22 -0
  106. ommlds/cli/sessions/chat/tools/confirmation.py +46 -0
  107. ommlds/cli/sessions/chat/tools/execution.py +66 -0
  108. ommlds/cli/sessions/chat/tools/fs/__init__.py +0 -0
  109. ommlds/cli/sessions/chat/tools/fs/configs.py +12 -0
  110. ommlds/cli/sessions/chat/tools/fs/inject.py +35 -0
  111. ommlds/cli/sessions/chat/tools/inject.py +88 -0
  112. ommlds/cli/sessions/chat/tools/injection.py +44 -0
  113. ommlds/cli/sessions/chat/tools/rendering.py +58 -0
  114. ommlds/cli/sessions/chat/tools/todo/__init__.py +0 -0
  115. ommlds/cli/sessions/chat/tools/todo/configs.py +12 -0
  116. ommlds/cli/sessions/chat/tools/todo/inject.py +31 -0
  117. ommlds/cli/sessions/chat/tools/weather/__init__.py +0 -0
  118. ommlds/cli/sessions/chat/tools/weather/configs.py +12 -0
  119. ommlds/cli/sessions/chat/tools/weather/inject.py +22 -0
  120. ommlds/cli/{tools/weather.py → sessions/chat/tools/weather/tools.py} +1 -1
  121. ommlds/cli/sessions/completion/configs.py +21 -0
  122. ommlds/cli/sessions/completion/inject.py +42 -0
  123. ommlds/cli/sessions/completion/session.py +35 -0
  124. ommlds/cli/sessions/embedding/configs.py +21 -0
  125. ommlds/cli/sessions/embedding/inject.py +42 -0
  126. ommlds/cli/sessions/embedding/session.py +33 -0
  127. ommlds/cli/sessions/inject.py +28 -11
  128. ommlds/cli/state/__init__.py +0 -0
  129. ommlds/cli/state/inject.py +28 -0
  130. ommlds/cli/{state.py → state/storage.py} +41 -24
  131. ommlds/minichain/__init__.py +84 -24
  132. ommlds/minichain/_dataclasses.py +15401 -0
  133. ommlds/minichain/_marshal.py +49 -9
  134. ommlds/minichain/_typedvalues.py +2 -4
  135. ommlds/minichain/backends/catalogs/base.py +20 -1
  136. ommlds/minichain/backends/catalogs/simple.py +2 -2
  137. ommlds/minichain/backends/catalogs/strings.py +10 -8
  138. ommlds/minichain/backends/impls/anthropic/chat.py +65 -27
  139. ommlds/minichain/backends/impls/anthropic/names.py +10 -8
  140. ommlds/minichain/backends/impls/anthropic/protocol.py +109 -0
  141. ommlds/minichain/backends/impls/anthropic/stream.py +111 -43
  142. ommlds/minichain/backends/impls/duckduckgo/search.py +6 -2
  143. ommlds/minichain/backends/impls/dummy/__init__.py +0 -0
  144. ommlds/minichain/backends/impls/dummy/chat.py +69 -0
  145. ommlds/minichain/backends/impls/google/chat.py +114 -22
  146. ommlds/minichain/backends/impls/google/search.py +7 -2
  147. ommlds/minichain/backends/impls/google/stream.py +219 -0
  148. ommlds/minichain/backends/impls/google/tools.py +149 -0
  149. ommlds/minichain/backends/impls/groq/__init__.py +0 -0
  150. ommlds/minichain/backends/impls/groq/chat.py +75 -0
  151. ommlds/minichain/backends/impls/groq/names.py +48 -0
  152. ommlds/minichain/backends/impls/groq/protocol.py +143 -0
  153. ommlds/minichain/backends/impls/groq/stream.py +125 -0
  154. ommlds/minichain/backends/impls/huggingface/repos.py +1 -5
  155. ommlds/minichain/backends/impls/llamacpp/chat.py +40 -22
  156. ommlds/minichain/backends/impls/llamacpp/completion.py +9 -5
  157. ommlds/minichain/backends/impls/llamacpp/format.py +4 -2
  158. ommlds/minichain/backends/impls/llamacpp/stream.py +43 -23
  159. ommlds/minichain/backends/impls/mistral.py +20 -5
  160. ommlds/minichain/backends/impls/mlx/chat.py +101 -24
  161. ommlds/minichain/backends/impls/ollama/__init__.py +0 -0
  162. ommlds/minichain/backends/impls/ollama/chat.py +199 -0
  163. ommlds/minichain/backends/impls/openai/chat.py +18 -8
  164. ommlds/minichain/backends/impls/openai/completion.py +10 -3
  165. ommlds/minichain/backends/impls/openai/embedding.py +10 -3
  166. ommlds/minichain/backends/impls/openai/format.py +131 -106
  167. ommlds/minichain/backends/impls/openai/names.py +31 -5
  168. ommlds/minichain/backends/impls/openai/stream.py +43 -25
  169. ommlds/minichain/backends/impls/sentencepiece/tokens.py +9 -6
  170. ommlds/minichain/backends/impls/tavily.py +66 -0
  171. ommlds/minichain/backends/impls/tinygrad/chat.py +30 -20
  172. ommlds/minichain/backends/impls/tokenizers/tokens.py +9 -6
  173. ommlds/minichain/backends/impls/transformers/sentence.py +6 -3
  174. ommlds/minichain/backends/impls/transformers/tokens.py +10 -7
  175. ommlds/minichain/backends/impls/transformers/transformers.py +160 -37
  176. ommlds/minichain/backends/strings/parsing.py +1 -1
  177. ommlds/minichain/backends/strings/resolving.py +4 -1
  178. ommlds/minichain/chat/_marshal.py +16 -9
  179. ommlds/minichain/chat/choices/adapters.py +4 -4
  180. ommlds/minichain/chat/choices/services.py +1 -1
  181. ommlds/minichain/chat/choices/stream/__init__.py +0 -0
  182. ommlds/minichain/chat/choices/stream/adapters.py +35 -0
  183. ommlds/minichain/chat/choices/stream/joining.py +31 -0
  184. ommlds/minichain/chat/choices/stream/services.py +45 -0
  185. ommlds/minichain/chat/choices/stream/types.py +43 -0
  186. ommlds/minichain/chat/choices/types.py +2 -2
  187. ommlds/minichain/chat/history.py +3 -3
  188. ommlds/minichain/chat/messages.py +55 -19
  189. ommlds/minichain/chat/services.py +3 -3
  190. ommlds/minichain/chat/stream/_marshal.py +16 -0
  191. ommlds/minichain/chat/stream/joining.py +85 -0
  192. ommlds/minichain/chat/stream/services.py +15 -21
  193. ommlds/minichain/chat/stream/types.py +32 -19
  194. ommlds/minichain/chat/tools/execution.py +8 -7
  195. ommlds/minichain/chat/tools/ids.py +9 -15
  196. ommlds/minichain/chat/tools/parsing.py +17 -26
  197. ommlds/minichain/chat/transforms/base.py +29 -38
  198. ommlds/minichain/chat/transforms/metadata.py +30 -4
  199. ommlds/minichain/chat/transforms/services.py +9 -11
  200. ommlds/minichain/content/_marshal.py +44 -20
  201. ommlds/minichain/content/json.py +13 -0
  202. ommlds/minichain/content/materialize.py +14 -21
  203. ommlds/minichain/content/prepare.py +4 -0
  204. ommlds/minichain/content/transforms/interleave.py +1 -1
  205. ommlds/minichain/content/transforms/squeeze.py +1 -1
  206. ommlds/minichain/content/transforms/stringify.py +1 -1
  207. ommlds/minichain/json.py +20 -0
  208. ommlds/minichain/lib/code/__init__.py +0 -0
  209. ommlds/minichain/lib/code/prompts.py +6 -0
  210. ommlds/minichain/lib/fs/binfiles.py +108 -0
  211. ommlds/minichain/lib/fs/context.py +126 -0
  212. ommlds/minichain/lib/fs/errors.py +101 -0
  213. ommlds/minichain/lib/fs/suggestions.py +36 -0
  214. ommlds/minichain/lib/fs/tools/__init__.py +0 -0
  215. ommlds/minichain/lib/fs/tools/edit.py +104 -0
  216. ommlds/minichain/lib/fs/tools/ls.py +38 -0
  217. ommlds/minichain/lib/fs/tools/read.py +115 -0
  218. ommlds/minichain/lib/fs/tools/recursivels/__init__.py +0 -0
  219. ommlds/minichain/lib/fs/tools/recursivels/execution.py +40 -0
  220. ommlds/minichain/lib/todo/__init__.py +0 -0
  221. ommlds/minichain/lib/todo/context.py +54 -0
  222. ommlds/minichain/lib/todo/tools/__init__.py +0 -0
  223. ommlds/minichain/lib/todo/tools/read.py +44 -0
  224. ommlds/minichain/lib/todo/tools/write.py +335 -0
  225. ommlds/minichain/lib/todo/types.py +60 -0
  226. ommlds/minichain/llms/_marshal.py +25 -17
  227. ommlds/minichain/llms/types.py +4 -0
  228. ommlds/minichain/registries/globals.py +18 -4
  229. ommlds/minichain/resources.py +68 -45
  230. ommlds/minichain/search.py +1 -1
  231. ommlds/minichain/services/_marshal.py +46 -39
  232. ommlds/minichain/services/facades.py +3 -3
  233. ommlds/minichain/services/services.py +1 -1
  234. ommlds/minichain/standard.py +8 -0
  235. ommlds/minichain/stream/services.py +152 -38
  236. ommlds/minichain/stream/wrap.py +22 -24
  237. ommlds/minichain/text/toolparsing/llamacpp/hermes2.py +3 -2
  238. ommlds/minichain/text/toolparsing/llamacpp/llama31.py +3 -2
  239. ommlds/minichain/text/toolparsing/llamacpp/utils.py +3 -2
  240. ommlds/minichain/tools/_marshal.py +1 -1
  241. ommlds/minichain/tools/execution/catalog.py +2 -1
  242. ommlds/minichain/tools/execution/context.py +34 -14
  243. ommlds/minichain/tools/execution/errors.py +15 -0
  244. ommlds/minichain/tools/execution/executors.py +8 -3
  245. ommlds/minichain/tools/execution/reflect.py +40 -5
  246. ommlds/minichain/tools/fns.py +46 -9
  247. ommlds/minichain/tools/jsonschema.py +14 -5
  248. ommlds/minichain/tools/reflect.py +54 -18
  249. ommlds/minichain/tools/types.py +33 -1
  250. ommlds/minichain/utils.py +27 -0
  251. ommlds/minichain/vectors/_marshal.py +11 -10
  252. ommlds/minichain/vectors/types.py +1 -1
  253. ommlds/nanochat/LICENSE +21 -0
  254. ommlds/nanochat/__init__.py +0 -0
  255. ommlds/nanochat/rustbpe/LICENSE +21 -0
  256. ommlds/nanochat/tokenizers.py +406 -0
  257. ommlds/server/cli.py +1 -2
  258. ommlds/server/server.py +5 -5
  259. ommlds/server/service.py +1 -1
  260. ommlds/specs/__init__.py +0 -0
  261. ommlds/specs/mcp/__init__.py +0 -0
  262. ommlds/specs/mcp/_marshal.py +23 -0
  263. ommlds/specs/mcp/clients.py +146 -0
  264. ommlds/specs/mcp/protocol.py +371 -0
  265. ommlds/tools/git.py +35 -12
  266. ommlds/tools/ocr.py +8 -9
  267. ommlds/wiki/analyze.py +6 -7
  268. ommlds/wiki/text/mfh.py +1 -5
  269. ommlds/wiki/text/wtp.py +1 -3
  270. ommlds/wiki/utils/xml.py +5 -5
  271. {ommlds-0.0.0.dev426.dist-info → ommlds-0.0.0.dev485.dist-info}/METADATA +24 -21
  272. ommlds-0.0.0.dev485.dist-info/RECORD +436 -0
  273. ommlds/cli/backends/standard.py +0 -20
  274. ommlds/cli/sessions/chat/base.py +0 -42
  275. ommlds/cli/sessions/chat/interactive.py +0 -73
  276. ommlds/cli/sessions/chat/printing.py +0 -96
  277. ommlds/cli/sessions/chat/prompt.py +0 -143
  278. ommlds/cli/sessions/chat/state.py +0 -109
  279. ommlds/cli/sessions/chat/tools.py +0 -91
  280. ommlds/cli/sessions/completion/completion.py +0 -44
  281. ommlds/cli/sessions/embedding/embedding.py +0 -42
  282. ommlds/cli/tools/config.py +0 -13
  283. ommlds/cli/tools/inject.py +0 -64
  284. ommlds/minichain/chat/stream/adapters.py +0 -69
  285. ommlds/minichain/lib/fs/ls/execution.py +0 -32
  286. ommlds-0.0.0.dev426.dist-info/RECORD +0 -303
  287. /ommlds/{cli/tools → backends/google}/__init__.py +0 -0
  288. /ommlds/{huggingface.py → backends/huggingface.py} +0 -0
  289. /ommlds/{minichain/lib/fs/ls → cli/content}/__init__.py +0 -0
  290. /ommlds/minichain/lib/fs/{ls → tools/recursivels}/rendering.py +0 -0
  291. /ommlds/minichain/lib/fs/{ls → tools/recursivels}/running.py +0 -0
  292. {ommlds-0.0.0.dev426.dist-info → ommlds-0.0.0.dev485.dist-info}/WHEEL +0 -0
  293. {ommlds-0.0.0.dev426.dist-info → ommlds-0.0.0.dev485.dist-info}/entry_points.txt +0 -0
  294. {ommlds-0.0.0.dev426.dist-info → ommlds-0.0.0.dev485.dist-info}/licenses/LICENSE +0 -0
  295. {ommlds-0.0.0.dev426.dist-info → ommlds-0.0.0.dev485.dist-info}/top_level.txt +0 -0
@@ -2,74 +2,150 @@ import typing as ta
2
2
 
3
3
  from omlish import cached
4
4
  from omlish import check
5
- from omlish import lang
6
5
  from omlish import typedvalues as tv
7
6
  from omlish.formats import json
8
7
 
8
+ from .....backends.openai import protocol as pt
9
9
  from ....chat.choices.services import ChatChoicesResponse
10
10
  from ....chat.choices.types import AiChoice
11
+ from ....chat.choices.types import AiChoices
11
12
  from ....chat.choices.types import ChatChoicesOptions
12
13
  from ....chat.messages import AiMessage
14
+ from ....chat.messages import AnyAiMessage
13
15
  from ....chat.messages import Chat
14
- from ....chat.messages import Message
15
16
  from ....chat.messages import SystemMessage
16
- from ....chat.messages import ToolExecResultMessage
17
+ from ....chat.messages import ToolUseMessage
18
+ from ....chat.messages import ToolUseResultMessage
17
19
  from ....chat.messages import UserMessage
18
- from ....chat.stream.types import AiMessageDelta
20
+ from ....chat.stream.types import AiDelta
21
+ from ....chat.stream.types import ContentAiDelta
22
+ from ....chat.stream.types import PartialToolUseAiDelta
19
23
  from ....chat.tools.types import Tool
24
+ from ....content.json import JsonContent
20
25
  from ....content.prepare import prepare_content_str
26
+ from ....llms.types import MaxCompletionTokens
21
27
  from ....llms.types import MaxTokens
22
28
  from ....llms.types import Temperature
23
29
  from ....llms.types import TokenUsage
24
30
  from ....llms.types import TokenUsageOutput
25
- from ....tools.jsonschema import build_tool_spec_json_schema
26
- from ....tools.types import ToolExecRequest
31
+ from ....tools.jsonschema import build_tool_spec_params_json_schema
27
32
  from ....tools.types import ToolSpec
33
+ from ....tools.types import ToolUse
28
34
  from ....types import Option
29
35
 
30
36
 
31
37
  ##
32
38
 
33
39
 
34
- def build_request_message(m: Message) -> ta.Mapping[str, ta.Any]:
35
- if isinstance(m, SystemMessage):
36
- return dict(
37
- role='system',
38
- content=m.c,
39
- )
40
+ def build_oai_request_msgs(mc_chat: Chat) -> ta.Sequence[pt.ChatCompletionMessage]:
41
+ oai_msgs: list[pt.ChatCompletionMessage] = []
42
+
43
+ for mc_msg in mc_chat:
44
+ if isinstance(mc_msg, SystemMessage):
45
+ oai_msgs.append(pt.SystemChatCompletionMessage(
46
+ content=check.isinstance(mc_msg.c, str),
47
+ ))
40
48
 
41
- elif isinstance(m, AiMessage):
42
- return dict(
43
- role='assistant',
44
- content=check.isinstance(m.c, (str, None)),
45
- **(dict(tool_calls=[
46
- dict(
47
- id=te.id,
48
- function=dict(
49
- arguments=check.not_none(te.raw_args),
50
- name=te.name,
49
+ elif isinstance(mc_msg, AiMessage):
50
+ oai_msgs.append(pt.AssistantChatCompletionMessage(
51
+ content=check.isinstance(mc_msg.c, (str, None)),
52
+ ))
53
+
54
+ elif isinstance(mc_msg, ToolUseMessage):
55
+ oai_msgs.append(pt.AssistantChatCompletionMessage(
56
+ tool_calls=[pt.AssistantChatCompletionMessage.ToolCall(
57
+ id=check.not_none(mc_msg.tu.id),
58
+ function=pt.AssistantChatCompletionMessage.ToolCall.Function(
59
+ arguments=check.not_none(mc_msg.tu.raw_args),
60
+ name=mc_msg.tu.name,
51
61
  ),
52
- type='function',
53
- )
54
- for te in m.tool_exec_requests
55
- ]) if m.tool_exec_requests else {}),
56
- )
62
+ )],
63
+ ))
57
64
 
58
- elif isinstance(m, UserMessage):
59
- return dict(
60
- role='user',
61
- content=prepare_content_str(m.c),
62
- )
65
+ elif isinstance(mc_msg, UserMessage):
66
+ oai_msgs.append(pt.UserChatCompletionMessage(
67
+ content=prepare_content_str(mc_msg.c),
68
+ ))
69
+
70
+ elif isinstance(mc_msg, ToolUseResultMessage):
71
+ tc: str
72
+ if isinstance(mc_msg.tur.c, str):
73
+ tc = mc_msg.tur.c
74
+ elif isinstance(mc_msg.tur.c, JsonContent):
75
+ tc = json.dumps_compact(mc_msg.tur.c)
76
+ else:
77
+ raise TypeError(mc_msg.tur.c)
78
+ oai_msgs.append(pt.ToolChatCompletionMessage(
79
+ tool_call_id=check.not_none(mc_msg.tur.id),
80
+ content=tc,
81
+ ))
82
+
83
+ else:
84
+ raise TypeError(mc_msg)
85
+
86
+ return oai_msgs
87
+
88
+
89
+ #
90
+
91
+
92
+ def build_mc_ai_choice(oai_choice: pt.ChatCompletionResponseChoice) -> AiChoice:
93
+ cur: list[AnyAiMessage] = []
94
+
95
+ oai_msg = oai_choice.message
96
+
97
+ if (oai_c := oai_msg.content) is not None:
98
+ cur.append(AiMessage(check.isinstance(oai_c, str)))
99
+
100
+ for oai_tc in oai_msg.tool_calls or []:
101
+ cur.append(ToolUseMessage(ToolUse(
102
+ id=oai_tc.id,
103
+ name=oai_tc.function.name,
104
+ args=json.loads(oai_tc.function.arguments or '{}'),
105
+ raw_args=oai_tc.function.arguments,
106
+ )))
107
+
108
+ return AiChoice(cur)
63
109
 
64
- elif isinstance(m, ToolExecResultMessage):
65
- return dict(
66
- role='tool',
67
- tool_call_id=m.id,
68
- content=check.isinstance(m.c, str),
110
+
111
+ def build_mc_ai_choices(oai_resp: pt.ChatCompletionResponse) -> AiChoices:
112
+ return [
113
+ build_mc_ai_choice(oai_choice)
114
+ for oai_choice in oai_resp.choices
115
+ ]
116
+
117
+
118
+ def build_mc_choices_response(oai_resp: pt.ChatCompletionResponse) -> ChatChoicesResponse:
119
+ return ChatChoicesResponse(
120
+ build_mc_ai_choices(oai_resp),
121
+
122
+ tv.TypedValues(
123
+ *([TokenUsageOutput(TokenUsage(
124
+ input=tu.prompt_tokens,
125
+ output=tu.completion_tokens,
126
+ total=tu.total_tokens,
127
+ ))] if (tu := oai_resp.usage) is not None else []),
128
+ ),
129
+ )
130
+
131
+
132
+ def build_mc_ai_delta(delta: pt.ChatCompletionChunkChoiceDelta) -> AiDelta:
133
+ if delta.content is not None:
134
+ check.state(not delta.tool_calls)
135
+ return ContentAiDelta(delta.content)
136
+
137
+ elif delta.tool_calls is not None:
138
+ check.state(delta.content is None)
139
+ tc = check.single(delta.tool_calls)
140
+ tc_fn = check.not_none(tc.function)
141
+ return PartialToolUseAiDelta(
142
+ id=tc.id,
143
+ name=tc_fn.name,
144
+ raw_args=tc_fn.arguments,
69
145
  )
70
146
 
71
147
  else:
72
- raise TypeError(m)
148
+ raise ValueError(delta)
73
149
 
74
150
 
75
151
  ##
@@ -90,21 +166,15 @@ class OpenaiChatRequestHandler:
90
166
  self._model = model
91
167
  self._mandatory_kwargs = mandatory_kwargs
92
168
 
93
- ROLES_MAP: ta.ClassVar[ta.Mapping[type[Message], str]] = {
94
- SystemMessage: 'system',
95
- UserMessage: 'user',
96
- AiMessage: 'assistant',
97
- ToolExecResultMessage: 'tool',
98
- }
99
-
100
- DEFAULT_OPTIONS: ta.ClassVar[tv.TypedValues[Option]] = tv.TypedValues(
101
- Temperature(0.),
102
- MaxTokens(1024),
169
+ DEFAULT_OPTIONS: ta.ClassVar[tv.TypedValues[Option]] = tv.TypedValues[Option](
170
+ # Temperature(0.),
171
+ # MaxTokens(1024),
103
172
  )
104
173
 
105
174
  _OPTION_KWARG_NAMES_MAP: ta.ClassVar[ta.Mapping[str, type[ChatChoicesOptions]]] = dict(
106
175
  temperature=Temperature,
107
176
  max_tokens=MaxTokens,
177
+ max_completion_tokens=MaxCompletionTokens,
108
178
  )
109
179
 
110
180
  class _ProcessedOptions(ta.NamedTuple):
@@ -114,8 +184,8 @@ class OpenaiChatRequestHandler:
114
184
  @cached.function
115
185
  def _process_options(self) -> _ProcessedOptions:
116
186
  kwargs: dict = dict(
117
- temperature=0,
118
- max_tokens=1024,
187
+ # temperature=0,
188
+ # max_tokens=1024,
119
189
  )
120
190
 
121
191
  tools_by_name: dict[str, ToolSpec] = {}
@@ -139,71 +209,26 @@ class OpenaiChatRequestHandler:
139
209
  )
140
210
 
141
211
  @cached.function
142
- def raw_request(self) -> ta.Mapping[str, ta.Any]:
212
+ def oai_request(self) -> pt.ChatCompletionRequest:
143
213
  po = self._process_options()
144
214
 
145
- tools = [
146
- dict(
147
- type='function',
148
- function=build_tool_spec_json_schema(ts),
215
+ tools: list[pt.ChatCompletionRequestTool] = [
216
+ pt.ChatCompletionRequestTool(
217
+ function=pt.ChatCompletionRequestTool.Function(
218
+ name=check.not_none(ts.name),
219
+ description=prepare_content_str(ts.desc),
220
+ parameters=build_tool_spec_params_json_schema(ts),
221
+ ),
149
222
  )
150
223
  for ts in po.tools_by_name.values()
151
224
  ]
152
225
 
153
- return dict(
226
+ return pt.ChatCompletionRequest(
154
227
  model=self._model,
155
- messages=[
156
- build_request_message(m)
157
- for m in self._chat
158
- ],
228
+ messages=build_oai_request_msgs(self._chat),
159
229
  top_p=1,
160
- **lang.opt_kw(tools=tools),
230
+ tools=tools or None,
161
231
  frequency_penalty=0.0,
162
232
  presence_penalty=0.0,
163
233
  **po.kwargs,
164
234
  )
165
-
166
- def build_ai_message(self, message: ta.Mapping[str, ta.Any]) -> AiMessage:
167
- return AiMessage(
168
- message.get('content'),
169
- tool_exec_requests=[
170
- ToolExecRequest(
171
- id=tc['id'],
172
- name=tc['function']['name'],
173
- args=json.loads(tc['function']['arguments'] or '{}'),
174
- raw_args=tc['function']['arguments'],
175
- )
176
- for tc in message.get('tool_calls', [])
177
- ] or None,
178
- )
179
-
180
- def build_response(self, raw_response: ta.Mapping[str, ta.Any]) -> ChatChoicesResponse:
181
- return ChatChoicesResponse(
182
- [
183
- AiChoice(self.build_ai_message(choice['message']))
184
- for choice in raw_response['choices']
185
- ],
186
-
187
- tv.TypedValues(
188
- *([TokenUsageOutput(TokenUsage(
189
- input=tu['prompt_tokens'],
190
- output=tu['completion_tokens'],
191
- total=tu['total_tokens'],
192
- ))] if (tu := raw_response.get('usage')) is not None else []),
193
- ),
194
- )
195
-
196
- def build_ai_message_delta(self, delta: ta.Mapping[str, ta.Any]) -> AiMessageDelta:
197
- return AiMessageDelta(
198
- delta.get('content'),
199
- # FIXME:
200
- # tool_exec_requests=[
201
- # ToolExecRequest(
202
- # id=tc['id'],
203
- # spec=self._process_options().tools_by_name[tc['function']['name']],
204
- # args=json.loads(tc['function']['arguments'] or '{}'),
205
- # raw_args=tc['function']['arguments'],
206
- # )
207
- # for tc in message_or_delta.get('tool_calls', [])
208
- # ] or None,
209
- )
@@ -30,10 +30,12 @@ _GPT_MODEL_NAMES = [
30
30
  'gpt-5-chat-latest',
31
31
  'gpt-5-mini',
32
32
  'gpt-5-nano',
33
+
34
+ 'gpt-5.1',
33
35
  ]
34
36
 
35
37
 
36
- MODEL_NAMES = ModelNameCollection(
38
+ CHAT_MODEL_NAMES = ModelNameCollection(
37
39
  default='gpt',
38
40
  aliases={
39
41
  **{
@@ -46,8 +48,8 @@ MODEL_NAMES = ModelNameCollection(
46
48
  for n in _GPT_MODEL_NAMES
47
49
  },
48
50
 
49
- 'gpt': 'gpt-4o',
50
- 'gpt-mini': 'gpt-4o-mini',
51
+ 'gpt': 'gpt-5.1',
52
+ 'gpt-mini': 'gpt-5-mini',
51
53
 
52
54
  #
53
55
 
@@ -61,11 +63,35 @@ MODEL_NAMES = ModelNameCollection(
61
63
 
62
64
 
63
65
  # @omlish-manifest
64
- _BACKEND_STRINGS_MANIFEST = BackendStringsManifest(
66
+ _CHAT_BACKEND_STRINGS_MANIFEST = BackendStringsManifest(
65
67
  [
66
68
  'ChatChoicesService',
67
69
  'ChatChoicesStreamService',
68
70
  ],
69
71
  'openai',
70
- model_names=MODEL_NAMES,
72
+ model_names=CHAT_MODEL_NAMES,
73
+ )
74
+
75
+
76
+ ##
77
+
78
+
79
+ # @omlish-manifest
80
+ _COMPLETION_BACKEND_STRINGS_MANIFEST = BackendStringsManifest(
81
+ [
82
+ 'CompletionService',
83
+ ],
84
+ 'openai',
85
+ )
86
+
87
+
88
+ ##
89
+
90
+
91
+ # @omlish-manifest
92
+ _EMBEDDING_BACKEND_STRINGS_MANIFEST = BackendStringsManifest(
93
+ [
94
+ 'EmbeddingService',
95
+ ],
96
+ 'openai',
71
97
  )
@@ -1,28 +1,35 @@
1
+ """
2
+ https://platform.openai.com/docs/api-reference/responses-streaming
3
+ """
1
4
  import typing as ta
2
5
 
3
6
  from omlish import check
7
+ from omlish import marshal as msh
4
8
  from omlish import typedvalues as tv
5
9
  from omlish.formats import json
6
10
  from omlish.http import all as http
7
11
  from omlish.http import sse
8
12
  from omlish.io.buffers import DelimitingBuffer
9
13
 
14
+ from .....backends.openai import protocol as pt
10
15
  from ....chat.choices.services import ChatChoicesOutputs
11
- from ....chat.stream.services import ChatChoicesStreamRequest
12
- from ....chat.stream.services import ChatChoicesStreamResponse
13
- from ....chat.stream.services import static_check_is_chat_choices_stream_service
14
- from ....chat.stream.types import AiChoiceDelta
15
- from ....chat.stream.types import AiChoiceDeltas
16
- from ....chat.stream.types import ChatChoicesStreamOption
16
+ from ....chat.choices.stream.services import ChatChoicesStreamRequest
17
+ from ....chat.choices.stream.services import ChatChoicesStreamResponse
18
+ from ....chat.choices.stream.services import static_check_is_chat_choices_stream_service
19
+ from ....chat.choices.stream.types import AiChoiceDeltas
20
+ from ....chat.choices.stream.types import AiChoicesDeltas
21
+ from ....chat.choices.stream.types import ChatChoicesStreamOption
17
22
  from ....configs import Config
18
23
  from ....resources import ResourcesOption
19
24
  from ....resources import UseResources
20
25
  from ....standard import ApiKey
21
26
  from ....stream.services import StreamOption
27
+ from ....stream.services import StreamResponseSink
22
28
  from ....stream.services import new_stream_response
23
29
  from .chat import OpenaiChatChoicesService
24
30
  from .format import OpenaiChatRequestHandler
25
- from .names import MODEL_NAMES
31
+ from .format import build_mc_ai_delta
32
+ from .names import CHAT_MODEL_NAMES
26
33
 
27
34
 
28
35
  ##
@@ -34,16 +41,22 @@ from .names import MODEL_NAMES
34
41
  # )
35
42
  @static_check_is_chat_choices_stream_service
36
43
  class OpenaiChatChoicesStreamService:
37
- def __init__(self, *configs: Config) -> None:
44
+ def __init__(
45
+ self,
46
+ *configs: Config,
47
+ http_client: http.AsyncHttpClient | None = None,
48
+ ) -> None:
38
49
  super().__init__()
39
50
 
51
+ self._http_client = http_client
52
+
40
53
  with tv.consume(*configs) as cc:
41
54
  self._model_name = cc.pop(OpenaiChatChoicesService.DEFAULT_MODEL_NAME)
42
55
  self._api_key = ApiKey.pop_secret(cc, env='OPENAI_API_KEY')
43
56
 
44
- READ_CHUNK_SIZE = 64 * 1024
57
+ READ_CHUNK_SIZE: ta.ClassVar[int] = -1
45
58
 
46
- def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
59
+ async def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
47
60
  # check.isinstance(request, ChatRequest)
48
61
 
49
62
  rh = OpenaiChatRequestHandler(
@@ -53,16 +66,16 @@ class OpenaiChatChoicesStreamService:
53
66
  for o in request.options
54
67
  if not isinstance(o, (ChatChoicesStreamOption, StreamOption, ResourcesOption))
55
68
  ],
56
- model=MODEL_NAMES.resolve(self._model_name.v),
69
+ model=CHAT_MODEL_NAMES.resolve(self._model_name.v),
57
70
  mandatory_kwargs=dict(
58
71
  stream=True,
59
- stream_options=dict(
72
+ stream_options=pt.ChatCompletionRequest.StreamOptions(
60
73
  include_usage=True,
61
74
  ),
62
75
  ),
63
76
  )
64
77
 
65
- raw_request = rh.raw_request()
78
+ raw_request = msh.marshal(rh.oai_request())
66
79
 
67
80
  http_request = http.HttpRequest(
68
81
  'https://api.openai.com/v1/chat/completions',
@@ -73,16 +86,15 @@ class OpenaiChatChoicesStreamService:
73
86
  data=json.dumps(raw_request).encode('utf-8'),
74
87
  )
75
88
 
76
- with UseResources.or_new(request.options) as rs:
77
- http_client = rs.enter_context(http.client())
78
- http_response = rs.enter_context(http_client.stream_request(http_request))
89
+ async with UseResources.or_new(request.options) as rs:
90
+ http_client = await rs.enter_async_context(http.manage_async_client(self._http_client))
91
+ http_response = await rs.enter_async_context(await http_client.stream_request(http_request))
79
92
 
80
- def yield_choices() -> ta.Generator[AiChoiceDeltas, None, ta.Sequence[ChatChoicesOutputs] | None]:
93
+ async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs]:
81
94
  db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])
82
95
  sd = sse.SseDecoder()
83
96
  while True:
84
- # FIXME: read1 not on response stream protocol
85
- b = http_response.stream.read1(self.READ_CHUNK_SIZE) # type: ignore[attr-defined]
97
+ b = await http_response.stream.read1(self.READ_CHUNK_SIZE)
86
98
  for l in db.feed(b):
87
99
  if isinstance(l, DelimitingBuffer.Incomplete):
88
100
  # FIXME: handle
@@ -99,14 +111,20 @@ class OpenaiChatChoicesStreamService:
99
111
 
100
112
  check.state(sj['object'] == 'chat.completion.chunk')
101
113
 
114
+ ccc = msh.unmarshal(sj, pt.ChatCompletionChunk)
115
+
102
116
  # FIXME: stop reason
103
- if not sj['choices']:
117
+ if not ccc.choices:
104
118
  continue
105
119
 
106
- yield [
107
- AiChoiceDelta(rh.build_ai_message_delta(choice['delta']))
108
- for choice in sj['choices']
109
- ]
120
+ if any(choice.finish_reason for choice in ccc.choices):
121
+ check.state(all(choice.finish_reason for choice in ccc.choices))
122
+ break
123
+
124
+ await sink.emit(AiChoicesDeltas([
125
+ AiChoiceDeltas([build_mc_ai_delta(choice.delta)])
126
+ for choice in ccc.choices
127
+ ]))
110
128
 
111
129
  if not b:
112
130
  return []
@@ -114,4 +132,4 @@ class OpenaiChatChoicesStreamService:
114
132
  # raw_response = json.loads(check.not_none(http_response.data).decode('utf-8'))
115
133
  # return rh.build_response(raw_response)
116
134
 
117
- return new_stream_response(rs, yield_choices())
135
+ return await new_stream_response(rs, inner)
@@ -1,23 +1,26 @@
1
1
  import typing as ta
2
2
 
3
- import sentencepiece as spm
4
-
5
3
  from omlish import check
4
+ from omlish import lang
6
5
 
7
6
  from .... import tokens as tks
8
7
 
9
8
 
9
+ with lang.auto_proxy_import(globals()):
10
+ import sentencepiece as spm
11
+
12
+
10
13
  ##
11
14
 
12
15
 
13
def build_vocab(spm_tokenizer: 'spm.SentencePieceProcessor') -> tks.Vocab:
    """Build a Vocab mapping every sentencepiece id to its piece string."""

    entries = []
    for tok_id in range(spm_tokenizer.get_piece_size()):  # noqa
        piece_str = tks.TokenStr(spm_tokenizer.id_to_piece(tok_id))  # noqa
        entries.append((ta.cast(tks.Token, tok_id), piece_str))
    return tks.Vocab(entries)
18
21
 
19
22
 
20
def build_specials(spm_tokenizer: 'spm.SentencePieceProcessor') -> tks.SpecialTokens:
    """Return the special tokens for *spm_tokenizer*.

    FIXME: the sentencepiece special pieces (bos/eos/unk/pad) are not yet
    mapped through — an empty SpecialTokens collection is returned for now.
    """

    return tks.SpecialTokens([])
 
@@ -28,7 +31,7 @@ def build_specials(spm_tokenizer: spm.SentencePieceProcessor) -> tks.SpecialToke
28
31
  class SentencepieceTokenizer(tks.BaseTokenizer):
29
32
  def __init__(
30
33
  self,
31
- spm_tokenizer: spm.SentencePieceProcessor,
34
+ spm_tokenizer: 'spm.SentencePieceProcessor',
32
35
  ) -> None:
33
36
  self._spm_tokenizer = check.isinstance(spm_tokenizer, spm.SentencePieceProcessor)
34
37
 
@@ -38,7 +41,7 @@ class SentencepieceTokenizer(tks.BaseTokenizer):
38
41
  )
39
42
 
40
43
  @property
41
- def spm_tokenizer(self) -> spm.SentencePieceProcessor:
44
+ def spm_tokenizer(self) -> 'spm.SentencePieceProcessor':
42
45
  return self._spm_tokenizer
43
46
 
44
47
  #
@@ -0,0 +1,66 @@
1
+ from omlish import check
2
+ from omlish import marshal as msh
3
+ from omlish import typedvalues as tv
4
+ from omlish.formats import json
5
+ from omlish.http import all as http
6
+
7
+ from ....backends.tavily import protocol as pt
8
+ from ...search import SearchHit
9
+ from ...search import SearchHits
10
+ from ...search import SearchRequest
11
+ from ...search import SearchResponse
12
+ from ...search import static_check_is_search_service
13
+ from ...standard import ApiKey
14
+
15
+
16
+ ##
17
+
18
+
19
# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
#     name='tavily',
#     type='SearchService',
# )
@static_check_is_search_service
class TavilySearchService:
    """Search service backed by the Tavily web-search HTTP API."""

    def __init__(
            self,
            *configs: ApiKey,
            http_client: http.AsyncHttpClient | None = None,
    ) -> None:
        """
        Args:
            configs: Typed-value configs; an ApiKey may be supplied here,
                otherwise it is read from the TAVILY_API_KEY env var.
            http_client: Optional async HTTP client to use; a default client
                is used when None.
        """

        super().__init__()

        self._http_client = http_client

        # Consume the given configs, falling back to the environment variable.
        with tv.consume(*configs) as cc:
            self._api_key = ApiKey.pop_secret(cc, env='TAVILY_API_KEY')

    async def invoke(self, request: SearchRequest) -> SearchResponse:
        """Execute one search against the Tavily API and adapt the results."""

        # Marshal the protocol-level request to a JSON-able payload.
        payload = msh.marshal(pt.SearchRequest(
            query=request.v,
        ))

        req_headers = {
            http.consts.HEADER_CONTENT_TYPE: http.consts.CONTENT_TYPE_JSON,
            http.consts.HEADER_AUTH: http.consts.format_bearer_auth_header(check.not_none(self._api_key).reveal()),
        }

        resp = await http.async_request(
            'https://api.tavily.com/search',
            headers=req_headers,
            data=json.dumps(payload).encode('utf-8'),
            client=self._http_client,
        )

        body = json.loads(check.not_none(resp.data).decode('utf-8'))
        parsed = msh.unmarshal(body, pt.SearchResponse)

        # Adapt protocol results (which may be absent) into SearchHits.
        hits = []
        for result in parsed.results or []:
            hits.append(SearchHit(
                title=result.title,
                url=result.url,
            ))

        return SearchResponse(SearchHits(
            l=hits,
        ))