ommlds 0.0.0.dev466__py3-none-any.whl → 0.0.0.dev512__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ommlds/.omlish-manifests.json +404 -31
- ommlds/README.md +11 -0
- ommlds/__about__.py +21 -12
- ommlds/_hacks/__init__.py +4 -0
- ommlds/_hacks/funcs.py +110 -0
- ommlds/_hacks/names.py +158 -0
- ommlds/_hacks/params.py +73 -0
- ommlds/_hacks/patches.py +0 -3
- ommlds/backends/anthropic/protocol/__init__.py +13 -1
- ommlds/backends/anthropic/protocol/_dataclasses.py +1625 -0
- ommlds/backends/anthropic/protocol/sse/events.py +2 -0
- ommlds/backends/anthropic/protocol/types.py +5 -7
- ommlds/backends/cerebras/__init__.py +7 -0
- ommlds/backends/cerebras/_dataclasses.py +4254 -0
- ommlds/backends/cerebras/_marshal.py +24 -0
- ommlds/backends/cerebras/clients.py +9 -0
- ommlds/backends/cerebras/protocol.py +310 -0
- ommlds/backends/google/protocol/__init__.py +13 -0
- ommlds/backends/google/protocol/_dataclasses.py +5997 -0
- ommlds/backends/google/protocol/types.py +6 -8
- ommlds/backends/groq/__init__.py +7 -0
- ommlds/backends/groq/_dataclasses.py +3901 -0
- ommlds/backends/groq/_marshal.py +23 -0
- ommlds/backends/groq/clients.py +9 -0
- ommlds/backends/groq/protocol.py +247 -0
- ommlds/{huggingface.py → backends/huggingface/cache.py} +1 -6
- ommlds/backends/huggingface/cli.py +208 -0
- ommlds/backends/llamacpp/logging.py +4 -1
- ommlds/backends/mlx/caching.py +7 -3
- ommlds/backends/mlx/cli.py +10 -7
- ommlds/backends/mlx/generation.py +18 -16
- ommlds/backends/mlx/limits.py +10 -6
- ommlds/backends/mlx/loading.py +7 -4
- ommlds/backends/ollama/__init__.py +7 -0
- ommlds/backends/ollama/_dataclasses.py +3940 -0
- ommlds/backends/ollama/cli.py +36 -0
- ommlds/backends/ollama/protocol.py +201 -0
- ommlds/backends/openai/protocol/__init__.py +15 -1
- ommlds/backends/openai/protocol/_common.py +3 -5
- ommlds/backends/openai/protocol/_dataclasses.py +7708 -0
- ommlds/backends/tavily/__init__.py +7 -0
- ommlds/backends/tavily/_dataclasses.py +1734 -0
- ommlds/backends/tavily/protocol.py +299 -0
- ommlds/backends/tinygrad/models/llama3/__init__.py +22 -14
- ommlds/backends/torch/backends.py +1 -1
- ommlds/backends/transformers/__init__.py +14 -0
- ommlds/backends/transformers/filecache.py +109 -0
- ommlds/backends/transformers/streamers.py +73 -0
- ommlds/cli/__init__.py +7 -0
- ommlds/cli/_dataclasses.py +3835 -0
- ommlds/cli/asyncs.py +30 -0
- ommlds/cli/backends/catalog.py +88 -0
- ommlds/cli/backends/configs.py +9 -0
- ommlds/cli/backends/inject.py +100 -42
- ommlds/cli/{sessions/chat/backends → backends}/injection.py +1 -1
- ommlds/cli/backends/meta.py +82 -0
- ommlds/cli/{sessions/chat/backends → backends}/types.py +11 -1
- ommlds/cli/{sessions/chat/content → content}/messages.py +2 -2
- ommlds/cli/{sessions/chat/content → content}/strings.py +1 -1
- ommlds/cli/inject.py +17 -8
- ommlds/cli/inputs/asyncs.py +32 -0
- ommlds/cli/inputs/sync.py +75 -0
- ommlds/cli/main.py +346 -114
- ommlds/cli/rendering/configs.py +9 -0
- ommlds/cli/{sessions/chat/rendering → rendering}/inject.py +4 -5
- ommlds/cli/{sessions/chat/rendering → rendering}/markdown.py +1 -1
- ommlds/cli/{sessions/chat/rendering → rendering}/raw.py +1 -1
- ommlds/cli/{sessions/chat/rendering → rendering}/types.py +7 -1
- ommlds/cli/secrets.py +22 -0
- ommlds/cli/sessions/base.py +1 -10
- ommlds/cli/sessions/chat/configs.py +13 -30
- ommlds/cli/sessions/chat/drivers/ai/configs.py +13 -0
- ommlds/cli/sessions/chat/drivers/ai/events.py +57 -0
- ommlds/cli/sessions/chat/{chat → drivers}/ai/inject.py +15 -12
- ommlds/cli/sessions/chat/{chat → drivers}/ai/rendering.py +8 -8
- ommlds/cli/sessions/chat/{chat → drivers}/ai/services.py +5 -5
- ommlds/cli/sessions/chat/{chat → drivers}/ai/tools.py +4 -8
- ommlds/cli/sessions/chat/{chat → drivers}/ai/types.py +10 -1
- ommlds/cli/sessions/chat/drivers/configs.py +25 -0
- ommlds/cli/sessions/chat/drivers/events/inject.py +27 -0
- ommlds/cli/sessions/chat/drivers/events/injection.py +14 -0
- ommlds/cli/sessions/chat/drivers/events/manager.py +16 -0
- ommlds/cli/sessions/chat/drivers/events/types.py +44 -0
- ommlds/cli/sessions/chat/drivers/impl.py +50 -0
- ommlds/cli/sessions/chat/drivers/inject.py +70 -0
- ommlds/cli/sessions/chat/drivers/state/configs.py +13 -0
- ommlds/cli/sessions/chat/drivers/state/ids.py +25 -0
- ommlds/cli/sessions/chat/drivers/state/inject.py +83 -0
- ommlds/cli/sessions/chat/{chat → drivers}/state/inmemory.py +1 -6
- ommlds/cli/sessions/chat/{chat → drivers}/state/storage.py +18 -12
- ommlds/cli/sessions/chat/{chat → drivers}/state/types.py +11 -6
- ommlds/cli/sessions/chat/drivers/tools/configs.py +22 -0
- ommlds/cli/sessions/chat/drivers/tools/confirmation.py +44 -0
- ommlds/cli/sessions/chat/drivers/tools/errorhandling.py +39 -0
- ommlds/cli/sessions/chat/{tools → drivers/tools}/execution.py +3 -4
- ommlds/cli/sessions/chat/drivers/tools/fs/__init__.py +0 -0
- ommlds/cli/sessions/chat/drivers/tools/fs/configs.py +12 -0
- ommlds/cli/sessions/chat/drivers/tools/fs/inject.py +35 -0
- ommlds/cli/sessions/chat/drivers/tools/inject.py +83 -0
- ommlds/cli/sessions/chat/{tools → drivers/tools}/injection.py +20 -5
- ommlds/cli/sessions/chat/{tools → drivers/tools}/rendering.py +3 -3
- ommlds/cli/sessions/chat/drivers/tools/todo/__init__.py +0 -0
- ommlds/cli/sessions/chat/drivers/tools/todo/configs.py +12 -0
- ommlds/cli/sessions/chat/drivers/tools/todo/inject.py +31 -0
- ommlds/cli/sessions/chat/drivers/tools/weather/__init__.py +0 -0
- ommlds/cli/sessions/chat/drivers/tools/weather/configs.py +12 -0
- ommlds/cli/sessions/chat/drivers/tools/weather/inject.py +22 -0
- ommlds/cli/sessions/chat/{tools/weather.py → drivers/tools/weather/tools.py} +1 -1
- ommlds/cli/sessions/chat/drivers/types.py +31 -0
- ommlds/cli/sessions/chat/drivers/user/__init__.py +0 -0
- ommlds/cli/sessions/chat/drivers/user/configs.py +14 -0
- ommlds/cli/sessions/chat/drivers/user/inject.py +41 -0
- ommlds/cli/sessions/chat/facades/__init__.py +0 -0
- ommlds/cli/sessions/chat/facades/commands/__init__.py +0 -0
- ommlds/cli/sessions/chat/facades/commands/base.py +83 -0
- ommlds/cli/sessions/chat/facades/commands/configs.py +9 -0
- ommlds/cli/sessions/chat/facades/commands/inject.py +41 -0
- ommlds/cli/sessions/chat/facades/commands/injection.py +15 -0
- ommlds/cli/sessions/chat/facades/commands/manager.py +59 -0
- ommlds/cli/sessions/chat/facades/commands/simple.py +34 -0
- ommlds/cli/sessions/chat/facades/commands/types.py +13 -0
- ommlds/cli/sessions/chat/facades/configs.py +11 -0
- ommlds/cli/sessions/chat/facades/facade.py +26 -0
- ommlds/cli/sessions/chat/facades/inject.py +35 -0
- ommlds/cli/sessions/chat/facades/ui.py +34 -0
- ommlds/cli/sessions/chat/inject.py +10 -49
- ommlds/cli/sessions/chat/interfaces/__init__.py +0 -0
- ommlds/cli/sessions/chat/interfaces/bare/__init__.py +0 -0
- ommlds/cli/sessions/chat/interfaces/bare/configs.py +15 -0
- ommlds/cli/sessions/chat/interfaces/bare/inject.py +69 -0
- ommlds/cli/sessions/chat/interfaces/bare/interactive.py +49 -0
- ommlds/cli/sessions/chat/interfaces/bare/oneshot.py +21 -0
- ommlds/cli/sessions/chat/{tools/confirmation.py → interfaces/bare/tools.py} +3 -22
- ommlds/cli/sessions/chat/interfaces/base.py +13 -0
- ommlds/cli/sessions/chat/interfaces/configs.py +11 -0
- ommlds/cli/sessions/chat/interfaces/inject.py +29 -0
- ommlds/cli/sessions/chat/interfaces/textual/__init__.py +0 -0
- ommlds/cli/sessions/chat/interfaces/textual/app.py +429 -0
- ommlds/cli/sessions/chat/interfaces/textual/configs.py +11 -0
- ommlds/cli/sessions/chat/interfaces/textual/facades.py +19 -0
- ommlds/cli/sessions/chat/interfaces/textual/inject.py +111 -0
- ommlds/cli/sessions/chat/interfaces/textual/inputhistory.py +174 -0
- ommlds/cli/sessions/chat/interfaces/textual/interface.py +24 -0
- ommlds/cli/sessions/chat/interfaces/textual/styles/__init__.py +29 -0
- ommlds/cli/sessions/chat/interfaces/textual/styles/input.tcss +53 -0
- ommlds/cli/sessions/chat/interfaces/textual/styles/markdown.tcss +7 -0
- ommlds/cli/sessions/chat/interfaces/textual/styles/messages.tcss +167 -0
- ommlds/cli/sessions/chat/interfaces/textual/tools.py +38 -0
- ommlds/cli/sessions/chat/interfaces/textual/widgets/__init__.py +0 -0
- ommlds/cli/sessions/chat/interfaces/textual/widgets/input.py +70 -0
- ommlds/cli/sessions/chat/interfaces/textual/widgets/messages.py +207 -0
- ommlds/cli/sessions/chat/session.py +8 -13
- ommlds/cli/sessions/completion/configs.py +5 -6
- ommlds/cli/sessions/completion/inject.py +15 -2
- ommlds/cli/sessions/completion/session.py +10 -18
- ommlds/cli/sessions/configs.py +10 -0
- ommlds/cli/sessions/embedding/configs.py +5 -6
- ommlds/cli/sessions/embedding/inject.py +15 -2
- ommlds/cli/sessions/embedding/session.py +10 -18
- ommlds/cli/sessions/inject.py +15 -15
- ommlds/cli/state/storage.py +8 -2
- ommlds/minichain/__init__.py +217 -60
- ommlds/minichain/_dataclasses.py +20640 -0
- ommlds/minichain/_typedvalues.py +15 -8
- ommlds/minichain/backends/catalogs/base.py +20 -1
- ommlds/minichain/backends/catalogs/simple.py +2 -2
- ommlds/minichain/backends/catalogs/strings.py +13 -10
- ommlds/minichain/backends/impls/anthropic/chat.py +28 -5
- ommlds/minichain/backends/impls/anthropic/names.py +3 -3
- ommlds/minichain/backends/impls/anthropic/protocol.py +2 -2
- ommlds/minichain/backends/impls/anthropic/stream.py +23 -18
- ommlds/minichain/backends/impls/cerebras/__init__.py +0 -0
- ommlds/minichain/backends/impls/cerebras/chat.py +82 -0
- ommlds/minichain/backends/impls/cerebras/names.py +45 -0
- ommlds/minichain/backends/impls/cerebras/protocol.py +143 -0
- ommlds/minichain/backends/impls/cerebras/stream.py +114 -0
- ommlds/minichain/backends/impls/duckduckgo/search.py +5 -1
- ommlds/minichain/backends/impls/dummy/__init__.py +0 -0
- ommlds/minichain/backends/impls/dummy/chat.py +69 -0
- ommlds/minichain/backends/impls/google/chat.py +20 -84
- ommlds/minichain/backends/impls/google/names.py +6 -0
- ommlds/minichain/backends/impls/google/protocol.py +105 -0
- ommlds/minichain/backends/impls/google/search.py +10 -5
- ommlds/minichain/backends/impls/google/stream.py +64 -142
- ommlds/minichain/backends/impls/google/tools.py +2 -2
- ommlds/minichain/backends/impls/groq/__init__.py +0 -0
- ommlds/minichain/backends/impls/groq/chat.py +77 -0
- ommlds/minichain/backends/impls/groq/names.py +48 -0
- ommlds/minichain/backends/impls/groq/protocol.py +143 -0
- ommlds/minichain/backends/impls/groq/stream.py +114 -0
- ommlds/minichain/backends/impls/huggingface/repos.py +1 -5
- ommlds/minichain/backends/impls/llamacpp/chat.py +15 -3
- ommlds/minichain/backends/impls/llamacpp/completion.py +7 -3
- ommlds/minichain/backends/impls/llamacpp/stream.py +38 -19
- ommlds/minichain/backends/impls/mistral.py +9 -2
- ommlds/minichain/backends/impls/mlx/chat.py +100 -23
- ommlds/minichain/backends/impls/ollama/__init__.py +0 -0
- ommlds/minichain/backends/impls/ollama/chat.py +193 -0
- ommlds/minichain/backends/impls/ollama/protocol.py +144 -0
- ommlds/minichain/backends/impls/openai/chat.py +14 -7
- ommlds/minichain/backends/impls/openai/completion.py +9 -2
- ommlds/minichain/backends/impls/openai/embedding.py +9 -2
- ommlds/minichain/backends/impls/openai/format.py +117 -115
- ommlds/minichain/backends/impls/openai/names.py +33 -5
- ommlds/minichain/backends/impls/openai/stream.py +61 -70
- ommlds/minichain/backends/impls/sentencepiece/tokens.py +9 -6
- ommlds/minichain/backends/impls/tavily.py +66 -0
- ommlds/minichain/backends/impls/tinygrad/chat.py +17 -14
- ommlds/minichain/backends/impls/tokenizers/tokens.py +9 -6
- ommlds/minichain/backends/impls/transformers/sentence.py +5 -2
- ommlds/minichain/backends/impls/transformers/tokens.py +9 -6
- ommlds/minichain/backends/impls/transformers/transformers.py +139 -20
- ommlds/minichain/backends/strings/parsing.py +2 -2
- ommlds/minichain/backends/strings/resolving.py +7 -2
- ommlds/minichain/chat/choices/stream/__init__.py +0 -0
- ommlds/minichain/chat/{stream → choices/stream}/adapters.py +7 -7
- ommlds/minichain/chat/choices/stream/joining.py +31 -0
- ommlds/minichain/chat/choices/stream/services.py +45 -0
- ommlds/minichain/chat/choices/stream/types.py +43 -0
- ommlds/minichain/chat/content.py +42 -0
- ommlds/minichain/chat/messages.py +46 -42
- ommlds/minichain/chat/stream/_marshal.py +4 -4
- ommlds/minichain/chat/stream/joining.py +56 -43
- ommlds/minichain/chat/stream/services.py +15 -15
- ommlds/minichain/chat/stream/types.py +17 -24
- ommlds/minichain/chat/templating.py +3 -3
- ommlds/minichain/content/__init__.py +20 -3
- ommlds/minichain/content/_marshal.py +181 -55
- ommlds/minichain/content/code.py +26 -0
- ommlds/minichain/content/composite.py +28 -0
- ommlds/minichain/content/content.py +27 -0
- ommlds/minichain/content/dynamic.py +12 -0
- ommlds/minichain/content/emphasis.py +27 -0
- ommlds/minichain/content/images.py +2 -2
- ommlds/minichain/content/json.py +2 -2
- ommlds/minichain/content/link.py +13 -0
- ommlds/minichain/content/markdown.py +12 -0
- ommlds/minichain/content/metadata.py +10 -0
- ommlds/minichain/content/namespaces.py +8 -0
- ommlds/minichain/content/placeholders.py +10 -9
- ommlds/minichain/content/quote.py +26 -0
- ommlds/minichain/content/raw.py +49 -0
- ommlds/minichain/content/recursive.py +12 -0
- ommlds/minichain/content/resources.py +22 -0
- ommlds/minichain/content/section.py +26 -0
- ommlds/minichain/content/sequence.py +17 -3
- ommlds/minichain/content/standard.py +32 -0
- ommlds/minichain/content/tag.py +28 -0
- ommlds/minichain/content/templates.py +13 -0
- ommlds/minichain/content/text.py +2 -2
- ommlds/minichain/content/transform/__init__.py +0 -0
- ommlds/minichain/content/transform/json.py +55 -0
- ommlds/minichain/content/transform/markdown.py +8 -0
- ommlds/minichain/content/transform/materialize.py +59 -0
- ommlds/minichain/content/transform/metadata.py +16 -0
- ommlds/minichain/content/transform/namespaces.py +20 -0
- ommlds/minichain/content/transform/placeholders.py +60 -0
- ommlds/minichain/content/{prepare.py → transform/prepare.py} +10 -15
- ommlds/minichain/content/transform/recursive.py +54 -0
- ommlds/minichain/content/transform/resources.py +58 -0
- ommlds/minichain/content/transform/standard.py +43 -0
- ommlds/minichain/content/{transforms → transform}/stringify.py +1 -7
- ommlds/minichain/content/transform/strings.py +33 -0
- ommlds/minichain/content/transform/templates.py +25 -0
- ommlds/minichain/content/transform/types.py +18 -0
- ommlds/minichain/content/transform/visitors.py +38 -0
- ommlds/minichain/content/visitors.py +218 -0
- ommlds/minichain/http/__init__.py +0 -0
- ommlds/minichain/http/stream.py +195 -0
- ommlds/minichain/lib/fs/tools/read.py +1 -1
- ommlds/minichain/lib/fs/tools/recursivels/rendering.py +1 -1
- ommlds/minichain/lib/fs/tools/recursivels/running.py +1 -1
- ommlds/minichain/lib/todo/tools/write.py +2 -1
- ommlds/minichain/lib/todo/types.py +1 -1
- ommlds/minichain/llms/types.py +4 -0
- ommlds/minichain/metadata.py +56 -2
- ommlds/minichain/models/configs.py +2 -2
- ommlds/minichain/models/names.py +2 -0
- ommlds/minichain/registries/globals.py +18 -4
- ommlds/minichain/resources.py +49 -3
- ommlds/minichain/search.py +1 -1
- ommlds/minichain/services/README.md +154 -0
- ommlds/minichain/services/__init__.py +6 -2
- ommlds/minichain/services/_marshal.py +46 -10
- ommlds/minichain/services/_origclasses.py +11 -0
- ommlds/minichain/services/_typedvalues.py +8 -3
- ommlds/minichain/services/requests.py +73 -3
- ommlds/minichain/services/responses.py +73 -3
- ommlds/minichain/services/services.py +9 -0
- ommlds/minichain/standard.py +8 -0
- ommlds/minichain/stream/services.py +43 -17
- ommlds/minichain/text/applypatch.py +2 -1
- ommlds/minichain/text/toolparsing/llamacpp/types.py +1 -1
- ommlds/minichain/tokens/specials.py +1 -1
- ommlds/minichain/tools/execution/catalog.py +2 -2
- ommlds/minichain/tools/execution/errorhandling.py +36 -0
- ommlds/minichain/tools/execution/errors.py +2 -2
- ommlds/minichain/tools/execution/executors.py +1 -1
- ommlds/minichain/tools/fns.py +1 -1
- ommlds/minichain/tools/jsonschema.py +2 -2
- ommlds/minichain/tools/reflect.py +11 -7
- ommlds/minichain/tools/types.py +16 -19
- ommlds/minichain/vectors/_marshal.py +1 -1
- ommlds/minichain/vectors/embeddings.py +1 -1
- ommlds/minichain/wrappers/__init__.py +7 -0
- ommlds/minichain/wrappers/firstinwins.py +144 -0
- ommlds/minichain/wrappers/instrument.py +146 -0
- ommlds/minichain/wrappers/retry.py +168 -0
- ommlds/minichain/wrappers/services.py +98 -0
- ommlds/minichain/wrappers/stream.py +57 -0
- ommlds/nanochat/LICENSE +21 -0
- ommlds/nanochat/__init__.py +0 -0
- ommlds/nanochat/rustbpe/LICENSE +21 -0
- ommlds/nanochat/rustbpe/README.md +10 -0
- ommlds/nanochat/tokenizers.py +440 -0
- ommlds/specs/__init__.py +0 -0
- ommlds/specs/mcp/__init__.py +0 -0
- ommlds/specs/mcp/_marshal.py +23 -0
- ommlds/specs/mcp/clients.py +146 -0
- ommlds/specs/mcp/protocol.py +369 -0
- ommlds/tools/git.py +84 -64
- ommlds/tools/ocr.py +1 -1
- ommlds/wiki/analyze.py +2 -2
- ommlds/wiki/models.py +4 -4
- ommlds/wiki/text/mfh.py +9 -9
- ommlds/wiki/utils/xml.py +5 -5
- {ommlds-0.0.0.dev466.dist-info → ommlds-0.0.0.dev512.dist-info}/METADATA +28 -21
- ommlds-0.0.0.dev512.dist-info/RECORD +534 -0
- {ommlds-0.0.0.dev466.dist-info → ommlds-0.0.0.dev512.dist-info}/WHEEL +1 -1
- ommlds/cli/backends/standard.py +0 -20
- ommlds/cli/sessions/chat/backends/catalog.py +0 -56
- ommlds/cli/sessions/chat/backends/inject.py +0 -37
- ommlds/cli/sessions/chat/chat/state/inject.py +0 -40
- ommlds/cli/sessions/chat/chat/user/inject.py +0 -61
- ommlds/cli/sessions/chat/chat/user/interactive.py +0 -29
- ommlds/cli/sessions/chat/chat/user/oneshot.py +0 -25
- ommlds/cli/sessions/chat/chat/user/types.py +0 -15
- ommlds/cli/sessions/chat/driver.py +0 -43
- ommlds/cli/sessions/chat/tools/inject.py +0 -145
- ommlds/minichain/backends/impls/openai/format2.py +0 -210
- ommlds/minichain/content/materialize.py +0 -196
- ommlds/minichain/content/simple.py +0 -47
- ommlds/minichain/content/transforms/base.py +0 -46
- ommlds/minichain/content/transforms/interleave.py +0 -70
- ommlds/minichain/content/transforms/squeeze.py +0 -72
- ommlds/minichain/content/transforms/strings.py +0 -24
- ommlds/minichain/content/types.py +0 -43
- ommlds/minichain/stream/wrap.py +0 -62
- ommlds-0.0.0.dev466.dist-info/RECORD +0 -376
- /ommlds/{cli/sessions/chat/backends → backends/huggingface}/__init__.py +0 -0
- /ommlds/cli/{sessions/chat/chat → content}/__init__.py +0 -0
- /ommlds/cli/{sessions/chat/chat/ai → inputs}/__init__.py +0 -0
- /ommlds/cli/{sessions/chat/chat/state → rendering}/__init__.py +0 -0
- /ommlds/cli/sessions/chat/{chat/user → drivers}/__init__.py +0 -0
- /ommlds/cli/sessions/chat/{content → drivers/ai}/__init__.py +0 -0
- /ommlds/cli/sessions/chat/{chat → drivers}/ai/injection.py +0 -0
- /ommlds/cli/sessions/chat/{phases → drivers/events}/__init__.py +0 -0
- /ommlds/cli/sessions/chat/{rendering → drivers/phases}/__init__.py +0 -0
- /ommlds/cli/sessions/chat/{phases → drivers/phases}/inject.py +0 -0
- /ommlds/cli/sessions/chat/{phases → drivers/phases}/injection.py +0 -0
- /ommlds/cli/sessions/chat/{phases → drivers/phases}/manager.py +0 -0
- /ommlds/cli/sessions/chat/{phases → drivers/phases}/types.py +0 -0
- /ommlds/cli/sessions/chat/{tools → drivers/state}/__init__.py +0 -0
- /ommlds/{minichain/content/transforms → cli/sessions/chat/drivers/tools}/__init__.py +0 -0
- {ommlds-0.0.0.dev466.dist-info → ommlds-0.0.0.dev512.dist-info}/entry_points.txt +0 -0
- {ommlds-0.0.0.dev466.dist-info → ommlds-0.0.0.dev512.dist-info}/licenses/LICENSE +0 -0
- {ommlds-0.0.0.dev466.dist-info → ommlds-0.0.0.dev512.dist-info}/top_level.txt +0 -0
ommlds/nanochat/tokenizers.py ADDED
@@ -0,0 +1,440 @@
+# https://github.com/karpathy/nanochat/tree/9467d83cf23dcc9a9b4ca6e35103142f48a55b27
+"""
+BPE Tokenizer in the style of GPT-4.
+
+Two implementations are available:
+1) HuggingFace Tokenizer that can do both training and inference but is really confusing
+2) Our own RustBPE Tokenizer for training and tiktoken for efficient inference
+"""
+import copy
+import os
+import pickle
+import typing as ta
+
+from omlish import check
+from omlish import collections as col
+from omlish import lang
+
+
+with lang.auto_proxy_import(globals()):
+    import tiktoken
+    import tokenizers.decoders
+    import tokenizers.models
+    import tokenizers.pre_tokenizers
+    import tokenizers.trainers
+
+
+rustbpe: ta.Any = lang.proxy_import('.rustbpe', __package__)
+
+
+##
+
+
+SPECIAL_TOKENS: ta.Sequence[str] = [
+    # every document begins with the Beginning of Sequence (BOS) token that delimits documents
+    '<|bos|>',
+    # tokens below are only used during finetuning to render Conversations into token ids
+    '<|user_start|>',  # user messages
+    '<|user_end|>',
+    '<|assistant_start|>',  # assistant messages
+    '<|assistant_end|>',
+    '<|python_start|>',  # assistant invokes python REPL tool
+    '<|python_end|>',
+    '<|output_start|>',  # python REPL outputs back to assistant
+    '<|output_end|>',
+]
+
+
+# NOTE: this split pattern deviates from GPT-4 in that we use \p{N}{1,2} instead of \p{N}{1,3}
+# I did this because I didn't want to "waste" too many tokens on numbers for smaller vocab sizes.
+# I haven't validated that this is actually a good idea, TODO.
+SPLIT_PATTERN = (
+    r"'(?i:[sdmt]|ll|ve|re)|"
+    r"[^\r\n\p{L}\p{N}]?+\p{L}+|"
+    r"\p{N}{1,2}|"
+    r" ?[^\s\p{L}\p{N}]++[\r\n]*|"
+    r"\s*[\r\n]|"
+    r"\s+(?!\S)|"
+    r"\s+"
+)
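
For orientation, a minimal sketch (not part of the diff) of how this split pattern behaves. It uses the third-party regex package, since the possessive quantifiers (?+, ++) are not supported by the stdlib re module; note how \p{N}{1,2} chunks digit runs into pairs:

    import regex  # pip install regex; stdlib re lacks possessive quantifiers

    SPLIT_PATTERN = (
        r"'(?i:[sdmt]|ll|ve|re)|"
        r"[^\r\n\p{L}\p{N}]?+\p{L}+|"
        r"\p{N}{1,2}|"
        r" ?[^\s\p{L}\p{N}]++[\r\n]*|"
        r"\s*[\r\n]|"
        r"\s+(?!\S)|"
        r"\s+"
    )

    print(regex.findall(SPLIT_PATTERN, "Hello world, it's 2024!"))
    # ['Hello', ' world', ',', " it", "'s", ' ', '20', '24', '!']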
+
+
+##
+# Generic GPT-4-style tokenizer based on HuggingFace Tokenizer
+
+
+class HuggingFaceTokenizer:
+    """Light wrapper around HuggingFace Tokenizer for some utilities"""
+
+    def __init__(self, tokenizer):
+        self.tokenizer = tokenizer
+
+    @classmethod
+    def from_pretrained(cls, hf_path):
+        # init from a HuggingFace pretrained tokenizer (e.g. "gpt2")
+        tokenizer = tokenizers.Tokenizer.from_pretrained(hf_path)
+        return cls(tokenizer)
+
+    @classmethod
+    def from_directory(cls, tokenizer_dir):
+        # init from a local directory on disk (e.g. "out/tokenizer")
+        tokenizer_path = os.path.join(tokenizer_dir, 'tokenizer.json')
+        tokenizer = tokenizers.Tokenizer.from_file(tokenizer_path)
+        return cls(tokenizer)
+
+    @classmethod
+    def train_from_iterator(
+            cls,
+            text_iterator,
+            vocab_size,
+            *,
+            split_pattern=SPLIT_PATTERN,
+            special_tokens=SPECIAL_TOKENS,
+    ):
+        # train from an iterator of text
+        # Configure the HuggingFace Tokenizer
+        tokenizer = tokenizers.Tokenizer(tokenizers.models.BPE(
+            byte_fallback=True,  # needed!
+            unk_token=None,
+            fuse_unk=False,
+        ))
+
+        # Normalizer: None
+        tokenizer.normalizer = None
+
+        # Pre-tokenizer: GPT-4 style
+        # the regex pattern used by GPT-4 to split text into groups before BPE
+        # NOTE: The pattern was changed from \p{N}{1,3} to \p{N}{1,2} because I suspect it is harmful to
+        # very small models and smaller vocab sizes, because it is a little bit wasteful in the token space.
+        # (but I haven't validated this! TODO)
+        gpt4_split_regex = tokenizers.Regex(split_pattern)  # huggingface demands that you wrap it in Regex!!
+
+        tokenizer.pre_tokenizer = tokenizers.pre_tokenizers.Sequence([
+            tokenizers.pre_tokenizers.Split(pattern=gpt4_split_regex, behavior='isolated', invert=False),
+            tokenizers.pre_tokenizers.ByteLevel(add_prefix_space=False, use_regex=False),
+        ])
+
+        # Decoder: ByteLevel (it pairs together with the ByteLevel pre-tokenizer)
+        tokenizer.decoder = tokenizers.decoders.ByteLevel()
+
+        # Post-processor: None
+        tokenizer.post_processor = None
+
+        # Trainer: BPE
+        trainer = tokenizers.trainers.BpeTrainer(
+            vocab_size=vocab_size,
+            show_progress=True,
+            min_frequency=0,  # no minimum frequency
+            initial_alphabet=tokenizers.pre_tokenizers.ByteLevel.alphabet(),
+            special_tokens=special_tokens,
+        )
+
+        # Kick off the training
+        tokenizer.train_from_iterator(text_iterator, trainer)
+
+        return cls(tokenizer)
+
+    def encode_ordinary(self, text):
+        ids = self.tokenizer.encode(text, add_special_tokens=False).ids
+        return ids
+
+    def get_vocab_size(self):
+        return self.tokenizer.get_vocab_size()
+
+    def get_special_tokens(self):
+        special_tokens_map = self.tokenizer.get_added_tokens_decoder()
+        special_tokens = [w.content for w in special_tokens_map.values()]
+        return special_tokens
+
+    def id_to_token(self, id):  # noqa
+        return self.tokenizer.id_to_token(id)
+
+    def _encode_one(self, text, prepend=None, append=None):
+        # encode a single string
+        # prepend/append can be either a string of a special token or a token id directly.
+        check.isinstance(text, str)
+        ids = []
+        if prepend is not None:
+            prepend_id = prepend if isinstance(prepend, int) else self.encode_special(prepend)
+            ids.append(prepend_id)
+        ids.extend(self.tokenizer.encode(text, add_special_tokens=False).ids)
+        if append is not None:
+            append_id = append if isinstance(append, int) else self.encode_special(append)
+            ids.append(append_id)
+        return ids
+
+    def encode_special(self, text):
+        # encode a single special token via exact match
+        return self.tokenizer.token_to_id(text)
+
+    def get_bos_token_id(self):
+        bos = self.encode_special('<|bos|>')
+        return bos
+
+    def encode(self, text, *args, **kwargs):
+        if isinstance(text, str):
+            return self._encode_one(text, *args, **kwargs)
+        elif isinstance(text, list):
+            return [self._encode_one(t, *args, **kwargs) for t in text]
+        else:
+            raise ValueError(f'Invalid input type: {type(text)}')  # noqa
+
+    def __call__(self, *args, **kwargs):
+        return self.encode(*args, **kwargs)
+
+    def decode(self, ids):
+        return self.tokenizer.decode(ids, skip_special_tokens=False)
+
+    def save(self, tokenizer_dir):
+        # save the tokenizer to disk
+        os.makedirs(tokenizer_dir, exist_ok=True)
+        tokenizer_path = os.path.join(tokenizer_dir, 'tokenizer.json')
+        self.tokenizer.save(tokenizer_path)
+        print(f'Saved tokenizer to {tokenizer_path}')
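
A minimal usage sketch for this class (not part of the diff; assumes the tokenizers package is installed and the corpus/vocab size are illustrative):

    corpus = ['hello hello world', 'tokenizers are fun']
    tok = HuggingFaceTokenizer.train_from_iterator(iter(corpus), vocab_size=300)

    ids = tok.encode('hello world', prepend='<|bos|>')
    print(tok.decode(ids))  # the byte-level BPE should round-trip: '<|bos|>hello world'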
+
+
+##
+# Tokenizer based on rustbpe + tiktoken combo
+
+
+class RustBPETokenizer:
+    """Light wrapper around tiktoken (for efficient inference) but train with rustbpe"""
+
+    def __init__(self, enc, bos_token):
+        self.enc = enc
+        self.bos_token_id = self.encode_special(bos_token)
+
+    @classmethod
+    def train_from_iterator(cls, text_iterator, vocab_size):
+        # 1) train using rustbpe
+        tokenizer = rustbpe.Tokenizer()
+        # the special tokens are inserted later in __init__, we don't train them here
+        vocab_size_no_special = vocab_size - len(SPECIAL_TOKENS)
+        check.state(vocab_size_no_special >= 256, f'vocab_size_no_special must be at least 256, got {vocab_size_no_special}')  # noqa
+        tokenizer.train_from_iterator(text_iterator, vocab_size_no_special, pattern=SPLIT_PATTERN)
+        # 2) construct the associated tiktoken encoding for inference
+        pattern = tokenizer.get_pattern()
+        mergeable_ranks_list = tokenizer.get_mergeable_ranks()
+        mergeable_ranks = {bytes(k): v for k, v in mergeable_ranks_list}
+        tokens_offset = len(mergeable_ranks)
+        special_tokens = {name: tokens_offset + i for i, name in enumerate(SPECIAL_TOKENS)}
+        enc = tiktoken.Encoding(
+            name='rustbpe',
+            pat_str=pattern,
+            mergeable_ranks=mergeable_ranks,  # dict[bytes, int] (token bytes -> merge priority rank)
+            special_tokens=special_tokens,  # dict[str, int] (special token name -> token id)
+        )
+        return cls(enc, '<|bos|>')
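
To make the id-space accounting above concrete (hypothetical numbers, not from the diff): the mergeable ranks cover ids 0..vocab_size_no_special-1, and the special tokens are appended after them, so the final vocabulary lands exactly on vocab_size:

    vocab_size = 4096
    n_special = len(SPECIAL_TOKENS)                  # 9
    vocab_size_no_special = vocab_size - n_special   # 4087 ids: 256 raw bytes + 3831 learned merges
    # special tokens then occupy ids 4087..4095, so enc.n_vocab == vocab_size == 4096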
+
+    @classmethod
+    def from_directory(cls, tokenizer_dir):
+        pickle_path = os.path.join(tokenizer_dir, 'tokenizer.pkl')
+        with open(pickle_path, 'rb') as f:
+            enc = pickle.load(f)  # noqa
+        return cls(enc, '<|bos|>')
+
+    @classmethod
+    def from_pretrained(cls, tiktoken_name):
+        # https://github.com/openai/tiktoken/blob/eedc8563/tiktoken_ext/openai_public.py
+        enc = tiktoken.get_encoding(tiktoken_name)
+        # tiktoken calls the special document delimiter token "<|endoftext|>"
+        # yes this is confusing because this token is almost always PREPENDED to the beginning of the document
+        # it most often is used to signal the start of a new sequence to the LLM during inference etc.
+        # so in nanoChat we always use "<|bos|>" short for "beginning of sequence", but historically it is often called
+        # "<|endoftext|>".
+        return cls(enc, '<|endoftext|>')
+
+    def get_vocab_size(self):
+        return self.enc.n_vocab
+
+    def get_special_tokens(self):
+        return self.enc.special_tokens_set
+
+    def id_to_token(self, id):  # noqa
+        return self.enc.decode([id])
+
+    @col.cache.cache(max_size=32)
+    def encode_special(self, text):
+        return self.enc.encode_single_token(text)
+
+    def get_bos_token_id(self):
+        return self.bos_token_id
+
+    def encode(self, text, prepend=None, append=None, num_threads=8):
+        # text can be either a string or a list of strings
+
+        if prepend is not None:
+            prepend_id = prepend if isinstance(prepend, int) else self.encode_special(prepend)
+        if append is not None:
+            append_id = append if isinstance(append, int) else self.encode_special(append)
+
+        if isinstance(text, str):
+            ids = self.enc.encode_ordinary(text)
+            if prepend is not None:
+                ids.insert(0, prepend_id)  # TODO: slightly inefficient here? :( hmm
+            if append is not None:
+                ids.append(append_id)
+
+        elif isinstance(text, list):
+            ids = self.enc.encode_ordinary_batch(text, num_threads=num_threads)
+            if prepend is not None:
+                for ids_row in ids:
+                    ids_row.insert(0, prepend_id)  # TODO: same
+            if append is not None:
+                for ids_row in ids:
+                    ids_row.append(append_id)
+
+        else:
+            raise ValueError(f'Invalid input type: {type(text)}')  # noqa
+
+        return ids
+
+    def __call__(self, *args, **kwargs):
+        return self.encode(*args, **kwargs)
+
+    def decode(self, ids):
+        return self.enc.decode(ids)
+
+    def save(self, tokenizer_dir):
+        # save the encoding object to disk
+        os.makedirs(tokenizer_dir, exist_ok=True)
+        pickle_path = os.path.join(tokenizer_dir, 'tokenizer.pkl')
+        with open(pickle_path, 'wb') as f:
+            pickle.dump(self.enc, f)
+        print(f'Saved tokenizer encoding to {pickle_path}')
+
+    def render_conversation(self, conversation, max_tokens=2048):
+        """
+        Tokenize a single Chat conversation (which we call a "doc" or "document" here).
+
+        Returns:
+        - ids: list[int] is a list of token ids of this rendered conversation
+        - mask: list[int] of same length, mask = 1 for tokens that the Assistant is expected to train on.
+        """
+
+        # ids, masks that we will return and a helper function to help build them up.
+        ids, mask = [], []
+
+        def add_tokens(token_ids, mask_val):
+            if isinstance(token_ids, int):
+                token_ids = [token_ids]
+            ids.extend(token_ids)
+            mask.extend([mask_val] * len(token_ids))
+
+        # sometimes the first message is a system message...
+        # => just merge it with the second (user) message
+        if conversation['messages'][0]['role'] == 'system':
+            # some conversation surgery is necessary here for now...
+            conversation = copy.deepcopy(conversation)  # avoid mutating the original
+            messages = conversation['messages']
+            check.state(messages[1]['role'] == 'user', 'System message must be followed by a user message')
+            messages[1]['content'] = messages[0]['content'] + '\n\n' + messages[1]['content']
+            messages = messages[1:]
+        else:
+            messages = conversation['messages']
+        check.state(len(messages) >= 1, f'Conversation has less than 1 message: {messages}')
+
+        # fetch all the special tokens we need
+        bos = self.get_bos_token_id()
+        user_start, user_end = self.encode_special('<|user_start|>'), self.encode_special('<|user_end|>')
+        assistant_start, assistant_end = self.encode_special('<|assistant_start|>'), self.encode_special('<|assistant_end|>')  # noqa
+        python_start, python_end = self.encode_special('<|python_start|>'), self.encode_special('<|python_end|>')
+        output_start, output_end = self.encode_special('<|output_start|>'), self.encode_special('<|output_end|>')
+
+        # now we can tokenize the conversation
+        add_tokens(bos, 0)
+        for i, message in enumerate(messages):
+            # some sanity checking here around assumptions, to prevent footguns
+            must_be_from = 'user' if i % 2 == 0 else 'assistant'
+            check.state(
+                message['role'] == must_be_from,
+                f"Message {i} is from {message['role']} but should be from {must_be_from}",
+            )
+
+            # content can be either a simple string or a list of parts (e.g. containing tool calls)
+            content = message['content']
+
+            if message['role'] == 'user':
+                check.isinstance(content, str)  # user messages are simply expected to be strings
+                value_ids = self.encode(content)
+                add_tokens(user_start, 0)
+                add_tokens(value_ids, 0)
+                add_tokens(user_end, 0)
+
+            elif message['role'] == 'assistant':
+                add_tokens(assistant_start, 0)
+
+                if isinstance(content, str):
+                    # simple string => simply add the tokens
+                    value_ids = self.encode(content)
+                    add_tokens(value_ids, 1)
+
+                elif isinstance(content, list):
+                    for part in content:
+                        value_ids = self.encode(part['text'])
+
+                        if part['type'] == 'text':
+                            # string part => simply add the tokens
+                            add_tokens(value_ids, 1)
+
+                        elif part['type'] == 'python':
+                            # python tool call => add the tokens inside <|python_start|> and <|python_end|>
+                            add_tokens(python_start, 1)
+                            add_tokens(value_ids, 1)
+                            add_tokens(python_end, 1)
+
+                        elif part['type'] == 'python_output':
+                            # python output => add the tokens inside <|output_start|> and <|output_end|>
+                            # none of these tokens are supervised because the tokens come from Python at test time
+                            add_tokens(output_start, 0)
+                            add_tokens(value_ids, 0)
+                            add_tokens(output_end, 0)
+
+                        else:
+                            raise ValueError(f"Unknown part type: {part['type']}")
+
+                else:
+                    raise ValueError(f'Unknown content type: {type(content)}')
+
+                add_tokens(assistant_end, 1)
+
+        # truncate to max_tokens tokens MAX (helps prevent OOMs)
+        ids = ids[:max_tokens]
+        mask = mask[:max_tokens]
+        return ids, mask
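
A sketch of what render_conversation produces for a two-turn conversation (illustrative, assuming tok is a RustBPETokenizer trained as above); only the assistant's own tokens, plus <|assistant_end|>, get mask 1:

    conversation = {
        'messages': [
            {'role': 'user', 'content': 'Hi'},
            {'role': 'assistant', 'content': 'Hello!'},
        ],
    }
    ids, mask = tok.render_conversation(conversation)
    # ids:  <|bos|> <|user_start|> Hi.. <|user_end|> <|assistant_start|> Hello!.. <|assistant_end|>
    # mask:    0          0         0..       0               0             1..           1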
+
+    def visualize_tokenization(self, ids, mask, with_token_id=False):
+        """Small helper function useful in debugging: visualize the tokenization of render_conversation"""
+
+        red = '\033[91m'
+        green = '\033[92m'
+        reset = '\033[0m'
+        gray = '\033[90m'
+        tokens = []
+        for i, (token_id, mask_val) in enumerate(zip(ids, mask)):  # noqa
+            token_str = self.decode([token_id])
+            color = green if mask_val == 1 else red
+            tokens.append(f'{color}{token_str}{reset}')
+            if with_token_id:
+                tokens.append(f'{gray}({token_id}){reset}')
+        return '|'.join(tokens)
+
+    def render_for_completion(self, conversation):
+        """
+        Used during Reinforcement Learning. In that setting, we want to render the conversation priming the Assistant
+        for a completion. Unlike the Chat SFT case, we don't need to return the mask.
+        """
+
+        # We have some surgery to do: we need to pop the last message (of the Assistant)
+        conversation = copy.deepcopy(conversation)  # avoid mutating the original
+        messages = conversation['messages']
+        check.state(messages[-1]['role'] == 'assistant', 'Last message must be from the Assistant')
+        messages.pop()  # remove the last message (of the Assistant) inplace
+
+        # Now tokenize the conversation
+        ids, mask = self.render_conversation(conversation)
+
+        # Finally, to prime the Assistant for a completion, append the Assistant start token
+        assistant_start = self.encode_special('<|assistant_start|>')
+        ids.append(assistant_start)
+        return ids
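And the RL priming path, continuing the same sketch: the trailing assistant message is dropped and a bare <|assistant_start|> is appended so the model completes from there:

    ids = tok.render_for_completion(conversation)
    assert ids[-1] == tok.encode_special('<|assistant_start|>')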
ommlds/specs/__init__.py ADDED
File without changes

ommlds/specs/mcp/__init__.py ADDED
File without changes

ommlds/specs/mcp/_marshal.py ADDED
@@ -0,0 +1,23 @@
+from omlish import lang
+from omlish import marshal as msh
+
+from .protocol import ContentBlock
+
+
+##
+
+
+@lang.static_init
+def _install_standard_marshaling() -> None:
+    for root_cls, tag_field in [
+        (ContentBlock, 'type'),
+    ]:
+        msh.install_standard_factories(*msh.standard_polymorphism_factories(
+            msh.polymorphism_from_subclasses(
+                root_cls,
+                naming=msh.Naming.SNAKE,
+                strip_suffix=msh.AutoStripSuffix,
+            ),
+            msh.FieldTypeTagging(tag_field),
+            unions='partial',
+        ))
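For orientation (an illustration, not code from the diff): with FieldTypeTagging('type') and snake-case naming, concrete ContentBlock subclasses round-trip through a 'type' discriminator field, matching the MCP wire format. A hypothetical TextContentBlock would marshal roughly as:

    mv = msh.marshal(block)                  # -> {'type': 'text', 'text': '...'}
    block = msh.unmarshal(mv, ContentBlock)  # dispatches on 'type' back to the concrete subclass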
ommlds/specs/mcp/clients.py ADDED
@@ -0,0 +1,146 @@
+import contextlib
+import subprocess
+import typing as ta
+
+import anyio.abc
+
+from omlish import check
+from omlish import dataclasses as dc
+from omlish import marshal as msh
+from omlish.asyncs import anyio as aiu
+from omlish.specs import jsonrpc as jr
+
+from . import protocol as pt
+
+
+##
+
+
+class McpServerConnection:
+    def __init__(
+            self,
+            tg: anyio.abc.TaskGroup,
+            stream: anyio.abc.ByteStream,
+            *,
+            default_timeout: float | None = 30.,
+    ) -> None:
+        super().__init__()
+
+        self._conn = jr.Connection(
+            tg,
+            stream,
+            request_handler=self._handle_client_request,
+            notification_handler=self._handle_client_notification,
+            default_timeout=default_timeout,
+        )
+
+    #
+
+    @classmethod
+    def from_process(
+            cls,
+            tg: anyio.abc.TaskGroup,
+            proc: anyio.abc.Process,
+            **kwargs: ta.Any,
+    ) -> 'McpServerConnection':
+        return cls(
+            tg,
+            aiu.StapledByteStream(
+                check.not_none(proc.stdin),
+                check.not_none(proc.stdout),
+            ),
+            **kwargs,
+        )
+
+    @classmethod
+    def open_process(
+            cls,
+            tg: anyio.abc.TaskGroup,
+            cmd: ta.Sequence[str],
+            open_kwargs: ta.Mapping[str, ta.Any] | None = None,
+            **kwargs: ta.Any,
+    ) -> ta.AsyncContextManager[tuple[anyio.abc.Process, 'McpServerConnection']]:
+        @contextlib.asynccontextmanager
+        async def inner():
+            async with await anyio.open_process(
+                cmd,
+                stdin=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                **open_kwargs or {},
+            ) as proc:
+                async with cls.from_process(
+                    tg,
+                    proc,
+                    **kwargs,
+                ) as client:
+                    yield (proc, client)
+
+        return inner()
+
+    #
+
+    async def __aenter__(self) -> ta.Self:
+        await self._conn.__aenter__()
+        return self
+
+    async def __aexit__(self, et, e, tb) -> None:
+        await self._conn.__aexit__(et, e, tb)
+
+    #
+
+    async def _handle_client_request(self, _client: jr.Connection, req: jr.Request) -> None:
+        pass
+
+    async def _handle_client_notification(self, _client: jr.Connection, no: jr.Request) -> None:
+        pass
+
+    #
+
+    async def request(self, req: pt.ClientRequest[pt.ClientResultT]) -> pt.ClientResultT:
+        res_cls = pt.MESSAGE_TYPES_BY_JSON_RPC_METHOD_NAME[pt.ClientResult][req.json_rpc_method_name]  # type: ignore[type-abstract]  # noqa
+        req_mv = msh.marshal(req)
+        res_mv = await self._conn.request(req.json_rpc_method_name, req_mv)  # type: ignore[arg-type]
+        res = msh.unmarshal(res_mv, res_cls)
+        return ta.cast(pt.ClientResultT, res)
+
+    async def notify(self, no: pt.Notification) -> None:
+        no_mv = msh.marshal(no)
+        await self._conn.notify(no.json_rpc_method_name, no_mv)  # type: ignore[arg-type]
+
+    #
+
+    async def yield_cursor_request(
+            self,
+            req: pt.CursorClientRequest[pt.CursorClientResultT],
+    ) -> ta.AsyncGenerator[pt.CursorClientResultT]:
+        check.none(req.cursor)
+
+        cursor: str | None = None
+        while True:
+            res = await self.request(dc.replace(req, cursor=cursor))  # noqa
+            yield res
+
+            if (cursor := res.next_cursor) is None:
+                break
+
+    async def list_cursor_request(
+            self,
+            req: pt.CursorClientRequest[pt.CursorClientResultT],
+    ) -> list[pt.CursorClientResultT]:
+        return [res async for res in self.yield_cursor_request(req)]
+
+    #
+
+    async def list_tools(self) -> list[pt.Tool]:
+        return [
+            tool
+            async for res in self.yield_cursor_request(pt.ListToolsRequest())
+            for tool in res.tools
+        ]
+
+    async def list_prompts(self) -> list[pt.Prompt]:
+        return [
+            prompt
+            async for res in self.yield_cursor_request(pt.ListPromptsRequest())
+            for prompt in res.prompts
+        ]