llguidance-0.7.26.tar.gz → llguidance-0.7.27.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. {llguidance-0.7.26 → llguidance-0.7.27}/CHANGELOG.md +5 -0
  2. {llguidance-0.7.26 → llguidance-0.7.27}/Cargo.lock +80 -9
  3. {llguidance-0.7.26 → llguidance-0.7.27}/Cargo.toml +4 -1
  4. {llguidance-0.7.26 → llguidance-0.7.27}/PKG-INFO +1 -1
  5. {llguidance-0.7.26 → llguidance-0.7.27}/parser/Cargo.toml +1 -1
  6. {llguidance-0.7.26 → llguidance-0.7.27}/parser/llguidance.h +7 -0
  7. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/parser.rs +57 -48
  8. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/ffi.rs +11 -0
  9. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/stop_controller.rs +10 -5
  10. {llguidance-0.7.26 → llguidance-0.7.27}/pyproject.toml +1 -1
  11. llguidance-0.7.27/python/llguidance/tiktoken.py +34 -0
  12. {llguidance-0.7.26 → llguidance-0.7.27}/python/torch_tests/test_llamacpp.py +4 -0
  13. llguidance-0.7.27/python/torch_tests/test_tiktoken.py +30 -0
  14. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/Cargo.toml +2 -1
  15. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/src/py.rs +36 -1
  16. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/bump.py +1 -1
  17. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/ci-publish.py +18 -16
  18. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/Cargo.toml +1 -1
  19. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/src/toktree.rs +62 -0
  20. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie_hf_downloader/Cargo.toml +1 -1
  21. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie_hf_tokenizers/Cargo.toml +1 -1
  22. llguidance-0.7.27/toktrie_tiktoken/Cargo.toml +15 -0
  23. llguidance-0.7.27/toktrie_tiktoken/LICENSE +21 -0
  24. llguidance-0.7.27/toktrie_tiktoken/src/lib.rs +103 -0
  25. {llguidance-0.7.26 → llguidance-0.7.27}/.github/workflows/rust.yml +0 -0
  26. {llguidance-0.7.26 → llguidance-0.7.27}/.github/workflows/wheels.yml +0 -0
  27. {llguidance-0.7.26 → llguidance-0.7.27}/.gitignore +0 -0
  28. {llguidance-0.7.26 → llguidance-0.7.27}/CODE_OF_CONDUCT.md +0 -0
  29. {llguidance-0.7.26 → llguidance-0.7.27}/LICENSE +0 -0
  30. {llguidance-0.7.26 → llguidance-0.7.27}/README.md +0 -0
  31. {llguidance-0.7.26 → llguidance-0.7.27}/SECURITY.md +0 -0
  32. {llguidance-0.7.26 → llguidance-0.7.27}/SUPPORT.md +0 -0
  33. {llguidance-0.7.26 → llguidance-0.7.27}/c_sample/Makefile +0 -0
  34. {llguidance-0.7.26 → llguidance-0.7.27}/c_sample/README.md +0 -0
  35. {llguidance-0.7.26 → llguidance-0.7.27}/c_sample/c_sample.cpp +0 -0
  36. {llguidance-0.7.26 → llguidance-0.7.27}/docs/fast_forward.md +0 -0
  37. {llguidance-0.7.26 → llguidance-0.7.27}/docs/json_schema.md +0 -0
  38. {llguidance-0.7.26 → llguidance-0.7.27}/docs/mask_plot.png +0 -0
  39. {llguidance-0.7.26 → llguidance-0.7.27}/docs/optimizations.md +0 -0
  40. {llguidance-0.7.26 → llguidance-0.7.27}/docs/special_tokens.md +0 -0
  41. {llguidance-0.7.26 → llguidance-0.7.27}/docs/syntax.md +0 -0
  42. {llguidance-0.7.26 → llguidance-0.7.27}/docs/toktrie.md +0 -0
  43. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/Cargo.toml +0 -0
  44. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/expected_maskbench.json +0 -0
  45. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/jstats.sh +0 -0
  46. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/scripts/split-stats.sh +0 -0
  47. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/scripts/split_plot.py +0 -0
  48. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/src/json_stats.rs +0 -0
  49. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/src/lib.rs +0 -0
  50. {llguidance-0.7.26 → llguidance-0.7.27}/json_stats/src/stats.rs +0 -0
  51. {llguidance-0.7.26 → llguidance-0.7.27}/parser/LICENSE +0 -0
  52. {llguidance-0.7.26 → llguidance-0.7.27}/parser/README.md +0 -0
  53. {llguidance-0.7.26 → llguidance-0.7.27}/parser/build.rs +0 -0
  54. {llguidance-0.7.26 → llguidance-0.7.27}/parser/cbindgen.toml +0 -0
  55. {llguidance-0.7.26 → llguidance-0.7.27}/parser/grammars/character.json +0 -0
  56. {llguidance-0.7.26 → llguidance-0.7.27}/parser/grammars/json.json +0 -0
  57. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/api.rs +0 -0
  58. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/constraint.rs +0 -0
  59. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/from_guidance.rs +0 -0
  60. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/grammar.rs +0 -0
  61. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/lexer.rs +0 -0
  62. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/lexerspec.rs +0 -0
  63. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/mod.rs +0 -0
  64. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/perf.rs +0 -0
  65. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/regexvec.rs +0 -0
  66. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/earley/slicer.rs +0 -0
  67. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/factory.rs +0 -0
  68. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/ffi_par.rs +0 -0
  69. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/grammar_builder.rs +0 -0
  70. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/README.md +0 -0
  71. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/compiler.rs +0 -0
  72. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/context_ref.rs +0 -0
  73. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/context_simple/context.rs +0 -0
  74. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/context_simple/draft.rs +0 -0
  75. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/context_simple/mod.rs +0 -0
  76. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/formats.rs +0 -0
  77. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/mod.rs +0 -0
  78. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/numeric.rs +0 -0
  79. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/schema.rs +0 -0
  80. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json/shared_context.rs +0 -0
  81. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/json_validation.rs +0 -0
  82. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lark/README.md +0 -0
  83. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lark/ast.rs +0 -0
  84. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lark/common.rs +0 -0
  85. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lark/compiler.rs +0 -0
  86. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lark/lexer.rs +0 -0
  87. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lark/mod.rs +0 -0
  88. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lark/parser.rs +0 -0
  89. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/lib.rs +0 -0
  90. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/logging.rs +0 -0
  91. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/matcher.rs +0 -0
  92. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/output.rs +0 -0
  93. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/panic_utils.rs +0 -0
  94. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/regex_rewrite.rs +0 -0
  95. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/substring.rs +0 -0
  96. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/tokenizer_json.rs +0 -0
  97. {llguidance-0.7.26 → llguidance-0.7.27}/parser/src/tokenparser.rs +0 -0
  98. {llguidance-0.7.26 → llguidance-0.7.27}/plan.md +0 -0
  99. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/__init__.py +0 -0
  100. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/_grammar_from.py +0 -0
  101. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/_lib.pyi +0 -0
  102. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/_struct_tag.py +0 -0
  103. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/_tokenizer.py +0 -0
  104. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/_util.py +0 -0
  105. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/cli.py +0 -0
  106. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/gbnf_to_lark.py +0 -0
  107. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/hf.py +0 -0
  108. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/llamacpp.py +0 -0
  109. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/mlx.py +0 -0
  110. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/numpy.py +0 -0
  111. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/py.typed +0 -0
  112. {llguidance-0.7.26 → llguidance-0.7.27}/python/llguidance/torch.py +0 -0
  113. {llguidance-0.7.26 → llguidance-0.7.27}/python/mypy.ini +0 -0
  114. {llguidance-0.7.26 → llguidance-0.7.27}/python/torch_tests/__init__.py +0 -0
  115. {llguidance-0.7.26 → llguidance-0.7.27}/python/torch_tests/test_bitmask.py +0 -0
  116. {llguidance-0.7.26 → llguidance-0.7.27}/python/torch_tests/test_hf.py +0 -0
  117. {llguidance-0.7.26 → llguidance-0.7.27}/python/torch_tests/test_matcher.py +0 -0
  118. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/src/lib.rs +0 -0
  119. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/src/llamatokenizer.rs +0 -0
  120. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/src/llinterpreter.rs +0 -0
  121. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/src/llmatcher.rs +0 -0
  122. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/src/parserlimits.rs +0 -0
  123. {llguidance-0.7.26 → llguidance-0.7.27}/python_ext/src/pyjson.rs +0 -0
  124. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/Cargo.toml +0 -0
  125. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/README.md +0 -0
  126. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/cli.sh +0 -0
  127. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/blog.sample.json +0 -0
  128. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/blog.schema.json +0 -0
  129. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/blog.schema.ll.json +0 -0
  130. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/README.md +0 -0
  131. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/arithmetic.gbnf +0 -0
  132. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/c.gbnf +0 -0
  133. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/chess.gbnf +0 -0
  134. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/english.gbnf +0 -0
  135. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/japanese.gbnf +0 -0
  136. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/json.gbnf +0 -0
  137. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/json_arr.gbnf +0 -0
  138. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/list.gbnf +0 -0
  139. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/from-llama.cpp/vllm-sql.gbnf +0 -0
  140. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/lark.lark +0 -0
  141. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/rfc.lark +0 -0
  142. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/rfc.xml +0 -0
  143. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/data/ulysses.md +0 -0
  144. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/gtest.sh +0 -0
  145. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/lark.sh +0 -0
  146. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/run.sh +0 -0
  147. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/src/lib.rs +0 -0
  148. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/src/minimal.rs +0 -0
  149. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/src/sample_parser.rs +0 -0
  150. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/tests/test_lark.rs +0 -0
  151. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/tests/test_ll.rs +0 -0
  152. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/tests/test_raw_parser.rs +0 -0
  153. {llguidance-0.7.26 → llguidance-0.7.27}/sample_parser/tests/test_stop.rs +0 -0
  154. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/annotate_asm.js +0 -0
  155. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/cbindgen.sh +0 -0
  156. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/checklinks.py +0 -0
  157. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/checklinks.sh +0 -0
  158. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/disasm.sh +0 -0
  159. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/gbnf_to_lark.py +0 -0
  160. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/gen-testcase.py +0 -0
  161. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/git-version.sh +0 -0
  162. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/install-deps.sh +0 -0
  163. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/jsonschema-stats.js +0 -0
  164. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/remote-guidance-test.sh +0 -0
  165. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/rust-size.js +0 -0
  166. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/rust_size.py +0 -0
  167. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/test-guidance.sh +0 -0
  168. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/tokenizer_test.py +0 -0
  169. {llguidance-0.7.26 → llguidance-0.7.27}/scripts/update-git.py +0 -0
  170. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/LICENSE +0 -0
  171. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/README.md +0 -0
  172. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/src/bytes.rs +0 -0
  173. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/src/lib.rs +0 -0
  174. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/src/recognizer.rs +0 -0
  175. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/src/rng.rs +0 -0
  176. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/src/svob.rs +0 -0
  177. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/src/tokenv.rs +0 -0
  178. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie/tests/test_svob.rs +0 -0
  179. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie_hf_downloader/LICENSE +0 -0
  180. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie_hf_downloader/src/lib.rs +0 -0
  181. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie_hf_tokenizers/LICENSE +0 -0
  182. {llguidance-0.7.26 → llguidance-0.7.27}/toktrie_hf_tokenizers/src/lib.rs +0 -0
--- llguidance-0.7.26/CHANGELOG.md
+++ llguidance-0.7.27/CHANGELOG.md
@@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file. Dates are d
 
 If a release doesn't introduce any interesting changes (build fixes etc.), it's skipped.
 
+#### [0.7.27](https://github.com/guidance-ai/llguidance/compare/v0.7.26...0.7.27) 2025-06-04
+
+- add toktrie_tiktoken and llguidance.tiktoken.lltokenizer_from_encoding [`#154`](https://github.com/guidance-ai/llguidance/issues/154)
+- implement clone on StopController [`#185`](https://github.com/guidance-ai/llguidance/issues/185)
+
 #### [0.7.26](https://github.com/guidance-ai/llguidance/compare/v0.7.25...0.7.26) 2025-05-30
 
 - add support for & and ~ in lark regexes [`96fcee3`](https://github.com/guidance-ai/llguidance/commit/96fcee373697b57bead94d1bc06c17cf1c6134e4)
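The headline addition in 0.7.27 is the tiktoken integration. A minimal usage sketch, mirroring the new `python/llguidance/tiktoken.py` and `python/torch_tests/test_tiktoken.py` files shown later in this diff (the `o200k_base` encoding is just an example choice):

```python
import tiktoken
import llguidance.tiktoken

# Building the tokenizer takes on the order of a second (per the docstring
# added in tiktoken.py below), so cache the result.
enc = tiktoken.get_encoding("o200k_base")
llt = llguidance.tiktoken.lltokenizer_from_encoding(enc)

toks = llt.tokenize_str("Hello world!")
assert llt.decode_str(toks) == "Hello world!"
```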
--- llguidance-0.7.26/Cargo.lock
+++ llguidance-0.7.27/Cargo.lock
@@ -135,15 +135,30 @@ version = "0.22.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
 
+[[package]]
+name = "bit-set"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1"
+dependencies = [
+ "bit-vec 0.6.3",
+]
+
 [[package]]
 name = "bit-set"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3"
 dependencies = [
- "bit-vec",
+ "bit-vec 0.8.0",
 ]
 
+[[package]]
+name = "bit-vec"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
+
 [[package]]
 name = "bit-vec"
 version = "0.8.0"
@@ -162,6 +177,17 @@ version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3eeab4423108c5d7c744f4d234de88d18d636100093ae04caf4825134b9c3a32"
 
+[[package]]
+name = "bstr"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4"
+dependencies = [
+ "memchr",
+ "regex-automata",
+ "serde",
+]
+
 [[package]]
 name = "bumpalo"
 version = "3.17.0"
@@ -492,13 +518,24 @@ version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d817e038c30374a4bcb22f94d0a8a0e216958d4c3dcde369b1439fec4bdda6e6"
 
+[[package]]
+name = "fancy-regex"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2"
+dependencies = [
+ "bit-set 0.5.3",
+ "regex-automata",
+ "regex-syntax",
+]
+
 [[package]]
 name = "fancy-regex"
 version = "0.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298"
 dependencies = [
- "bit-set",
+ "bit-set 0.8.0",
  "regex-automata",
  "regex-syntax",
 ]
@@ -1123,7 +1160,7 @@ dependencies = [
  "base64 0.22.1",
  "bytecount",
  "email_address",
- "fancy-regex",
+ "fancy-regex 0.14.0",
  "fraction",
  "idna",
  "itoa",
@@ -1174,7 +1211,7 @@ checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856"
 
 [[package]]
 name = "llguidance"
-version = "0.7.26"
+version = "0.7.27"
 dependencies = [
  "anyhow",
  "derivre",
@@ -1193,7 +1230,7 @@ dependencies = [
 
 [[package]]
 name = "llguidance_py"
-version = "0.7.26"
+version = "0.7.27"
 dependencies = [
  "anyhow",
  "bytemuck",
@@ -1203,6 +1240,7 @@ dependencies = [
  "serde",
  "serde_json",
  "toktrie_hf_tokenizers",
+ "toktrie_tiktoken",
 ]
 
 [[package]]
@@ -1865,6 +1903,12 @@ version = "0.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
 
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
 [[package]]
 name = "rustix"
 version = "1.0.5"
@@ -2233,6 +2277,21 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "tiktoken-rs"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25563eeba904d770acf527e8b370fe9a5547bacd20ff84a0b6c3bc41288e5625"
+dependencies = [
+ "anyhow",
+ "base64 0.22.1",
+ "bstr",
+ "fancy-regex 0.13.0",
+ "lazy_static",
+ "regex",
+ "rustc-hash",
+]
+
 [[package]]
 name = "tinystr"
 version = "0.7.6"
@@ -2252,7 +2311,7 @@ dependencies = [
  "aho-corasick",
  "derive_builder",
  "esaxx-rs",
- "fancy-regex",
+ "fancy-regex 0.14.0",
  "getrandom 0.2.15",
  "itertools 0.13.0",
  "lazy_static",
@@ -2336,7 +2395,7 @@ dependencies = [
 
 [[package]]
 name = "toktrie"
-version = "0.7.26"
+version = "0.7.27"
 dependencies = [
  "anyhow",
  "bytemuck",
@@ -2347,7 +2406,7 @@ dependencies = [
 
 [[package]]
 name = "toktrie_hf_downloader"
-version = "0.7.26"
+version = "0.7.27"
 dependencies = [
  "anyhow",
  "hf-hub",
@@ -2358,7 +2417,7 @@ dependencies = [
 
 [[package]]
 name = "toktrie_hf_tokenizers"
-version = "0.7.26"
+version = "0.7.27"
 dependencies = [
  "anyhow",
  "log",
@@ -2368,6 +2427,18 @@ dependencies = [
  "toktrie",
 ]
 
+[[package]]
+name = "toktrie_tiktoken"
+version = "0.7.27"
+dependencies = [
+ "anyhow",
+ "log",
+ "serde",
+ "serde_json",
+ "tiktoken-rs",
+ "toktrie",
+]
+
 [[package]]
 name = "tower"
 version = "0.5.2"
--- llguidance-0.7.26/Cargo.toml
+++ llguidance-0.7.27/Cargo.toml
@@ -7,6 +7,7 @@ members = [
     "toktrie",
     "toktrie_hf_tokenizers",
     "toktrie_hf_downloader",
+    "toktrie_tiktoken",
 ]
 # just exclude python_ext since it doesn't build without maturin
 default-members = [
@@ -16,6 +17,7 @@ default-members = [
     "toktrie",
     "toktrie_hf_tokenizers",
     "toktrie_hf_downloader",
+    "toktrie_tiktoken",
 ]
 resolver = "2"
 
@@ -36,4 +38,5 @@ opt-level = 3
 toktrie = { path = "toktrie" }
 llguidance = { path = "parser" }
 toktrie_hf_tokenizers = { path = "toktrie_hf_tokenizers" }
-toktrie_hf_downloader = { path = "toktrie_hf_downloader" }
+toktrie_hf_downloader = { path = "toktrie_hf_downloader" }
+toktrie_tiktoken = { path = "toktrie_tiktoken" }
--- llguidance-0.7.26/PKG-INFO
+++ llguidance-0.7.27/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llguidance
-Version: 0.7.26
+Version: 0.7.27
 License-File: LICENSE
 Summary: Bindings for the Low-level Guidance (llguidance) Rust library for use within Guidance
 Author: Michal Moskal
--- llguidance-0.7.26/parser/Cargo.toml
+++ llguidance-0.7.27/parser/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "llguidance"
-version = "0.7.26"
+version = "0.7.27"
 edition = "2021"
 license = "MIT"
 description = "Super-fast Structured Outputs"
--- llguidance-0.7.26/parser/llguidance.h
+++ llguidance-0.7.27/parser/llguidance.h
@@ -433,6 +433,13 @@ const char *llg_stop_commit_token(struct LlgStopController *stop_ctrl,
                                   size_t *output_len_p,
                                   bool *is_stopped_p);
 
+/**
+ * Clone the stop-sequence controller.
+ * The cloned controller shares (under mutex) regex caches if any, so that
+ * cloning is cheap.
+ */
+struct LlgStopController *llg_clone_stop_controller(const struct LlgStopController *stop_ctrl);
+
 /**
  * Free the stop-sequence controller
  */
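For C-API consumers, the new entry point can be bound like any other `llg_*` function. A minimal ctypes sketch, assuming the cdylib has been built and is named `libllguidance.so` (the library name and path are assumptions, and the controller pointer must come from the existing stop-controller constructor, which this hunk does not show):

```python
import ctypes

# Assumption: the compiled cdylib is on the loader path under this name.
lib = ctypes.CDLL("libllguidance.so")

# Signature taken from the header above; the struct stays opaque.
lib.llg_clone_stop_controller.argtypes = [ctypes.c_void_p]
lib.llg_clone_stop_controller.restype = ctypes.c_void_p

def clone_stop_controller(stop_ctrl: int) -> int:
    # Cheap clone: per the doc comment, regex caches are shared under a mutex.
    return lib.llg_clone_stop_controller(stop_ctrl)
```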
--- llguidance-0.7.26/parser/src/earley/parser.rs
+++ llguidance-0.7.27/parser/src/earley/parser.rs
@@ -53,6 +53,14 @@ macro_rules! debug {
     }
 }
 
+macro_rules! debug_def {
+    ($s:expr, $($arg:tt)*) => {
+        if cfg!(feature = "logging") && DEBUG && $s.scratch.log_enabled() {
+            eprintln!($($arg)*);
+        }
+    }
+}
+
 macro_rules! item_trace {
     ($($arg:tt)*) => {
         if ITEM_TRACE {
@@ -322,6 +330,8 @@ struct Scratch {
     // mode, which is used for computing the token mask on the
     // pre-lexemes.
     definitive: bool,
+
+    log_override: bool,
 }
 
 #[derive(Clone)]
@@ -488,9 +498,14 @@ impl Scratch {
             items: vec![],
             grammar_stack: vec![],
             definitive: true,
+            log_override: false,
         }
     }
 
+    fn log_enabled(&self) -> bool {
+        self.definitive || self.log_override
+    }
+
     // Set current working Earley to empty set
     // The set backing data is at `pos`
     fn new_row(&mut self, pos: usize) {
@@ -523,7 +538,7 @@ impl Scratch {
     }
 
     fn push_grammar_stack(&mut self, node: GrammarStackNode) {
-        if self.definitive {
+        if self.log_enabled() {
             debug!("push_grammar_stack: {:?}", node);
         }
         let ptr = GrammarStackPtr::new(self.grammar_stack.len());
@@ -543,7 +558,7 @@ impl Scratch {
         } else {
             self.items[self.row_end] = item;
         }
-        if self.definitive {
+        if self.log_enabled() {
             debug!(
                 " addu: {} ({})",
                 self.item_to_string(self.row_end),
@@ -1003,6 +1018,7 @@ impl ParserState {
     pub fn validate_tokens(&mut self, tokens: &[TokenId]) -> usize {
         self.assert_definitive();
         self.run_speculative("validate_tokens", |state| {
+            state.scratch.log_override = true;
            let mut applied_idx = state.byte_to_token_idx.len();
            let tok_env = state.tok_env.clone();
            let trie = tok_env.tok_trie();
@@ -1497,6 +1513,7 @@ impl ParserState {
         self.scratch.definitive = true;
         self.assert_definitive();
         self.rows_valid_end = self.num_rows();
+        self.scratch.log_override = false; // reset
     }
 
     fn run_speculative<T>(&mut self, lbl: &str, f: impl FnOnce(&mut Self) -> T) -> T {
@@ -1762,14 +1779,13 @@ impl ParserState {
         self.scratch.new_row(items.end);
         self.scratch.push_lexeme_idx = lexeme.idx;
 
-        if self.scratch.definitive {
-            debug!(
-                " scan: {} at row={} token={}",
-                self.lexer().dbg_lexeme(lexeme),
-                row_idx,
-                self.token_idx,
-            );
-        }
+        debug_def!(
+            self,
+            " scan: {} at row={} token={}",
+            self.lexer().dbg_lexeme(lexeme),
+            row_idx,
+            self.token_idx,
+        );
 
         // This loop performs the scan inference rule
         // (slide 21 of Kallmeyer 2018). It is an
@@ -1890,9 +1906,7 @@ impl ParserState {
             let item_idx = agenda_ptr;
             let item = self.scratch.items[agenda_ptr];
             agenda_ptr += 1;
-            if self.scratch.definitive {
-                debug!(" agenda: {}", self.item_to_string(item_idx));
-            }
+            debug_def!(self, " agenda: {}", self.item_to_string(item_idx));
 
             let rule = item.rhs_ptr();
             let after_dot = self.grammar.sym_idx_dot(rule);
@@ -1988,13 +2002,12 @@ impl ParserState {
                 .start_state(&self.scratch.push_allowed_lexemes)
         };
 
-        if self.scratch.definitive {
-            debug!(
-                " push row: {} {:?}",
-                self.allowed_lexemes_dbg(lex_start),
-                grammar_id
-            );
-        }
+        debug_def!(
+            self,
+            " push row: {} {:?}",
+            self.allowed_lexemes_dbg(lex_start),
+            grammar_id
+        );
 
         // Add the working row to the parser state
         let idx = self.num_rows();
@@ -2042,9 +2055,7 @@ impl ParserState {
     }
 
     fn process_max_tokens(&mut self, ptr: GrammarStackPtr, lexeme: &Lexeme) {
-        if self.scratch.definitive {
-            debug!(" process_max_tokens");
-        }
+        debug_def!(self, " process_max_tokens");
         let curr_idx = self.num_rows();
         let top = &self.scratch.grammar_stack[ptr.as_usize()];
         self.scratch.push_grm_top = top.back_ptr;
@@ -2118,12 +2129,13 @@ impl ParserState {
 
         while grm_stack_top.as_usize() > 0 {
             let grm_top = &self.scratch.grammar_stack[grm_stack_top.as_usize()];
-            if self.scratch.definitive {
-                debug!(
-                    " pop grammar_stack: top={:?}, curr={:?}, #{}",
-                    grm_top.grammar_id, grammar_ids, self.token_idx
-                );
-            }
+            debug_def!(
+                self,
+                " pop grammar_stack: top={:?}, curr={:?}, #{}",
+                grm_top.grammar_id,
+                grammar_ids,
+                self.token_idx
+            );
             if grammar_ids.contains(&grm_top.grammar_id) {
                 // token_idx is one behind
                 if grm_top.token_horizon <= self.token_idx as u32 {
@@ -2132,12 +2144,12 @@ impl ParserState {
                     // We only pop one grammar off the stack.
                     // If more grammars have the same token horizon, they will get popped
                     // in the next step - we might overrun a bit.
-                    if self.scratch.definitive {
-                        debug!(
-                            " hit token limit horizon={} token_idx={}",
-                            grm_top.token_horizon, self.token_idx
-                        );
-                    }
+                    debug_def!(
+                        self,
+                        " hit token limit horizon={} token_idx={}",
+                        grm_top.token_horizon,
+                        self.token_idx
+                    );
                     max_token_ptr = Some(grm_stack_top);
                 }
                 break;
@@ -2252,13 +2264,14 @@ impl ParserState {
                     .saturating_sub(1);
                 self.row_infos[added_row].start_byte_idx -= new_start;
             }
-            debug!(
-                "lex: re-start {:?} (via {:?}); allowed: {}",
-                no_hidden.lexer_state,
-                transition_byte.map(|b| b as char),
-                self.allowed_lexemes_dbg(added_row_start_state)
-            );
         }
+        debug_def!(
+            self,
+            "lex: re-start {:?} (via {:?}); allowed: {}",
+            no_hidden.lexer_state,
+            transition_byte.map(|b| b as char),
+            self.allowed_lexemes_dbg(added_row_start_state)
+        );
 
         no_hidden
     }
@@ -2277,7 +2290,7 @@ impl ParserState {
 
         let hidden_bytes = lexeme.hidden_bytes();
 
-        let trace_here = self.scratch.definitive;
+        let trace_here = self.scratch.log_enabled();
 
         if trace_here {
             trace!(
@@ -2467,9 +2480,7 @@ impl ParserState {
                 .lexer_mut()
                 .check_for_single_byte_lexeme(no_hidden.lexer_state, b);
             if let Some(second_lexeme) = single {
-                if self.scratch.definitive {
-                    debug!("single byte lexeme: {:?}", second_lexeme);
-                }
+                debug_def!(self, "single byte lexeme: {:?}", second_lexeme);
                 no_hidden.byte = None;
                 self.lexer_stack.push(no_hidden);
 
@@ -2495,9 +2506,7 @@ impl ParserState {
             }
             true
         } else {
-            if self.scratch.definitive {
-                debug!(" scan failed");
-            }
+            debug_def!(self, " scan failed");
             false
         }
     }
--- llguidance-0.7.26/parser/src/ffi.rs
+++ llguidance-0.7.27/parser/src/ffi.rs
@@ -330,6 +330,7 @@ pub struct LlgConstraint {
     last_commit_result: CommitResult,
 }
 
+#[derive(Clone)]
 pub struct LlgStopController {
     stop_controller: StopController,
     last_result: String,
@@ -923,6 +924,16 @@ pub extern "C" fn llg_stop_commit_token(
     stop_ctrl.last_result.as_ptr() as *const c_char
 }
 
+/// Clone the stop-sequence controller.
+/// The cloned controller shares (under mutex) regex caches if any, so that
+/// cloning is cheap.
+#[no_mangle]
+pub extern "C" fn llg_clone_stop_controller(
+    stop_ctrl: &LlgStopController,
+) -> *mut LlgStopController {
+    Box::into_raw(Box::new(stop_ctrl.clone()))
+}
+
 /// Free the stop-sequence controller
 /// # Safety
 /// This function should only be called from C code.
--- llguidance-0.7.26/parser/src/stop_controller.rs
+++ llguidance-0.7.27/parser/src/stop_controller.rs
@@ -1,3 +1,5 @@
+use std::sync::{Arc, Mutex};
+
 use anyhow::Result;
 use derivre::{RegexAst, RegexBuilder, StateID};
 use toktrie::{TokEnv, TokTrie, TokenId};
@@ -10,12 +12,14 @@ use crate::{
     },
 };
 
+#[derive(Clone)]
 struct StopRegex {
-    dfa: RegexVec,
+    dfa: Arc<Mutex<RegexVec>>,
     state: StateID,
     initial_state: StateID,
 }
 
+#[derive(Clone)]
 pub struct StopController {
     tok_env: TokEnv,
     is_stopped: bool,
@@ -73,7 +77,7 @@ impl StopController {
         )?;
         let initial_state = dfa.initial_state(&all_regex);
         res.regex = Some(StopRegex {
-            dfa,
+            dfa: Arc::new(Mutex::new(dfa)),
             state: initial_state,
             initial_state,
         });
@@ -104,23 +108,24 @@ impl StopController {
             buf.extend_from_slice(format!("<[{}]>", tok_id).as_bytes());
         } else if let Some(rx) = self.regex.as_mut() {
             let mut state = rx.state;
+            let mut dfa = rx.dfa.lock().unwrap();
             for &b in bytes {
                 buf.push(b);
-                let state2 = rx.dfa.transition(state, b);
+                let state2 = dfa.transition(state, b);
                 // println!("state: {:?} -{:?}-> {:?}", state, b as char, state2);
                 state = state2;
                 assert!(!state.is_dead());
                 if state.has_lowest_match() {
                     self.is_stopped = true;
                     rx.state = state;
-                    let stop_len = rx.dfa.lookahead_len_for_state(state).unwrap_or(0);
+                    let stop_len = dfa.lookahead_len_for_state(state).unwrap_or(0);
                     buf.truncate(buf.len().saturating_sub(stop_len));
                     return buf;
                 }
             }
 
             rx.state = state;
-            let chop = rx.dfa.possible_lookahead_len(state);
+            let chop = dfa.possible_lookahead_len(state);
             let to_return = buf.len().saturating_sub(chop);
             // println!("chop: {:?} {}", String::from_utf8_lossy(&buf), chop);
             let valid_len = valid_utf8_len(&buf[..to_return]);
--- llguidance-0.7.26/pyproject.toml
+++ llguidance-0.7.27/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "llguidance"
-version = "0.7.26"
+version = "0.7.27"
 description = "Bindings for the Low-level Guidance (llguidance) Rust library for use within Guidance"
 requires-python = ">=3.9"
 license = "MIT"
--- /dev/null
+++ llguidance-0.7.27/python/llguidance/tiktoken.py
@@ -0,0 +1,34 @@
+from typing import List, Optional, TYPE_CHECKING
+
+from ._lib import LLTokenizer
+
+if TYPE_CHECKING:
+    import tiktoken
+
+
+def lltokenizer_from_encoding(
+    encoding: 'tiktoken.Encoding',
+    *,
+    n_vocab: Optional[int] = None,
+    eos_token: Optional[int] = None,
+    slices: Optional[List[str]] = None,
+) -> LLTokenizer:
+    """
+    Create a new tokenizer from a tiktoken Encoding object.
+    This is an expensive operation (~1s), so the result should be cached.
+
+    Args:
+        encoding: tiktoken.Encoding - the encoding object to use
+        n_vocab: int - override the size of the vocabulary
+        eos_token: int - override the EOS token
+        slices: List[str] - configuration for slicer optimization; pass [] to disable,
+            or None to use the default configuration
+    """
+
+    return LLTokenizer.from_tiktoken(  # type: ignore
+        encoder=encoding._mergeable_ranks,
+        special_tokens=encoding._special_tokens,
+        pattern=encoding._pat_str,
+        eos_token=encoding.eot_token if eos_token is None else eos_token,
+        n_vocab=n_vocab,
+        slices=slices)
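The keyword-only arguments map directly onto the docstring above. A small sketch under the same assumptions as the tests below, disabling the slicer optimization and passing the EOS token explicitly:

```python
import tiktoken
from llguidance.tiktoken import lltokenizer_from_encoding

enc = tiktoken.get_encoding("o200k_base")
# slices=[] disables the slicer optimization; eos_token already defaults to
# enc.eot_token when not given, so this override is just illustrative.
llt = lltokenizer_from_encoding(enc, slices=[], eos_token=enc.eot_token)
```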
--- llguidance-0.7.26/python/torch_tests/test_llamacpp.py
+++ llguidance-0.7.27/python/torch_tests/test_llamacpp.py
@@ -47,3 +47,7 @@ def test_llama_cpp(pytestconfig: Any) -> None:
     assert len(toks0) > 1
     toks2 = llt.tokenize_str("<|eot_id|>", parse_special=True)
     assert len(toks2) == 1
+
+    toks3 = llt.tokenize_str("a<|eot_id|>b", parse_special=True)
+    print(llt.dbg_tokens(toks3))
+    assert len(toks3) == 3
--- /dev/null
+++ llguidance-0.7.27/python/torch_tests/test_tiktoken.py
@@ -0,0 +1,30 @@
+import llguidance.tiktoken
+import tiktoken
+
+
+def test_tiktoken() -> None:
+    enc = tiktoken.get_encoding("o200k_base")
+    llt = llguidance.tiktoken.lltokenizer_from_encoding(enc)
+    for s in [
+            "Hello world!", "Hello world! こんにちは世界!", "wave 👋", "heart 👋💖",
+            "1`a`b`c`d`e`f`g`h`i"
+    ]:
+        toks = llt.tokenize_str(s)
+        print(llt.dbg_tokens(toks))
+        assert llt.decode_str(toks) == s
+    toks = llt.tokenize_bytes(b"\x8b")
+    print(llt.dbg_tokens(toks))
+    print(toks)
+    assert len(toks) == 1
+    assert llt.decode_bytes(toks) == b"\x8b"
+
+    toks1 = llt.tokenize_str("<|endoftext|>")
+    toks0 = llt.tokenize_str("<|endoftext|>", parse_special=False)
+    assert toks1 == toks0
+    assert len(toks0) > 1
+    toks2 = llt.tokenize_str("<|endoftext|>", parse_special=True)
+    assert len(toks2) == 1
+
+    toks3 = llt.tokenize_str("a<|endoftext|>b", parse_special=True)
+    print(llt.dbg_tokens(toks3))
+    assert len(toks3) == 3
--- llguidance-0.7.26/python_ext/Cargo.toml
+++ llguidance-0.7.27/python_ext/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "llguidance_py"
-version = "0.7.26"
+version = "0.7.27"
 edition = "2021"
 license = "MIT"
 description = "Super-fast Structured Outputs"
@@ -9,6 +9,7 @@ repository = "https://github.com/guidance-ai/llguidance"
 [dependencies]
 llguidance = { workspace = true }
 toktrie_hf_tokenizers = { workspace = true }
+toktrie_tiktoken = { workspace = true }
 bytemuck = "1.21.0"
 pyo3 = {version = "0.24.1", features = ["extension-module", "abi3-py39", "anyhow"]}
 serde = { version = "1.0.217", features = ["derive"] }