tokenizers 0.5.5 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/Cargo.lock +124 -53
- data/ext/tokenizers/Cargo.toml +4 -3
- data/ext/tokenizers/src/encoding.rs +10 -8
- data/ext/tokenizers/src/models.rs +37 -24
- data/ext/tokenizers/src/normalizers.rs +1 -2
- data/ext/tokenizers/src/pre_tokenizers.rs +5 -5
- data/ext/tokenizers/src/tokenizer.rs +61 -49
- data/ext/tokenizers/src/trainers.rs +60 -50
- data/ext/tokenizers/src/utils/normalization.rs +3 -2
- data/ext/tokenizers/src/utils/regex.rs +5 -4
- data/lib/tokenizers/from_pretrained.rb +2 -2
- data/lib/tokenizers/trainers/unigram_trainer.rb +10 -9
- data/lib/tokenizers/trainers/word_piece_trainer.rb +10 -9
- data/lib/tokenizers/version.rb +1 -1
- metadata +3 -3
@@ -1,13 +1,13 @@
|
|
1
1
|
module Tokenizers
|
2
2
|
module FromPretrained
|
3
3
|
# for user agent
|
4
|
-
TOKENIZERS_VERSION = "0.
|
4
|
+
TOKENIZERS_VERSION = "0.22.0"
|
5
5
|
|
6
6
|
# use Ruby for downloads
|
7
7
|
# this avoids the need to vendor OpenSSL on Linux
|
8
8
|
# and reduces the extension size by about half
|
9
9
|
def from_pretrained(identifier, revision: "main", auth_token: nil)
|
10
|
-
require "cgi"
|
10
|
+
require "cgi/escape"
|
11
11
|
require "digest"
|
12
12
|
require "fileutils"
|
13
13
|
require "json"
|
@@ -1,15 +1,16 @@
|
|
1
1
|
module Tokenizers
|
2
2
|
module Trainers
|
3
3
|
class UnigramTrainer
|
4
|
-
def self.new(
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
|
12
|
-
|
4
|
+
def self.new(
|
5
|
+
vocab_size: 8000,
|
6
|
+
show_progress: true,
|
7
|
+
special_tokens: [],
|
8
|
+
initial_alphabet: [],
|
9
|
+
shrinking_factor: 0.75,
|
10
|
+
unk_token: nil,
|
11
|
+
max_piece_length: 16,
|
12
|
+
n_sub_iterations: 2
|
13
|
+
)
|
13
14
|
_new({
|
14
15
|
vocab_size: vocab_size,
|
15
16
|
show_progress: show_progress,
|
@@ -1,15 +1,16 @@
|
|
1
1
|
module Tokenizers
|
2
2
|
module Trainers
|
3
3
|
class WordPieceTrainer
|
4
|
-
def self.new(
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
|
12
|
-
|
4
|
+
def self.new(
|
5
|
+
vocab_size: 30000,
|
6
|
+
min_frequency: 0,
|
7
|
+
show_progress: true,
|
8
|
+
special_tokens: [],
|
9
|
+
limit_alphabet: nil,
|
10
|
+
initial_alphabet: [],
|
11
|
+
continuing_subword_prefix: "##",
|
12
|
+
end_of_word_suffix: nil
|
13
|
+
)
|
13
14
|
_new({
|
14
15
|
vocab_size: vocab_size,
|
15
16
|
min_frequency: min_frequency,
|
data/lib/tokenizers/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: tokenizers
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.5.5
|
4
|
+
version: 0.6.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Andrew Kane
|
@@ -91,14 +91,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
|
|
91
91
|
requirements:
|
92
92
|
- - ">="
|
93
93
|
- !ruby/object:Gem::Version
|
94
|
-
version: '3.
|
94
|
+
version: '3.2'
|
95
95
|
required_rubygems_version: !ruby/object:Gem::Requirement
|
96
96
|
requirements:
|
97
97
|
- - ">="
|
98
98
|
- !ruby/object:Gem::Version
|
99
99
|
version: '0'
|
100
100
|
requirements: []
|
101
|
-
rubygems_version: 3.6.
|
101
|
+
rubygems_version: 3.6.9
|
102
102
|
specification_version: 4
|
103
103
|
summary: Fast state-of-the-art tokenizers for Ruby
|
104
104
|
test_files: []
|