discourse_ai-tokenizers 0.1.0 → 0.1.1
This diff shows the changes between publicly available package versions as released to their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/discourse_ai/tokenizers/all_mpnet_base_v2_tokenizer.rb +4 -1
- data/lib/discourse_ai/tokenizers/anthropic_tokenizer.rb +3 -1
- data/lib/discourse_ai/tokenizers/bert_tokenizer.rb +4 -1
- data/lib/discourse_ai/tokenizers/bge_large_en_tokenizer.rb +4 -1
- data/lib/discourse_ai/tokenizers/bge_m3_tokenizer.rb +4 -1
- data/lib/discourse_ai/tokenizers/gemini_tokenizer.rb +4 -1
- data/lib/discourse_ai/tokenizers/llama3_tokenizer.rb +5 -1
- data/lib/discourse_ai/tokenizers/mistral_tokenizer.rb +5 -1
- data/lib/discourse_ai/tokenizers/multilingual_e5_large_tokenizer.rb +3 -1
- data/lib/discourse_ai/tokenizers/qwen_tokenizer.rb +4 -1
- data/lib/discourse_ai/tokenizers/version.rb +1 -1
- data/lib/discourse_ai/tokenizers.rb +8 -1
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bc2fd76c9fd338fd19f6d56d4e21c98695d2ff8a2baf5626f36f7df7f98af3d9
+  data.tar.gz: d5e9b1ca74715a0346a3d22e2413834191488167e867d5bec795b6f9c9c25f5d
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4b06f6c801f878f173471a337f0d9d28c3321ba0b7b089876b83296353144257080bc0e1495bdbe020914f4cb6324696f9505a625cd01545b17d6e99624f190f
+  data.tar.gz: f7c37e3d464b16419c7218554a29514c63d84d1d4247969940d2f9d7f3316618ae593aa98df8941a44d2c721471f97073b75b37b2246a18349f43b6a324adb8a
data/lib/discourse_ai/tokenizers/all_mpnet_base_v2_tokenizer.rb
CHANGED
@@ -5,7 +5,10 @@ module DiscourseAi
     # Tokenizer for the mpnet based embeddings models
     class AllMpnetBaseV2Tokenizer < BasicTokenizer
       def self.tokenizer
-        @tokenizer ||=
+        @tokenizer ||=
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("all-mpnet-base-v2.json")
+          )
       end
     end
   end
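Every tokenizer class in this release follows the same pattern: a class-level memoized loader that reads a vendored tokenizer JSON file through the new `vendor_path` helper (added in `tokenizers.rb`, see the last hunk below). A minimal sketch of that pattern, using a hypothetical `MyTokenizer` class and `my-model.json` file rather than any real one from this gem:

module DiscourseAi
  module Tokenizers
    # Hypothetical example class, for illustration only.
    class MyTokenizer < BasicTokenizer
      def self.tokenizer
        # Memoized: the JSON vocabulary is parsed once per process,
        # on first use, then the same instance is reused.
        @tokenizer ||=
          ::Tokenizers.from_file(
            DiscourseAi::Tokenizers.vendor_path("my-model.json")
          )
      end
    end
  end
end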
data/lib/discourse_ai/tokenizers/anthropic_tokenizer.rb
CHANGED
@@ -6,7 +6,9 @@ module DiscourseAi
     class AnthropicTokenizer < BasicTokenizer
       def self.tokenizer
         @tokenizer ||=
-          ::Tokenizers.from_file(
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("claude-v1-tokenization.json")
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/bert_tokenizer.rb
CHANGED
@@ -5,7 +5,10 @@ module DiscourseAi
     # Bert tokenizer, useful for lots of embeddings and small classification models
     class BertTokenizer < BasicTokenizer
      def self.tokenizer
-        @tokenizer ||=
+        @tokenizer ||=
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("bert-base-uncased.json")
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/bge_large_en_tokenizer.rb
CHANGED
@@ -5,7 +5,10 @@ module DiscourseAi
     # Tokenizer used in bge-large-en-v1.5, the most common embeddings model used for Discourse
     class BgeLargeEnTokenizer < BasicTokenizer
       def self.tokenizer
-        @tokenizer ||=
+        @tokenizer ||=
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("bge-large-en.json")
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/bge_m3_tokenizer.rb
CHANGED
@@ -5,7 +5,10 @@ module DiscourseAi
     # Tokenizer used in bge-m3, a capable multilingual long context embeddings model.
     class BgeM3Tokenizer < BasicTokenizer
       def self.tokenizer
-        @tokenizer ||=
+        @tokenizer ||=
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("bge-m3.json")
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/gemini_tokenizer.rb
CHANGED
@@ -5,7 +5,10 @@ module DiscourseAi
     # Tokenizer from Gemma3, which is said to be the same for Gemini
     class GeminiTokenizer < BasicTokenizer
       def self.tokenizer
-        @tokenizer ||=
+        @tokenizer ||=
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("gemma3.json")
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/llama3_tokenizer.rb
CHANGED
@@ -6,7 +6,11 @@ module DiscourseAi
     class Llama3Tokenizer < BasicTokenizer
       def self.tokenizer
         @tokenizer ||=
-          ::Tokenizers.from_file(
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path(
+              "Meta-Llama-3-70B-Instruct.json"
+            )
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/mistral_tokenizer.rb
CHANGED
@@ -6,7 +6,11 @@ module DiscourseAi
     class MistralTokenizer < BasicTokenizer
       def self.tokenizer
         @tokenizer ||=
-          ::Tokenizers.from_file(
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path(
+              "mistral-small-3.1-24b-2503.json"
+            )
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/multilingual_e5_large_tokenizer.rb
CHANGED
@@ -6,7 +6,9 @@ module DiscourseAi
     class MultilingualE5LargeTokenizer < BasicTokenizer
       def self.tokenizer
         @tokenizer ||=
-          ::Tokenizers.from_file(
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("multilingual-e5-large.json")
+          )
       end
     end
   end
data/lib/discourse_ai/tokenizers/qwen_tokenizer.rb
CHANGED
@@ -5,7 +5,10 @@ module DiscourseAi
     # Tokenizer from Qwen3 LLM series. Also compatible with their embedding models
     class QwenTokenizer < BasicTokenizer
       def self.tokenizer
-        @tokenizer ||=
+        @tokenizer ||=
+          ::Tokenizers.from_file(
+            DiscourseAi::Tokenizers.vendor_path("qwen3.json")
+          )
       end
     end
   end
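Once loaded, each class memoizes a `Tokenizers::Tokenizer` instance from the underlying `tokenizers` gem. An illustrative usage sketch, assuming that gem's standard encode API (the input string and sample outputs are examples, not taken from this diff):

require "discourse_ai/tokenizers"

tok = DiscourseAi::Tokenizers::BertTokenizer.tokenizer

enc = tok.encode("Discourse AI tokenizers")
enc.tokens # subword strings, e.g. ["disc", "##ours", "##e", ...] (illustrative)
enc.ids    # the matching vocabulary ids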
data/lib/discourse_ai/tokenizers.rb
CHANGED
@@ -20,6 +20,13 @@ module DiscourseAi
   module Tokenizers
     class Error < StandardError
     end
-
+
+    def self.gem_root
+      @gem_root ||= File.expand_path("../../..", __FILE__)
+    end
+
+    def self.vendor_path(filename)
+      File.join(gem_root, "vendor", filename)
+    end
   end
 end
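This last hunk is the core of the release: `gem_root` resolves three directories up from `lib/discourse_ai/tokenizers.rb`, i.e. the installed gem's root, so `vendor_path` finds the bundled JSON files regardless of the caller's working directory. A sketch of what the helpers return, using a hypothetical install path chosen only for illustration:

# Assuming the gem is installed at /gems/discourse_ai-tokenizers-0.1.1
# (hypothetical path):
DiscourseAi::Tokenizers.gem_root
# => "/gems/discourse_ai-tokenizers-0.1.1"

DiscourseAi::Tokenizers.vendor_path("qwen3.json")
# => "/gems/discourse_ai-tokenizers-0.1.1/vendor/qwen3.json"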