cld3 3.2.4 → 3.2.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. checksums.yaml +4 -4
  2. data/cld3.gemspec +4 -4
  3. data/ext/cld3/ext/CMakeLists.txt +69 -0
  4. data/ext/cld3/ext/CONTRIBUTING.md +26 -0
  5. data/{LICENSE_CLD3 → ext/cld3/ext/LICENSE} +0 -0
  6. data/ext/cld3/ext/README.md +73 -0
  7. data/ext/cld3/ext/misc/myprotobuf.cmake +58 -0
  8. data/ext/cld3/ext/model.png +0 -0
  9. data/ext/cld3/ext/src/BUILD.gn +133 -0
  10. data/ext/cld3/ext/src/DEPS +4 -0
  11. data/ext/cld3/{base.cc → ext/src/base.cc} +0 -0
  12. data/ext/cld3/{base.h → ext/src/base.h} +0 -0
  13. data/ext/cld3/{casts.h → ext/src/casts.h} +0 -0
  14. data/ext/cld3/{embedding_feature_extractor.cc → ext/src/embedding_feature_extractor.cc} +0 -0
  15. data/ext/cld3/{embedding_feature_extractor.h → ext/src/embedding_feature_extractor.h} +0 -0
  16. data/ext/cld3/{embedding_network.cc → ext/src/embedding_network.cc} +0 -0
  17. data/ext/cld3/{embedding_network.h → ext/src/embedding_network.h} +0 -0
  18. data/ext/cld3/{embedding_network_params.h → ext/src/embedding_network_params.h} +0 -0
  19. data/ext/cld3/{feature_extractor.cc → ext/src/feature_extractor.cc} +0 -0
  20. data/ext/cld3/{feature_extractor.h → ext/src/feature_extractor.h} +0 -0
  21. data/ext/cld3/{feature_extractor.proto → ext/src/feature_extractor.proto} +0 -0
  22. data/ext/cld3/{feature_types.cc → ext/src/feature_types.cc} +0 -0
  23. data/ext/cld3/{feature_types.h → ext/src/feature_types.h} +0 -0
  24. data/ext/cld3/{float16.h → ext/src/float16.h} +0 -0
  25. data/ext/cld3/{fml_parser.cc → ext/src/fml_parser.cc} +0 -0
  26. data/ext/cld3/{fml_parser.h → ext/src/fml_parser.h} +0 -0
  27. data/ext/cld3/{lang_id_nn_params.cc → ext/src/lang_id_nn_params.cc} +0 -0
  28. data/ext/cld3/{lang_id_nn_params.h → ext/src/lang_id_nn_params.h} +0 -0
  29. data/ext/cld3/{language_identifier_features.cc → ext/src/language_identifier_features.cc} +0 -0
  30. data/ext/cld3/{language_identifier_features.h → ext/src/language_identifier_features.h} +0 -0
  31. data/ext/cld3/ext/src/language_identifier_features_test.cc +261 -0
  32. data/ext/cld3/ext/src/language_identifier_main.cc +54 -0
  33. data/ext/cld3/ext/src/nnet_lang_id_test.cc +254 -0
  34. data/ext/cld3/ext/src/nnet_lang_id_test_data.cc +529 -0
  35. data/ext/cld3/ext/src/nnet_lang_id_test_data.h +117 -0
  36. data/ext/cld3/{nnet_language_identifier.cc → ext/src/nnet_language_identifier.cc} +8 -0
  37. data/ext/cld3/{nnet_language_identifier.h → ext/src/nnet_language_identifier.h} +16 -0
  38. data/ext/cld3/{registry.cc → ext/src/registry.cc} +0 -0
  39. data/ext/cld3/{registry.h → ext/src/registry.h} +0 -0
  40. data/ext/cld3/{relevant_script_feature.cc → ext/src/relevant_script_feature.cc} +0 -0
  41. data/ext/cld3/{relevant_script_feature.h → ext/src/relevant_script_feature.h} +0 -0
  42. data/ext/cld3/ext/src/relevant_script_feature_test.cc +259 -0
  43. data/ext/cld3/{script_detector.h → ext/src/script_detector.h} +0 -0
  44. data/ext/cld3/ext/src/script_detector_test.cc +161 -0
  45. data/ext/cld3/ext/src/script_span/README.md +11 -0
  46. data/ext/cld3/{fixunicodevalue.cc → ext/src/script_span/fixunicodevalue.cc} +0 -0
  47. data/ext/cld3/{fixunicodevalue.h → ext/src/script_span/fixunicodevalue.h} +0 -0
  48. data/ext/cld3/{generated_entities.cc → ext/src/script_span/generated_entities.cc} +0 -0
  49. data/ext/cld3/{generated_ulscript.cc → ext/src/script_span/generated_ulscript.cc} +0 -0
  50. data/ext/cld3/{generated_ulscript.h → ext/src/script_span/generated_ulscript.h} +0 -0
  51. data/ext/cld3/{getonescriptspan.cc → ext/src/script_span/getonescriptspan.cc} +0 -0
  52. data/ext/cld3/{getonescriptspan.h → ext/src/script_span/getonescriptspan.h} +1 -1
  53. data/ext/cld3/ext/src/script_span/getonescriptspan_test.cc +135 -0
  54. data/ext/cld3/{integral_types.h → ext/src/script_span/integral_types.h} +0 -0
  55. data/ext/cld3/{offsetmap.cc → ext/src/script_span/offsetmap.cc} +0 -0
  56. data/ext/cld3/{offsetmap.h → ext/src/script_span/offsetmap.h} +0 -0
  57. data/ext/cld3/{port.h → ext/src/script_span/port.h} +0 -0
  58. data/ext/cld3/{stringpiece.h → ext/src/script_span/stringpiece.h} +0 -0
  59. data/ext/cld3/{text_processing.cc → ext/src/script_span/text_processing.cc} +0 -0
  60. data/ext/cld3/{text_processing.h → ext/src/script_span/text_processing.h} +0 -0
  61. data/ext/cld3/{utf8acceptinterchange.h → ext/src/script_span/utf8acceptinterchange.h} +0 -0
  62. data/ext/cld3/{utf8prop_lettermarkscriptnum.h → ext/src/script_span/utf8prop_lettermarkscriptnum.h} +0 -0
  63. data/ext/cld3/{utf8repl_lettermarklower.h → ext/src/script_span/utf8repl_lettermarklower.h} +0 -0
  64. data/ext/cld3/{utf8scannot_lettermarkspecial.h → ext/src/script_span/utf8scannot_lettermarkspecial.h} +0 -0
  65. data/ext/cld3/{utf8statetable.cc → ext/src/script_span/utf8statetable.cc} +0 -0
  66. data/ext/cld3/{utf8statetable.h → ext/src/script_span/utf8statetable.h} +0 -0
  67. data/ext/cld3/{sentence.proto → ext/src/sentence.proto} +0 -0
  68. data/ext/cld3/{sentence_features.cc → ext/src/sentence_features.cc} +0 -0
  69. data/ext/cld3/{sentence_features.h → ext/src/sentence_features.h} +0 -0
  70. data/ext/cld3/{simple_adder.h → ext/src/simple_adder.h} +0 -0
  71. data/ext/cld3/{task_context.cc → ext/src/task_context.cc} +0 -0
  72. data/ext/cld3/{task_context.h → ext/src/task_context.h} +0 -0
  73. data/ext/cld3/{task_context_params.cc → ext/src/task_context_params.cc} +0 -0
  74. data/ext/cld3/{task_context_params.h → ext/src/task_context_params.h} +0 -0
  75. data/ext/cld3/{task_spec.proto → ext/src/task_spec.proto} +0 -0
  76. data/ext/cld3/{unicodetext.cc → ext/src/unicodetext.cc} +0 -0
  77. data/ext/cld3/{unicodetext.h → ext/src/unicodetext.h} +0 -0
  78. data/ext/cld3/{utils.cc → ext/src/utils.cc} +0 -0
  79. data/ext/cld3/{utils.h → ext/src/utils.h} +0 -0
  80. data/ext/cld3/{workspace.cc → ext/src/workspace.cc} +0 -0
  81. data/ext/cld3/{workspace.h → ext/src/workspace.h} +0 -0
  82. metadata +87 -71
@@ -0,0 +1,261 @@
1
+ /* Copyright 2016 Google Inc. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #include <cmath>
17
+ #include <iostream>
18
+ #include <vector>
19
+ #include <set>
20
+
21
+ #include "base.h"
22
+ #include "feature_extractor.h"
23
+ #include "language_identifier_features.h"
24
+ #include "nnet_language_identifier.h"
25
+ #include "script_span/generated_ulscript.h"
26
+ #include "cld_3/protos/sentence.pb.h"
27
+ #include "task_context.h"
28
+ #include "utils.h"
29
+ #include "workspace.h"
30
+
31
+ namespace chrome_lang_id {
32
+ namespace language_identifier_features_test {
33
+
34
+ static WholeSentenceFeature *cbog_factory() {
35
+ return new ContinuousBagOfNgramsFunction;
36
+ }
37
+
38
+ static WholeSentenceFeature *sf_factory() { return new ScriptFeature; }
39
+
40
+ // Class for calculating the feature weights and ids.
41
+ class FeatureIdWeightCalculator {
42
+ public:
43
+ explicit FeatureIdWeightCalculator(TaskContext *context) {
44
+ if (WholeSentenceFeature::registry() == nullptr) {
45
+ // Create registry for our WholeSentenceFeature(s).
46
+ RegisterableClass<WholeSentenceFeature>::CreateRegistry(
47
+ "sentence feature function", "WholeSentenceFeature", __FILE__,
48
+ __LINE__);
49
+ }
50
+
51
+ // Register our WholeSentenceFeature(s).
52
+ // Register ContinuousBagOfNgramsFunction feature function.
53
+ static WholeSentenceFeature::Registry::Registrar cbog_registrar(
54
+ WholeSentenceFeature::registry(), "continuous-bag-of-ngrams",
55
+ "ContinuousBagOfNgramsFunction", __FILE__, __LINE__, cbog_factory);
56
+
57
+ // Register Script feature function.
58
+ static WholeSentenceFeature::Registry::Registrar sf_registrar(
59
+ WholeSentenceFeature::registry(), "script", "ScriptFeature", __FILE__,
60
+ __LINE__, sf_factory);
61
+
62
+ feature_extractor_.Setup(context);
63
+ feature_extractor_.Init(context);
64
+ }
65
+
66
+ // Assumes that a single feature is specified and extracts it.
67
+ void ExtractOnlyFeature(Sentence *sentence,
68
+ std::vector<FeatureVector> *features) {
69
+ CLD3_CHECK(features->size() == 1);
70
+ WorkspaceSet workspace;
71
+ workspace.Reset(workspace_registry_);
72
+ feature_extractor_.Preprocess(&workspace, sentence);
73
+ feature_extractor_.ExtractFeatures(workspace, *sentence, features);
74
+ CLD3_CHECK(features->size() == 1);
75
+ }
76
+
77
+ // Returns a map from feature value id to feature value weight.
78
+ std::unordered_map<int, float> GetFloatFeatureValIdsAndWeights(
79
+ Sentence *sentence) {
80
+ std::vector<FeatureVector> feature_vectors(1); // one feature space
81
+ ExtractOnlyFeature(sentence, &feature_vectors);
82
+ const FeatureVector &feature_vector = feature_vectors.at(0);
83
+
84
+ // Save the (feature value id, feature value weight) pairs to a map.
85
+ std::unordered_map<int, float> feature_id_weight;
86
+ for (int index = 0; index < feature_vector.size(); ++index) {
87
+ const FloatFeatureValue feature_value =
88
+ FloatFeatureValue(feature_vector.value(index));
89
+ feature_id_weight[feature_value.value.id] = feature_value.value.weight;
90
+ }
91
+ return feature_id_weight;
92
+ }
93
+
94
+ // Returns the feature value ids.
95
+ std::set<int> GetFeatureValueIds(Sentence *sentence) {
96
+ std::vector<FeatureVector> feature_vectors(1); // one feature space
97
+ ExtractOnlyFeature(sentence, &feature_vectors);
98
+ const FeatureVector &feature_vector = feature_vectors.at(0);
99
+
100
+ std::set<int> ids;
101
+ for (int index = 0; index < feature_vector.size(); ++index) {
102
+ ids.insert(feature_vector.value(index));
103
+ }
104
+ return ids;
105
+ }
106
+
107
+ private:
108
+ // The registry of shared workspaces in the feature extractor.
109
+ WorkspaceRegistry workspace_registry_;
110
+ LanguageIdEmbeddingFeatureExtractor feature_extractor_;
111
+ };
112
+
113
+ // Extracts features and checks that their ids and weights are correct.
114
+ bool ExtractAndCheckFeatures(const string &features, const int id_dim,
115
+ const std::vector<string> &expected_char_ngrams,
116
+ const std::vector<float> &expected_weights,
117
+ Sentence *sentence) {
118
+ TaskContext context;
119
+ context.SetParameter("language_identifier_features", features);
120
+ FeatureIdWeightCalculator calc(&context);
121
+
122
+ // Get the feature ids and the corresponding weights.
123
+ const std::unordered_map<int, float> feature_id_weight =
124
+ calc.GetFloatFeatureValIdsAndWeights(sentence);
125
+ if (feature_id_weight.size() != expected_char_ngrams.size()) {
126
+ std::cout << " Failure" << std::endl;
127
+ std::cout << " Number of expected feature ids: "
128
+ << expected_char_ngrams.size() << std::endl;
129
+ std::cout << " Number of extracted feature ids: "
130
+ << feature_id_weight.size() << std::endl;
131
+ return false;
132
+ }
133
+
134
+ // Specifies how close two float values should be to be considered equal.
135
+ const float epsilon = 0.0001f;
136
+ bool test_successful = true;
137
+ for (size_t i = 0; i < expected_char_ngrams.size(); ++i) {
138
+ const int expected_id =
139
+ utils::Hash32WithDefaultSeed(expected_char_ngrams.at(i)) % id_dim;
140
+
141
+ // Check the ids and the weights.
142
+ if (feature_id_weight.count(expected_id) == 0) {
143
+ std::cout << " Failure" << std::endl;
144
+ std::cout << " Feature id " << expected_id << " is missing" << std::endl;
145
+ test_successful = false;
146
+ } else {
147
+ if (std::abs(feature_id_weight.at(expected_id) - expected_weights.at(i)) >
148
+ epsilon) {
149
+ std::cout << " Failure" << std::endl;
150
+ std::cout << " Different weight for feature id " << expected_id
151
+ << ": expected weight " << expected_weights.at(i)
152
+ << ", actual weight " << feature_id_weight.at(expected_id)
153
+ << std::endl;
154
+ test_successful = false;
155
+ }
156
+ }
157
+ }
158
+
159
+ if (test_successful) {
160
+ std::cout << " Success!" << std::endl;
161
+ }
162
+ return test_successful;
163
+ }
164
+
165
+ // Tests the case when ngram features get equal weight. Returns "true" if the
166
+ // test is successful and "false" otherwise.
167
+ bool TestExtractFeaturesWithEqualWeight() {
168
+ std::cout << "Running " << __FUNCTION__ << std::endl;
169
+
170
+ // The integer id of each char ngram is computed as follows:
171
+ // utils::Hash32WithDefaultSeed(char ngram) % id_dim.
172
+ const int id_dim = 100;
173
+ const string features = "continuous-bag-of-ngrams(id_dim=" +
174
+ std::to_string(id_dim) +
175
+ ",size=2,include_terminators=true,include_" +
176
+ "spaces=false,use_equal_weight=true)";
177
+ Sentence sentence;
178
+ sentence.set_text("aa aab");
179
+ const std::vector<string> expected_char_ngrams{"ab", "b$", "^a", "aa", "a$"};
180
+ const std::vector<float> expected_weights = {0.2f, 0.2f, 0.2f, 0.2f, 0.2f};
181
+ return ExtractAndCheckFeatures(features, id_dim, expected_char_ngrams,
182
+ expected_weights, &sentence);
183
+ }
184
+
185
+ // Tests the case when ngram features get weights equal to their normalized
186
+ // counts. Returns "true" if the test is successful and "false" otherwise.
187
+ bool TestExtractFeaturesWithNonEqualWeight() {
188
+ std::cout << "Running " << __FUNCTION__ << std::endl;
189
+
190
+ // The integer id of each char ngram is computed as follows:
191
+ // utils::Hash32WithDefaultSeed(char ngram) % id_dim.
192
+ const int id_dim = 100;
193
+ const string features = "continuous-bag-of-ngrams(id_dim=" +
194
+ std::to_string(id_dim) +
195
+ ",size=2,include_terminators=true,include_" +
196
+ "spaces=false,use_equal_weight=false)";
197
+ Sentence sentence;
198
+ sentence.set_text("aa aab");
199
+ const std::vector<string> expected_char_ngrams{"ab", "b$", "^a", "aa", "a$"};
200
+ const std::vector<float> expected_weights{0.1428f, 0.1428f, 0.2857f, 0.2857f,
201
+ 0.1428f};
202
+ return ExtractAndCheckFeatures(features, id_dim, expected_char_ngrams,
203
+ expected_weights, &sentence);
204
+ }
205
+
206
+ // Tests the feature Script.
207
+ bool TestScriptFeature() {
208
+ std::cout << "Running " << __FUNCTION__ << std::endl;
209
+
210
+ bool test_successful = true;
211
+ TaskContext context;
212
+ context.SetParameter("language_identifier_features", "script");
213
+ FeatureIdWeightCalculator calc(&context);
214
+
215
+ // Check the script of the English sentence.
216
+ Sentence sentence;
217
+ sentence.set_text("food");
218
+ std::set<int> feature_val_ids = calc.GetFeatureValueIds(&sentence);
219
+ if (feature_val_ids.size() != 1 ||
220
+ feature_val_ids.count(chrome_lang_id::CLD2::ULScript_Latin) == 0) {
221
+ test_successful = false;
222
+ std::cout << " Failure for input: " << sentence.text() << std::endl;
223
+ }
224
+
225
+ // Check the script of a Chinese sentence.
226
+ sentence.set_text("字");
227
+ feature_val_ids = calc.GetFeatureValueIds(&sentence);
228
+ if (feature_val_ids.size() != 1 ||
229
+ feature_val_ids.count(chrome_lang_id::CLD2::ULScript_Hani) == 0) {
230
+ test_successful = false;
231
+ std::cout << " Failure for input: " << sentence.text() << std::endl;
232
+ }
233
+
234
+ // Check the script of a Korean sentence.
235
+ sentence.set_text("워드");
236
+ feature_val_ids = calc.GetFeatureValueIds(&sentence);
237
+ if (feature_val_ids.size() != 1 ||
238
+ feature_val_ids.count(chrome_lang_id::CLD2::NUM_ULSCRIPTS) == 0) {
239
+ test_successful = false;
240
+ std::cout << " Failure for input: " << sentence.text() << std::endl;
241
+ }
242
+
243
+ if (test_successful) {
244
+ std::cout << " Success!" << std::endl;
245
+ }
246
+ return test_successful;
247
+ }
248
+
249
+ } // namespace language_identifier_features_test
250
+ } // namespace chrome_lang_id
251
+
252
+ // Runs the feature extraction tests.
253
+ int main(int argc, char **argv) {
254
+ const bool tests_successful =
255
+ chrome_lang_id::language_identifier_features_test::
256
+ TestExtractFeaturesWithEqualWeight() &&
257
+ chrome_lang_id::language_identifier_features_test::
258
+ TestExtractFeaturesWithNonEqualWeight() &&
259
+ chrome_lang_id::language_identifier_features_test::TestScriptFeature();
260
+ return tests_successful ? 0 : 1;
261
+ }
@@ -0,0 +1,54 @@
1
+ /* Copyright 2016 Google Inc. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #include <iostream>
17
+ #include <string>
18
+
19
+ #include "base.h"
20
+ #include "nnet_language_identifier.h"
21
+
22
+ using chrome_lang_id::NNetLanguageIdentifier;
23
+
24
+ // Runs a neural net model for language identification.
25
+ int main(int argc, char **argv) {
26
+ NNetLanguageIdentifier lang_id(/*min_num_bytes=*/0,
27
+ /*max_num_bytes=*/1000);
28
+
29
+ const std::vector<std::string> texts{"This text is written in English.",
30
+ "Text in deutscher Sprache verfasst."};
31
+ for (const std::string &text : texts) {
32
+ const NNetLanguageIdentifier::Result result = lang_id.FindLanguage(text);
33
+ std::cout << "text: " << text << std::endl
34
+ << " language: " << result.language << std::endl
35
+ << " probability: " << result.probability << std::endl
36
+ << " reliable: " << result.is_reliable << std::endl
37
+ << " proportion: " << result.proportion << std::endl
38
+ << std::endl;
39
+ }
40
+
41
+ const std::string &text =
42
+ "This piece of text is in English. Този текст е на Български.";
43
+ std::cout << "text: " << text << std::endl;
44
+ const std::vector<NNetLanguageIdentifier::Result> results =
45
+ lang_id.FindTopNMostFreqLangs(text, /*num_langs*/ 3);
46
+ for (const NNetLanguageIdentifier::Result &result : results) {
47
+ std::cout << " language: " << result.language << std::endl
48
+ << " probability: " << result.probability << std::endl
49
+ << " reliable: " << result.is_reliable << std::endl
50
+ << " proportion: " << result.proportion << std::endl
51
+ << std::endl;
52
+ }
53
+ return 0;
54
+ }
@@ -0,0 +1,254 @@
1
+ /* Copyright 2016 Google Inc. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #include <cmath>
17
+ #include <iostream>
18
+ #include <string>
19
+ #include <utility>
20
+ #include <vector>
21
+
22
+ #include "base.h"
23
+ #include "nnet_lang_id_test_data.h"
24
+ #include "nnet_language_identifier.h"
25
+
26
+ namespace chrome_lang_id {
27
+ namespace nnet_lang_id_test {
28
+
29
+ // Tests the model on all supported languages. Returns "true" if the test is
30
+ // successful and "false" otherwise.
31
+ // TODO(abakalov): Add a test for random input that should be labeled as
32
+ // "unknown" due to low confidence.
33
+ bool TestPredictions() {
34
+ std::cout << "Running " << __FUNCTION__ << std::endl;
35
+
36
+ // (gold language, sample text) pairs used for testing.
37
+ const std::vector<std::pair<std::string, std::string>> gold_lang_text = {
38
+ {"af", NNetLangIdTestData::kTestStrAF},
39
+ {"ar", NNetLangIdTestData::kTestStrAR},
40
+ {"az", NNetLangIdTestData::kTestStrAZ},
41
+ {"be", NNetLangIdTestData::kTestStrBE},
42
+ {"bg", NNetLangIdTestData::kTestStrBG},
43
+ {"bn", NNetLangIdTestData::kTestStrBN},
44
+ {"bs", NNetLangIdTestData::kTestStrBS},
45
+ {"ca", NNetLangIdTestData::kTestStrCA},
46
+ {"ceb", NNetLangIdTestData::kTestStrCEB},
47
+ {"cs", NNetLangIdTestData::kTestStrCS},
48
+ {"cy", NNetLangIdTestData::kTestStrCY},
49
+ {"da", NNetLangIdTestData::kTestStrDA},
50
+ {"de", NNetLangIdTestData::kTestStrDE},
51
+ {"el", NNetLangIdTestData::kTestStrEL},
52
+ {"en", NNetLangIdTestData::kTestStrEN},
53
+ {"eo", NNetLangIdTestData::kTestStrEO},
54
+ {"es", NNetLangIdTestData::kTestStrES},
55
+ {"et", NNetLangIdTestData::kTestStrET},
56
+ {"eu", NNetLangIdTestData::kTestStrEU},
57
+ {"fa", NNetLangIdTestData::kTestStrFA},
58
+ {"fi", NNetLangIdTestData::kTestStrFI},
59
+ {"fil", NNetLangIdTestData::kTestStrFIL},
60
+ {"fr", NNetLangIdTestData::kTestStrFR},
61
+ {"ga", NNetLangIdTestData::kTestStrGA},
62
+ {"gl", NNetLangIdTestData::kTestStrGL},
63
+ {"gu", NNetLangIdTestData::kTestStrGU},
64
+ {"ha", NNetLangIdTestData::kTestStrHA},
65
+ {"hi", NNetLangIdTestData::kTestStrHI},
66
+ {"hmn", NNetLangIdTestData::kTestStrHMN},
67
+ {"hr", NNetLangIdTestData::kTestStrHR},
68
+ {"ht", NNetLangIdTestData::kTestStrHT},
69
+ {"hu", NNetLangIdTestData::kTestStrHU},
70
+ {"hy", NNetLangIdTestData::kTestStrHY},
71
+ {"id", NNetLangIdTestData::kTestStrID},
72
+ {"ig", NNetLangIdTestData::kTestStrIG},
73
+ {"is", NNetLangIdTestData::kTestStrIS},
74
+ {"it", NNetLangIdTestData::kTestStrIT},
75
+ {"iw", NNetLangIdTestData::kTestStrIW},
76
+ {"ja", NNetLangIdTestData::kTestStrJA},
77
+ {"jv", NNetLangIdTestData::kTestStrJV},
78
+ {"ka", NNetLangIdTestData::kTestStrKA},
79
+ {"kk", NNetLangIdTestData::kTestStrKK},
80
+ {"km", NNetLangIdTestData::kTestStrKM},
81
+ {"kn", NNetLangIdTestData::kTestStrKN},
82
+ {"ko", NNetLangIdTestData::kTestStrKO},
83
+ {"la", NNetLangIdTestData::kTestStrLA},
84
+ {"lo", NNetLangIdTestData::kTestStrLO},
85
+ {"lt", NNetLangIdTestData::kTestStrLT},
86
+ {"lv", NNetLangIdTestData::kTestStrLV},
87
+ {"mg", NNetLangIdTestData::kTestStrMG},
88
+ {"mi", NNetLangIdTestData::kTestStrMI},
89
+ {"mk", NNetLangIdTestData::kTestStrMK},
90
+ {"ml", NNetLangIdTestData::kTestStrML},
91
+ {"mn", NNetLangIdTestData::kTestStrMN},
92
+ {"mr", NNetLangIdTestData::kTestStrMR},
93
+ {"ms", NNetLangIdTestData::kTestStrMS},
94
+ {"mt", NNetLangIdTestData::kTestStrMT},
95
+ {"my", NNetLangIdTestData::kTestStrMY},
96
+ {"ne", NNetLangIdTestData::kTestStrNE},
97
+ {"nl", NNetLangIdTestData::kTestStrNL},
98
+ {"no", NNetLangIdTestData::kTestStrNO},
99
+ {"ny", NNetLangIdTestData::kTestStrNY},
100
+ {"pa", NNetLangIdTestData::kTestStrPA},
101
+ {"pl", NNetLangIdTestData::kTestStrPL},
102
+ {"pt", NNetLangIdTestData::kTestStrPT},
103
+ {"ro", NNetLangIdTestData::kTestStrRO},
104
+ {"ru", NNetLangIdTestData::kTestStrRU},
105
+ {"si", NNetLangIdTestData::kTestStrSI},
106
+ {"sk", NNetLangIdTestData::kTestStrSK},
107
+ {"sl", NNetLangIdTestData::kTestStrSL},
108
+ {"so", NNetLangIdTestData::kTestStrSO},
109
+ {"sq", NNetLangIdTestData::kTestStrSQ},
110
+ {"sr", NNetLangIdTestData::kTestStrSR},
111
+ {"st", NNetLangIdTestData::kTestStrST},
112
+ {"su", NNetLangIdTestData::kTestStrSU},
113
+ {"sv", NNetLangIdTestData::kTestStrSV},
114
+ {"sw", NNetLangIdTestData::kTestStrSW},
115
+ {"ta", NNetLangIdTestData::kTestStrTA},
116
+ {"te", NNetLangIdTestData::kTestStrTE},
117
+ {"tg", NNetLangIdTestData::kTestStrTG},
118
+ {"th", NNetLangIdTestData::kTestStrTH},
119
+ {"tr", NNetLangIdTestData::kTestStrTR},
120
+ {"uk", NNetLangIdTestData::kTestStrUK},
121
+ {"ur", NNetLangIdTestData::kTestStrUR},
122
+ {"uz", NNetLangIdTestData::kTestStrUZ},
123
+ {"vi", NNetLangIdTestData::kTestStrVI},
124
+ {"yi", NNetLangIdTestData::kTestStrYI},
125
+ {"yo", NNetLangIdTestData::kTestStrYO},
126
+ {"zh", NNetLangIdTestData::kTestStrZH},
127
+ {"zu", NNetLangIdTestData::kTestStrZU}};
128
+
129
+ NNetLanguageIdentifier lang_id(/*min_num_bytes=*/0,
130
+ /*max_num_bytes=*/1000);
131
+
132
+ // Iterate over all the test instances, make predictions and check that they
133
+ // are correct.
134
+ int num_wrong = 0;
135
+ for (const auto &test_instance : gold_lang_text) {
136
+ const std::string &expected_lang = test_instance.first;
137
+ const std::string &text = test_instance.second;
138
+
139
+ const NNetLanguageIdentifier::Result result = lang_id.FindLanguage(text);
140
+ if (result.language != expected_lang) {
141
+ ++num_wrong;
142
+ std::cout << " Misclassification: " << std::endl;
143
+ std::cout << " Text: " << text << std::endl;
144
+ std::cout << " Expected language: " << expected_lang << std::endl;
145
+ std::cout << " Predicted language: " << result.language << std::endl;
146
+ }
147
+ }
148
+
149
+ if (num_wrong == 0) {
150
+ std::cout << " Success!" << std::endl;
151
+ return true;
152
+ } else {
153
+ std::cout << " Failure: " << num_wrong << " wrong predictions"
154
+ << std::endl;
155
+ return false;
156
+ }
157
+ }
158
+
159
+ // Tests the model on input containing multiple languages of different scripts.
160
+ // Returns "true" if the test is successful and "false" otherwise.
161
+ bool TestMultipleLanguagesInInput() {
162
+ std::cout << "Running " << __FUNCTION__ << std::endl;
163
+
164
+ // Text containing snippets in English and Bulgarian.
165
+ const std::string text =
166
+ "This piece of text is in English. Този текст е на Български.";
167
+
168
+ // Expected language spans in the input text, corresponding respectively to
169
+ // Bulgarian and English.
170
+ const std::string expected_bg_span = " Този текст е на Български ";
171
+ const std::string expected_en_span = " This piece of text is in English ";
172
+ const float expected_byte_sum =
173
+ static_cast<float>(expected_bg_span.size() + expected_en_span.size());
174
+
175
+ // Number of languages to query for and the expected byte proportions.
176
+ const int num_queried_langs = 3;
177
+ const std::unordered_map<string, float> expected_lang_proportions{
178
+ {"bg", expected_bg_span.size() / expected_byte_sum},
179
+ {"en", expected_en_span.size() / expected_byte_sum},
180
+ {NNetLanguageIdentifier::kUnknown, 0.0}};
181
+
182
+ NNetLanguageIdentifier lang_id(/*min_num_bytes=*/0,
183
+ /*max_num_bytes=*/1000);
184
+ const std::vector<NNetLanguageIdentifier::Result> results =
185
+ lang_id.FindTopNMostFreqLangs(text, num_queried_langs);
186
+
187
+ if (results.size() != expected_lang_proportions.size()) {
188
+ std::cout << " Failure" << std::endl;
189
+ std::cout << " Wrong number of languages: expected "
190
+ << expected_lang_proportions.size() << ", obtained "
191
+ << results.size() << std::endl;
192
+ return false;
193
+ }
194
+
195
+ // Iterate over the results and check that the correct proportions are
196
+ // returned for the expected languages.
197
+ const float epsilon = 0.00001f;
198
+ for (const NNetLanguageIdentifier::Result &result : results) {
199
+ if (expected_lang_proportions.count(result.language) == 0) {
200
+ std::cout << " Failure" << std::endl;
201
+ std::cout << " Incorrect language: " << result.language << std::endl;
202
+ return false;
203
+ }
204
+ if (std::abs(result.proportion -
205
+ expected_lang_proportions.at(result.language)) > epsilon) {
206
+ std::cout << " Failure" << std::endl;
207
+ std::cout << " Language " << result.language << ": expected proportion "
208
+ << expected_lang_proportions.at(result.language) << ", got "
209
+ << result.proportion << std::endl;
210
+ return false;
211
+ }
212
+
213
+ // Skip over undefined language.
214
+ if (result.language == "und")
215
+ continue;
216
+ if (result.byte_ranges.size() != 1) {
217
+ std::cout << " Should only detect one span containing " << result.language
218
+ << std::endl;
219
+ return false;
220
+ }
221
+ // Check that specified byte ranges for language are correct.
222
+ int start_index = result.byte_ranges[0].start_index;
223
+ int end_index = result.byte_ranges[0].end_index;
224
+ std::string byte_ranges_text = text.substr(start_index, end_index - start_index);
225
+ if (result.language == "bg") {
226
+ if (byte_ranges_text.compare("Този текст е на Български.") != 0) {
227
+ std::cout << " Incorrect byte ranges returned for Bulgarian " << std::endl;
228
+ return false;
229
+ }
230
+ } else if (result.language == "en") {
231
+ if (byte_ranges_text.compare("This piece of text is in English. ") != 0) {
232
+ std::cout << " Incorrect byte ranges returned for English " << std::endl;
233
+ return false;
234
+ }
235
+ } else {
236
+ std::cout << " Got language other than English or Bulgarian "
237
+ << std::endl;
238
+ return false;
239
+ }
240
+ }
241
+ std::cout << " Success!" << std::endl;
242
+ return true;
243
+ }
244
+
245
+ } // namespace nnet_lang_id_test
246
+ } // namespace chrome_lang_id
247
+
248
+ // Runs tests for the language identification model.
249
+ int main(int argc, char **argv) {
250
+ const bool tests_successful =
251
+ chrome_lang_id::nnet_lang_id_test::TestPredictions() &&
252
+ chrome_lang_id::nnet_lang_id_test::TestMultipleLanguagesInInput();
253
+ return tests_successful ? 0 : 1;
254
+ }