jitera-google-protobuf 3.21.12.pre.beta.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/ext/google/protobuf_c/BUILD.bazel +72 -0
- data/ext/google/protobuf_c/convert.c +361 -0
- data/ext/google/protobuf_c/convert.h +75 -0
- data/ext/google/protobuf_c/defs.c +1280 -0
- data/ext/google/protobuf_c/defs.h +107 -0
- data/ext/google/protobuf_c/extconf.rb +28 -0
- data/ext/google/protobuf_c/map.c +687 -0
- data/ext/google/protobuf_c/map.h +66 -0
- data/ext/google/protobuf_c/message.c +1435 -0
- data/ext/google/protobuf_c/message.h +104 -0
- data/ext/google/protobuf_c/naive.c +92 -0
- data/ext/google/protobuf_c/protobuf.c +480 -0
- data/ext/google/protobuf_c/protobuf.h +120 -0
- data/ext/google/protobuf_c/range2-neon.c +157 -0
- data/ext/google/protobuf_c/range2-sse.c +170 -0
- data/ext/google/protobuf_c/repeated_field.c +657 -0
- data/ext/google/protobuf_c/repeated_field.h +63 -0
- data/ext/google/protobuf_c/ruby-upb.c +13707 -0
- data/ext/google/protobuf_c/ruby-upb.h +10582 -0
- data/ext/google/protobuf_c/utf8_range.h +21 -0
- data/ext/google/protobuf_c/wrap_memcpy.c +52 -0
- data/lib/google/protobuf/any_pb.rb +19 -0
- data/lib/google/protobuf/api_pb.rb +42 -0
- data/lib/google/protobuf/descriptor_dsl.rb +465 -0
- data/lib/google/protobuf/descriptor_pb.rb +279 -0
- data/lib/google/protobuf/duration_pb.rb +19 -0
- data/lib/google/protobuf/empty_pb.rb +17 -0
- data/lib/google/protobuf/field_mask_pb.rb +18 -0
- data/lib/google/protobuf/message_exts.rb +58 -0
- data/lib/google/protobuf/repeated_field.rb +201 -0
- data/lib/google/protobuf/source_context_pb.rb +18 -0
- data/lib/google/protobuf/struct_pb.rb +37 -0
- data/lib/google/protobuf/timestamp_pb.rb +19 -0
- data/lib/google/protobuf/type_pb.rb +92 -0
- data/lib/google/protobuf/well_known_types.rb +240 -0
- data/lib/google/protobuf/wrappers_pb.rb +50 -0
- data/lib/google/protobuf.rb +79 -0
- metadata +129 -0
@@ -0,0 +1,120 @@
|
|
1
|
+
// Protocol Buffers - Google's data interchange format
// Copyright 2014 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Public header for the Ruby protobuf C extension: shared declarations
// used by defs.c / message.c / map.c / repeated_field.c and friends.
//
// NOTE(review): the guard name starts with a double underscore, which is
// reserved for the implementation in C; kept as-is because every other
// file in the extension references this exact name.
#ifndef __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
#define __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__

#include <ruby/encoding.h>
#include <ruby/ruby.h>
#include <ruby/vm.h>

#include "defs.h"
#include "ruby-upb.h"

// These operate on a map field (i.e., a repeated field of submessages whose
// submessage type is a map-entry msgdef).
const upb_FieldDef* map_field_key(const upb_FieldDef* field);
const upb_FieldDef* map_field_value(const upb_FieldDef* field);

// -----------------------------------------------------------------------------
// Arena
// -----------------------------------------------------------------------------

// A Ruby object that wraps an underlying upb_Arena. Any objects that are
// allocated from this arena should reference the Arena in rb_gc_mark(), to
// ensure that the object's underlying memory outlives any Ruby object that can
// reach it.

VALUE Arena_new();
upb_Arena* Arena_get(VALUE arena);

// Fuses this arena to another, throwing a Ruby exception if this is not
// possible.
void Arena_fuse(VALUE arena, upb_Arena* other);

// Pins this Ruby object to the lifetime of this arena, so that as long as the
// arena is alive this object will not be collected.
//
// We use this to guarantee that the "frozen" bit on the object will be
// remembered, even if the user drops their reference to this precise object.
void Arena_Pin(VALUE arena, VALUE obj);

// -----------------------------------------------------------------------------
// ObjectCache
// -----------------------------------------------------------------------------

// Global object cache from upb array/map/message/symtab to wrapper object.
//
// This is a conceptually "weak" cache, in that it does not prevent "val" from
// being collected (though in Ruby <2.7 is it effectively strong, due to
// implementation limitations).

// Adds an entry to the cache. The "arena" parameter must give the arena that
// "key" was allocated from. In Ruby <2.7.0, it will be used to remove the key
// from the cache when the arena is destroyed.
void ObjectCache_Add(const void* key, VALUE val);

// Returns the cached object for this key, if any. Otherwise returns Qnil.
VALUE ObjectCache_Get(const void* key);

// -----------------------------------------------------------------------------
// StringBuilder, for inspect
// -----------------------------------------------------------------------------

// Opaque growable string buffer; defined in protobuf.c. Callers own the
// builder and must release it with StringBuilder_Free().
struct StringBuilder;
typedef struct StringBuilder StringBuilder;

StringBuilder* StringBuilder_New();
void StringBuilder_Free(StringBuilder* b);
void StringBuilder_Printf(StringBuilder* b, const char* fmt, ...);
VALUE StringBuilder_ToRubyString(StringBuilder* b);

// Appends a human-readable rendering of "val" (interpreted per "info") to the
// builder; used to implement #inspect on messages/maps/repeated fields.
void StringBuilder_PrintMsgval(StringBuilder* b, upb_MessageValue val,
                               TypeInfo info);

// -----------------------------------------------------------------------------
// Utilities.
// -----------------------------------------------------------------------------

extern VALUE cTypeError;

#ifdef NDEBUG
// Release build: expands to nothing, but "false && (expr)" keeps "expr"
// type-checked and referenced so NDEBUG builds don't rot.
#define PBRUBY_ASSERT(expr) \
  do {                      \
  } while (false && (expr))
#else
#define PBRUBY_ASSERT(expr) assert(expr)
#endif

// NOTE: macro arguments are evaluated twice; do not pass expressions with
// side effects.
#define PBRUBY_MAX(x, y) (((x) > (y)) ? (x) : (y))

// Silences unused-variable warnings for "var".
#define UPB_UNUSED(var) (void)var

#endif  // __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
|
@@ -0,0 +1,157 @@
|
|
1
|
+
/*
 * Process 2x16 bytes in each iteration.
 * Comments removed for brevity. See range-neon.c for details.
 *
 * AArch64 NEON implementation of range-based UTF-8 validation: for each
 * byte a "range index" is computed from the lead-byte class and position
 * within the sequence, then the byte is checked against per-range min/max
 * tables. Any out-of-range byte sets error bits; the trailing partial
 * block (< 32 bytes, plus any sequence straddling the boundary) is handed
 * to the scalar utf8_naive() fallback.
 */
#ifdef __aarch64__

#include <stdio.h>
#include <stdint.h>
#include <arm_neon.h>

/* Scalar fallback, defined in naive.c; validates the residual tail. */
int utf8_naive(const unsigned char *data, int len);

/* Indexed by the high nibble of a byte: number of continuation bytes the
 * lead byte announces (0 for ASCII/continuation, 1..3 for C0..F0). */
static const uint8_t _first_len_tbl[] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 3,
};

/* Indexed by the high nibble: initial range index of the byte itself
 * (8 marks a lead byte; 0 otherwise). */
static const uint8_t _first_range_tbl[] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8,
};

/* Per-range minimum/maximum legal byte values. Entries 9..15 (min 0xFF,
 * max 0x00) are impossible ranges, so any byte landing there errors. */
static const uint8_t _range_min_tbl[] = {
    0x00, 0x80, 0x80, 0x80, 0xA0, 0x80, 0x90, 0x80,
    0xC2, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
};
static const uint8_t _range_max_tbl[] = {
    0x7F, 0xBF, 0xBF, 0xBF, 0xBF, 0x9F, 0xBF, 0x8F,
    0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Adjustments for the special lead bytes E0/ED/F0/F4 (looked up via
 * vqtbl2q as a 2x16 table, indexed by prev_byte - 0xE0). */
static const uint8_t _range_adjust_tbl[] = {
    2, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
};

/* Return 0 on success, -1 on error */
int utf8_range2(const unsigned char *data, int len)
{
    if (len >= 32) {
        uint8x16_t prev_input = vdupq_n_u8(0);
        uint8x16_t prev_first_len = vdupq_n_u8(0);

        const uint8x16_t first_len_tbl = vld1q_u8(_first_len_tbl);
        const uint8x16_t first_range_tbl = vld1q_u8(_first_range_tbl);
        const uint8x16_t range_min_tbl = vld1q_u8(_range_min_tbl);
        const uint8x16_t range_max_tbl = vld1q_u8(_range_max_tbl);
        /* vld2q deinterleaves; combined with vqtbl2q this indexes the
         * 32-entry adjustment table. */
        const uint8x16x2_t range_adjust_tbl = vld2q_u8(_range_adjust_tbl);

        const uint8x16_t const_1 = vdupq_n_u8(1);
        const uint8x16_t const_2 = vdupq_n_u8(2);
        const uint8x16_t const_e0 = vdupq_n_u8(0xE0);

        /* Four accumulators (two per 16-byte block) to shorten the
         * dependency chains; OR-ed together after the loop. */
        uint8x16_t error1 = vdupq_n_u8(0);
        uint8x16_t error2 = vdupq_n_u8(0);
        uint8x16_t error3 = vdupq_n_u8(0);
        uint8x16_t error4 = vdupq_n_u8(0);

        while (len >= 32) {
            /******************* two blocks interleaved **********************/

#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ < 8)
            /* gcc doesn't support vldq1_u8_x2 until version 8 */
            const uint8x16_t input_a = vld1q_u8(data);
            const uint8x16_t input_b = vld1q_u8(data + 16);
#else
            /* Forces a double load on Clang */
            const uint8x16x2_t input_pair = vld1q_u8_x2(data);
            const uint8x16_t input_a = input_pair.val[0];
            const uint8x16_t input_b = input_pair.val[1];
#endif

            const uint8x16_t high_nibbles_a = vshrq_n_u8(input_a, 4);
            const uint8x16_t high_nibbles_b = vshrq_n_u8(input_b, 4);

            /* Continuation count announced by each byte's high nibble. */
            const uint8x16_t first_len_a =
                vqtbl1q_u8(first_len_tbl, high_nibbles_a);
            const uint8x16_t first_len_b =
                vqtbl1q_u8(first_len_tbl, high_nibbles_b);

            /* Initial range of each byte (8 = lead byte, 0 = other). */
            uint8x16_t range_a = vqtbl1q_u8(first_range_tbl, high_nibbles_a);
            uint8x16_t range_b = vqtbl1q_u8(first_range_tbl, high_nibbles_b);

            /* Propagate the lead byte's length to the following byte
             * (vextq shifts the previous block's last lane in). */
            range_a =
                vorrq_u8(range_a, vextq_u8(prev_first_len, first_len_a, 15));
            range_b =
                vorrq_u8(range_b, vextq_u8(first_len_a, first_len_b, 15));

            /* Propagate to the second and third continuation positions via
             * saturating subtract (counts bottom out at 0). */
            uint8x16_t tmp1_a, tmp2_a, tmp1_b, tmp2_b;
            tmp1_a = vextq_u8(prev_first_len, first_len_a, 14);
            tmp1_a = vqsubq_u8(tmp1_a, const_1);
            range_a = vorrq_u8(range_a, tmp1_a);

            tmp1_b = vextq_u8(first_len_a, first_len_b, 14);
            tmp1_b = vqsubq_u8(tmp1_b, const_1);
            range_b = vorrq_u8(range_b, tmp1_b);

            tmp2_a = vextq_u8(prev_first_len, first_len_a, 13);
            tmp2_a = vqsubq_u8(tmp2_a, const_2);
            range_a = vorrq_u8(range_a, tmp2_a);

            tmp2_b = vextq_u8(first_len_a, first_len_b, 13);
            tmp2_b = vqsubq_u8(tmp2_b, const_2);
            range_b = vorrq_u8(range_b, tmp2_b);

            /* Adjust ranges after the special leads E0/ED/F0/F4, indexed
             * by previous byte minus 0xE0. */
            uint8x16_t shift1_a = vextq_u8(prev_input, input_a, 15);
            uint8x16_t pos_a = vsubq_u8(shift1_a, const_e0);
            range_a = vaddq_u8(range_a, vqtbl2q_u8(range_adjust_tbl, pos_a));

            uint8x16_t shift1_b = vextq_u8(input_a, input_b, 15);
            uint8x16_t pos_b = vsubq_u8(shift1_b, const_e0);
            range_b = vaddq_u8(range_b, vqtbl2q_u8(range_adjust_tbl, pos_b));

            /* Per-byte legal bounds for the computed range index. */
            uint8x16_t minv_a = vqtbl1q_u8(range_min_tbl, range_a);
            uint8x16_t maxv_a = vqtbl1q_u8(range_max_tbl, range_a);

            uint8x16_t minv_b = vqtbl1q_u8(range_min_tbl, range_b);
            uint8x16_t maxv_b = vqtbl1q_u8(range_max_tbl, range_b);

            error1 = vorrq_u8(error1, vcltq_u8(input_a, minv_a));
            error2 = vorrq_u8(error2, vcgtq_u8(input_a, maxv_a));

            error3 = vorrq_u8(error3, vcltq_u8(input_b, minv_b));
            error4 = vorrq_u8(error4, vcgtq_u8(input_b, maxv_b));

            /************************ next iteration *************************/
            prev_input = input_b;
            prev_first_len = first_len_b;

            data += 32;
            len -= 32;
        }
        error1 = vorrq_u8(error1, error2);
        error1 = vorrq_u8(error1, error3);
        error1 = vorrq_u8(error1, error4);

        /* Any nonzero lane means some byte failed its range check. */
        if (vmaxvq_u8(error1))
            return -1;

        /* A multi-byte sequence may straddle the 32-byte boundary: inspect
         * the last 4 processed bytes and back up past any unfinished lead
         * (bytes compared as signed, so > (int8_t)0xBF means "not a
         * continuation byte"). */
        uint32_t token4;
        vst1q_lane_u32(&token4, vreinterpretq_u32_u8(prev_input), 3);

        const int8_t *token = (const int8_t *)&token4;
        int lookahead = 0;
        if (token[3] > (int8_t)0xBF)
            lookahead = 1;
        else if (token[2] > (int8_t)0xBF)
            lookahead = 2;
        else if (token[1] > (int8_t)0xBF)
            lookahead = 3;

        data -= lookahead;
        len += lookahead;
    }

    /* Validate the remaining tail (and any re-wound lead) byte-by-byte. */
    return utf8_naive(data, len);
}

#endif
|
@@ -0,0 +1,170 @@
|
|
1
|
+
/*
 * Process 2x16 bytes in each iteration.
 * Comments removed for brevity. See range-sse.c for details.
 *
 * SSE4.1 implementation of range-based UTF-8 validation. Mirrors the NEON
 * version: a per-byte "range index" is derived from lead-byte classes and
 * sequence position, then each byte is compared against per-range min/max
 * tables using SIGNED byte compares (hence the signed table encodings
 * below). Residual input (< 32 bytes plus any straddling sequence) falls
 * back to the scalar utf8_naive().
 */
#ifdef __SSE4_1__

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

/* Scalar fallback, defined in naive.c; validates the residual tail. */
int utf8_naive(const unsigned char *data, int len);

/* Indexed by high nibble: continuation bytes announced by a lead byte. */
static const int8_t _first_len_tbl[] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 3,
};

/* Indexed by high nibble: initial range index (8 marks a lead byte). */
static const int8_t _first_range_tbl[] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8,
};

/* Per-range min/max, encoded for SIGNED compares: entries 9..15 use
 * min 0x7F / max 0x80 so every byte fails there (impossible ranges).
 * NOTE(review): initializers like 0x80 exceed int8_t's positive range and
 * rely on implementation-defined narrowing; kept byte-for-byte from the
 * original. */
static const int8_t _range_min_tbl[] = {
    0x00, 0x80, 0x80, 0x80, 0xA0, 0x80, 0x90, 0x80,
    0xC2, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F,
};
static const int8_t _range_max_tbl[] = {
    0x7F, 0xBF, 0xBF, 0xBF, 0xBF, 0x9F, 0xBF, 0x8F,
    0xF4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
};

/* Range adjustments for the special leads: E0/ED handled via _df_ee_tbl,
 * F0/F4 via _ef_fe_tbl, selected by saturating arithmetic on
 * (prev_byte - 0xEF). */
static const int8_t _df_ee_tbl[] = {
    0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
};
static const int8_t _ef_fe_tbl[] = {
    0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/* Return 0 on success, -1 on error */
int utf8_range2(const unsigned char *data, int len)
{
    if (len >= 32) {
        __m128i prev_input = _mm_set1_epi8(0);
        __m128i prev_first_len = _mm_set1_epi8(0);

        const __m128i first_len_tbl =
            _mm_loadu_si128((const __m128i *)_first_len_tbl);
        const __m128i first_range_tbl =
            _mm_loadu_si128((const __m128i *)_first_range_tbl);
        const __m128i range_min_tbl =
            _mm_loadu_si128((const __m128i *)_range_min_tbl);
        const __m128i range_max_tbl =
            _mm_loadu_si128((const __m128i *)_range_max_tbl);
        const __m128i df_ee_tbl =
            _mm_loadu_si128((const __m128i *)_df_ee_tbl);
        const __m128i ef_fe_tbl =
            _mm_loadu_si128((const __m128i *)_ef_fe_tbl);

        __m128i error = _mm_set1_epi8(0);

        while (len >= 32) {
            /***************************** block 1 ****************************/
            const __m128i input_a = _mm_loadu_si128((const __m128i *)data);

            /* _mm_srli_epi16 shifts 16-bit lanes, so mask back to nibbles. */
            __m128i high_nibbles =
                _mm_and_si128(_mm_srli_epi16(input_a, 4), _mm_set1_epi8(0x0F));

            __m128i first_len_a = _mm_shuffle_epi8(first_len_tbl, high_nibbles);

            __m128i range_a = _mm_shuffle_epi8(first_range_tbl, high_nibbles);

            /* Propagate the lead byte's length to the next byte
             * (alignr shifts the previous block's last lane in). */
            range_a = _mm_or_si128(
                    range_a, _mm_alignr_epi8(first_len_a, prev_first_len, 15));

            /* Propagate to the 2nd/3rd continuation positions with
             * saturating subtract (counts bottom out at 0). */
            __m128i tmp;
            tmp = _mm_alignr_epi8(first_len_a, prev_first_len, 14);
            tmp = _mm_subs_epu8(tmp, _mm_set1_epi8(1));
            range_a = _mm_or_si128(range_a, tmp);

            tmp = _mm_alignr_epi8(first_len_a, prev_first_len, 13);
            tmp = _mm_subs_epu8(tmp, _mm_set1_epi8(2));
            range_a = _mm_or_si128(range_a, tmp);

            /* Adjust after special leads E0/ED (df_ee) and F0/F4 (ef_fe),
             * keyed on the previous byte. */
            __m128i shift1, pos, range2;
            shift1 = _mm_alignr_epi8(input_a, prev_input, 15);
            pos = _mm_sub_epi8(shift1, _mm_set1_epi8(0xEF));
            tmp = _mm_subs_epu8(pos, _mm_set1_epi8(0xF0));
            range2 = _mm_shuffle_epi8(df_ee_tbl, tmp);
            tmp = _mm_adds_epu8(pos, _mm_set1_epi8(0x70));
            range2 = _mm_add_epi8(range2, _mm_shuffle_epi8(ef_fe_tbl, tmp));

            range_a = _mm_add_epi8(range_a, range2);

            __m128i minv = _mm_shuffle_epi8(range_min_tbl, range_a);
            __m128i maxv = _mm_shuffle_epi8(range_max_tbl, range_a);

            /* Signed compares; table encodings above account for this. */
            tmp = _mm_or_si128(
                      _mm_cmplt_epi8(input_a, minv),
                      _mm_cmpgt_epi8(input_a, maxv)
                  );
            error = _mm_or_si128(error, tmp);

            /***************************** block 2 ****************************/
            const __m128i input_b = _mm_loadu_si128((const __m128i *)(data+16));

            high_nibbles =
                _mm_and_si128(_mm_srli_epi16(input_b, 4), _mm_set1_epi8(0x0F));

            __m128i first_len_b = _mm_shuffle_epi8(first_len_tbl, high_nibbles);

            __m128i range_b = _mm_shuffle_epi8(first_range_tbl, high_nibbles);

            range_b = _mm_or_si128(
                    range_b, _mm_alignr_epi8(first_len_b, first_len_a, 15));


            tmp = _mm_alignr_epi8(first_len_b, first_len_a, 14);
            tmp = _mm_subs_epu8(tmp, _mm_set1_epi8(1));
            range_b = _mm_or_si128(range_b, tmp);

            tmp = _mm_alignr_epi8(first_len_b, first_len_a, 13);
            tmp = _mm_subs_epu8(tmp, _mm_set1_epi8(2));
            range_b = _mm_or_si128(range_b, tmp);

            shift1 = _mm_alignr_epi8(input_b, input_a, 15);
            pos = _mm_sub_epi8(shift1, _mm_set1_epi8(0xEF));
            tmp = _mm_subs_epu8(pos, _mm_set1_epi8(0xF0));
            range2 = _mm_shuffle_epi8(df_ee_tbl, tmp);
            tmp = _mm_adds_epu8(pos, _mm_set1_epi8(0x70));
            range2 = _mm_add_epi8(range2, _mm_shuffle_epi8(ef_fe_tbl, tmp));

            range_b = _mm_add_epi8(range_b, range2);

            minv = _mm_shuffle_epi8(range_min_tbl, range_b);
            maxv = _mm_shuffle_epi8(range_max_tbl, range_b);


            tmp = _mm_or_si128(
                      _mm_cmplt_epi8(input_b, minv),
                      _mm_cmpgt_epi8(input_b, maxv)
                  );
            error = _mm_or_si128(error, tmp);

            /************************ next iteration **************************/
            prev_input = input_b;
            prev_first_len = first_len_b;

            data += 32;
            len -= 32;
        }

        /* Any set bit in the accumulator means a range-check failure. */
        if (!_mm_testz_si128(error, error))
            return -1;

        /* A multi-byte sequence may straddle the 32-byte boundary: look at
         * the last 4 processed bytes and rewind past an unfinished lead
         * (signed compare: > (int8_t)0xBF means "not a continuation"). */
        int32_t token4 = _mm_extract_epi32(prev_input, 3);
        const int8_t *token = (const int8_t *)&token4;
        int lookahead = 0;
        if (token[3] > (int8_t)0xBF)
            lookahead = 1;
        else if (token[2] > (int8_t)0xBF)
            lookahead = 2;
        else if (token[1] > (int8_t)0xBF)
            lookahead = 3;

        data -= lookahead;
        len += lookahead;
    }

    /* Validate the remaining tail (and any re-wound lead) byte-by-byte. */
    return utf8_naive(data, len);
}

#endif
|