xrpl-ruby 0.0.3 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/address-codec/address_codec.rb +22 -4
- data/lib/address-codec/codec.rb +15 -2
- data/lib/address-codec/xrp_codec.rb +29 -2
- data/lib/binary-codec/binary_codec.rb +62 -0
- data/lib/binary-codec/enums/constants.rb +8 -0
- data/lib/binary-codec/enums/definitions.json +3774 -0
- data/lib/binary-codec/enums/definitions.rb +90 -0
- data/lib/binary-codec/enums/fields.rb +104 -0
- data/lib/binary-codec/serdes/binary_parser.rb +183 -0
- data/lib/binary-codec/serdes/binary_serializer.rb +93 -0
- data/lib/binary-codec/serdes/bytes_list.rb +47 -0
- data/lib/binary-codec/types/account_id.rb +60 -0
- data/lib/binary-codec/types/amount.rb +304 -0
- data/lib/binary-codec/types/blob.rb +41 -0
- data/lib/binary-codec/types/currency.rb +116 -0
- data/lib/binary-codec/types/hash.rb +106 -0
- data/lib/binary-codec/types/issue.rb +50 -0
- data/lib/binary-codec/types/path_set.rb +93 -0
- data/lib/binary-codec/types/serialized_type.rb +157 -0
- data/lib/binary-codec/types/st_array.rb +71 -0
- data/lib/binary-codec/types/st_object.rb +157 -0
- data/lib/binary-codec/types/uint.rb +166 -0
- data/lib/binary-codec/types/vector256.rb +53 -0
- data/lib/binary-codec/types/xchain_bridge.rb +47 -0
- data/lib/binary-codec/utilities.rb +98 -0
- data/lib/core/base_58_xrp.rb +2 -0
- data/lib/core/base_x.rb +10 -0
- data/lib/core/core.rb +79 -6
- data/lib/core/utilities.rb +38 -0
- data/lib/key-pairs/ed25519.rb +64 -0
- data/lib/key-pairs/key_pairs.rb +92 -0
- data/lib/key-pairs/secp256k1.rb +116 -0
- data/lib/wallet/wallet.rb +117 -0
- data/lib/xrpl-ruby.rb +32 -1
- metadata +44 -3
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
require 'json'
|
|
3
|
+
require 'digest'
|
|
4
|
+
|
|
5
|
+
module BinaryCodec
|
|
6
|
+
|
|
7
|
+
class Definitions

  # Loads the XRPL binary-codec definitions from definitions.json and builds
  # lookup tables for field metadata, field headers, type ordinals, ledger
  # entry types, transaction results and transaction types.
  #
  # @raise [RuntimeError] If the definitions file is missing or contains
  #   invalid JSON.
  def initialize
    file_path = File.join(__dir__, 'definitions.json')
    contents = File.read(file_path)
    @definitions = JSON.parse(contents)

    @type_ordinals = @definitions['TYPES']
    @ledger_entry_types = @definitions['LEDGER_ENTRY_TYPES']
    @transaction_results = @definitions['TRANSACTION_RESULTS']
    @transaction_types = @definitions['TRANSACTION_TYPES']

    @field_info_map = {}
    @field_id_name_map = {}
    @field_header_map = {}

    # FIELDS is an array of [name, info] pairs; the block auto-splats them.
    @definitions['FIELDS'].each do |field_name, info|
      field_info = FieldInfo.new(
        nth: info['nth'],
        is_vl_encoded: info['isVLEncoded'],
        is_serialized: info['isSerialized'],
        is_signing_field: info['isSigningField'],
        type: info['type']
      )
      field_header = FieldHeader.new(type: @type_ordinals[field_info.type], nth: field_info.nth)

      @field_info_map[field_name] = field_info
      # Key the reverse map by the (type, nth) pair directly: cheaper and
      # more robust than hashing a Marshal dump of the header object.
      @field_id_name_map[[field_header.type, field_header.nth]] = field_name
      @field_header_map[field_name] = field_header
    end
  rescue Errno::ENOENT
    raise "Error: The file '#{file_path}' was not found. Please ensure the file exists."
  rescue JSON::ParserError => e
    raise "Error: The file '#{file_path}' contains invalid JSON: #{e.message}"
  end

  # Returns the singleton instance of the Definitions class.
  # Uses a class-instance variable rather than a @@class variable, which
  # would be shared across the whole inheritance tree.
  # @return [Definitions] The singleton instance.
  def self.instance
    @instance ||= new
  end

  # Returns the field header for a given field name.
  # @param field_name [String] The name of the field.
  # @return [FieldHeader, nil] The field header, if the field is known.
  def get_field_header_from_name(field_name)
    @field_header_map[field_name]
  end

  # Returns the field name for a given field header.
  # @param field_header [FieldHeader] The field header.
  # @return [String, nil] The name of the field, if known.
  def get_field_name_from_header(field_header)
    @field_id_name_map[[field_header.type, field_header.nth]]
  end

  # Returns a FieldInstance for a given field name.
  # @param field_name [String] The name of the field.
  # @return [FieldInstance] The field instance.
  def get_field_instance(field_name)
    field_info = @field_info_map[field_name]
    field_header = get_field_header_from_name(field_name)

    FieldInstance.new(
      nth: field_info.nth,
      is_variable_length_encoded: field_info.is_vl_encoded,
      is_serialized: field_info.is_serialized,
      is_signing_field: field_info.is_signing_field,
      type: field_info.type,
      # Ordinal packs the type ordinal into the high bits and the field
      # code into the low 16 bits, giving a unique per-field sort key.
      ordinal: (@type_ordinals[field_info.type] << 16) | field_info.nth,
      name: field_name,
      header: field_header,
      associated_type: SerializedType.get_type_by_name(field_info.type)
    )
  end

end
|
|
89
|
+
|
|
90
|
+
end
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module BinaryCodec
|
|
4
|
+
|
|
5
|
+
class FieldHeader

  attr_reader :type, :nth

  # Identifies a field on the wire by its numeric type ordinal and its
  # field code ("nth") within that type.
  # @param type [Integer] The numeric type ordinal.
  # @param nth [Integer] The field code within the type.
  def initialize(type:, nth:)
    @type = type
    @nth = nth
  end

  # Encodes the header following the XRPL field-ID rules: codes below 16
  # are packed into the nibbles of a single leading byte, larger codes get
  # a byte of their own.
  # @return [Array<Integer>] The byte array.
  def to_bytes
    small_type = type < 16
    small_nth = nth < 16

    if small_type && small_nth
      [(type << 4) | nth]
    elsif small_type
      [type << 4, nth]
    elsif small_nth
      [nth, type]
    else
      [0, type, nth]
    end
  end

end
|
|
34
|
+
class FieldInfo

  attr_reader :nth, :is_vl_encoded, :is_serialized, :is_signing_field, :type

  # Immutable metadata for one field as declared in definitions.json.
  # @param nth [Integer] The field code within its type.
  # @param is_vl_encoded [Boolean] Whether the value is length-prefixed.
  # @param is_serialized [Boolean] Whether the field appears in binary form.
  # @param is_signing_field [Boolean] Whether the field is part of signing data.
  # @param type [String] The field's type name.
  def initialize(nth:, is_vl_encoded:, is_serialized:, is_signing_field:, type:)
    @nth, @type = nth, type
    @is_vl_encoded, @is_serialized, @is_signing_field =
      is_vl_encoded, is_serialized, is_signing_field
  end

end
|
|
47
|
+
|
|
48
|
+
class FieldInstance

  attr_reader :nth, :is_variable_length_encoded, :is_serialized, :is_signing_field, :type, :ordinal, :name, :header, :associated_type

  # Fully-resolved description of a single field: the metadata from
  # definitions.json plus its name, sort ordinal, wire header and the
  # SerializedType subclass used to (de)serialize its value.
  def initialize(nth:, is_variable_length_encoded:, is_serialized:, is_signing_field:, type:, ordinal:, name:, header:, associated_type:)
    @nth, @type, @ordinal, @name = nth, type, ordinal, name
    @is_variable_length_encoded = is_variable_length_encoded
    @is_serialized, @is_signing_field = is_serialized, is_signing_field
    @header, @associated_type = header, associated_type
  end

end
|
|
65
|
+
|
|
66
|
+
# TODO: See if this makes sense or if Ruby hashes are just fine
|
|
67
|
+
class FieldLookup

  # Builds a lookup of FieldInstance objects keyed both by field name and
  # by stringified ordinal.
  # @param fields [Hash{String => FieldInfo}] Field metadata keyed by name.
  # @param types [Hash{String => Integer}] Type name to type ordinal map.
  def initialize(fields:, types:)
    @fields_hash = {}

    fields.each do |name, field_info|
      type_ordinal = types[field_info.type]
      field = build_field([name, field_info], type_ordinal)
      @fields_hash[name] = field # Map field by name
      @fields_hash[field.ordinal.to_s] = field # Map field by ordinal
    end
  end

  # Retrieves a FieldInstance by its name or its stringified ordinal.
  # @param value [String] The lookup key.
  # @return [FieldInstance, nil] The field instance, if known.
  def from_string(value)
    @fields_hash[value]
  end

  private

  # Builds a FieldInstance from a [name, FieldInfo] pair.
  # @param field [Array] The [name, FieldInfo] pair.
  # @param type_ordinal [Integer] The numeric ordinal of the field's type.
  # @return [FieldInstance] The built field.
  def build_field(field, type_ordinal)
    name, info = field
    # The header carries the numeric type ordinal (not the type name
    # string), matching the headers built by Definitions.
    field_header = FieldHeader.new(type: type_ordinal, nth: info.nth)
    FieldInstance.new(
      nth: info.nth,
      is_variable_length_encoded: info.is_vl_encoded,
      is_serialized: info.is_serialized,
      is_signing_field: info.is_signing_field,
      type: info.type,
      # Pack type and field code together so each field gets a unique
      # ordinal, consistent with Definitions#get_field_instance. Using the
      # bare type ordinal would make all fields of one type collide in the
      # by-ordinal map above.
      ordinal: (type_ordinal << 16) | info.nth,
      name: name,
      header: field_header,
      # TODO(review): resolve via SerializedType.get_type_by_name(info.type)
      # once load order allows it (see commented-out call in 0.5.0).
      associated_type: SerializedType
    )
  end
end
|
|
103
|
+
|
|
104
|
+
end
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module BinaryCodec
|
|
4
|
+
|
|
5
|
+
class BinaryParser

  attr_reader :definitions

  # Parses XRPL binary data, consuming bytes from the front of an internal
  # byte array decoded from a hex string.
  # @param hex_bytes [String] The hex-encoded input.
  def initialize(hex_bytes = '')
    @bytes = hex_to_bytes(hex_bytes)
    @definitions = Definitions.instance
  end

  # Returns the first byte in the stream without consuming it.
  # @return [Integer] The first byte.
  # @raise [StandardError] If the stream is empty.
  def peek
    raise StandardError, 'End of byte stream reached' if @bytes.empty?

    @bytes[0]
  end

  # Consumes n bytes from the stream.
  # @param n [Integer] The number of bytes to skip.
  # @raise [StandardError] If fewer than n bytes remain.
  def skip(n)
    raise StandardError, 'End of byte stream reached' if n > @bytes.length

    @bytes = @bytes[n..-1]
  end

  # Reads n bytes from the stream.
  # @param n [Integer] The number of bytes to read.
  # @return [Array<Integer>] The read bytes.
  # @raise [StandardError] If fewer than n bytes remain.
  def read(n)
    raise StandardError, 'End of byte stream reached' if n > @bytes.length

    slice = @bytes[0, n]
    skip(n)
    slice
  end

  # Reads n bytes and converts them to an unsigned big-endian integer.
  # @param n [Integer] The number of bytes to read (1-4).
  # @return [Integer] The resulting integer.
  # @raise [StandardError] If n is not in 1..4.
  def read_uint_n(n)
    raise StandardError, 'invalid n' if n <= 0 || n > 4

    read(n).reduce(0) { |acc, byte| (acc << 8) | byte }
  end

  # Reads a 1-byte unsigned integer.
  # @return [Integer] The 8-bit integer.
  def read_uint8
    read_uint_n(1)
  end

  # Reads a 2-byte unsigned integer.
  # @return [Integer] The 16-bit integer.
  def read_uint16
    read_uint_n(2)
  end

  # Reads a 4-byte unsigned integer.
  # @return [Integer] The 32-bit integer.
  def read_uint32
    read_uint_n(4)
  end

  # Returns the number of bytes remaining in the stream.
  # @return [Integer] The remaining size.
  def size
    @bytes.length
  end

  # Checks if the end of the stream has been reached.
  # @param custom_end [Integer, nil] Optional offset to check against.
  # @return [Boolean] True if at the end, false otherwise.
  def end?(custom_end = nil)
    length = @bytes.length
    length.zero? || (!custom_end.nil? && length <= custom_end)
  end

  # Reads variable length data from the stream.
  # @return [Array<Integer>] The read bytes.
  def read_variable_length
    read(read_variable_length_length)
  end

  # Reads the 1-3 byte length prefix of a variable length data segment.
  # @return [Integer] The decoded content length.
  # @raise [StandardError] If the first byte is not a valid indicator.
  def read_variable_length_length
    b1 = read_uint8
    if b1 <= 192
      b1
    elsif b1 <= 240
      b2 = read_uint8
      193 + (b1 - 193) * 256 + b2
    elsif b1 <= 254
      b2 = read_uint8
      b3 = read_uint8
      12_481 + (b1 - 241) * 65_536 + b2 * 256 + b3
    else
      raise StandardError, 'Invalid variable length indicator'
    end
  end

  # Reads a field header from the stream. Type and field codes below 16
  # are packed into the nibbles of the first byte; larger codes follow in
  # their own bytes.
  # @return [FieldHeader] The field header.
  # @raise [StandardError] If a follow-up code is out of range.
  def read_field_header
    type = read_uint8
    nth = type & 15
    type >>= 4

    if type == 0
      # A zero high nibble means the type code is >= 16 and follows.
      type = read_uint8
      raise StandardError, "Cannot read FieldOrdinal, type_code #{type} out of range" if type < 16
    end

    if nth == 0
      # A zero low nibble means the field code is >= 16 and follows.
      nth = read_uint8
      raise StandardError, "Cannot read FieldOrdinal, field_code #{nth} out of range" if nth < 16
    end

    FieldHeader.new(type: type, nth: nth) # (type << 16) | nth for read_field_ordinal
  end

  # Reads a field instance from the stream.
  # @return [FieldInstance] The field instance.
  def read_field
    field_header = read_field_header
    field_name = @definitions.get_field_name_from_header(field_header)

    @definitions.get_field_instance(field_name)
  end

  # Reads a value of the specified type from the stream.
  # @param type [Class] The class of the type to read (subclass of SerializedType).
  # @return [SerializedType] The read value.
  def read_type(type)
    type.from_parser(self)
  end

  # Returns the associated type for a given field.
  # @param field [FieldInstance] The field instance.
  # @return [Class] The associated SerializedType subclass.
  def type_for_field(field)
    field.associated_type
  end

  # Reads the value of a specific field from the stream.
  # @param field [FieldInstance] The field to read.
  # @return [SerializedType] The read value.
  # @raise [StandardError] If the field's type is unknown or parsing yields nil.
  def read_field_value(field)
    type = SerializedType.get_type_by_name(field.type)

    # field.type is the type name string already; the 0.5.0 code
    # interpolated field.type.name here, which itself raised NoMethodError
    # on this error path.
    raise StandardError, "unsupported: (#{field.name}, #{field.type})" if type.nil?

    size_hint = field.is_variable_length_encoded ? read_variable_length_length : nil
    value = type.from_parser(self, size_hint)

    raise StandardError, "from_parser for (#{field.name}, #{field.type}) -> nil" if value.nil?

    value
  end

  # get_size
  # read_field_and_value

end
|
|
182
|
+
|
|
183
|
+
end
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module BinaryCodec
|
|
4
|
+
class BinarySerializer

  # Serializes values into a byte sink.
  # @param sink [BytesList, nil] The sink to write into; a fresh BytesList
  #   is created when nil is given (the previous version required an
  #   explicit nil argument for that).
  def initialize(sink = nil)
    @sink = sink || BytesList.new
  end

  # Serializes a value into the sink.
  # @param value [SerializedType] The value to write.
  def write(value)
    value.to_byte_sink(@sink)
  end

  # Adds raw bytes to the sink.
  # @param bytes [Array<Integer>] The bytes to add.
  def put(bytes)
    @sink.put(bytes)
  end

  # Serializes a value of a given type.
  # @param type [Class] The class of the type (subclass of SerializedType).
  # @param value [Object] The value to serialize.
  def write_type(type, value)
    write(type.from(value))
  end

  # Writes a BytesList into the sink.
  # @param bytes_list [BytesList] The bytes list to write.
  def write_bytes_list(bytes_list)
    bytes_list.to_byte_sink(@sink)
  end

  # Writes a field header followed by the field's serialized value.
  # @param field [FieldInstance] The field to write.
  # @param value [Object] The value of the field.
  # @param is_unl_modify_workaround [Boolean] Whether to apply the UNLModify workaround.
  def write_field_and_value(field, value, is_unl_modify_workaround = false)
    associated_value = field.associated_type.from(value)

    @sink.put(field.header.to_bytes)

    if field.is_variable_length_encoded
      write_length_encoded(associated_value, is_unl_modify_workaround)
    else
      associated_value.to_byte_sink(@sink)
    end
  end

  # Writes a value prefixed with its variable-length encoding.
  # @param value [SerializedType] The value to write.
  # @param is_unl_modify_workaround [Boolean] Whether to apply the UNLModify workaround.
  def write_length_encoded(value, is_unl_modify_workaround = false)
    bytes = BytesList.new

    # The Account field in a UNLModify transaction gets a length prefix
    # but no content, so serialization is skipped for it.
    value.to_byte_sink(bytes) unless is_unl_modify_workaround

    put(encode_variable_length(bytes.get_length))
    write_bytes_list(bytes)
  end

  private

  # Encodes a content length (0..918744) as a 1-3 byte XRPL
  # variable-length prefix.
  # @param length [Integer] The content length in bytes.
  # @return [Array<Integer>] The prefix bytes.
  # @raise [RuntimeError] If the length exceeds 918744.
  def encode_variable_length(length)
    if length <= 192
      [length]
    elsif length <= 12_480
      length -= 193
      [193 + (length >> 8), length & 0xff]
    elsif length <= 918_744
      length -= 12_481
      [241 + (length >> 16), (length >> 8) & 0xff, length & 0xff]
    else
      raise 'Overflow error'
    end
  end

end
|
|
92
|
+
|
|
93
|
+
end
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module BinaryCodec
|
|
4
|
+
class BytesList

  attr_reader :bytes_array

  # A growable sink of byte chunks, flattened into a single byte array on
  # demand.
  def initialize
    @bytes_array = []
  end

  # Returns the total length of all bytes in the list.
  # @return [Integer] The total length.
  def get_length
    @bytes_array.sum(&:length)
  end

  # Adds bytes to the list. The argument is copied so later mutation of
  # the caller's array cannot affect the list.
  # @param bytes_arg [Array<Integer>] The bytes to add.
  # @return [BytesList] self for chaining.
  def put(bytes_arg)
    @bytes_array << bytes_arg.dup
    self
  end

  # Puts the bytes into another byte sink.
  # @param list [Object] The sink to put bytes into.
  def to_byte_sink(list)
    list.put(to_bytes)
  end

  # Returns all bytes as a single flat array.
  # @return [Array<Integer>] The flattened byte array.
  def to_bytes
    @bytes_array.flatten
  end

  # Returns the hex representation of all bytes in the list.
  # @return [String] The hex string.
  def to_hex
    bytes_to_hex(to_bytes)
  end

end
|
|
46
|
+
|
|
47
|
+
end
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module BinaryCodec
|
|
4
|
+
class AccountId < Hash160
  # AccountIDs are 160-bit (20-byte) values.
  @width = 20

  def initialize(bytes = nil)
    super(bytes)
  end

  # Builds an AccountId from an existing instance, a hex string, or a
  # base58 (classic or X-) address.
  # @param value [AccountId, String] The value to convert.
  # @return [AccountId] The created instance.
  def self.from(value)
    return value if value.is_a?(AccountId)
    raise 'Cannot construct AccountID from the value provided' unless value.is_a?(String)
    return new if value.empty?

    valid_hex?(value) ? new(hex_to_bytes(value)) : from_base58(value)
  end

  # Builds an AccountId from a base58 address. X-addresses are converted
  # to their classic form first; a tag is rejected here.
  # @param value [String] The classic or X-address.
  # @return [AccountId] The created instance.
  def self.from_base58(value)
    address_codec = AddressCodec::AddressCodec.new

    if address_codec.valid_x_address?(value)
      classic = address_codec.x_address_to_classic_address(value)
      raise 'Only allowed to have tag on Account or Destination' if classic[:tag] != false

      value = classic[:classic_address]
    end

    new(address_codec.decode_account_id(value))
  end

  # The JSON form of an account is its base58 address.
  def to_json(_definitions = nil, _field_name = nil)
    to_base58
  end

  # Returns the base58 representation of the account ID.
  # @return [String] The base58 address.
  def to_base58
    AddressCodec::AddressCodec.new.encode_account_id(@bytes)
  end
end
|
|
59
|
+
|
|
60
|
+
end
|