hexapdf 1.0.2 → 1.0.3
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -1
- data/lib/hexapdf/cli/inspect.rb +5 -2
- data/lib/hexapdf/encryption/arc4.rb +2 -2
- data/lib/hexapdf/test_utils.rb +2 -1
- data/lib/hexapdf/type/acro_form/form.rb +1 -1
- data/lib/hexapdf/version.rb +1 -1
- data/test/hexapdf/common_tokenizer_tests.rb +3 -3
- data/test/hexapdf/encryption/common.rb +1 -1
- data/test/hexapdf/encryption/test_aes.rb +1 -1
- data/test/hexapdf/encryption/test_arc4.rb +2 -2
- data/test/hexapdf/encryption/test_security_handler.rb +1 -1
- data/test/hexapdf/filter/test_ascii85_decode.rb +1 -1
- data/test/hexapdf/filter/test_ascii_hex_decode.rb +1 -1
- data/test/hexapdf/filter/test_flate_decode.rb +2 -3
- data/test/hexapdf/font/cmap/test_writer.rb +2 -2
- data/test/hexapdf/font/encoding/test_glyph_list.rb +1 -1
- data/test/hexapdf/font/test_true_type_wrapper.rb +2 -2
- data/test/hexapdf/font/test_type1_wrapper.rb +1 -1
- data/test/hexapdf/task/test_merge_acro_form.rb +1 -1
- data/test/hexapdf/test_filter.rb +1 -1
- data/test/hexapdf/test_parser.rb +10 -10
- data/test/hexapdf/test_revisions.rb +1 -1
- data/test/hexapdf/test_serializer.rb +2 -3
- data/test/hexapdf/test_tokenizer.rb +1 -1
- data/test/hexapdf/test_writer.rb +2 -2
- data/test/hexapdf/type/acro_form/test_form.rb +8 -0
- data/test/hexapdf/type/test_image.rb +1 -1
- data/test/hexapdf/type/test_page_tree_node.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7a94d8744657f89cf855604bdb363426637190a5db9615bbcb78e033f9aa5b0f
+  data.tar.gz: b5ea3789c402ce1affb937eca574aba6c3cd21864484f56015aa2aef4acb9b86
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c7a881cc83213116e3f818c4df6063a9ebc758fe79d4120e5163d7bd78509ad358a32c122db0ffdfd6b1def50a6ec53a83e429ece1355aff02751b3a886e9a3
+  data.tar.gz: eb82b47d523c96403fd64afcbd5a3a867b74e059890819c816dadd71b8fac0595a431c9ff926dea8a45c2d949e8802fa9a2dccb776a24acb04922f677cac2b66
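The SHA256 and SHA512 values above are the digests of the metadata.gz and data.tar.gz archives packaged inside the gem. As a hedged aside (the file path below is only a placeholder, not something shipped with hexapdf), such digests can be reproduced with Ruby's standard Digest library:

    require 'digest'

    # Print both digests for an arbitrary file; adjust the path as needed.
    path = 'data.tar.gz'
    puts Digest::SHA256.file(path).hexdigest
    puts Digest::SHA512.file(path).hexdigest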
data/CHANGELOG.md
CHANGED
@@ -1,4 +1,14 @@
-## 1.0.2 - 2024-11-05
+## 1.0.3 - 2024-12-04
+
+### Fixed
+
+* Offsets and lengths of revisions shown using the `inspect rev` CLI command for
+  linearized PDF files
+* [HexaPDF::Type::AcroForm::Form#recalculate_fields] to only consider real
+  fields
+
+
+## 1.0.2 - 2024-11-05
 
 ### Added
 
data/lib/hexapdf/cli/inspect.rb
CHANGED
@@ -395,9 +395,12 @@ module HexaPDF
         end
         io = @doc.revisions.parser.io
 
-        startxrefs = @doc.revisions.map {|rev| rev.trailer[:Prev] }
         io.seek(0, IO::SEEK_END)
-        startxrefs
+        startxrefs = @doc.revisions.map {|rev| rev.trailer[:Prev] } <<
+          @doc.revisions.parser.startxref_offset <<
+          io.pos
+        startxrefs.sort!
+        startxrefs.shift
 
         @doc.revisions.each_with_index.map do |rev, index|
           end_index = 0
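The rewritten code above collects every known cross-reference start (the `/Prev` entry of each revision plus the parser's own startxref offset) together with the end-of-file position, sorts the list and drops the smallest value, leaving one upper boundary per revision. That is what makes the offsets and lengths reported by `inspect rev` correct for linearized files, as noted in the changelog. A rough sketch of the boundary arithmetic with invented numbers (plain Ruby, not the HexaPDF API):

    # Hypothetical byte offsets at which each revision starts, plus the file size.
    revision_starts = [0, 1_200, 4_800]
    file_size       = 9_000

    ends = (revision_starts + [file_size]).sort
    ends.shift                                  # discard the lowest boundary
    revision_starts.zip(ends).each do |from, to|
      puts format("revision at offset %d, length %d", from, to - from)
    end
    # revision at offset 0, length 1200
    # revision at offset 1200, length 3600
    # revision at offset 4800, length 4200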
data/lib/hexapdf/encryption/arc4.rb
CHANGED
@@ -66,14 +66,14 @@ module HexaPDF
       # Encrypts the given +data+ with the +key+.
       #
       # See: PDF2.0 s7.6.3
-      def encrypt(key, data)
+      def encrypt(key, data, &_block)
         new(key).process(data)
       end
       alias decrypt encrypt
 
       # Returns a Fiber object that encrypts the data from the given source fiber with the
       # +key+.
-      def encryption_fiber(key, source)
+      def encryption_fiber(key, source, &_block)
         Fiber.new do
           algorithm = new(key)
           while source.alive? && (data = source.resume)
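Declaring an explicit but unused `&_block` parameter documents that these class methods may be called with a block (the AES test below passes one to `decryption_fiber`) and that ARC4 deliberately ignores it; newer Rubies can also warn, with strict warnings enabled, when a block is passed to a method that never uses it. A small illustration of the idiom with a toy cipher, not HexaPDF's real implementation:

    class ToyCipher
      # Accepts an optional block for interface compatibility but intentionally
      # ignores it; the leading underscore marks the parameter as unused.
      def self.encrypt(key, data, &_block)
        k = key.bytes.first
        data.bytes.map { |b| b ^ k }.pack('C*')
      end
    end

    plain = ToyCipher.encrypt('k', 'secret')            # no block
    also  = ToyCipher.encrypt('k', 'secret') { :hint }  # block is ignored
    p(plain == also)                                    # => true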
data/lib/hexapdf/test_utils.rb
CHANGED
@@ -92,8 +92,9 @@ module HexaPDF
     # Creates a fiber that yields the given string in +len+ length parts.
     def feeder(string, len = string.length)
       Fiber.new do
+        string = string.b
         until string.empty?
-          Fiber.yield(string.slice!(0, len)
+          Fiber.yield(string.slice!(0, len))
         end
       end
     end
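The added `string = string.b` line gives the fiber its own unfrozen, binary-encoded copy of the input, so the destructive `slice!` no longer touches (or fails on) the string the caller passed in. A small usage sketch of the same pattern outside the HexaPDF test suite:

    # Yields a string in fixed-size chunks without mutating the caller's copy.
    def feeder(string, len = string.length)
      Fiber.new do
        string = string.b                 # mutable ASCII-8BIT duplicate
        until string.empty?
          Fiber.yield(string.slice!(0, len))
        end
      end
    end

    fiber = feeder("hello world".freeze, 4)
    chunks = []
    while (part = fiber.resume)
      chunks << part
    end
    p chunks   # => ["hell", "o wo", "rld"]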
data/lib/hexapdf/type/acro_form/form.rb
CHANGED
@@ -517,7 +517,7 @@ module HexaPDF
       #
       # See: JavaScriptActions
       def recalculate_fields
-        self[:CO]
+        (each_field.to_a & self[:CO].to_a).each do |field|
          field = Field.wrap(document, field)
          next unless field && (calculation_action = field[:AA]&.[](:C))
          result = JavaScriptActions.calculate(self, calculation_action)
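Because `/CO` is an ordinary PDF array, it can contain entries that are not form fields at all; intersecting it with the fields actually present in the form keeps only real field objects before they are wrapped and their calculation actions run. The new test in test_form.rb further down pushes junk values through exactly this path. A hedged sketch of the filtering idea in plain Ruby, with symbols standing in for field dictionaries rather than real HexaPDF objects:

    real_fields = [:text1, :text2, :text3]
    co_entries  = [nil, 5, [:some, :array], :page_root, :text3]

    # Array intersection keeps only entries that are also known fields,
    # so junk values never reach the calculation logic.
    to_recalculate = real_fields & co_entries
    p to_recalculate   # => [:text3]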
data/lib/hexapdf/version.rb
CHANGED
data/test/hexapdf/common_tokenizer_tests.rb
CHANGED
@@ -65,7 +65,7 @@ module CommonTokenizerTests
      :'The_Key_of_F#_Minor', :AB, :"",
      '[', 5, 6, :Name, ']', '[', 5, 6, :Name, ']',
      '<<', :Name, 5, '>>'
-    ].
+    ].map {|t| t.respond_to?(:force_encoding) ? t.b : t }
 
    until expected_tokens.empty?
      expected_token = expected_tokens.shift
@@ -127,7 +127,7 @@ module CommonTokenizerTests
  end
 
  it "next_token: should not fail when reading super long numbers" do
-    create_tokenizer("1"
+    create_tokenizer("1" + "0" * 10_000)
    assert_equal(10**10_000, @tokenizer.next_token)
  end
 
@@ -182,7 +182,7 @@ module CommonTokenizerTests
  end
 
  it "returns the correct position on operations" do
-    create_tokenizer("hallo du"
+    create_tokenizer("hallo du" + " " * 50000 + "hallo du")
    @tokenizer.next_token
    assert_equal(5, @tokenizer.pos)
 
data/test/hexapdf/encryption/common.rb
CHANGED
@@ -67,7 +67,7 @@ module ARC4EncryptionTests
    super
    @encrypted = ['BBF316E8D940AF0AD3', '1021BF0420', '45A01F645FC35B383552544B9BF5'].
                 map {|c| [c].pack('H*') }
-    @plain = ['Plaintext', 'pedia', 'Attack at dawn']
+    @plain = ['Plaintext'.b, 'pedia'.b, 'Attack at dawn'.b]
    @keys = ['Key', 'Wiki', 'Secret']
  end
 
data/test/hexapdf/encryption/test_aes.rb
CHANGED
@@ -141,7 +141,7 @@ describe HexaPDF::Encryption::AES do
      collector(@algorithm_class.decryption_fiber('some' * 4, Fiber.new { 'a' * 40 }))
    end
    assert_raises(HexaPDF::EncryptionError) do
-      collector(@algorithm_class.decryption_fiber('some' * 4, Fiber.new { 'a' * 40 })
+      collector(@algorithm_class.decryption_fiber('some' * 4, Fiber.new { 'a' * 40 }) { true })
    end
  end
end
data/test/hexapdf/encryption/test_arc4.rb
CHANGED
@@ -11,13 +11,13 @@ describe HexaPDF::Encryption::ARC4 do
    prepend HexaPDF::Encryption::ARC4
 
    def initialize(key)
-      @data = key
+      @data = +key
    end
 
    def process(data)
      raise if data.empty?
      result = @data << data
-      @data = ''
+      @data = +''
      result
    end
  end
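Many of the test changes in this release (`+key`, `+''`, and the `+'data'` and `+'/F1 10 Tf'` literals below) rely on `String#+@`: applied to a frozen string it returns an unfrozen duplicate, and applied to an already mutable string it returns the receiver unchanged, so later in-place appends keep working when frozen string literals are in effect. A quick illustration:

    # frozen_string_literal: true

    buffer = +''            # unary plus yields a mutable string despite the magic comment
    buffer << 'Plain' << 'text'
    p buffer                # => "Plaintext"

    key = 'Key'
    p key.frozen?           # => true
    copy = +key             # String#+@ duplicates the frozen string
    p copy.frozen?          # => false
    copy << 's'             # safe to mutate the copy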
data/test/hexapdf/encryption/test_security_handler.rb
CHANGED
@@ -236,7 +236,7 @@ describe HexaPDF::Encryption::SecurityHandler do
        dict[:Filter] = :Test
        @enc.strf = alg
        @enc.set_up_encryption(key_length: length, algorithm: (alg == :identity ? :aes : alg))
-        @obj[:X] = @enc.encrypt_string('data', @obj)
+        @obj[:X] = @enc.encrypt_string(+'data', @obj)
        @handler.set_up_decryption(dict)
        assert_equal('data', @handler.decrypt(@obj)[:X])
      end
data/test/hexapdf/filter/test_ascii85_decode.rb
CHANGED
@@ -33,7 +33,7 @@ describe HexaPDF::Filter::ASCII85Decode do
  end
 
  it "ignores data after the EOD marker" do
-    assert_equal(@decoded, collector(@obj.decoder(feeder(@encoded
+    assert_equal(@decoded, collector(@obj.decoder(feeder(@encoded + "~>abcdefg"))))
  end
 
  it "fails if the input contains invalid characters" do
data/test/hexapdf/filter/test_ascii_hex_decode.rb
CHANGED
@@ -24,7 +24,7 @@ describe HexaPDF::Filter::ASCIIHexDecode do
  end
 
  it "ignores data after the EOD marker" do
-    assert_equal(@decoded, collector(@obj.decoder(feeder(@encoded
+    assert_equal(@decoded, collector(@obj.decoder(feeder(@encoded + '4e6f7gzz'))))
  end
 
  it "assumes the missing char is '0' if the input length is odd" do
data/test/hexapdf/filter/test_flate_decode.rb
CHANGED
@@ -8,11 +8,10 @@ describe HexaPDF::Filter::FlateDecode do
 
  before do
    @obj = HexaPDF::Filter::FlateDecode
-    @all_test_cases = [["abcdefg".
-      "x\xDAKLJNIMK\a\x00\n\xDB\x02\xBD".force_encoding(Encoding::BINARY)]]
+    @all_test_cases = [["abcdefg".b, "x\xDAKLJNIMK\a\x00\n\xDB\x02\xBD".b]]
    @decoded = @all_test_cases[0][0]
    @encoded = @all_test_cases[0][1]
-    @encoded_predictor = "x\xDAcJdbD@\x00\x05\x8F\x00v".
+    @encoded_predictor = "x\xDAcJdbD@\x00\x05\x8F\x00v".b
    @predictor_opts = {Predictor: 12}
  end
 
data/test/hexapdf/font/cmap/test_writer.rb
CHANGED
@@ -5,7 +5,7 @@ require 'hexapdf/font/cmap/writer'
 
 describe HexaPDF::Font::CMap::Writer do
   before do
-    @to_unicode_cmap_data =
+    @to_unicode_cmap_data = +<<~EOF
      /CIDInit /ProcSet findresource begin
      12 dict begin
      begincmap
@@ -32,7 +32,7 @@ describe HexaPDF::Font::CMap::Writer do
      end
    end
    EOF
-    @cid_cmap_data =
+    @cid_cmap_data = +<<~EOF
      %!PS-Adobe-3.0 Resource-CMap
      %%DocumentNeededResources: ProcSet (CIDInit)
      %%IncludeResource: ProcSet (CIDInit)
data/test/hexapdf/font/encoding/test_glyph_list.rb
CHANGED
@@ -32,7 +32,7 @@ describe HexaPDF::Font::Encoding::GlyphList do
 
  it "maps special uXXXX[XX] names to unicode values" do
    assert_equal("A", @list.name_to_unicode(:u0041))
-    assert_equal(
+    assert_equal(+'' << "1F000".hex, @list.name_to_unicode(:u1F000))
  end
 
  it "maps Zapf Dingbats glyph names to their unicode" do
data/test/hexapdf/font/test_true_type_wrapper.rb
CHANGED
@@ -51,7 +51,7 @@ describe HexaPDF::Font::TrueTypeWrapper do
      glyphs = @font_wrapper.decode_utf8("😁")
      assert_equal(1, glyphs.length)
      assert_kind_of(HexaPDF::Font::InvalidGlyph, glyphs.first)
-      assert_equal('' << 128_513, glyphs.first.str)
+      assert_equal(+'' << 128_513, glyphs.first.str)
    end
  end
 
@@ -81,7 +81,7 @@ describe HexaPDF::Font::TrueTypeWrapper do
      glyph = @font_wrapper.glyph(9999)
      assert_kind_of(HexaPDF::Font::InvalidGlyph, glyph)
      assert_equal(0, glyph.id)
-      assert_equal('' << 0xFFFD, glyph.str)
+      assert_equal(+'' << 0xFFFD, glyph.str)
    end
  end
 
data/test/hexapdf/font/test_type1_wrapper.rb
CHANGED
@@ -56,7 +56,7 @@ describe HexaPDF::Font::Type1Wrapper do
      glyphs = @times_wrapper.decode_utf8("😁")
      assert_equal(1, glyphs.length)
      assert_kind_of(HexaPDF::Font::InvalidGlyph, glyphs.first)
-      assert_equal('' << 128_513, glyphs.first.str)
+      assert_equal(+'' << 128_513, glyphs.first.str)
    end
  end
 
data/test/hexapdf/task/test_merge_acro_form.rb
CHANGED
@@ -81,7 +81,7 @@ describe HexaPDF::Task::MergeAcroForm do
  end
 
  it "updates the /DA entries of widgets and fields" do
-    @pages[0][:Annots][0][:DA] = '/F1 10 Tf'
+    @pages[0][:Annots][0][:DA] = +'/F1 10 Tf'
    @doc.task(:merge_acro_form, source: @doc1, pages: @pages)
    field = @doc.acro_form.field_by_name('merged_1.Text')
    assert_equal('0.0 g /F2 0 Tf', field[:DA])
data/test/hexapdf/test_filter.rb
CHANGED
data/test/hexapdf/test_parser.rb
CHANGED
@@ -11,7 +11,7 @@ describe HexaPDF::Parser do
    @document.config['parser.try_xref_reconstruction'] = false
    @document.add(@document.wrap(10, oid: 1, gen: 0))
 
-    create_parser(
+    create_parser(+<<~EOF)
      %PDF-1.7
 
      1 0 obj
@@ -354,7 +354,7 @@ describe HexaPDF::Parser do
  describe "startxref_offset" do
    it "caches the offset value" do
      assert_equal(330, @parser.startxref_offset)
-      @parser.instance_eval { @io
+      @parser.instance_eval { @io.string = @io.string.sub(/330\n/, "309\n") }
      assert_equal(330, @parser.startxref_offset)
    end
 
@@ -363,7 +363,7 @@ describe HexaPDF::Parser do
    end
 
    it "ignores garbage at the end of the file" do
-      create_parser("startxref\n5\n%%EOF"
+      create_parser("startxref\n5\n%%EOF" + "\nhallo" * 150)
      assert_equal(5, @parser.startxref_offset)
    end
 
@@ -373,17 +373,17 @@ describe HexaPDF::Parser do
    end
 
    it "finds the startxref anywhere in file" do
-      create_parser("startxref\n5\n%%EOF"
+      create_parser("startxref\n5\n%%EOF" + "\nhallo" * 5000)
      assert_equal(5, @parser.startxref_offset)
    end
 
    it "handles the case where %%EOF is the on the 1. line of the 1024 byte search block" do
-      create_parser("startxref\n5\n%%EOF\n"
+      create_parser("startxref\n5\n%%EOF\n" + "h" * 1018)
      assert_equal(5, @parser.startxref_offset)
    end
 
    it "handles the case where %%EOF is the on the 2. line of the 1024 byte search block" do
-      create_parser("startxref\n5\n%%EOF\n"
+      create_parser("startxref\n5\n%%EOF\n" + "h" * 1017)
      assert_equal(5, @parser.startxref_offset)
    end
 
@@ -421,7 +421,7 @@ describe HexaPDF::Parser do
 
    it "fails on strict parsing if the startxref is not in the last part of the file" do
      @document.config['parser.on_correctable_error'] = proc { true }
-      create_parser("startxref\n5\n%%EOF"
+      create_parser("startxref\n5\n%%EOF" + "\nhallo" * 5000)
      exp = assert_raises(HexaPDF::MalformedPDFError) { @parser.startxref_offset }
      assert_match(/end-of-file marker not found/, exp.message)
    end
@@ -459,7 +459,7 @@ describe HexaPDF::Parser do
    end
 
    it "ignores junk at the beginning of the file and correctly calculates offset" do
-      create_parser("junk" * 200
+      create_parser("junk" * 200 + "\n%PDF-1.4\n")
      assert_equal('1.4', @parser.file_header_version)
      assert_equal(801, @parser.instance_variable_get(:@header_offset))
    end
@@ -670,13 +670,13 @@ describe HexaPDF::Parser do
    end
 
    it "handles cases where the line contains an invalid string that exceeds the read buffer" do
-      create_parser("(1"
+      create_parser("(1" + "(abc" * 32188 + "\n1 0 obj\n6\nendobj\ntrailer\n<</Size 1>>")
      assert_equal(6, @parser.load_object(@xref).value)
    end
 
    it "handles pathalogical cases which contain many opened literal strings" do
      time = Time.now
-      create_parser("(1"
+      create_parser("(1" + "(abc\n" * 10000 + "\n1 0 obj\n6\nendobj\ntrailer\n<</Size 1>>")
      assert_equal(6, @parser.load_object(@xref).value)
      assert(Time.now - time < 0.5, "Xref reconstruction takes too long")
    end
data/test/hexapdf/test_serializer.rb
CHANGED
@@ -88,7 +88,7 @@ describe HexaPDF::Serializer do
    assert_serialized('/The_Key_of_F#23_Minor', :'The_Key_of_F#_Minor')
    assert_serialized('/ ', :"")
    assert_serialized('/H#c3#b6#c3#9fgang', :Hößgang)
-    assert_serialized('/H#e8lp', "H\xE8lp".
+    assert_serialized('/H#e8lp', "H\xE8lp".b.intern)
    assert_serialized('/#00#09#0a#0c#0d#20', :"\x00\t\n\f\r ")
  end
 
@@ -105,8 +105,7 @@ describe HexaPDF::Serializer do
  it "serializes strings" do
    assert_serialized("(Hallo)", "Hallo")
    assert_serialized("(Hallo\\r\n\t\\(\\)\\\\)", "Hallo\r\n\t()\\")
-    assert_serialized("(\xFE\xFF\x00H\x00a\x00l\x00\f\x00\b\x00\\()".
-      "Hal\f\b(")
+    assert_serialized("(\xFE\xFF\x00H\x00a\x00l\x00\f\x00\b\x00\\()".b, "Hal\f\b(")
  end
 
  it "serializes time like objects" do
data/test/hexapdf/test_tokenizer.rb
CHANGED
@@ -38,7 +38,7 @@ describe HexaPDF::Tokenizer do
  end
 
  it "next_token: should not fail for strings due to use of an internal buffer" do
-    create_tokenizer("(
+    create_tokenizer("(" + ("a" * 8189) + "\\006)")
    assert_equal("a" * 8189 << "\x06", @tokenizer.next_token)
  end
 
data/test/hexapdf/test_writer.rb
CHANGED
@@ -98,7 +98,7 @@ describe HexaPDF::Writer do
  def assert_document_conversion(input_io)
    document = HexaPDF::Document.new(io: input_io)
    document.trailer.info[:Producer] = "unknown"
-    output_io = StringIO.new(''.
+    output_io = StringIO.new(''.b)
    start_xref_offset, xref_section = HexaPDF::Writer.write(document, output_io)
    assert_kind_of(HexaPDF::XRefSection, xref_section)
    assert_kind_of(Integer, start_xref_offset)
@@ -206,7 +206,7 @@ describe HexaPDF::Writer do
 
  it "doesn't create an xref stream if one was just used for an XRefStm entry" do
    # The following document's structure is built like a typical MS Word created PDF
-    input = StringIO.new(<<~EOF.
+    input = StringIO.new(<<~EOF.b)
      %PDF-1.2
      %\xCF\xEC\xFF\xE8\xD7\xCB\xCD
      1 0 obj
data/test/hexapdf/type/acro_form/test_form.rb
CHANGED
@@ -494,6 +494,14 @@ describe HexaPDF::Type::AcroForm::Form do
      @acro_form.recalculate_fields
      assert_equal("10", @text3.field_value)
    end
+
+    it "ensures that only entries in /CO that are actually fields are used" do
+      @text1.field_value = "10"
+      @text3.set_calculate_action(:sfn, fields: 'text1')
+      @acro_form[:CO] = [nil, 5, [:some, :array], @doc.pages.root, @text3]
+      @acro_form.recalculate_fields
+      assert_equal("10", @text3.field_value)
+    end
  end
 
  describe "perform_validation" do
data/test/hexapdf/type/test_image.rb
CHANGED
@@ -171,7 +171,7 @@ describe HexaPDF::Type::Image do
 
  def assert_valid_png(filename, original = nil)
    if PNG_CHECK_AVAILABLE
-      result = `pngcheck -q #{filename}`
+      result = `pngcheck -q #{filename} 2>/dev/null`
      assert(result.empty?, "pngcheck error: #{result}")
    else
      skip("Skipping PNG output validity check because pngcheck executable is missing")
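Backticks capture only a command's standard output, so the appended `2>/dev/null` simply keeps anything pngcheck writes to stderr from cluttering the test run's console; the captured string used in the assertion is built from stdout either way. A tiny demonstration with `echo` instead of pngcheck (a POSIX shell is assumed):

    out = `echo visible; echo noisy >&2`
    p out    # => "visible\n"; "noisy" still appears on the terminal's stderr

    out = `(echo visible; echo noisy >&2) 2>/dev/null`
    p out    # => "visible\n"; "noisy" is discarded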
data/test/hexapdf/type/test_page_tree_node.rb
CHANGED
@@ -326,9 +326,9 @@ describe HexaPDF::Type::PageTreeNode do
    assert(@root.validate(auto_correct: false) {|m, _| p m })
 
    @doc.delete(@pages[3])
-    refute(@root.validate(auto_correct: false)
+    refute(@root.validate(auto_correct: false) do |msg, _|
      assert_match(/invalid object/i, msg)
-    end
+    end)
    assert(@root.validate)
    assert_equal(2, @kid12[:Count])
    assert_equal([@pages[2], @pages[4]], @kid12[:Kids].value)
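The reworked `refute` call moves the block inside the parentheses: a block written after a call's closing parenthesis attaches to the outer method (here `refute`, which would simply ignore it), while wrapping the inner call and its block together inside the outer call's parentheses hands the block to `validate`. A standalone illustration of that binding difference with toy methods, not HexaPDF:

    def report(value)
      [value, block_given?]   # did a block arrive at this outer call?
    end

    def source
      block_given? ? :got_block : :no_block
    end

    # Block outside the inner parentheses: it binds to `report`, not `source`.
    p(report(source) { :ignored })    # => [:no_block, true]

    # Inner call and block wrapped together: the block reaches `source`.
    p(report(source { :ignored }))    # => [:got_block, false]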
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: hexapdf
 version: !ruby/object:Gem::Version
-  version: 1.0.2
+  version: 1.0.3
 platform: ruby
 authors:
 - Thomas Leitner
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-
+date: 2024-12-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: cmdparse