crypto-toolbox 0.2.3 → 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/bin/break-vigenere-xor +3 -0
- data/lib/crypto-toolbox.rb +4 -2
- data/lib/crypto-toolbox/analyzers/utils/key_candidate_map.rb +0 -8
- data/lib/crypto-toolbox/analyzers/utils/key_filter.rb +11 -7
- data/lib/crypto-toolbox/analyzers/utils/letter_frequency.rb +22 -4
- data/lib/crypto-toolbox/analyzers/utils/spell_checker.rb +8 -1
- data/lib/crypto-toolbox/analyzers/vigenere_xor.rb +1 -8
- data/lib/crypto-toolbox/crypt_buffer/concerns/convertable.rb +2 -2
- data/lib/crypto-toolbox/crypto_challanges/solver.rb +3 -13
- data/lib/crypto-toolbox/utils/hamming_distance_filter.rb +15 -0
- metadata +2 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 65cce12b1b52be22312dcdbe387a696e0b5e2dbc
+  data.tar.gz: 79e26f00e777f4f1fb76af853a171419f23aa1b2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: becd7a86fa057831193646cb161799c2f4dd85319990294b3c608b78c2b8cc774cefadd11d0306ac5b5c5f57cd82c419875f08b3cef36aacabd59bbf943b650f
+  data.tar.gz: 00c1bba37d1e37691ee0245f5f8f09004751479bb5002fabe5b42e8222a9449161b39ef025d74e428097e98fa9d25d4256367e1fb801924a7a21cc930e6d704b
data/bin/break-vigenere-xor
CHANGED
@@ -1,16 +1,19 @@
 #!/usr/bin/env ruby

 require 'crypto-toolbox'
+require 'stackprof'

 if ARGV[0].nil?
   $stderr.puts "Missing Argument: Ciphertext (hexstring)"
 else
   ciphertext = ARGV[0]

+  StackProf.run(mode: :cpu, out: 'tmp/stackprof-cpu-vigenere-xor.dump') do
   results = Analyzers::VigenereXor.new.analyze(ciphertext)
   unless results.empty?
     puts "[Success] Found valid result(s):"
     puts results.map(&:str)
   end
+  end

 end
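The wrapper added above profiles the whole Vigenere-XOR run and writes the result to tmp/stackprof-cpu-vigenere-xor.dump. As a rough sketch of how such a dump can be inspected afterwards with stackprof's report API (assumes the stackprof gem is installed and tmp/ exists; this helper is not part of the gem):

# read_vigenere_profile.rb -- illustrative helper, not shipped with crypto-toolbox
require 'stackprof'

# StackProf dumps are Marshal-serialized hashes
data   = Marshal.load(IO.binread('tmp/stackprof-cpu-vigenere-xor.dump'))
report = StackProf::Report.new(data)

# print a flat, sample-sorted text profile of the analysis run
report.print_text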
data/lib/crypto-toolbox.rb
CHANGED
@@ -1,14 +1,17 @@
+# coding: utf-8
 require 'crypto-toolbox/utils/reporting/console.rb'
+require 'crypto-toolbox/utils/hamming_distance_filter.rb'

 require 'crypto-toolbox/crypt_buffer_input_converter.rb'
 require 'crypto-toolbox/crypt_buffer.rb'

 require 'crypto-toolbox/analyzers/utils/key_filter.rb'
-
+require 'crypto-toolbox/analyzers/utils/letter_frequency.rb'
 require 'crypto-toolbox/analyzers/utils/ascii_language_detector.rb'
 require 'crypto-toolbox/analyzers/utils/spell_checker.rb'
 require 'crypto-toolbox/analyzers/utils/human_language_detector.rb'

+
 require 'crypto-toolbox/analyzers/padding_oracle.rb'
 require 'crypto-toolbox/analyzers/cbc_mac.rb'
 require 'crypto-toolbox/analyzers/vigenere_xor.rb'
@@ -17,7 +20,6 @@ require 'crypto-toolbox/ciphers/aes.rb'
 require 'crypto-toolbox/ciphers/caesar.rb'
 require 'crypto-toolbox/ciphers/rot13.rb'

-
 require 'crypto-toolbox/forgers/stream_ciphers/forge_generator.rb'

 require 'crypto-toolbox/crypto_challanges/solver.rb'
data/lib/crypto-toolbox/analyzers/utils/key_candidate_map.rb
CHANGED
@@ -25,9 +25,6 @@ module Analyzers
 # 3) xor any possible byte value (guess) with all nth's bytes
 # 4) select those guesses that decipher the nth-byte stream to only english plain ascii chars
 def run(input_buf,keylen)
-  #return run2(input_buf,keylen)
-  detector = Analyzers::Utils::HumanLanguageDetector.new
-
   candidate_map = (0..(keylen-1)).each_with_object({}) do |key_byte_pos,hsh|
 =begin
 # Letter frquency testing
@@ -41,11 +38,6 @@ module Analyzers
 # create an array of every nth byte of the input. ( thus a pseudo stream of the nth bytes )
 # 1) create an enumerator of the nth positions. e.g for iteration 0: [0,7,14,...]
 # 2) Next: Map the positions to bytes of the input buffer
-#
-# NOTE: regular implementation without cryptbuffer magic:
-# nth_stream = (key_byte_pos).step(input_buf.bytes.length() -1, keylen).map{|i| input_buf.bytes[i]}
-# nth_byte_stream2 = CryptBuffer.new(nth_stream)
-
 nth_byte_stream = input_buf.nth_bytes(keylen,offset: key_byte_pos)
 hsh[key_byte_pos] = 0.upto(255).select{|guess| nth_byte_stream.xor_all_with(guess).bytes.all?{|byte| acceptable_char?(byte) } }
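Context for the hunk above: with a repeating XOR key of length keylen, every keylen-th ciphertext byte was XORed with the same key byte, so each such pseudo-stream reduces to a single-byte XOR. A minimal plain-Ruby sketch of that step, mirroring the removed "regular implementation" NOTE (helper names here are illustrative, not part of the gem):

# Sketch only: brute-force one key byte position of a repeating-key XOR cipher.
# `printable?` stands in for the gem's acceptable_char? check.
def printable?(byte)
  byte == 0x20 || (0x21..0x7e).cover?(byte)
end

def candidate_key_bytes(bytes, keylen, key_byte_pos)
  # every keylen-th byte starting at key_byte_pos shares one key byte
  nth_stream = key_byte_pos.step(bytes.length - 1, keylen).map { |i| bytes[i] }

  # keep every guess that turns the whole stream into printable ASCII
  (0..255).select { |guess| nth_stream.all? { |byte| printable?(byte ^ guess) } }
end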
data/lib/crypto-toolbox/analyzers/utils/key_filter.rb
CHANGED
@@ -5,27 +5,31 @@ module Analyzers
 module KeyFilter
 class AsciiPlain

-
-  def initialize(keys,ciphertext,dict_lang="en_US")
+  def initialize(keys,ciphertext)
     @keys = keys
     @c = @ciphertext = ciphertext
     @keylen = keys.first.length
-    @
+    @detector = Analyzers::Utils::HumanLanguageDetector.new
+    @spell_checker = Analyzers::Utils::SpellChecker.new("en_US")
   end

   def filter
     # how often is the key repeated
     reps = @c.bytes.length / @keylen
     result =[]
-    spell_checker = Analyzers::Utils::SpellChecker.new("en_US")

+
+
+
     # should we fork here ?
     @keys.each_with_index do |key,i| # i is used as a simple counter only !
       test = CryptBuffer.new(@c.bytes[0,@keylen]).xor(key).str
       repkey = CryptBuffer.new((key*reps) + key[0,(@c.bytes.length % reps).to_i])
-      str
-
-
+      str = @c.xor(repkey).to_s
+
+      # NOTE: we dont need the ASCII check provided by the human language detector
+      # since the key selection is usually based on ascii value checks
+      if @spell_checker.human_language?(str)
        result << repkey
        break
      else
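The filter above repeats each candidate key to the ciphertext length, XORs it in, and keeps the first decryption the spell checker accepts as human language. A standalone sketch of that idea without CryptBuffer, with a deliberately crude stand-in for the dictionary check (all names illustrative):

# Sketch only: repeating-key XOR plus a naive "looks like English" test.
# In the gem the real test is a Hunspell-backed spell check.
COMMON_WORDS = %w[the and that have with this from they were].freeze

def english_like?(text)
  words = text.downcase.scan(/[a-z]+/)
  !words.empty? && (words & COMMON_WORDS).any?
end

def xor_with_repeated_key(cipher_bytes, key_bytes)
  cipher_bytes.each_with_index.map { |b, i| b ^ key_bytes[i % key_bytes.length] }
end

def pick_key(cipher_bytes, candidate_keys)
  candidate_keys.find do |key|
    english_like?(xor_with_repeated_key(cipher_bytes, key).pack('C*'))
  end
end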
data/lib/crypto-toolbox/analyzers/utils/letter_frequency.rb
CHANGED
@@ -18,19 +18,37 @@ module Analyzers
   'u' => 2.88,
   'c' => 2.71
 }
+
+
 def letter_count(str)
   str.downcase.each_char.with_object({}) do |c,h|
-    h[c] = (h
+    h[c] = increment_letter_count(h,c) if countable?(c)
   end
 end

 def letter_freq(str)
-  counts
-
-  counts.
+  counts = letter_count(str)
+  total_chars = counts.values.reduce(&:+)
+  Hash[reverse_hash(counts).map{|k,v| [k,calculate_frequency(v,total_chars)] } ]
 end

+
+private

+  def reverse_hash(hsh)
+    hsh.sort_by{|k,v| -v}
+  end
+  def calculate_frequency(value,total)
+    (value/total.to_f).round(4)
+  end
+
+  def increment_letter_count(hsh,char)
+    (hsh.fetch(char,0) + 1)
+  end
+
+  def countable?(char)
+    char =~ /[A-Za-z ]/
+  end
 end
 end
 end
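After this refactor letter_freq returns a relative-frequency table over letters and spaces, sorted by count and rounded to four places. A rough standalone sketch of the same computation (Ruby 2.7+ for tally; independent of the gem's class):

# Sketch only: relative letter frequencies, counting a-z and space, most frequent first.
def letter_frequencies(str)
  counts = str.downcase.each_char.select { |c| c =~ /[a-z ]/ }.tally
  total  = counts.values.sum.to_f
  counts.sort_by { |_, v| -v }.to_h.transform_values { |v| (v / total).round(4) }
end

letter_frequencies("Attack at dawn")
# => {"a"=>0.2857, "t"=>0.2143, " "=>0.1429, "c"=>0.0714, "k"=>0.0714, "d"=>0.0714, "w"=>0.0714, "n"=>0.0714}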
data/lib/crypto-toolbox/analyzers/utils/spell_checker.rb
CHANGED
@@ -1,4 +1,5 @@
 require 'ffi/hunspell'
+require 'ffi/aspell'

 module Analyzers
 module Utils
@@ -6,6 +7,7 @@ module Analyzers

 def initialize(dict_lang="en_US")
   @dict = FFI::Hunspell.dict(dict_lang)
+  @dict2 = FFI::Aspell::Speller.new(dict_lang)
 end
=begin
 NOTE: About spelling error rates and language detection:
@@ -69,8 +71,13 @@ if numbers or single char words are taken into account
   end
 end

+# note:
+# Aspell is much faster but requires expensive and slow removal of all punctuation marks
+# which makes it slower than hunspell.
+# Thus we stick with hunspell for correctness and speed.
 def check?(input)
-  @dict.check?(input)
+  @dict.check?(input)
+  # @dict2.correct?(input.gsub(/[^a-zA-Z]/,""))
 end

 def error_rate_sufficient?(rate)
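The class keeps Hunspell for the actual word check and only instantiates the Aspell speller for comparison. A minimal sketch of the Hunspell path via the ffi-hunspell gem (an installed en_US dictionary is assumed; results depend on the local dictionary):

# Sketch only: the word-level dictionary check the class relies on.
require 'ffi/hunspell'

dict = FFI::Hunspell.dict('en_US')
dict.check?('ciphertext')  # likely true with a standard en_US dictionary
dict.check?('czphyrtxt')   # expected false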
data/lib/crypto-toolbox/analyzers/vigenere_xor.rb
CHANGED
@@ -30,14 +30,7 @@ module Analyzers

 class HammingDistanceKeyLengthFinder
   def keylen_for(buffer)
-
-    distances = ((0+offset)..64).map do |keysize|
-      # take the first 4 blocks of keysize length, generate all combinations (6),
-      # map than to normalized hamming distance and take mean
-      buffer.chunks_of(keysize)[0,4].combination(2).map{|a,b| a.hdist(b,normalize: true)}.reduce(&:+) / 6.0
-    end
-    # get the min distance, find its index, convert the keylen
-    distances.min(4).map{|m| distances.index(m)}.map{|i| i + offset }.uniq
+    ::Utils::HammingDistanceFilter.new.shortest_distance_entries(buffer)
   end
 end
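The key-length finder relies on normalized Hamming distance: ciphertext blocks cut at the true key length tend to differ in fewer bits per byte than blocks cut at a wrong length. The extracted ::Utils::HammingDistanceFilter (added later in this diff) packages that heuristic; a tiny standalone sketch of the distance itself:

# Sketch only: Hamming distance between two equal-length strings,
# i.e. the number of differing bits, optionally normalized per byte.
def hamming_distance(a, b)
  a.bytes.zip(b.bytes).sum { |x, y| (x ^ y).to_s(2).count('1') }
end

def normalized_hamming_distance(a, b)
  hamming_distance(a, b) / a.bytesize.to_f
end

hamming_distance("this is a test", "wokka wokka!!!")  # => 37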
data/lib/crypto-toolbox/crypto_challanges/solver.rb
CHANGED
@@ -8,18 +8,6 @@ module CryptoChallanges
 def solve2(c1,c2)
   (CryptBuffer.from_hex(c1) ^ CryptBuffer.from_hex(c2)).hex.downcase
 end
-
-def letter_count(str)
-  str.downcase.each_char.with_object({}) do |c,h|
-    h[c] = (h.fetch(c,0) + 1) if c =~ /[A-Za-z ]/
-  end
-end
-
-def letter_freq(str)
-  counts = letter_count(str)
-  quotient = counts.values.reduce(&:+).to_f
-  counts.sort_by{|k,v| v}.reverse.to_h.each_with_object({}){|(k,v),hsh| hsh[k] = (v/quotient) }
-end

 def solve3(input)
   candidates = (1..256).map{ |guess| CryptBuffer.from_hex(input).xor_all_with(guess) }
@@ -27,7 +15,9 @@ module CryptoChallanges

   detector.human_language_entries(candidates).first.to_s
 end
-
+
+# challange:
+# One of the 60-character strings in this file has been encrypted by single-character XOR.
 def solve4(hexstrings)
   detector = Analyzers::Utils::HumanLanguageDetector.new
   result = hexstrings.map{|h| CryptBuffer.from_hex(h)}.map.with_index do |c,i|
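solve3 and solve4 brute-force a single-byte XOR key and let the human-language detector pick the winner. A compact standalone sketch of the same brute force over one hex string, scoring by printable ASCII instead of the gem's detector (illustrative only):

# Sketch only: try every single-byte XOR key against a hex-encoded ciphertext
# and rank results by how much plain ASCII text they contain.
def break_single_byte_xor(hexstring)
  bytes = [hexstring].pack('H*').bytes

  scored = (0..255).map do |key|
    plain = bytes.map { |b| b ^ key }.pack('C*')
    [plain.count('a-zA-Z '), key, plain]   # crude stand-in for a language detector
  end

  scored.max_by(&:first)  # => [score, key, plaintext] for the best-looking key
end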
data/lib/crypto-toolbox/utils/hamming_distance_filter.rb
ADDED
@@ -0,0 +1,15 @@
+module Utils
+  class HammingDistanceFilter
+    def shortest_distance_entries(buffer,result_entries: 4,samples: 4)
+      offset = 2
+      distances = ((0+offset)..64).map do |keysize|
+        # take the first 4 blocks of keysize length, generate all combinations (6),
+        # map than to normalized hamming distance and take mean
+        buffer.chunks_of(keysize)[0,samples].combination(2).map{|a,b| a.hdist(b,normalize: true)}.reduce(&:+) / 6.0
+      end
+      # get the min distance, find its index, convert the keylen
+      distances.min(result_entries).map{|m| distances.index(m)}.map{|i| i + offset }.uniq
+    end
+  end
+end
+
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: crypto-toolbox
 version: !ruby/object:Gem::Version
-  version: 0.2.3
+  version: 0.2.4
 platform: ruby
 authors:
 - Dennis Sivia
@@ -82,6 +82,7 @@ files:
 - lib/crypto-toolbox/crypt_buffer_input_converter.rb
 - lib/crypto-toolbox/crypto_challanges/solver.rb
 - lib/crypto-toolbox/forgers/stream_ciphers/forge_generator.rb
+- lib/crypto-toolbox/utils/hamming_distance_filter.rb
 - lib/crypto-toolbox/utils/reporting/console.rb
 homepage: https://github.com/scepticulous/crypto-toolbox
 licenses: