wordlist 0.1.1 → 1.0.0

Files changed (148)
  1. checksums.yaml +7 -0
  2. data/.github/workflows/ruby.yml +27 -0
  3. data/.gitignore +6 -3
  4. data/ChangeLog.md +45 -1
  5. data/Gemfile +13 -0
  6. data/LICENSE.txt +1 -3
  7. data/README.md +266 -61
  8. data/Rakefile +7 -32
  9. data/benchmarks.rb +115 -0
  10. data/bin/wordlist +4 -7
  11. data/data/stop_words/ar.txt +104 -0
  12. data/data/stop_words/bg.txt +259 -0
  13. data/data/stop_words/bn.txt +363 -0
  14. data/data/stop_words/ca.txt +126 -0
  15. data/data/stop_words/cs.txt +138 -0
  16. data/data/stop_words/da.txt +101 -0
  17. data/data/stop_words/de.txt +129 -0
  18. data/data/stop_words/el.txt +79 -0
  19. data/data/stop_words/en.txt +175 -0
  20. data/data/stop_words/es.txt +178 -0
  21. data/data/stop_words/eu.txt +98 -0
  22. data/data/stop_words/fa.txt +332 -0
  23. data/data/stop_words/fi.txt +747 -0
  24. data/data/stop_words/fr.txt +116 -0
  25. data/data/stop_words/ga.txt +109 -0
  26. data/data/stop_words/gl.txt +160 -0
  27. data/data/stop_words/he.txt +499 -0
  28. data/data/stop_words/hi.txt +97 -0
  29. data/data/stop_words/hr.txt +179 -0
  30. data/data/stop_words/hu.txt +35 -0
  31. data/data/stop_words/hy.txt +45 -0
  32. data/data/stop_words/id.txt +357 -0
  33. data/data/stop_words/it.txt +134 -0
  34. data/data/stop_words/ja.txt +44 -0
  35. data/data/stop_words/ko.txt +677 -0
  36. data/data/stop_words/ku.txt +63 -0
  37. data/data/stop_words/lt.txt +507 -0
  38. data/data/stop_words/lv.txt +163 -0
  39. data/data/stop_words/mr.txt +99 -0
  40. data/data/stop_words/nl.txt +48 -0
  41. data/data/stop_words/no.txt +172 -0
  42. data/data/stop_words/pl.txt +138 -0
  43. data/data/stop_words/pt.txt +147 -0
  44. data/data/stop_words/ro.txt +281 -0
  45. data/data/stop_words/ru.txt +421 -0
  46. data/data/stop_words/sk.txt +173 -0
  47. data/data/stop_words/sv.txt +386 -0
  48. data/data/stop_words/th.txt +115 -0
  49. data/data/stop_words/tr.txt +114 -0
  50. data/data/stop_words/uk.txt +28 -0
  51. data/data/stop_words/ur.txt +513 -0
  52. data/data/stop_words/zh.txt +125 -0
  53. data/gemspec.yml +4 -10
  54. data/lib/wordlist/abstract_wordlist.rb +24 -0
  55. data/lib/wordlist/builder.rb +170 -138
  56. data/lib/wordlist/cli.rb +458 -0
  57. data/lib/wordlist/compression/reader.rb +72 -0
  58. data/lib/wordlist/compression/writer.rb +80 -0
  59. data/lib/wordlist/exceptions.rb +31 -0
  60. data/lib/wordlist/file.rb +176 -0
  61. data/lib/wordlist/format.rb +38 -0
  62. data/lib/wordlist/lexer/lang.rb +32 -0
  63. data/lib/wordlist/lexer/stop_words.rb +68 -0
  64. data/lib/wordlist/lexer.rb +218 -0
  65. data/lib/wordlist/list_methods.rb +462 -0
  66. data/lib/wordlist/modifiers/capitalize.rb +45 -0
  67. data/lib/wordlist/modifiers/downcase.rb +45 -0
  68. data/lib/wordlist/modifiers/gsub.rb +51 -0
  69. data/lib/wordlist/modifiers/modifier.rb +44 -0
  70. data/lib/wordlist/modifiers/mutate.rb +133 -0
  71. data/lib/wordlist/modifiers/mutate_case.rb +25 -0
  72. data/lib/wordlist/modifiers/sub.rb +97 -0
  73. data/lib/wordlist/modifiers/tr.rb +71 -0
  74. data/lib/wordlist/modifiers/upcase.rb +45 -0
  75. data/lib/wordlist/modifiers.rb +8 -0
  76. data/lib/wordlist/operators/binary_operator.rb +38 -0
  77. data/lib/wordlist/operators/concat.rb +47 -0
  78. data/lib/wordlist/operators/intersect.rb +55 -0
  79. data/lib/wordlist/operators/operator.rb +29 -0
  80. data/lib/wordlist/operators/power.rb +72 -0
  81. data/lib/wordlist/operators/product.rb +50 -0
  82. data/lib/wordlist/operators/subtract.rb +54 -0
  83. data/lib/wordlist/operators/unary_operator.rb +29 -0
  84. data/lib/wordlist/operators/union.rb +61 -0
  85. data/lib/wordlist/operators/unique.rb +52 -0
  86. data/lib/wordlist/operators.rb +7 -0
  87. data/lib/wordlist/unique_filter.rb +40 -61
  88. data/lib/wordlist/version.rb +1 -1
  89. data/lib/wordlist/words.rb +71 -0
  90. data/lib/wordlist.rb +103 -2
  91. data/spec/abstract_list_spec.rb +18 -0
  92. data/spec/builder_spec.rb +220 -76
  93. data/spec/cli_spec.rb +801 -0
  94. data/spec/compression/reader_spec.rb +137 -0
  95. data/spec/compression/writer_spec.rb +194 -0
  96. data/spec/file_spec.rb +258 -0
  97. data/spec/fixtures/wordlist.txt +15 -0
  98. data/spec/fixtures/wordlist.txt.bz2 +0 -0
  99. data/spec/fixtures/wordlist.txt.gz +0 -0
  100. data/spec/fixtures/wordlist.txt.xz +0 -0
  101. data/spec/fixtures/wordlist_with_ambiguous_format +3 -0
  102. data/spec/fixtures/wordlist_with_comments.txt +19 -0
  103. data/spec/fixtures/wordlist_with_empty_lines.txt +19 -0
  104. data/spec/format_spec.rb +50 -0
  105. data/spec/helpers/text.rb +3 -3
  106. data/spec/helpers/wordlist.rb +2 -2
  107. data/spec/lexer/lang_spec.rb +70 -0
  108. data/spec/lexer/stop_words_spec.rb +77 -0
  109. data/spec/lexer_spec.rb +652 -0
  110. data/spec/list_methods_spec.rb +181 -0
  111. data/spec/modifiers/capitalize_spec.rb +27 -0
  112. data/spec/modifiers/downcase_spec.rb +27 -0
  113. data/spec/modifiers/gsub_spec.rb +59 -0
  114. data/spec/modifiers/modifier_spec.rb +20 -0
  115. data/spec/modifiers/mutate_case_spec.rb +46 -0
  116. data/spec/modifiers/mutate_spec.rb +39 -0
  117. data/spec/modifiers/sub_spec.rb +98 -0
  118. data/spec/modifiers/tr_spec.rb +46 -0
  119. data/spec/modifiers/upcase_spec.rb +27 -0
  120. data/spec/operators/binary_operator_spec.rb +19 -0
  121. data/spec/operators/concat_spec.rb +26 -0
  122. data/spec/operators/intersect_spec.rb +37 -0
  123. data/spec/operators/operator_spec.rb +16 -0
  124. data/spec/operators/power_spec.rb +57 -0
  125. data/spec/operators/product_spec.rb +39 -0
  126. data/spec/operators/subtract_spec.rb +37 -0
  127. data/spec/operators/union_spec.rb +37 -0
  128. data/spec/operators/unique_spec.rb +25 -0
  129. data/spec/spec_helper.rb +2 -1
  130. data/spec/unique_filter_spec.rb +108 -18
  131. data/spec/wordlist_spec.rb +55 -3
  132. data/spec/words_spec.rb +41 -0
  133. metadata +183 -120
  134. data/lib/wordlist/builders/website.rb +0 -216
  135. data/lib/wordlist/builders.rb +0 -1
  136. data/lib/wordlist/flat_file.rb +0 -47
  137. data/lib/wordlist/list.rb +0 -162
  138. data/lib/wordlist/mutator.rb +0 -113
  139. data/lib/wordlist/parsers.rb +0 -74
  140. data/lib/wordlist/runners/list.rb +0 -116
  141. data/lib/wordlist/runners/runner.rb +0 -67
  142. data/lib/wordlist/runners.rb +0 -2
  143. data/scripts/benchmark +0 -59
  144. data/scripts/text/comedy_of_errors.txt +0 -4011
  145. data/spec/flat_file_spec.rb +0 -25
  146. data/spec/list_spec.rb +0 -58
  147. data/spec/mutator_spec.rb +0 -43
  148. data/spec/parsers_spec.rb +0 -118
data/lib/wordlist/file.rb
@@ -0,0 +1,176 @@
+require 'wordlist/abstract_wordlist'
+require 'wordlist/exceptions'
+require 'wordlist/format'
+require 'wordlist/compression/reader'
+
+module Wordlist
+  #
+  # Represents a `.txt` file wordlist.
+  #
+  #     wordlist = Wordlist::File.new("rockyou.txt")
+  #     wordlist.each do |word|
+  #       puts word
+  #     end
+  #
+  # @api public
+  #
+  # @since 1.0.0
+  #
+  class File < AbstractWordlist
+
+    # The path to the `.txt` file
+    attr_reader :path
+
+    # The format of the wordlist file.
+    #
+    # @return [:txt, :gzip, :bzip2, :xz]
+    attr_reader :format
+
+    #
+    # Opens a wordlist file.
+    #
+    # @param [String] path
+    #   The path to the `.txt` file wordlist to read from.
+    #
+    # @param [:txt, :gzip, :bzip2, :xz, nil] format
+    #   The format of the wordlist. If not given, the format will be inferred
+    #   from the file extension.
+    #
+    # @raise [WordlistNotFound]
+    #   The given path does not exist.
+    #
+    # @raise [UnknownFormat]
+    #   The format could not be inferred from the file extension.
+    #
+    # @api public
+    #
+    def initialize(path, format: Format.infer(path))
+      @path = ::File.expand_path(path)
+      @format = format
+
+      unless ::File.file?(@path)
+        raise(WordlistNotFound,"wordlist file does not exist: #{path.inspect}")
+      end
+
+      unless Format::FORMATS.include?(@format)
+        raise(UnknownFormat,"unknown format given: #{format.inspect}")
+      end
+    end
+
+    #
+    # Opens a wordlist file.
+    #
+    # @param [String] path
+    #   The path to the `.txt` file wordlist to read from.
+    #
+    # @yield [wordlist]
+    #   If a block is given, it will be passed the opened wordlist.
+    #
+    # @yieldparam [File] wordlist
+    #   The newly opened wordlist.
+    #
+    # @return [File]
+    #   The newly opened wordlist.
+    #
+    # @see #initialize
+    #
+    # @api public
+    #
+    def self.open(path,**kwargs)
+      wordlist = new(path,**kwargs)
+      yield wordlist if block_given?
+      return wordlist
+    end
+
+    #
+    # Opens and reads the wordlist file.
+    #
+    # @param [String] path
+    #   The path to the `.txt` file wordlist to read from.
+    #
+    # @yield [word]
+    #   The given block will be passed every word from the wordlist.
+    #
+    # @yieldparam [String] word
+    #   A word from the wordlist.
+    #
+    # @return [Enumerator]
+    #   If no block is given, an Enumerator object will be returned.
+    #
+    def self.read(path,**kwargs,&block)
+      open(path,**kwargs).each(&block)
+    end
+
+    #
+    # Enumerates through each line in the `.txt` file wordlist.
+    #
+    # @yield [line]
+    #   The given block will be passed each line from the `.txt` file.
+    #
+    # @yieldparam [String] line
+    #   A newline terminated line from the file.
+    #
+    # @return [Enumerator]
+    #   If no block is given, an Enumerator object will be returned.
+    #
+    # @api semipublic
+    #
+    def each_line(&block)
+      return enum_for(__method__) unless block
+
+      open { |io| io.each_line(&block) }
+    end
+
+    #
+    # Enumerates through every word in the `.txt` file.
+    #
+    # @yield [word]
+    #   The given block will be passed every word from the wordlist.
+    #
+    # @yieldparam [String] word
+    #   A word from the wordlist.
+    #
+    # @return [Enumerator]
+    #   If no block is given, an Enumerator object will be returned.
+    #
+    # @note
+    #   Empty lines and lines beginning with `#` characters will be ignored.
+    #
+    # @example
+    #   wordlist.each do |word|
+    #     puts word
+    #   end
+    #
+    # @api public
+    #
+    def each
+      return enum_for(__method__) unless block_given?
+
+      each_line do |line|
+        line.chomp!
+
+        unless (line.empty? || line.start_with?('#'))
+          yield line
+        end
+      end
+    end
+
+    private
+
+    #
+    # Opens the wordlist for reading.
+    #
+    # @yield [io]
+    #
+    # @yieldparam [IO] io
+    #
+    def open(&block)
+      if @format == :txt
+        ::File.open(@path,&block)
+      else
+        Compression::Reader.open(@path, format: @format, &block)
+      end
+    end
+
+  end
+end
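
The class above is the new core reader: format detection, transparent decompression, and comment/blank-line filtering all happen behind `#each`. A minimal usage sketch of the API shown in this hunk (the file names are placeholders, not files shipped with the gem):

    require 'wordlist/file'

    # Format is inferred from the extension; pass `format:` to override it.
    wordlist = Wordlist::File.open("passwords.txt.gz")

    # Empty lines and lines starting with '#' are skipped.
    wordlist.each { |word| puts word }

    # One-shot variant that opens and enumerates in a single call.
    Wordlist::File.read("passwords.txt") { |word| puts word }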
data/lib/wordlist/format.rb
@@ -0,0 +1,38 @@
+require 'wordlist/exceptions'
+
+module Wordlist
+  #
+  # Handles wordlist format detection.
+  #
+  # @since 1.0.0
+  #
+  module Format
+    # Mapping of file extensions to formats
+    FILE_FORMATS = {
+      '.txt' => :txt,
+      '.gz' => :gzip,
+      '.bz2' => :bzip2,
+      '.xz' => :xz
+    }
+
+    # Valid formats.
+    FORMATS = FILE_FORMATS.values
+
+    #
+    # Infers the format from the given file name.
+    #
+    # @param [String] path
+    #   The path to the file.
+    #
+    # @return [:txt, :gzip, :bzip2, :xz]
+    #
+    # @raise [UnknownFormat]
+    #   The format could not be inferred from the file path.
+    #
+    def self.infer(path)
+      FILE_FORMATS.fetch(::File.extname(path)) do
+        raise(UnknownFormat,"could not infer the format of file: #{path.inspect}")
+      end
+    end
+  end
+end
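
For reference, a short sketch of how `Format.infer` maps file extensions to format symbols (the paths are illustrative):

    require 'wordlist/format'

    Wordlist::Format.infer("rockyou.txt")     # => :txt
    Wordlist::Format.infer("rockyou.txt.gz")  # => :gzip
    Wordlist::Format.infer("rockyou.txt.bz2") # => :bzip2
    Wordlist::Format.infer("rockyou.txt.xz")  # => :xz
    Wordlist::Format.infer("rockyou")         # raises Wordlist::UnknownFormat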
data/lib/wordlist/lexer/lang.rb
@@ -0,0 +1,32 @@
+module Wordlist
+  class Lexer
+    #
+    # Detects the system's default language.
+    #
+    # @api semipublic
+    #
+    # @since 1.0.0
+    #
+    module Lang
+      #
+      # The default language.
+      #
+      # @return [Symbol]
+      #
+      def self.default
+        if (lang = ENV['LANG'])
+          lang, encoding = lang.split('.',2)
+          lang, country = lang.split('_',2)
+
+          unless lang == 'C'
+            lang.to_sym
+          else
+            :en
+          end
+        else
+          :en
+        end
+      end
+    end
+  end
+end
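
A sketch of what `Lang.default` returns for a few representative `LANG` values, following the parsing shown above (the locale strings are chosen for illustration):

    require 'wordlist/lexer/lang'

    ENV['LANG'] = 'en_US.UTF-8'
    Wordlist::Lexer::Lang.default   # => :en

    ENV['LANG'] = 'de_DE.UTF-8'
    Wordlist::Lexer::Lang.default   # => :de

    ENV['LANG'] = 'C'               # the C/POSIX locale falls back to English
    Wordlist::Lexer::Lang.default   # => :en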
data/lib/wordlist/lexer/stop_words.rb
@@ -0,0 +1,68 @@
+require 'wordlist/exceptions'
+
+module Wordlist
+  class Lexer
+    #
+    # Stop words for various languages.
+    #
+    # @api semipublic
+    #
+    # @since 1.0.0
+    #
+    module StopWords
+      # The directory containing the stop words `.txt` files.
+      DIRECTORY = ::File.expand_path(::File.join(__dir__,'..','..','..','data','stop_words'))
+
+      #
+      # The path to the stop words `.txt` file.
+      #
+      # @param [Symbol] lang
+      #   The language to load.
+      #
+      # @return [String]
+      #
+      def self.path_for(lang)
+        ::File.join(DIRECTORY,"#{lang}.txt")
+      end
+
+      #
+      # Reads the stop words.
+      #
+      # @param [Symbol] lang
+      #   The language to load.
+      #
+      # @return [Array<String>]
+      #
+      # @raise [UnsupportedLanguage]
+      #
+      def self.read(lang)
+        path = path_for(lang)
+
+        unless ::File.file?(path)
+          raise(UnsupportedLanguage,"unsupported language: #{lang}")
+        end
+
+        lines = ::File.readlines(path)
+        lines.each(&:chomp!)
+        lines
+      end
+
+      @stop_words = {}
+      @mutex = Mutex.new
+
+      #
+      # Lazy loads the stop words for the given language.
+      #
+      # @param [Symbol] lang
+      #   The language to load.
+      #
+      # @return [Array<String>]
+      #
+      def self.[](lang)
+        @mutex.synchronize do
+          @stop_words[lang] ||= read(lang)
+        end
+      end
+    end
+  end
+end
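
A usage sketch of the lazy, mutex-guarded lookup above; the per-language lists come from the `data/stop_words/*.txt` files added in this release:

    require 'wordlist/lexer/stop_words'

    # First lookup reads data/stop_words/en.txt; later lookups hit the cache.
    english = Wordlist::Lexer::StopWords[:en]
    english.include?('the')          # presumably true for the English list

    # Languages without a stop-word file raise instead of returning nil.
    Wordlist::Lexer::StopWords[:xx]  # raises Wordlist::UnsupportedLanguage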
data/lib/wordlist/lexer.rb
@@ -0,0 +1,218 @@
+require 'wordlist/lexer/lang'
+require 'wordlist/lexer/stop_words'
+
+require 'strscan'
+
+module Wordlist
+  #
+  # Parses arbitrary text and scans each word from it.
+  #
+  # @api semipublic
+  #
+  # @since 1.0.0
+  #
+  class Lexer
+
+    include Enumerable
+
+    # Regexp to match acronyms.
+    ACRONYM = /[[:alpha:]](?:\.[[:alpha:]])+\./
+
+    # Default set of punctuation characters allowed within words
+    SPECIAL_CHARS = %w[_ - ']
+
+    # @return [Symbol]
+    attr_reader :lang
+
+    # @return [Array<String>]
+    attr_reader :stop_words
+
+    # @return [Array<String, Regexp>]
+    attr_reader :ignore_words
+
+    # @return [Array<String>]
+    attr_reader :special_chars
+
+    #
+    # Initializes the lexer.
+    #
+    # @param [Symbol] lang
+    #   The language to use. Defaults to {Lang.default}.
+    #
+    # @param [Array<String>] stop_words
+    #   The explicit stop-words to ignore. If not given, default stop words
+    #   will be loaded based on `lang` or {Lang.default}.
+    #
+    # @param [Array<String, Regexp>] ignore_words
+    #   Optional list of words to ignore. Can contain Strings or Regexps.
+    #
+    # @param [Boolean] digits
+    #   Controls whether parsed words may contain digits or not.
+    #
+    # @param [Array<String>] special_chars
+    #   The additional special characters allowed within words.
+    #
+    # @param [Boolean] numbers
+    #   Controls whether whole numbers will be parsed as words.
+    #
+    # @param [Boolean] acronyms
+    #   Controls whether acronyms will be parsed as words.
+    #
+    # @param [Boolean] normalize_case
+    #   Controls whether to convert all words to lowercase.
+    #
+    # @param [Boolean] normalize_apostrophes
+    #   Controls whether apostrophes will be removed from the end of words.
+    #
+    # @param [Boolean] normalize_acronyms
+    #   Controls whether acronyms will have `.` characters removed.
+    #
+    # @raise [ArgumentError]
+    #   The `ignore_words` keyword contained a value other than a String or
+    #   Regexp.
+    #
+    def initialize(lang: Lang.default,
+                   stop_words: StopWords[lang],
+                   ignore_words: [],
+                   digits: true,
+                   special_chars: SPECIAL_CHARS,
+                   numbers: false,
+                   acronyms: true,
+                   normalize_case: false,
+                   normalize_apostrophes: false,
+                   normalize_acronyms: false)
+      @lang = lang
+      @stop_words = stop_words
+      @ignore_words = ignore_words
+      @special_chars = special_chars
+
+      @digits = digits
+      @numbers = numbers
+      @acronyms = acronyms
+
+      @normalize_acronyms = normalize_acronyms
+      @normalize_apostrophes = normalize_apostrophes
+      @normalize_case = normalize_case
+
+      escaped_chars = Regexp.escape(@special_chars.join)
+
+      @word = if @digits
+                /[[:alpha:]](?:[[:alnum:]#{escaped_chars}]*[[:alnum:]])?/
+              else
+                /[[:alpha:]](?:[[:alpha:]#{escaped_chars}]*[[:alpha:]])?/
+              end
+
+      skip_words = Regexp.union(
+        (@stop_words + @ignore_words).map { |pattern|
+          case pattern
+          when Regexp then pattern
+          when String then /#{Regexp.escape(pattern)}/i
+          else
+            raise(ArgumentError,"ignore_words: must contain only Strings or Regexps")
+          end
+        }
+      )
+
+      if @numbers
+        @skip_word = /(?:#{skip_words}[[:punct:]]*(?:\s+|$))+/i
+        @word = /#{@word}|\d+/
+        @not_a_word = /[\s[:punct:]]+/
+      else
+        @skip_word = /(?:(?:#{skip_words}|\d+)[[:punct:]]*(?:\s+|$))+/i
+        @not_a_word = /[\s\d[:punct:]]+/
+      end
+    end
+
+    #
+    # Determines whether parsed words may contain digits or not.
+    #
+    # @return [Boolean]
+    #
+    def digits?
+      @digits
+    end
+
+    #
+    # Determines whether numbers will be parsed or ignored.
+    #
+    # @return [Boolean]
+    #
+    def numbers?
+      @numbers
+    end
+
+    #
+    # Determines whether acronyms will be parsed or ignored.
+    #
+    # @return [Boolean]
+    #
+    def acronyms?
+      @acronyms
+    end
+
+    #
+    # Determines whether `.` characters will be removed from acronyms.
+    #
+    # @return [Boolean]
+    #
+    def normalize_acronyms?
+      @normalize_acronyms
+    end
+
+    #
+    # Determines whether apostrophes will be stripped from words.
+    #
+    # @return [Boolean]
+    #
+    def normalize_apostrophes?
+      @normalize_apostrophes
+    end
+
+    #
+    # Determines whether all words will be converted to lowercase.
+    #
+    # @return [Boolean]
+    #
+    def normalize_case?
+      @normalize_case
+    end
+
+    #
+    # Enumerates over each word in the text.
+    #
+    # @yield [word]
+    #   The given block will be passed each word from the text.
+    #
+    # @yieldparam [String] word
+    #   A parsed word from the text.
+    #
+    # @return [Array<String>]
+    #   If no block is given, an Array of the parsed words will be returned
+    #   instead.
+    #
+    def parse(text,&block)
+      return enum_for(__method__,text).to_a unless block_given?
+
+      scanner = StringScanner.new(text)
+
+      until scanner.eos?
+        scanner.skip(@not_a_word)
+        scanner.skip(@skip_word)
+
+        if (acronym = scanner.scan(ACRONYM))
+          if @acronyms
+            acronym.tr!('.','') if @normalize_acronyms
+
+            yield acronym
+          end
+        elsif (word = scanner.scan(@word))
+          word.downcase! if @normalize_case
+          word.chomp!("'s") if (@normalize_apostrophes && word.end_with?("'s"))
+
+          yield word
+        end
+      end
+    end
+
+  end
+end
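
Finally, a hedged sketch of the lexer in use (assumes an English locale so the default `StopWords[Lang.default]` resolves to the bundled `en.txt` list; the sample text is arbitrary):

    require 'wordlist/lexer'

    lexer = Wordlist::Lexer.new(normalize_case: true)

    # Stop words, punctuation, and (with the default numbers: false) bare
    # numbers are skipped; the remaining words are yielded downcased.
    lexer.parse("Dumping the admin's password list from 2023") do |word|
      puts word
    end

    # Without a block, #parse returns an Array of the parsed words instead.
    words = lexer.parse("some other sample text")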