mrflip-wukong 0.1.0

Files changed (137)
  1. data/LICENSE.txt +202 -0
  2. data/README-tutorial.textile +163 -0
  3. data/README.textile +165 -0
  4. data/bin/cutc +30 -0
  5. data/bin/cuttab +5 -0
  6. data/bin/greptrue +8 -0
  7. data/bin/hdp-cat +3 -0
  8. data/bin/hdp-catd +3 -0
  9. data/bin/hdp-du +81 -0
  10. data/bin/hdp-get +3 -0
  11. data/bin/hdp-kill +3 -0
  12. data/bin/hdp-ls +10 -0
  13. data/bin/hdp-mkdir +3 -0
  14. data/bin/hdp-mv +3 -0
  15. data/bin/hdp-parts_to_keys.rb +77 -0
  16. data/bin/hdp-ps +3 -0
  17. data/bin/hdp-put +3 -0
  18. data/bin/hdp-rm +11 -0
  19. data/bin/hdp-sort +29 -0
  20. data/bin/hdp-stream +29 -0
  21. data/bin/hdp-stream-flat +18 -0
  22. data/bin/hdp-sync +17 -0
  23. data/bin/hdp-wc +67 -0
  24. data/bin/md5sort +20 -0
  25. data/bin/tabchar +5 -0
  26. data/bin/uniqc +3 -0
  27. data/bin/wu-hist +3 -0
  28. data/bin/wu-lign +177 -0
  29. data/bin/wu-sum +30 -0
  30. data/doc/README-wulign.textile +59 -0
  31. data/doc/README-wutils.textile +128 -0
  32. data/doc/UsingWukong-part1.textile +2 -0
  33. data/doc/UsingWukong-part2.textile +2 -0
  34. data/doc/UsingWukong-part3-parsing.textile +132 -0
  35. data/doc/code/api_response_example.txt +20 -0
  36. data/doc/code/parser_skeleton.rb +38 -0
  37. data/doc/hadoop-setup.textile +21 -0
  38. data/doc/intro_to_map_reduce/MapReduceDiagram.graffle +0 -0
  39. data/doc/links.textile +42 -0
  40. data/doc/overview.textile +91 -0
  41. data/doc/pig/PigLatinExpressionsList.txt +122 -0
  42. data/doc/pig/PigLatinReferenceManual.html +19134 -0
  43. data/doc/pig/PigLatinReferenceManual.txt +1640 -0
  44. data/doc/tips.textile +65 -0
  45. data/doc/utils.textile +48 -0
  46. data/examples/README.txt +17 -0
  47. data/examples/and_pig/sample_queries.rb +128 -0
  48. data/examples/apache_log_parser.rb +53 -0
  49. data/examples/count_keys.rb +56 -0
  50. data/examples/count_keys_at_mapper.rb +57 -0
  51. data/examples/graph/adjacency_list.rb +74 -0
  52. data/examples/graph/breadth_first_search.rb +79 -0
  53. data/examples/graph/gen_2paths.rb +68 -0
  54. data/examples/graph/gen_multi_edge.rb +103 -0
  55. data/examples/graph/gen_symmetric_links.rb +53 -0
  56. data/examples/package-local.rb +100 -0
  57. data/examples/package.rb +96 -0
  58. data/examples/pagerank/README.textile +6 -0
  59. data/examples/pagerank/gen_initial_pagerank_graph.pig +57 -0
  60. data/examples/pagerank/pagerank.rb +88 -0
  61. data/examples/pagerank/pagerank_initialize.rb +46 -0
  62. data/examples/pagerank/run_pagerank.sh +19 -0
  63. data/examples/rank_and_bin.rb +173 -0
  64. data/examples/run_all.sh +47 -0
  65. data/examples/sample_records.rb +44 -0
  66. data/examples/size.rb +60 -0
  67. data/examples/word_count.rb +95 -0
  68. data/lib/wukong.rb +11 -0
  69. data/lib/wukong/and_pig.rb +62 -0
  70. data/lib/wukong/and_pig/README.textile +12 -0
  71. data/lib/wukong/and_pig/as.rb +37 -0
  72. data/lib/wukong/and_pig/data_types.rb +30 -0
  73. data/lib/wukong/and_pig/functions.rb +50 -0
  74. data/lib/wukong/and_pig/generate.rb +85 -0
  75. data/lib/wukong/and_pig/generate/variable_inflections.rb +85 -0
  76. data/lib/wukong/and_pig/junk.rb +51 -0
  77. data/lib/wukong/and_pig/operators.rb +8 -0
  78. data/lib/wukong/and_pig/operators/compound.rb +29 -0
  79. data/lib/wukong/and_pig/operators/evaluators.rb +7 -0
  80. data/lib/wukong/and_pig/operators/execution.rb +15 -0
  81. data/lib/wukong/and_pig/operators/file_methods.rb +29 -0
  82. data/lib/wukong/and_pig/operators/foreach.rb +98 -0
  83. data/lib/wukong/and_pig/operators/groupies.rb +212 -0
  84. data/lib/wukong/and_pig/operators/load_store.rb +65 -0
  85. data/lib/wukong/and_pig/operators/meta.rb +42 -0
  86. data/lib/wukong/and_pig/operators/relational.rb +129 -0
  87. data/lib/wukong/and_pig/pig_struct.rb +48 -0
  88. data/lib/wukong/and_pig/pig_var.rb +95 -0
  89. data/lib/wukong/and_pig/symbol.rb +29 -0
  90. data/lib/wukong/and_pig/utils.rb +0 -0
  91. data/lib/wukong/bad_record.rb +18 -0
  92. data/lib/wukong/boot.rb +47 -0
  93. data/lib/wukong/datatypes.rb +24 -0
  94. data/lib/wukong/datatypes/enum.rb +123 -0
  95. data/lib/wukong/dfs.rb +80 -0
  96. data/lib/wukong/encoding.rb +111 -0
  97. data/lib/wukong/extensions.rb +15 -0
  98. data/lib/wukong/extensions/array.rb +18 -0
  99. data/lib/wukong/extensions/blank.rb +93 -0
  100. data/lib/wukong/extensions/class.rb +189 -0
  101. data/lib/wukong/extensions/date_time.rb +24 -0
  102. data/lib/wukong/extensions/emittable.rb +82 -0
  103. data/lib/wukong/extensions/hash.rb +120 -0
  104. data/lib/wukong/extensions/hash_like.rb +112 -0
  105. data/lib/wukong/extensions/hashlike_class.rb +47 -0
  106. data/lib/wukong/extensions/module.rb +2 -0
  107. data/lib/wukong/extensions/pathname.rb +27 -0
  108. data/lib/wukong/extensions/string.rb +65 -0
  109. data/lib/wukong/extensions/struct.rb +17 -0
  110. data/lib/wukong/extensions/symbol.rb +11 -0
  111. data/lib/wukong/logger.rb +40 -0
  112. data/lib/wukong/models/graph.rb +27 -0
  113. data/lib/wukong/rdf.rb +104 -0
  114. data/lib/wukong/schema.rb +39 -0
  115. data/lib/wukong/script.rb +265 -0
  116. data/lib/wukong/script/hadoop_command.rb +111 -0
  117. data/lib/wukong/script/local_command.rb +14 -0
  118. data/lib/wukong/streamer.rb +13 -0
  119. data/lib/wukong/streamer/accumulating_reducer.rb +89 -0
  120. data/lib/wukong/streamer/base.rb +76 -0
  121. data/lib/wukong/streamer/count_keys.rb +30 -0
  122. data/lib/wukong/streamer/count_lines.rb +26 -0
  123. data/lib/wukong/streamer/filter.rb +20 -0
  124. data/lib/wukong/streamer/line_streamer.rb +12 -0
  125. data/lib/wukong/streamer/list_reducer.rb +20 -0
  126. data/lib/wukong/streamer/preprocess_with_pipe_streamer.rb +22 -0
  127. data/lib/wukong/streamer/rank_and_bin_reducer.rb +145 -0
  128. data/lib/wukong/streamer/set_reducer.rb +14 -0
  129. data/lib/wukong/streamer/struct_streamer.rb +48 -0
  130. data/lib/wukong/streamer/summing_reducer.rb +29 -0
  131. data/lib/wukong/streamer/uniq_by_last_reducer.rb +44 -0
  132. data/lib/wukong/typed_struct.rb +12 -0
  133. data/lib/wukong/wukong_class.rb +20 -0
  134. data/spec/bin/hdp-wc_spec.rb +4 -0
  135. data/spec/spec_helper.rb +0 -0
  136. data/wukong.gemspec +173 -0
  137. metadata +208 -0
data/bin/cutc
@@ -0,0 +1,30 @@
+ #!/usr/bin/env bash
+
+ #
+ # cutc -- cut each line down to its first N characters
+ #
+ # Example:
+ #
+ # A quickie histogram of timestamps; say that for the objects in the foo/bar
+ # directory, field 3 holds a flat timestamp (YYYYmmddHHMMSS) and you want a
+ # histogram by hour (and that foo/bar is small enough to be worth sucking
+ # through a single machine):
+ #
+ # hdp-catd foo/bar | cuttab 3 | cutc 10 | sort | uniq -c
+ #
+ # If foo/bar is already sorted, leave out the call to sort.
+ #
+
+
+ #
+ # Cut up to $1 characters if given; otherwise up to $CUTC_MAX if defined; otherwise 200 chars as a fallback.
+ #
+ CUTC_MAX=${CUTC_MAX-200}
+ CUTC_MAX=${1-$CUTC_MAX}
+ cutchars="1-${CUTC_MAX}"
+ shift
+
+ #
+ # Do the cuttin'
+ #
+ cut -c"${cutchars}" "$@"
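A couple of hedged usage sketches for cutc; the file names here are invented for illustration:

    cutc 80 apache.log | less        # peek at only the first 80 characters of each line
    CUTC_MAX=50 cutc < records.tsv   # or set the width through the environment instead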
data/bin/cuttab
@@ -0,0 +1,5 @@
+ #!/usr/bin/env bash
+
+ fields=${1-"1-"}
+ shift
+ cut -d$'\t' -f"$fields" "$@"
data/bin/greptrue
@@ -0,0 +1,8 @@
+ #!/usr/bin/env bash
+
+ # runs grep but always returns a true exit status. (Otherwise hadoop vomits)
+ grep "$@"
+ true
+ # runs egrep but always returns a true exit status. (Otherwise hadoop vomits)
+ egrep "$@"
+ true
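The point of the wrapper: plain grep exits nonzero when nothing matches, and Hadoop streaming treats a nonzero mapper exit as a task failure. A hedged sketch of using it as a streaming filter with the hdp-stream-flat wrapper that appears below (paths and pattern invented):

    hdp-stream-flat in/app_logs out/error_lines 'greptrue ERROR' /bin/cat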
data/bin/hdp-cat
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ hadoop dfs -cat "$@"
data/bin/hdp-catd
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+ args=`echo "$@" | ruby -ne 'a = $_.split(/\s+/); puts a.map{|arg| arg+"/[^_]*" }.join(" ")'`
+ hadoop dfs -cat $args
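hdp-catd cats the contents of each listed directory while skipping entries that start with an underscore (such as the _logs bookkeeping directories Hadoop drops into job output). A hedged illustration of the expansion, with invented directory names:

    hdp-catd out/wordcount out/pagerank
    # runs roughly: hadoop dfs -cat out/wordcount/[^_]* out/pagerank/[^_]*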
data/bin/hdp-du
@@ -0,0 +1,81 @@
+ #!/usr/bin/env ruby
+
+ OPTIONS={}
+
+ #
+ # grok options
+ #
+ if ARGV[0] =~ /\A-[a-z]+\z/
+   flags = ARGV.shift
+   OPTIONS[:summary]  = flags.include?('s')
+   OPTIONS[:humanize] = flags.include?('h')
+ end
+
+ #
+ # Prepare command
+ #
+ def prepare_command
+   dfs_cmd  = OPTIONS[:summary] ? 'dus' : 'du'
+   dfs_args = "'" + ARGV.join("' '") + "'"
+   %Q{ hadoop dfs -#{dfs_cmd} #{dfs_args} }
+ end
+
+ Numeric.class_eval do
+   def bytes() self ; end
+   alias :byte :bytes
+   def kilobytes() self * 1024 ; end
+   alias :kilobyte :kilobytes
+   def megabytes() self * 1024.kilobytes ; end
+   alias :megabyte :megabytes
+   def gigabytes() self * 1024.megabytes ; end
+   alias :gigabyte :gigabytes
+   def terabytes() self * 1024.gigabytes ; end
+   alias :terabyte :terabytes
+   def petabytes() self * 1024.terabytes ; end
+   alias :petabyte :petabytes
+   def exabytes() self * 1024.petabytes ; end
+   alias :exabyte :exabytes
+ end
+
+ # Formats the bytes in +size+ into a more understandable representation
+ # (e.g., giving it 1500 yields 1.5 KB). This method is useful for
+ # reporting file sizes to users. This method returns nil if
+ # +size+ cannot be converted into a number. You can change the default
+ # precision of 1 with the +precision+ parameter.
+ #
+ # ==== Examples
+ # number_to_human_size(123)           # => 123 Bytes
+ # number_to_human_size(1234)          # => 1.2 KB
+ # number_to_human_size(12345)         # => 12.1 KB
+ # number_to_human_size(1234567)       # => 1.2 MB
+ # number_to_human_size(1234567890)    # => 1.1 GB
+ # number_to_human_size(1234567890123) # => 1.1 TB
+ # number_to_human_size(1234567, 2)    # => 1.18 MB
+ # number_to_human_size(483989, 0)     # => 473 KB
+ def number_to_human_size(size, precision=1)
+   size = Kernel.Float(size)
+   case
+   when size.to_i == 1;    "1 Byte"
+   when size < 1.kilobyte; "%d Bytes" % size
+   when size < 1.megabyte; "%.#{precision}f KB" % (size / 1.0.kilobyte)
+   when size < 1.gigabyte; "%.#{precision}f MB" % (size / 1.0.megabyte)
+   when size < 1.terabyte; "%.#{precision}f GB" % (size / 1.0.gigabyte)
+   else                    "%.#{precision}f TB" % (size / 1.0.terabyte)
+   end.sub(/([0-9]\.\d*?)0+ /, '\1 ' ).sub(/\. /,' ')
+ rescue
+   nil
+ end
+
+ def format_output file, size
+   human_size = number_to_human_size(size) || 3
+   file = file.gsub(%r{hdfs://[^/]+/}, '/') # kill off hdfs paths, otherwise leave it alone
+   "%-71s\t%15d\t%15s" % [file, size.to_i, human_size]
+ end
+
+
+ %x{ #{prepare_command} }.split("\n").each do |line|
+   if line =~ /^Found \d+ items$/ then puts line ; next end
+   info = line.split(/\s+/)
+   if OPTIONS[:summary] then file, size = info else size, file = info end
+   puts format_output(file, size)
+ end
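A hedged usage sketch; the path is invented:

    hdp-du /user/flip/data      # per-file listing: path, size in bytes, human-readable size
    hdp-du -s /user/flip/data   # summary totals via 'hadoop dfs -dus'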
data/bin/hdp-get
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ hadoop dfs -copyToLocal "$1" "$2"
data/bin/hdp-kill
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ hadoop job -kill "$@"
data/bin/hdp-ls
@@ -0,0 +1,10 @@
+ #!/usr/bin/env bash
+
+ if [ "$1" == "-r" ] || [ "$1" == "-R" ] ; then
+   shift
+   action=lsr
+ else
+   action=ls
+ fi
+
+ hadoop dfs -$action "$@"
data/bin/hdp-mkdir
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ hadoop dfs -mkdir "$@"
data/bin/hdp-mv
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ hadoop dfs -mv "$@"
data/bin/hdp-parts_to_keys.rb
@@ -0,0 +1,77 @@
+ #!/usr/bin/env ruby
+
+ dir_to_rename = ARGV[0]
+ dest_ext = '.tsv'
+
+ unless dir_to_rename && (! dir_to_rename.empty?)
+   warn "Need a directory or file spec to rename."
+   exit
+ end
+
+ #
+ # Setup
+ #
+ warn "\nPlease IGNORE the 'cat: Unable to write to output stream.' errors\n"
+
+ #
+ # Examine the files
+ #
+ file_listings = `hdp-ls #{dir_to_rename}`.split("\n")
+ command_lists = { }
+ file_listings[1..-1].each do |file_listing|
+   m = %r{[-drwx]+\s+[\-\d]+\s+\w+\s+\w+\s+(\d+)\s+[\d\-]+\s+[\d\:]+\s+(.+)$}.match(file_listing)
+   if !m then warn "Couldn't grok #{file_listing}" ; next ; end
+   size, filename = m.captures
+   case
+   when size.to_i == 0 then (command_lists[:deletes]||=[]) << filename
+   else
+     firstline = `hdp-cat #{filename} | head -qn1 `
+     file_key, _ = firstline.split("\t", 2)
+     unless file_key && (file_key =~ /\A[\w\-\.]+\z/)
+       warn "Don't want to rename to '#{file_key}'... skipping"
+       next
+     end
+     dirname  = File.dirname(filename)
+     destfile = File.join(dirname, file_key)+dest_ext
+     (command_lists[:moves]||=[]) << "hdp-mv #{filename} #{destfile}"
+   end
+ end
+
+ #
+ # Execute the command_lists
+ #
+ command_lists.each do |type, command_list|
+   case type
+   when :deletes
+     command = "hdp-rm #{command_list.join(" ")}"
+     puts command
+     `#{command}`
+   when :moves
+     command_list.each do |command|
+       puts command
+       `#{command}`
+     end
+   end
+ end
+
+
+ # -rw-r--r-- 3 flip supergroup 0 2008-12-20 05:51 /user/flip/out/sorted-tweets-20081220/part-00010
+
+ # # Killing empty files
+ # find . -size 0 -print -exec rm {} \;
+ #
+ # for foo in part-0* ; do
+ #   newname=`
+ #     head -n1 $foo |
+ #     cut -d' ' -f1 |
+ #     ruby -ne 'puts $_.chomp.gsub(/[^\-\w]/){|s| s.bytes.map{|c| "%%%02X" % c }}'
+ #   `.tsv ;
+ #   echo "moving $foo to $newname"
+ #   mv "$foo" "$newname"
+ # done
+ #
+ # # dir=`basename $PWD`
+ # # for foo in *.tsv ; do
+ # #   echo "Compressing $dir"
+ # #   bzip2 -c $foo > ../$dir-bz2/$foo.bz2
+ # # done
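In short: after a job whose reducer writes one distinct key per part file, this renames the opaque part-NNNNN files to key-named .tsv files and batches the zero-length ones into a single delete. A hedged before/after sketch with invented paths:

    hdp-parts_to_keys.rb out/sorted-tweets-20081220
    # part-00010 whose first field is '20081220'  =>  out/sorted-tweets-20081220/20081220.tsv
    # empty part files are removed with one hdp-rm call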
data/bin/hdp-ps
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ hadoop job -list all
data/bin/hdp-put
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ hadoop dfs -put "$1" "$2"
data/bin/hdp-rm
@@ -0,0 +1,11 @@
+ #!/usr/bin/env bash
+
+ if [ "$1" == "-r" ] ; then
+   shift
+   action=rmr
+ else
+   action=rm
+ fi
+ echo hadoop dfs -$action "$@"
+ # read -p "Hit ctrl-C to abort or enter to do this...."
+ hadoop dfs -$action "$@"
data/bin/hdp-sort
@@ -0,0 +1,29 @@
+ #!/usr/bin/env bash
+ # hadoop dfs -rmr out/parsed-followers
+
+ input_file=${1} ; shift
+ output_file=${1} ; shift
+ map_script=${1-/bin/cat} ; shift
+ reduce_script=${1-/usr/bin/uniq} ; shift
+ fields=${1-2} ; shift
+
+ if [ "$output_file" == "" ] ; then echo "usage: $0 input_file output_file [mapper] [reducer] [num_key_fields] [args]" ; exit ; fi
+
+ HADOOP_HOME=${HADOOP_HOME-/usr/lib/hadoop}
+
+ ${HADOOP_HOME}/bin/hadoop \
+   jar ${HADOOP_HOME}/contrib/streaming/hadoop-*-streaming.jar \
+   -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \
+   -jobconf map.output.key.field.separator='\t' \
+   -jobconf num.key.fields.for.partition=1 \
+   -jobconf stream.map.output.field.separator='\t' \
+   -jobconf stream.num.map.output.key.fields="$fields" \
+   -mapper "$map_script" \
+   -reducer "$reduce_script" \
+   -input "$input_file" \
+   -output "$output_file" \
+   "$@"
+
+
+ # -jobconf mapred.map.tasks=3 \
+ # -jobconf mapred.reduce.tasks=3 \
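A hedged invocation sketch; the paths are invented, and any trailing arguments fall through to the streaming jar:

    # identity mapper, uniq reducer; partition on field 1, sort on the first two fields
    hdp-sort in/follower_edges out/follower_edges_uniq /bin/cat /usr/bin/uniq 2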
data/bin/hdp-stream
@@ -0,0 +1,29 @@
+ #!/usr/bin/env bash
+ # hadoop dfs -rmr out/parsed-followers
+
+ input_file=${1} ; shift
+ output_file=${1} ; shift
+ map_script=${1-/bin/cat} ; shift
+ reduce_script=${1-/usr/bin/uniq} ; shift
+ fields=${1-2} ; shift
+
+ if [ "$output_file" == "" ] ; then echo "usage: $0 input_file output_file [mapper] [reducer] [num_key_fields] [args]" ; exit ; fi
+
+ HADOOP_HOME=${HADOOP_HOME-/usr/lib/hadoop}
+
+ ${HADOOP_HOME}/bin/hadoop \
+   jar ${HADOOP_HOME}/contrib/streaming/hadoop-*-streaming.jar \
+   -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \
+   -jobconf map.output.key.field.separator='\t' \
+   -jobconf num.key.fields.for.partition=1 \
+   -jobconf stream.map.output.field.separator='\t' \
+   -jobconf stream.num.map.output.key.fields="$fields" \
+   -mapper "$map_script" \
+   -reducer "$reduce_script" \
+   -input "$input_file" \
+   -output "$output_file" \
+   "$@"
+
+
+ # -jobconf mapred.map.tasks=3 \
+ # -jobconf mapred.reduce.tasks=3 \
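As shipped here, hdp-stream is line-for-line the same wrapper as hdp-sort. A hedged sketch with a custom mapper; the script name and paths are invented:

    hdp-stream in/raw_tweets out/user_counts ./extract_user_id.rb 'uniq -c' 1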
data/bin/hdp-stream-flat
@@ -0,0 +1,18 @@
+ #!/usr/bin/env bash
+
+ input_file=${1} ; shift
+ output_file=${1} ; shift
+ map_script=${1-/bin/cat} ; shift
+ reduce_script=${1-/usr/bin/uniq} ; shift
+
+ if [ "$output_file" == "" ] ; then echo "usage: $0 input_file output_file [mapper] [reducer] [args]" ; exit ; fi
+
+ hadoop jar /home/flip/hadoop/h/contrib/streaming/hadoop-*-streaming.jar \
+   -mapper "$map_script" \
+   -reducer "$reduce_script" \
+   -input "$input_file" \
+   -output "$output_file" \
+   "$@"
+
+ # -jobconf mapred.map.tasks=3 \
+ # -jobconf mapred.reduce.tasks=3 \
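hdp-stream-flat skips the key-field partitioner setup, so it suits jobs that are happy with the default streaming behavior; note the jar path above is hard-coded to the author's machine. A hedged sketch with invented names:

    hdp-stream-flat in/apache_logs out/status_counts ./extract_status.rb 'uniq -c'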
data/bin/hdp-sync
@@ -0,0 +1,17 @@
+ #!/usr/bin/env ruby
+ require 'wukong'
+
+ src_dir, dest_dir = ARGV[0..1]
+ src_files  = Dir[src_dir + '/*']
+ dest_files = Wukong::Dfs.list_files dest_dir
+ Wukong::Dfs.compare_listings(src_files, dest_files) do |comparison, src_file, dest_file|
+   case comparison
+   when :missing
+     dest_filename = "%s/%s" % [dest_dir, dest_file]
+     puts "Copying #{src_file} #{dest_filename}"
+     puts `hadoop dfs -put #{src_file} #{dest_filename}`
+   when :differ
+     src_ls = `ls -l #{src_file}`.split(/\s+/).join("\t")
+     puts "Differ: #{src_ls} \n#{dest_file}"
+   end
+ end
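A hedged usage sketch; both paths are invented:

    hdp-sync /data/exports/tweets /user/flip/tweets
    # files missing from HDFS are copied up with 'hadoop dfs -put'; files that differ are only reported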
data/bin/hdp-wc
@@ -0,0 +1,67 @@
+ #!/usr/bin/env ruby
+ require 'wukong'
+ NEWLINE_LENGTH = $/.length # KLUDGE
+
+ #
+ #
+ #
+ # !! The +words+ count comes out higher than that of +wc+ -- don't know
+ # why. (It's close: on a 10GB, 1M-line dataset it showed 367833839 vs. 367713271)
+ #
+ class WcMapper < Wukong::Streamer::LineStreamer
+   attr_accessor :lines, :fields, :words, :chars, :bytes
+
+   def before_stream
+     self.lines, self.fields, self.words, self.chars, self.bytes = [0,0,0,0,0]
+   end
+
+   def process line
+     return unless line
+     self.lines  += 1
+     self.fields += 1 + line.count("\t")
+     self.words  += 1 + line.strip.scan(/\s+/).length unless line.blank?
+     self.chars  += line.chars.to_a.length + NEWLINE_LENGTH
+     self.bytes  += line.bytesize + NEWLINE_LENGTH
+     $stderr.puts line if (line.chars.to_a.length != line.bytesize)
+   end
+
+   def after_stream
+     emit [lines, fields, words, chars, bytes]
+   end
+ end
+
+ #
+ #
+ class WcReducer < Wukong::Streamer::Base
+   attr_accessor :lines, :fields, :words, :chars, :bytes
+
+   def before_stream
+     self.lines, self.fields, self.words, self.chars, self.bytes = [0,0,0,0,0]
+   end
+
+   def process m_lines, m_fields, m_words, m_chars, m_bytes
+     self.lines  += m_lines.to_i
+     self.fields += m_fields.to_i
+     self.words  += m_words.to_i
+     self.chars  += m_chars.to_i
+     self.bytes  += m_bytes.to_i
+   end
+
+   def after_stream
+     emit [lines, fields, words, chars, bytes]
+   end
+ end
+
+ Wukong::Script.new(WcMapper, WcReducer, :reduce_tasks => 1).run
+
+ # class FooScript < Wukong::Script
+ #   def map_command
+ #     '/usr/bin/wc'
+ #   end
+ #   def reduce_command
+ #     '/bin/cat'
+ #   end
+ # end
+ # FooScript.new(nil, nil, :reduce_tasks => 1).run
+ #
+ # ruby -ne 'wc_v = `echo "#{$_.chomp}" | wc`; gr_v=($_.strip.empty? ? 0 : $_.strip.scan(/\s+/).length + 1 ) ; puts [wc_v.chomp, " ", gr_v, $_.chomp].join("\t")'
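Each mapper emits one [lines, fields, words, chars, bytes] tuple and the single reducer sums them. A hedged sketch of launching it; Wukong scripts of this vintage are typically invoked with a --run flag plus input and output paths, but check the gem's README for the exact form (paths invented):

    hdp-wc --run in/big_dataset out/wc_totals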
data/bin/md5sort
@@ -0,0 +1,20 @@
+ #!/usr/bin/env python
+ """ sorts lines (or tab-sep records) by md5. (e.g. for train/test splits).
+ optionally prepends the md5 id too.
+ brendan o'connor - anyall.org - gist.github.com/brendano """
+
+ import hashlib,sys,optparse
+ p = optparse.OptionParser()
+ p.add_option('-k', type='int', default=False)
+ p.add_option('-p', action='store_true')
+ opts,args=p.parse_args()
+
+ lines = sys.stdin.readlines()
+ getter=lambda s: hashlib.md5(s[:-1]).hexdigest()
+ if opts.k:
+     getter=lambda s: hashlib.md5(s[:-1].split("\t")[opts.k-1]).hexdigest()
+
+ lines.sort(key=lambda s: getter(s))
+ for line in lines:
+     if opts.p: line = getter(line) + "\t" + line
+     print line,
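A hedged sketch of the train/test split mentioned in the docstring; the file names and counts are invented, and note the script is written for Python 2:

    md5sort < examples.tsv > shuffled.tsv                        # stable pseudo-random order
    md5sort -k 1 -p < examples.tsv | head -n 80000 > train.tsv   # hash field 1, prepend the md5 id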