couchproxy 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +19 -0
- data/README +36 -0
- data/Rakefile +42 -0
- data/bin/couchproxy +88 -0
- data/conf/couchproxy.yml +21 -0
- data/lib/couchproxy/cluster.rb +43 -0
- data/lib/couchproxy/collator.rb +60 -0
- data/lib/couchproxy/deferrable_body.rb +15 -0
- data/lib/couchproxy/node.rb +25 -0
- data/lib/couchproxy/partition.rb +15 -0
- data/lib/couchproxy/rack/active_tasks.rb +9 -0
- data/lib/couchproxy/rack/all_databases.rb +23 -0
- data/lib/couchproxy/rack/all_docs.rb +9 -0
- data/lib/couchproxy/rack/base.rb +197 -0
- data/lib/couchproxy/rack/bulk_docs.rb +68 -0
- data/lib/couchproxy/rack/changes.rb +9 -0
- data/lib/couchproxy/rack/compact.rb +16 -0
- data/lib/couchproxy/rack/config.rb +16 -0
- data/lib/couchproxy/rack/database.rb +83 -0
- data/lib/couchproxy/rack/design_doc.rb +227 -0
- data/lib/couchproxy/rack/doc.rb +15 -0
- data/lib/couchproxy/rack/ensure_full_commit.rb +16 -0
- data/lib/couchproxy/rack/not_found.rb +13 -0
- data/lib/couchproxy/rack/replicate.rb +9 -0
- data/lib/couchproxy/rack/revs_limit.rb +18 -0
- data/lib/couchproxy/rack/root.rb +10 -0
- data/lib/couchproxy/rack/stats.rb +53 -0
- data/lib/couchproxy/rack/temp_view.rb +9 -0
- data/lib/couchproxy/rack/update.rb +11 -0
- data/lib/couchproxy/rack/users.rb +9 -0
- data/lib/couchproxy/rack/uuids.rb +9 -0
- data/lib/couchproxy/rack/view_cleanup.rb +16 -0
- data/lib/couchproxy/reducer.rb +57 -0
- data/lib/couchproxy/request.rb +50 -0
- data/lib/couchproxy/router.rb +62 -0
- data/lib/couchproxy.rb +48 -0
- data/lib/couchproxy.ru +22 -0
- data/test/collator_test.rb +100 -0
- metadata +164 -0
data/LICENSE
ADDED
@@ -0,0 +1,19 @@
Copyright (c) 2010 David Graham

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README
ADDED
@@ -0,0 +1,36 @@
== Welcome to CouchProxy

CouchProxy is a simple proxy server that distributes reads and writes to a
cluster of Apache CouchDB servers so they appear to be a single huge database.
Documents are stored and retrieved from a particular CouchDB instance, using
consistent hashing of the document id. Map/reduce views are processed
concurrently on each CouchDB instance and merged together by the proxy before
returning the results to the client.

CouchProxy uses the Thin web server and EventMachine for asynchronous IO. There
are no blocking IO calls in the server, which allows it to handle many clients
and many backend CouchDB nodes concurrently, using very little processor and
memory.

== Usage

1. gem install couchproxy
2. couchproxy config
3. Configure CouchDB nodes in couchproxy.yml
4. couchproxy start

== Dependencies

* em-http-request >= 0.2.11
* json >= 1.4.6
* json-stream >= 0.1.0
* thin >= 1.2.7
* ruby >= 1.9.1

== Contact

Project contact: David Graham <david.malcom.graham@gmail.com>

== License

CouchProxy is released under the MIT license. Check the LICENSE file for details.
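The consistent-hashing scheme described above is implemented in
lib/couchproxy/cluster.rb (included below). As a rough, self-contained sketch
of the idea, with illustrative names that are not part of the gem:

    require 'zlib'

    # A fixed partition count; it must stay the same once documents are stored.
    TOTAL_PARTITIONS = 64

    # Map a document id to a stable partition number, as Cluster#partition does,
    # so the same id always routes to the same backend CouchDB database.
    def partition_for(doc_id)
      Zlib.crc32(doc_id.to_s).abs % TOTAL_PARTITIONS
    end

    partition_for('invoice-2010-001')  # => the same value on every call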
data/Rakefile
ADDED
@@ -0,0 +1,42 @@
require 'rake'
require 'rake/clean'
require 'rake/gempackagetask'
require 'rake/testtask'
require File.expand_path('../lib/couchproxy', __FILE__)

spec = Gem::Specification.new do |s|
  s.name = "couchproxy"
  s.version = CouchProxy::VERSION
  s.date = Time.now.strftime("%Y-%m-%d")
  s.summary = "A proxy server for Apache CouchDB clusters."
  s.description = "CouchProxy is a simple proxy server that distributes reads and writes to a
cluster of Apache CouchDB servers so they appear to be a single huge database.
Documents are stored and retrieved from a particular CouchDB instance, using
consistent hashing of the document id. Map/reduce views are processed
concurrently on each CouchDB instance and merged together by the proxy before
returning the results to the client."
  s.email = "david.malcom.graham@gmail.com"
  s.homepage = "http://github.com/dgraham/couchproxy"
  s.authors = ["David Graham"]
  s.files = FileList['[A-Z]*', '{bin,lib,conf}/**/*']
  s.test_files = FileList["test/**/*test.rb"]
  s.executables = %w[couchproxy]
  s.require_path = "lib"
  s.has_rdoc = true
  s.add_dependency('em-http-request', '~> 0.2')
  s.add_dependency('json', '~> 1.4')
  s.add_dependency('json-stream', '~> 0.1')
  s.add_dependency('thin', '~> 1.2')
  s.required_ruby_version = '>= 1.9.1'
end

Rake::GemPackageTask.new(spec) do |pkg|
  pkg.need_tar = true
end

Rake::TestTask.new(:test) do |test|
  test.pattern = 'test/**/*_test.rb'
  test.warning = true
end

task :default => [:clobber, :test, :gem]
data/bin/couchproxy
ADDED
@@ -0,0 +1,88 @@
#!/usr/bin/env ruby
# encoding: UTF-8

require 'fileutils'
require 'optparse'

def parse
  options = {}
  parser = OptionParser.new do |opts|
    opts.banner = "Usage: couchproxy [options] start|stop|config"

    options[:config] = default_config
    opts.on('-C', '--config FILE',
        "Specify couchproxy.yml file (default: #{default_config})") do |config|
      options[:config] = config
    end

    options[:host] = '0.0.0.0'
    opts.on('-a', '--address HOST',
        'Bind to HOST address (default: 0.0.0.0)') do |host|
      options[:host] = host
    end

    options[:port] = 6984
    opts.on('-p', '--port PORT', 'Use PORT (default: 6984)') do |port|
      options[:port] = port
    end

    options[:log] = 'couchproxy.log'
    opts.on('-l', '--log FILE',
        'File to redirect output (default: couchproxy.log)') do |log|
      options[:log] = log
    end

    options[:pid] = 'couchproxy.pid'
    opts.on('-P', '--pid FILE',
        'File to store PID (default: couchproxy.pid)') do |pid|
      options[:pid] = pid
    end
  end

  begin
    parser.parse!
  rescue
    puts parser
    exit(1)
  end

  options.tap do |opts|
    if ARGV.size != 1 || !%w[start stop config].include?(ARGV.first)
      puts parser
      exit(1)
    end
    opts[:cmd] = ARGV.first
  end
end

def default_config
  File.join(Dir.pwd, 'couchproxy.yml')
end

def start(opts)
  ENV['COUCH_PROXY_CONFIG'] = opts[:config]
  flags = {
    '-a' => opts[:host],
    '-p' => opts[:port],
    '-l' => opts[:log],
    '-P' => opts[:pid],
    '-R' => File.expand_path('../../lib/couchproxy.ru', __FILE__),
    '--tag' => 'couchproxy'
  }
  system("thin -d #{flags.map {|k, v| "#{k} #{v}" }.join(' ')} start")
end

def stop(opts)
  system("thin -P #{opts[:pid]} stop")
end

def config(opts)
  template = File.expand_path('../../conf/couchproxy.yml', __FILE__)
  FileUtils.copy(template, default_config)
  puts "Copied template cluster file to #{default_config}."
  puts "Update the file and start the server."
end

opts = parse
method(opts[:cmd]).call(opts)
data/conf/couchproxy.yml
ADDED
@@ -0,0 +1,21 @@
# This is the couchproxy configuration file. Restart the couchproxy process
# after changing this file for the changes to take effect.
#
# Parameters:
# couchjs: The fully qualified path to the couchjs view server.
# reducers: The number of couchjs processes to run. We typically want one
# view server process per core for best rereduce performance.
# nodes: A list of CouchDB servers with the partitions that they host. Once
# we start using the proxy with these partitions, we can't change the
# total number of partitions without carefully repartitioning the database.
# We can move partitions to new servers easily though. The number of
# partitions should match the maximum number of servers we think we'll
# need for our data set.

couchjs: /usr/bin/couchjs
reducers: 4
nodes:
  - host: 'http://localhost:5984'
    partitions: !ruby/range 0...32
  - host: 'http://localhost:5984'
    partitions: !ruby/range 32...64
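A quick sanity check of the sample layout above (a hypothetical helper, not
part of the gem): the two ranges cover 64 partitions in total, and the ranges
of all nodes must tile 0...64 with no gaps or overlaps, because every hashed
document id has to land on exactly one hosted partition.

    ranges = [0...32, 32...64]            # mirrors the node entries above
    partitions = ranges.flat_map(&:to_a)
    raise 'bad partition map' unless partitions.sort == (0...64).to_a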
data/lib/couchproxy/cluster.rb
ADDED
@@ -0,0 +1,43 @@
# encoding: UTF-8

module CouchProxy
  class Cluster
    attr_reader :nodes

    def initialize(nodes, couchjs, reducers)
      @nodes, @partitions = [], 0
      nodes.each {|n| self << n }
      @reducers = Array.new(reducers) { Reducer.new(couchjs) }
    end

    def reducer
      @reducers[rand(@reducers.size)]
    end

    def <<(node)
      @nodes << node
      @partitions = @nodes.inject(0) do |acc, n|
        acc + n.partitions.size
      end
      self
    end

    def partition(doc_id)
      num = Zlib.crc32(doc_id.to_s).abs % @partitions
      node = @nodes.find {|n| n.hosts?(num) }
      node.partition(num)
    end

    def any_node
      @nodes[rand(@nodes.size)]
    end

    def any_partition
      any_node.any_partition
    end

    def partitions
      @nodes.map {|n| n.partitions}.flatten
    end
  end
end
data/lib/couchproxy/collator.rb
ADDED
@@ -0,0 +1,60 @@
module CouchProxy
  # Implements the JSON sorting rules defined at
  # http://wiki.apache.org/couchdb/View_collation.
  class Collator
    CLASSES = [NilClass, FalseClass, TrueClass, Numeric, String, Array, Hash]

    def initialize(reverse=false)
      @reverse = reverse
    end

    def compare(a, b)
      klass = compare_class(a, b)
      val = case klass
        when 0
          case a
            when String then compare_string(a, b)
            when Array then compare_array(a, b)
            when Hash then compare_array(a.to_a, b.to_a)
            else a <=> b
          end
        else
          klass
      end
      @reverse ? val * -1 : val
    end

    private

    def compare_class(a, b)
      aix = CLASSES.find_index {|c| a.is_a?(c) }
      bix = CLASSES.find_index {|c| b.is_a?(c) }
      aix == bix ? 0 : aix < bix ? -1 : 1
    end

    # FIXME Implement UCA sorting with ICU
    def compare_string(a, b)
      a <=> b
    end

    def compare_array(a, b)
      if a.size == b.size
        compare_same_size_array(a, b)
      elsif a.size < b.size
        val = compare_same_size_array(a, b[0, a.size])
        val == 0 ? -1 : val
      else
        val = compare_same_size_array(a[0, b.size], b)
        val == 0 ? 1 : val
      end
    end

    def compare_same_size_array(a, b)
      a.each_with_index do |el, ix|
        val = compare(el, b[ix])
        return val unless val == 0
      end
      0
    end
  end
end
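A small usage sketch of the collator (assuming the installed gem's lib
directory is on the load path). Values compare by class first, in the CLASSES
order above, so nil sorts before booleans, numbers before strings, and a
shorter array wins when it is a prefix of a longer one:

    require 'couchproxy/collator'

    c = CouchProxy::Collator.new
    c.compare(nil, true)           # => -1
    c.compare(10, '2')             # => -1
    c.compare(['a'], ['a', 'b'])   # => -1
    [3, nil, 'a', true].sort {|x, y| c.compare(x, y) }  # => [nil, true, 3, "a"]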
data/lib/couchproxy/node.rb
ADDED
@@ -0,0 +1,25 @@
# encoding: UTF-8

module CouchProxy
  class Node
    attr_reader :host, :port, :uri, :partitions

    def initialize(uri, partitions)
      parsed = URI.parse(uri)
      @uri, @host, @port = uri, parsed.host, parsed.port
      @partitions = partitions.map {|num| Partition.new(self, num) }
    end

    def hosts?(partition)
      @partitions.any? {|p| p.num == partition }
    end

    def partition(num)
      @partitions.find {|p| p.num == num }
    end

    def any_partition
      @partitions[rand(@partitions.size)]
    end
  end
end
data/lib/couchproxy/rack/all_databases.rb
ADDED
@@ -0,0 +1,23 @@
# encoding: UTF-8

module CouchProxy
  module Rack
    class AllDatabases < Base
      SUFFIX = /_\d+$/

      def get
        proxy_to_all_nodes do |responses|
          dbs = responses.map do |res|
            JSON.parse(res.response).map {|name| name.gsub(SUFFIX, '') }
          end.flatten.uniq.sort
          send_response(responses.first.response_header.status,
            response_headers, [dbs.to_json])
        end
      end

      def head
        # FIXME
      end
    end
  end
end
data/lib/couchproxy/rack/base.rb
ADDED
@@ -0,0 +1,197 @@
module CouchProxy
  module Rack
    class Base
      DESIGN_ID = /^_design\/.+/
      METHODS = [:get, :put, :post, :delete, :head].freeze
      INVALID_JSON = '{"error":"bad_request","reason":"invalid UTF-8 JSON"}'.freeze

      attr_reader :request, :cluster

      def initialize(request, cluster)
        @request, @cluster = request, cluster
      end

      def method_missing(name)
        allowed = methods.map {|m| m.to_sym } & METHODS
        allowed = allowed.map {|m| m.to_s.upcase }.join(',')
        body = "{\"error\":\"method_not_allowed\",\"reason\":\"Only #{allowed} allowed\"}"
        send_response(405, response_headers, [body])
      end

      def proxy_to(node, &finish)
        head_proxy_to(node, &finish) if @request.request_method == 'HEAD'

        body, started = DeferrableBody.new, false
        uri = "#{node.uri}#{@request.fullpath}"
        http = EM::HttpRequest.new(uri)
        res = http.send(@request.request_method.downcase,
          :head => proxy_headers, :body => @request.content)
        res.stream do |chunk|
          unless started
            started = true
            head = normalize(res.response_header).tap do |h|
              h['Server'] = "CouchProxy/#{CouchProxy::VERSION}"
              if res.response_header.location
                h['Location'] = rewrite_location(res.response_header.location)
              end
            end
            send_response(res.response_header.status, head, body)
          end
          body.call([chunk])
        end
        res.callback do
          body.succeed
          finish.call if finish
        end
        res.errback { send_error_response }
      end

      def rewrite_location(uri)
        URI.parse(request.url).tap do |req|
          req.query, req.user, req.password = nil
          req.path = URI.parse(uri).path.gsub(
            /^\/#{request.db_name}_\d+/, "/#{request.db_name}")
        end.to_s
      end

      def proxy_to_any_node
        proxy_to(@cluster.any_node)
      end

      def proxy_to_any_partition
        partition = cluster.any_partition
        request.rewrite_proxy_url!(partition.num)
        proxy_to(partition.node)
      end

      def proxy_to_all_nodes(&callback)
        method = request.request_method.downcase
        multi = EM::MultiRequest.new
        cluster.nodes.each do |n|
          uri = "#{n.uri}#{@request.fullpath}"
          req = EM::HttpRequest.new(uri).send(method,
            :head => proxy_headers, :body => @request.content)
          multi.add(req)
        end
        multi.callback(&multi(&callback)) if callback
      end

      def proxy_to_all_partitions(&callback)
        method = request.request_method.downcase
        multi = EM::MultiRequest.new
        cluster.partitions.each do |p|
          uri = "#{p.node.uri}#{@request.rewrite_proxy_url(p.num)}"
          uri << "?#{@request.query_string}" unless @request.query_string.empty?
          multi.add EM::HttpRequest.new(uri).send(method,
            :head => proxy_headers, :body => @request.content)
        end
        multi.callback(&multi(&callback)) if callback
      end

      def replicate_to_all_partitions(source, *doc_ids, &callback)
        multi = EM::MultiRequest.new
        (@cluster.partitions - [source]).each do |p|
          task = {
            :source => source.uri(request.db_name),
            :target => p.uri(request.db_name),
            :doc_ids => doc_ids.flatten}
          multi.add EM::HttpRequest.new("#{p.node.uri}/_replicate").post(
            :head => proxy_headers, :body => task.to_json)
        end
        multi.callback(&multi(&callback)) if callback
      end

      def uuids(count, &callback)
        http = EM::HttpRequest.new("#{cluster.any_node.uri}/_uuids?count=#{count}").get
        http.errback { callback.call(nil) }
        http.callback do |res|
          if res.response_header.status == 200
            uuids = JSON.parse(res.response)['uuids']
            callback.call(uuids)
          else
            callback.call(nil)
          end
        end
      end

      def send_response(*args)
        @request.env['async.callback'].call(args)
      end

      def send_error_response
        send_response(503, response_headers, [])
      end

      private

      def multi(&callback)
        proc do |multi|
          if multi.responses[:failed].empty?
            err = multi.responses[:succeeded].find do |res|
              res.response_header.status >= 400
            end
            if err
              send_response(err.response_header.status,
                response_headers, err.response)
            else
              callback.call(multi.responses[:succeeded])
            end
          else
            send_error_response
          end
        end
      end

      def head_proxy_to(node, &finish)
        uri = "#{node.uri}#{@request.fullpath}"
        http = EM::HttpRequest.new(uri).head(:head => proxy_headers)
        http.callback do
          status = http.response_header.status
          headers = normalize(http.response_header)
          send_response(status, headers, [])
          finish.call if finish
        end
        http.errback { send_error_response }
        throw :async
      end

      def normalize(headers)
        headers.keys.inject({}) do |acc, k|
          normalized = k.sub('HTTP_', '').split('_').map {|p| p.capitalize }.join('-')
          acc.tap {|h| h[normalized] = headers[k] }
        end
      end

      def proxy_headers
        keys = @request.env.keys.select {|k| k.start_with?('HTTP_') || k == 'CONTENT_TYPE' }
        keys -= %w[HTTP_HOST HTTP_VERSION]
        headers = keys.inject({}) do |acc, k|
          acc.tap {|h| h[k] = @request.env[k] }
        end
        normalize(headers)
      end

      def response_headers
        type = @request.json? ? "application/json" : "text/plain;charset=utf-8"
        {
          "Server" => "CouchProxy/#{CouchProxy::VERSION}",
          "Date" => Time.now.httpdate,
          "Content-Type" => type,
          "Cache-Control" => "must-revalidate"
        }
      end

      def design?(doc_id)
        doc_id =~ DESIGN_ID
      end

      def delete_query_param(param)
        value = @request.GET.delete(param)
        if value
          @request.env['QUERY_STRING'] = ::Rack::Utils.build_query(@request.GET)
        end
        value
      end
    end
  end
end
data/lib/couchproxy/rack/bulk_docs.rb
ADDED
@@ -0,0 +1,68 @@
# encoding: UTF-8

module CouchProxy
  module Rack
    class BulkDocs < Base
      def post
        begin
          docs = JSON.parse(request.content)['docs']
        rescue
          send_response(400, response_headers, INVALID_JSON)
          return
        end

        missing = docs.select {|doc| !doc['_id'] }
        if missing.any?
          uuids(missing.size) do |uuids|
            if uuids
              missing.each {|doc| doc['_id'] = uuids.shift }
              save(docs)
            else
              send_error_response
            end
          end
        else
          save(docs)
        end
      end

      private

      def save(docs)
        designs, normals = docs.partition {|d| design?(d['_id']) }

        partitions = Hash.new {|h, k| h[k] = [] }
        normals.each do |doc|
          partitions[cluster.partition(doc['_id'])] << doc
        end
        design_partition = cluster.any_partition
        partitions[design_partition] += designs

        req = EM::MultiRequest.new
        partitions.each do |p, d|
          url = "#{p.uri(request.db_name)}/_bulk_docs"
          req.add EM::HttpRequest.new(url).post(:head => proxy_headers,
            :body => {:docs => d}.to_json)
        end

        callback = multi do |responses|
          total = responses.map {|res| JSON.parse(res.response) }.flatten
          total = docs.map do |doc|
            total.find {|d| d['id'] == doc['_id'] }
          end
          sender = proc do
            send_response(responses.first.response_header.status,
              response_headers, [total.to_json])
          end
          if designs.any?
            replicate_to_all_partitions(design_partition,
              designs.map {|d| d['_id'] }, &sender)
          else
            sender.call
          end
        end
        req.callback(&callback)
      end
    end
  end
end