ocean-rails 7.2.2 → 7.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,62 +1,62 @@
+ # #
+ # # This custom Rack middleware is used to turn off logging of requests made to
+ # # <code>/alive</code>.
+ # #
+ # # NB: This file is no longer loaded. It needs to be updated before it works again.
+ # #
+ # class SelectiveRackLogger < Rails::Rack::Logger
  #
- # This custom Rack middleware is used to turn off logging of requests made to
- # <code>/alive</code>.
+ #   #
+ #   # Initialises the selective Rack logger.
+ #   #
+ #   def initialize(app, opts = {})
+ #     @app = app
+ #     super
+ #   end
  #
- # NB: This file is no longer loaded. It needs to be updated before it works again.
+ #   #
+ #   # Suppresses logging of /alive requests from Varnish.
+ #   #
+ #   def call(env)
+ #     if env['PATH_INFO'] == "/alive"
+ #       old_level = Rails.logger.level
+ #       Rails.logger.level = 1234567890 # > 5
+ #       begin
+ #         @app.call(env) # returns [..., ..., ...]
+ #       ensure
+ #         Rails.logger.level = old_level
+ #       end
+ #     else
+ #       super(env) # returns [..., ..., ...]
+ #     end
+ #   end
+ # end
  #
- class SelectiveRackLogger < Rails::Rack::Logger
-
-   #
-   # Initialises the selective Rack logger.
-   #
-   def initialize(app, opts = {})
-     @app = app
-     super
-   end
-
-   #
-   # Suppresses logging of /alive requests from Varnish.
-   #
-   def call(env)
-     if env['PATH_INFO'] == "/alive"
-       old_level = Rails.logger.level
-       Rails.logger.level = 1234567890 # > 5
-       begin
-         @app.call(env) # returns [..., ..., ...]
-       ensure
-         Rails.logger.level = old_level
-       end
-     else
-       super(env) # returns [..., ..., ...]
-     end
-   end
- end
-
-
- class Rails::Rack::Logger
-
-   protected
-
-   #
-   # Monkey patch: no started_request_message logged in production.
-   #
-   def call_app(request, env)
-     # Put some space between requests in development logs.
-     if development?
-       logger.debug ''
-       logger.debug ''
-     end
-
-     instrumenter = ActiveSupport::Notifications.instrumenter
-     instrumenter.start 'request.action_dispatch', request: request
-     logger.info started_request_message(request) unless Rails.env.production?
-     resp = @app.call(env)
-     resp[2] = ::Rack::BodyProxy.new(resp[2]) { finish(request) }
-     resp
-   rescue
-     finish(request)
-     raise
-   ensure
-     ActiveSupport::LogSubscriber.flush_all!
-   end
- end
+ #
+ # class Rails::Rack::Logger
+ #
+ #   protected
+ #
+ #   #
+ #   # Monkey patch: no started_request_message logged in production.
+ #   #
+ #   def call_app(request, env)
+ #     # Put some space between requests in development logs.
+ #     if development?
+ #       logger.debug ''
+ #       logger.debug ''
+ #     end
+ #
+ #     instrumenter = ActiveSupport::Notifications.instrumenter
+ #     instrumenter.start 'request.action_dispatch', request: request
+ #     logger.info started_request_message(request) unless Rails.env.production?
+ #     resp = @app.call(env)
+ #     resp[2] = ::Rack::BodyProxy.new(resp[2]) { finish(request) }
+ #     resp
+ #   rescue
+ #     finish(request)
+ #     raise
+ #   ensure
+ #     ActiveSupport::LogSubscriber.flush_all!
+ #   end
+ # end
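
For context: in 7.2.3 the SelectiveRackLogger file above is commented out wholesale (its header already notes that it is no longer loaded). A middleware of this shape is normally installed by swapping it in for the stock Rails request logger from an initializer. The sketch below is hypothetical and not part of the gem; the initializer path and the assumption that the class is loaded are illustrative only.

    # config/initializers/selective_rack_logger.rb  (hypothetical, not shipped in the gem)
    # Replace the default Rails request logger with the selective one so that
    # /alive health-check requests do not generate log lines.
    Rails.application.configure do
      config.middleware.swap Rails::Rack::Logger, SelectiveRackLogger
    end
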
data/lib/ocean/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Ocean
-   VERSION = "7.2.2"
+   VERSION = "7.2.3"
  end
@@ -1,188 +1,188 @@
- require 'ffi-rzmq'
-
- #
- # This is the class that provides the interface to ZeroMQ, both for sending log
- # data to a cluster of log servers, and for receiving it.
- #
- # The Rails logger is replaced in production by the initializer zero_mq_logger.rb.
- #
- # The worker daemons running on the log machines use the pull_server and
- # pull_worker methods in this class.
- #
- class ZeroLog
-
-   #
-   # This is the ZeroMQ context. We only need one per process, thus we
-   # store it in a class variable.
-   #
-   @@context = nil
-
-
-   #
-   # When the first ZeroLog instance is created, a ZeroMQ Context will be
-   # set up for the use of all threads in the process. A process exit
-   # handler will be set up to terminate the context on exit.
-   #
-   def initialize(sub_push, log_hosts)
-     super()
-     unless @@context
-       #puts "Creating context"
-       @@context = ZMQ::Context.new(1)
-       #puts "Registering at_exit context terminator"
-       at_exit { @@context.terminate
-                 #puts "Context terminated"
-               }
-       #puts "Initialising the log data sender thread"
-       setup_sub_server(sub_push, log_hosts)
-     end
-     connect_to_sub_server(sub_push)
-   end
-
-
-   #
-   # This method returns the ZeroMQ Context object.
-   #
-   def context
-     @@context
-   end
-
-
-   #
-   # Register a trap handler for INT, to clean up and close any
-   # sockets we might have opened previously.
-   #
-   def trap_int(*sockets)
-     sockets.each do |s|
-       #puts "Registering at_exit socket closer"
-       at_exit { s.setsockopt(ZMQ::LINGER, 0)
-                 s.close
-                 #puts "Socket closed"
-               }
-     end
-   end
-
-
-   # ---------------------------------------------------------------------------
-   #
-   # Log data sender side (ruby, Ruby on Rails)
-   #
-   # ---------------------------------------------------------------------------
-
-   #
-   # This initialises the log data sender. We begin by creating the server and
-   # binding it; this is required when using +inproc+ sockets: bind must precede
-   # any connects. (Normally in ZeroMQ, this is not the case.) The code sets up,
-   # in a new thread, a +SUB+ server which is then connected to a +PUSH+ socket. The
-   # +SUB+ server waits for log data which is then pushed via the +PUSH+ socket to
-   # all available +LOG_HOSTS+. ZeroMQ handles buffering, re-connection and multiplexing
-   # (round-robin) to the +LOG_HOSTS+ +PULL+ servers.
-   #
-   def setup_sub_server(sub_push="sub_push", log_hosts=[])
-     # First create the server and let it bind, as this is required
-     # when using inproc: bind must precede any connects.
-     Thread.new(context) do |c|
-       # Set up the server socket
-       #puts "Starting SUB server"
-       subscriber = c.socket(ZMQ::SUB)
-       #puts "Binding to the SUB socket"
-       subscriber.bind("inproc://#{sub_push}")
-       subscriber.setsockopt(ZMQ::SUBSCRIBE, "")
-       # Set up the PUSH socket
-       loggers = context.socket(ZMQ::PUSH)
-       log_hosts.each do |host|
-         #puts "Connecting to the PULL server #{host}"
-         loggers.connect("tcp://#{host}:9999")
-       end
-       # Connect SUB to PUSH via a queue and block
-       ZMQ::Device.new(ZMQ::QUEUE, subscriber, loggers)
-       # Context has been terminated, close sockets
-       subscriber.setsockopt(ZMQ::LINGER, 0)
-       subscriber.close
-       loggers.setsockopt(ZMQ::LINGER, 0)
-       loggers.close
-       #puts "Closed sockets in other thread"
-     end
-     sleep 0.1 # Brute force and primitive, but it works
-   end
-
-
-   def connect_to_sub_server(sub_push="sub_push")
-     # Next create the PUB socket and connect it to the other thread
-     #puts "Creating the PUB socket"
-     $log_publisher = context.socket(ZMQ::PUB)
-     trap_int($log_publisher)
-     #puts "Connecting to the SUB server"
-     $log_publisher.connect("inproc://#{sub_push}")
-   end
-
-
-   #
-   # This is a combination of a PUB main thread log data sender,
-   # pushing directly to the pull_server without the need for a local
-   # aggregator.
-   #
-   def log(data)
-     init_log_data_sender unless $log_publisher
-
-     # Send the data
-     json = data.to_json
-     #puts "Sending message #{json}"
-     $log_publisher.send_string(json)
-     data
-   end
-
-
-   # ---------------------------------------------------------------------------
-   #
-   # Log service (aggregation) side
-   #
-   # ---------------------------------------------------------------------------
-
-   #
-   # This is the inter-process ZeroMQ socket to which the Log workers send
-   # their received data.
-   #
-   PUSH_ADDRESS = "ipc://pull_worker.ipc"
-
-
-   #
-   # This is the PULL to PUSH server. It pushes the received data over IPC
-   # round-robin fashion to each PULL log worker on the machine.
-   #
-   def pull_server(address=PUSH_ADDRESS)
-     #puts "Starting PULL server"
-     puller = context.socket(ZMQ::PULL)
-     #puts "Binding to the PULL socket"
-     puller.bind("tcp://*:9999")
-     # Set up the PUSH socket
-     pusher = context.socket(ZMQ::PUSH)
-     #puts "Binding to the PUSH socket"
-     pusher.bind(address)
-     # Trap everything
-     trap_int(puller, pusher)
-     # Connect PULL to PUSH via a queue
-     ZMQ::Device.new(ZMQ::QUEUE, puller, pusher)
-   end
-
-
-   #
-   # This is the PULL worker. This particular version just prints all
-   # received data to STDOUT. Overload this method for other types of
-   # processing (such as storing received log info in a database).
-   #
-   def pull_worker(address=PUSH_ADDRESS)
-     #puts "Starting PULL worker"
-     puller = context.socket(ZMQ::PULL)
-     #puts "Connect to the PUSH socket"
-     puller.connect(address)
-     trap_int(puller)
-
-     while true do
-       s = ''
-       puller.recv_string(s)
-       puts "Received: #{s}"
-     end
-   end
-
-
- end
+ # require 'ffi-rzmq'
+ #
+ # #
+ # # This is the class that provides the interface to ZeroMQ, both for sending log
+ # # data to a cluster of log servers, and for receiving it.
+ # #
+ # # The Rails logger is replaced in production by the initializer zero_mq_logger.rb.
+ # #
+ # # The worker daemons running on the log machines use the pull_server and
+ # # pull_worker methods in this class.
+ # #
+ # class ZeroLog
+ #
+ #   #
+ #   # This is the ZeroMQ context. We only need one per process, thus we
+ #   # store it in a class variable.
+ #   #
+ #   @@context = nil
+ #
+ #
+ #   #
+ #   # When the first ZeroLog instance is created, a ZeroMQ Context will be
+ #   # set up for the use of all threads in the process. A process exit
+ #   # handler will be set up to terminate the context on exit.
+ #   #
+ #   def initialize(sub_push, log_hosts)
+ #     super()
+ #     unless @@context
+ #       #puts "Creating context"
+ #       @@context = ZMQ::Context.new(1)
+ #       #puts "Registering at_exit context terminator"
+ #       at_exit { @@context.terminate
+ #                 #puts "Context terminated"
+ #               }
+ #       #puts "Initialising the log data sender thread"
+ #       setup_sub_server(sub_push, log_hosts)
+ #     end
+ #     connect_to_sub_server(sub_push)
+ #   end
+ #
+ #
+ #   #
+ #   # This method returns the ZeroMQ Context object.
+ #   #
+ #   def context
+ #     @@context
+ #   end
+ #
+ #
+ #   #
+ #   # Register a trap handler for INT, to clean up and close any
+ #   # sockets we might have opened previously.
+ #   #
+ #   def trap_int(*sockets)
+ #     sockets.each do |s|
+ #       #puts "Registering at_exit socket closer"
+ #       at_exit { s.setsockopt(ZMQ::LINGER, 0)
+ #                 s.close
+ #                 #puts "Socket closed"
+ #               }
+ #     end
+ #   end
+ #
+ #
+ #   # ---------------------------------------------------------------------------
+ #   #
+ #   # Log data sender side (ruby, Ruby on Rails)
+ #   #
+ #   # ---------------------------------------------------------------------------
+ #
+ #   #
+ #   # This initialises the log data sender. We begin by creating the server and
+ #   # binding it; this is required when using +inproc+ sockets: bind must precede
+ #   # any connects. (Normally in ZeroMQ, this is not the case.) The code sets up,
+ #   # in a new thread, a +SUB+ server which is then connected to a +PUSH+ socket. The
+ #   # +SUB+ server waits for log data which is then pushed via the +PUSH+ socket to
+ #   # all available +LOG_HOSTS+. ZeroMQ handles buffering, re-connection and multiplexing
+ #   # (round-robin) to the +LOG_HOSTS+ +PULL+ servers.
+ #   #
+ #   def setup_sub_server(sub_push="sub_push", log_hosts=[])
+ #     # First create the server and let it bind, as this is required
+ #     # when using inproc: bind must precede any connects.
+ #     Thread.new(context) do |c|
+ #       # Set up the server socket
+ #       #puts "Starting SUB server"
+ #       subscriber = c.socket(ZMQ::SUB)
+ #       #puts "Binding to the SUB socket"
+ #       subscriber.bind("inproc://#{sub_push}")
+ #       subscriber.setsockopt(ZMQ::SUBSCRIBE, "")
+ #       # Set up the PUSH socket
+ #       loggers = context.socket(ZMQ::PUSH)
+ #       log_hosts.each do |host|
+ #         #puts "Connecting to the PULL server #{host}"
+ #         loggers.connect("tcp://#{host}:9999")
+ #       end
+ #       # Connect SUB to PUSH via a queue and block
+ #       ZMQ::Device.new(ZMQ::QUEUE, subscriber, loggers)
+ #       # Context has been terminated, close sockets
+ #       subscriber.setsockopt(ZMQ::LINGER, 0)
+ #       subscriber.close
+ #       loggers.setsockopt(ZMQ::LINGER, 0)
+ #       loggers.close
+ #       #puts "Closed sockets in other thread"
+ #     end
+ #     sleep 0.1 # Brute force and primitive, but it works
+ #   end
+ #
+ #
+ #   def connect_to_sub_server(sub_push="sub_push")
+ #     # Next create the PUB socket and connect it to the other thread
+ #     #puts "Creating the PUB socket"
+ #     $log_publisher = context.socket(ZMQ::PUB)
+ #     trap_int($log_publisher)
+ #     #puts "Connecting to the SUB server"
+ #     $log_publisher.connect("inproc://#{sub_push}")
+ #   end
+ #
+ #
+ #   #
+ #   # This is a combination of a PUB main thread log data sender,
+ #   # pushing directly to the pull_server without the need for a local
+ #   # aggregator.
+ #   #
+ #   def log(data)
+ #     init_log_data_sender unless $log_publisher
+ #
+ #     # Send the data
+ #     json = data.to_json
+ #     #puts "Sending message #{json}"
+ #     $log_publisher.send_string(json)
+ #     data
+ #   end
+ #
+ #
+ #   # ---------------------------------------------------------------------------
+ #   #
+ #   # Log service (aggregation) side
+ #   #
+ #   # ---------------------------------------------------------------------------
+ #
+ #   #
+ #   # This is the inter-process ZeroMQ socket to which the Log workers send
+ #   # their received data.
+ #   #
+ #   PUSH_ADDRESS = "ipc://pull_worker.ipc"
+ #
+ #
+ #   #
+ #   # This is the PULL to PUSH server. It pushes the received data over IPC
+ #   # round-robin fashion to each PULL log worker on the machine.
+ #   #
+ #   def pull_server(address=PUSH_ADDRESS)
+ #     #puts "Starting PULL server"
+ #     puller = context.socket(ZMQ::PULL)
+ #     #puts "Binding to the PULL socket"
+ #     puller.bind("tcp://*:9999")
+ #     # Set up the PUSH socket
+ #     pusher = context.socket(ZMQ::PUSH)
+ #     #puts "Binding to the PUSH socket"
+ #     pusher.bind(address)
+ #     # Trap everything
+ #     trap_int(puller, pusher)
+ #     # Connect PULL to PUSH via a queue
+ #     ZMQ::Device.new(ZMQ::QUEUE, puller, pusher)
+ #   end
+ #
+ #
+ #   #
+ #   # This is the PULL worker. This particular version just prints all
+ #   # received data to STDOUT. Overload this method for other types of
+ #   # processing (such as storing received log info in a database).
+ #   #
+ #   def pull_worker(address=PUSH_ADDRESS)
+ #     #puts "Starting PULL worker"
+ #     puller = context.socket(ZMQ::PULL)
+ #     #puts "Connect to the PUSH socket"
+ #     puller.connect(address)
+ #     trap_int(puller)
+ #
+ #     while true do
+ #       s = ''
+ #       puller.recv_string(s)
+ #       puts "Received: #{s}"
+ #     end
+ #   end
+ #
+ #
+ # end
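
For context: the entire ZeroLog class is likewise commented out in 7.2.3, so none of the code above runs in this release. The sketch below is a hypothetical illustration of how the class was meant to be driven, assuming ffi-rzmq is installed and the file is actually loaded; the host names are placeholders, not values from the gem.

    # Hypothetical usage, not part of the gem.
    #
    # Sending side (e.g. inside a Rails process): create one instance, then publish
    # structured log data; ZeroMQ pushes it round-robin to the listed log hosts.
    zero_log = ZeroLog.new("sub_push", ["log1.example.com", "log2.example.com"])
    zero_log.log(service: "ocean", level: "INFO", msg: "request completed")

    # Log host side: each of these calls blocks, so run them in separate processes.
    # One process acts as the PULL-to-PUSH server on tcp://*:9999 ...
    ZeroLog.new("sub_push", []).pull_server
    # ... and one or more worker processes consume from ipc://pull_worker.ipc,
    # printing each received message to STDOUT.
    ZeroLog.new("sub_push", []).pull_worker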