sumologic-query 1.2.0 → 1.3.0

This diff shows the changes between publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 7b7bbf8f091fa6d3c6e213a34c1ba72952c43aa995814834379c30a051d3f4f5
4
- data.tar.gz: 4e7bc3f985d01543a5af7ce89d7746944107d4fd99ecf4367c276a5e550dbdca
3
+ metadata.gz: 120fb7b7d2ce1d70d3dbb921224e068ace399125dcd7cfab98633d4e58d7514a
4
+ data.tar.gz: 4e256d592e59a754baf942f07b81a63c4c2cce95f0d2d671cfff320d211c7d86
5
5
  SHA512:
6
- metadata.gz: 4f34b22f610d453649fff4926c7dfdff0fd2d5ff18a2e5fa6994cf1f41635990c28ace805fb2508520a1f016899c99f71ba31be2d5b0771a8b30d9e781f310a2
7
- data.tar.gz: 0a12a47f8ddfeda0bab912f2011a033b121e01f3c63b19d9fa80e7cfa0a7df32a7277784a92a88ca988819a1f42c717c61dbbae79d5bab154161d9cff5f571a8
6
+ metadata.gz: c18535243d40611c287544902924646314323aa3b01df87b9e6758c9af62afedb610753c9e27d0fec3a338f1c911fe5fca0245140c6acc18c1e099136aab4c18
7
+ data.tar.gz: b88b9887a0886962844737bd3087e9282f9fb2402e619de5ec919853727bc59f6e6c5baeeefff82cd63509c35bb9d7c36b3e626aef75aaa75e90f296a8ae0c78
data/CHANGELOG.md CHANGED
@@ -1,15 +1,20 @@
1
- # Changelog
1
+ ## [1.2.2](https://github.com/patrick204nqh/sumologic-query/compare/v1.2.1...v1.2.2) (2025-11-15)
2
2
 
3
- All notable changes to this project are documented in [GitHub Releases](https://github.com/patrick204nqh/sumologic-query/releases).
4
3
 
5
- This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
6
4
 
7
- ## Releases
5
+ ## [1.2.1](https://github.com/patrick204nqh/sumologic-query/compare/v1.2.0...v1.2.1) (2025-11-14)
6
+
7
+
8
+
9
+ # [1.2.0](https://github.com/patrick204nqh/sumologic-query/compare/v1.1.2...v1.2.0) (2025-11-14)
10
+
11
+
12
+
13
+ ## [1.1.2](https://github.com/patrick204nqh/sumologic-query/compare/v1.1.1...v1.1.2) (2025-11-14)
14
+
15
+
16
+
17
+ ## [1.1.1](https://github.com/patrick204nqh/sumologic-query/compare/v1.1.0...v1.1.1) (2025-11-14)
8
18
 
9
- - [v1.1.0](https://github.com/patrick204nqh/sumologic-query/releases/tag/v1.1.0) - Latest
10
- - [v1.0.0](https://github.com/patrick204nqh/sumologic-query/releases/tag/v1.0.0) - Initial release
11
19
 
12
- ---
13
20
 
14
- **Note:** Release notes are automatically generated from commit messages and pull requests.
15
- See [GitHub Releases](https://github.com/patrick204nqh/sumologic-query/releases) for detailed changelogs.
data/README.md CHANGED
@@ -86,7 +86,8 @@ sumo-query search --query "YOUR_QUERY" \
86
86
  --to "END_TIME" \
87
87
  [--output FILE] \
88
88
  [--limit N] \
89
- [--time-zone TZ]
89
+ [--time-zone TZ] \
90
+ [--interactive]
90
91
  ```
91
92
 
92
93
  **Required options:**
@@ -95,11 +96,50 @@ sumo-query search --query "YOUR_QUERY" \
95
96
  - `-t, --to TIME` - End time (ISO 8601 format)
96
97
 
97
98
  **Optional options:**
99
+ - `-i, --interactive` - Launch interactive browser with FZF
98
100
  - `-z, --time-zone TZ` - Time zone (default: UTC)
99
101
  - `-l, --limit N` - Limit number of messages
100
102
  - `-o, --output FILE` - Save to file (default: stdout)
101
103
  - `-d, --debug` - Enable debug output
102
104
 
105
+ ### Interactive Mode 🚀
106
+
107
+ Explore your logs interactively with a powerful FZF-based interface:
108
+
109
+ ```bash
110
+ # Launch interactive mode
111
+ sumo-query search --query 'error' \
112
+ --from '2025-11-13T14:00:00' \
113
+ --to '2025-11-13T15:00:00' \
114
+ --interactive
115
+
116
+ # Or use the shorthand
117
+ sumo-query search -q 'error' -f '2025-11-13T14:00:00' -t '2025-11-13T15:00:00' -i
118
+ ```
119
+
120
+ **Features:**
121
+ - 🔍 Fuzzy search across all message fields
122
+ - 👁️ Live preview with full JSON details
123
+ - 🎨 Color-coded log levels (ERROR, WARN, INFO)
124
+ - ⌨️ Keyboard shortcuts for quick actions
125
+ - 📋 Multi-select and batch operations
126
+ - 💾 Export selected messages
127
+
128
+ **Keybindings:**
129
+ - `Enter` - Toggle selection (mark/unmark message)
130
+ - `Tab` - Open current message in pager (copyable view)
131
+ - `Ctrl-S` - Save selected messages to `sumo-selected.txt` and exit
132
+ - `Ctrl-Y` - Copy selected messages to clipboard and exit
133
+ - `Ctrl-E` - Export selected messages to `sumo-export.jsonl` and exit
134
+ - `Ctrl-A` - Select all messages
135
+ - `Ctrl-D` - Deselect all messages
136
+ - `Ctrl-/` - Toggle preview pane
137
+ - `Ctrl-Q` - Quit without saving
138
+
139
+ **Requirements:**
140
+ - Install FZF: `brew install fzf` (macOS) or `apt-get install fzf` (Linux)
141
+ - See: https://github.com/junegunn/fzf#installation
142
+
103
143
  ### List Collectors
104
144
 
105
145
  ```bash
@@ -215,7 +255,7 @@ Query execution time depends on data volume:
215
255
  - **[Quick Reference (tldr)](docs/tldr.md)** - Concise command examples in tldr format
216
256
  - **[Query Examples](examples/queries.md)** - Common query patterns and use cases
217
257
  - **[API Reference](docs/api-reference.md)** - Complete Ruby library documentation
218
- - **[Architecture](docs/architecture.md)** - How the tool works internally
258
+ - **[Architecture](docs/architecture/)** - System design and architecture decisions
219
259
  - **[Troubleshooting](docs/troubleshooting.md)** - Common issues and solutions
220
260
 
221
261
  ## Development
data/lib/sumologic/cli.rb CHANGED
@@ -26,12 +26,18 @@ module Sumologic
26
26
  sumo-query search --query '"connection timeout"' \\
27
27
  --from '2025-11-13T14:00:00' --to '2025-11-13T15:00:00' \\
28
28
  --limit 100
29
+
30
+ # Interactive mode with FZF
31
+ sumo-query search --query 'error' \\
32
+ --from '2025-11-13T14:00:00' --to '2025-11-13T15:00:00' \\
33
+ --interactive
29
34
  DESC
30
35
  option :query, type: :string, required: true, aliases: '-q', desc: 'Search query'
31
36
  option :from, type: :string, required: true, aliases: '-f', desc: 'Start time (ISO 8601)'
32
37
  option :to, type: :string, required: true, aliases: '-t', desc: 'End time (ISO 8601)'
33
38
  option :time_zone, type: :string, default: 'UTC', aliases: '-z', desc: 'Time zone'
34
- option :limit, type: :numeric, aliases: '-l', desc: 'Limit number of messages'
39
+ option :limit, type: :numeric, aliases: '-l', desc: 'Maximum messages to return'
40
+ option :interactive, type: :boolean, aliases: '-i', desc: 'Launch interactive browser (requires fzf)'
35
41
  def search
36
42
  $DEBUG = true if options[:debug]
37
43
 
@@ -39,9 +45,17 @@ module Sumologic
39
45
 
40
46
  log_search_info
41
47
  results = execute_search(client)
42
- output_search_results(results)
43
48
 
44
- warn "\nMessage count: #{results.size}"
49
+ warn '=' * 60
50
+ warn "Results: #{results.size} messages"
51
+ warn '=' * 60
52
+ $stderr.puts
53
+
54
+ if options[:interactive]
55
+ launch_interactive_mode(results)
56
+ else
57
+ output_search_results(results)
58
+ end
45
59
  end
46
60
 
47
61
  desc 'list-collectors', 'List all Sumo Logic collectors'
@@ -178,9 +192,14 @@ module Sumologic
178
192
  end
179
193
 
180
194
  def log_search_info
181
- warn "Querying Sumo Logic: #{options[:from]} to #{options[:to]}"
195
+ warn '=' * 60
196
+ warn 'Sumo Logic Search Query'
197
+ warn '=' * 60
198
+ warn "Time Range: #{options[:from]} to #{options[:to]}"
182
199
  warn "Query: #{options[:query]}"
183
- warn 'This may take 1-3 minutes depending on data volume...'
200
+ warn "Limit: #{options[:limit] || 'unlimited'}"
201
+ warn '-' * 60
202
+ warn 'Creating search job...'
184
203
  $stderr.puts
185
204
  end
186
205
 
@@ -204,5 +223,24 @@ module Sumologic
204
223
  messages: results
205
224
  )
206
225
  end
226
+
227
+ def launch_interactive_mode(results)
228
+ require_relative 'interactive'
229
+
230
+ # Format results for interactive mode
231
+ formatted_results = {
232
+ 'query' => options[:query],
233
+ 'from' => options[:from],
234
+ 'to' => options[:to],
235
+ 'time_zone' => options[:time_zone],
236
+ 'message_count' => results.size,
237
+ 'messages' => results
238
+ }
239
+
240
+ Sumologic::Interactive.launch(formatted_results)
241
+ rescue Sumologic::Interactive::Error => e
242
+ error e.message
243
+ exit 1
244
+ end
207
245
  end
208
246
  end
@@ -28,6 +28,8 @@ module Sumologic
28
28
 
29
29
  # Search logs with query
30
30
  # Returns array of messages
31
+ #
32
+ # @param limit [Integer, nil] Maximum number of messages to return (stops fetching after limit)
31
33
  def search(query:, from_time:, to_time:, time_zone: 'UTC', limit: nil)
32
34
  @search.execute(
33
35
  query: query,
@@ -15,8 +15,8 @@ module Sumologic
15
15
  @deployment = ENV['SUMO_DEPLOYMENT'] || 'us2'
16
16
 
17
17
  # Search job polling
18
- @initial_poll_interval = 5 # seconds - start fast for small queries
19
- @max_poll_interval = 20 # seconds - slow down for large queries
18
+ @initial_poll_interval = 2 # seconds - aggressive polling for faster response
19
+ @max_poll_interval = 15 # seconds - slow down for large queries
20
20
  @poll_backoff_factor = 1.5 # increase interval by 50% each time
21
21
 
22
22
  # Timeouts and limits
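
The configuration change above shortens the initial poll interval from 5s to 2s and lowers the cap from 20s to 15s, while keeping the 1.5 backoff factor. A minimal sketch of the resulting schedule, assuming each poll multiplies the interval by the factor and clamps it at the maximum (the actual loop lives in `Sumologic::Search::Poller` and is not part of this hunk):

```ruby
# Sketch of the backoff schedule implied by the new defaults.
# Assumption: the interval grows multiplicatively and is clamped at the max.
initial_poll_interval = 2    # seconds (was 5)
max_poll_interval     = 15   # seconds (was 20)
poll_backoff_factor   = 1.5

intervals = []
interval = initial_poll_interval
6.times do
  intervals << interval.round(2)
  interval = [interval * poll_backoff_factor, max_poll_interval].min
end

p intervals
# => [2, 3.0, 4.5, 6.75, 10.13, 15]
# Roughly 41s of cumulative waiting over six polls, versus about 81s
# with the old 5s/20s defaults.
```
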
@@ -3,27 +3,36 @@
3
3
  require 'net/http'
4
4
  require 'json'
5
5
  require 'uri'
6
+ require_relative 'connection_pool'
6
7
 
7
8
  module Sumologic
8
9
  module Http
9
10
  # Handles HTTP communication with Sumo Logic API
10
11
  # Responsibilities: request execution, error handling, SSL configuration
12
+ # Uses connection pooling for thread-safe parallel requests
11
13
  class Client
12
- READ_TIMEOUT = 60
13
- OPEN_TIMEOUT = 10
14
-
15
14
  def initialize(base_url:, authenticator:)
16
15
  @base_url = base_url
17
16
  @authenticator = authenticator
17
+ @connection_pool = ConnectionPool.new(base_url: base_url, max_connections: 10)
18
18
  end
19
19
 
20
20
  # Execute HTTP request with error handling
21
+ # Uses connection pool for thread-safe parallel execution
21
22
  def request(method:, path:, body: nil, query_params: nil)
22
23
  uri = build_uri(path, query_params)
23
24
  request = build_request(method, uri, body)
24
25
 
25
26
  response = execute_request(uri, request)
26
27
  handle_response(response)
28
+ rescue Errno::ECONNRESET, Errno::EPIPE, EOFError, Net::HTTPBadResponse => e
29
+ # Connection error - raise for retry at higher level
30
+ raise Error, "Connection error: #{e.message}"
31
+ end
32
+
33
+ # Close all connections in the pool
34
+ def close_all_connections
35
+ @connection_pool.close_all
27
36
  end
28
37
 
29
38
  private
@@ -55,12 +64,9 @@ module Sumologic
55
64
  end
56
65
 
57
66
  def execute_request(uri, request)
58
- http = Net::HTTP.new(uri.host, uri.port)
59
- http.use_ssl = true
60
- http.read_timeout = READ_TIMEOUT
61
- http.open_timeout = OPEN_TIMEOUT
62
-
63
- http.request(request)
67
+ @connection_pool.with_connection(uri) do |http|
68
+ http.request(request)
69
+ end
64
70
  end
65
71
 
66
72
  def handle_response(response)
@@ -0,0 +1,97 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Sumologic
4
+ module Http
5
+ # Thread-safe connection pool for HTTP clients
6
+ # Allows multiple threads to have their own connections
7
+ class ConnectionPool
8
+ READ_TIMEOUT = 60
9
+ OPEN_TIMEOUT = 10
10
+
11
+ def initialize(base_url:, max_connections: 10)
12
+ @base_url = base_url
13
+ @max_connections = max_connections
14
+ @pool = []
15
+ @mutex = Mutex.new
16
+ end
17
+
18
+ # Get a connection from the pool (or create new one)
19
+ def with_connection(uri)
20
+ connection = acquire_connection(uri)
21
+ yield connection
22
+ ensure
23
+ release_connection(connection) if connection
24
+ end
25
+
26
+ # Close all connections in the pool
27
+ def close_all
28
+ @mutex.synchronize do
29
+ @pool.each do |conn|
30
+ conn[:http].finish if conn[:http].started?
31
+ rescue StandardError => e
32
+ warn "Error closing connection: #{e.message}"
33
+ end
34
+ @pool.clear
35
+ end
36
+ end
37
+
38
+ private
39
+
40
+ def acquire_connection(uri)
41
+ @mutex.synchronize do
42
+ # Try to find an available connection for this host
43
+ connection = find_available_connection(uri)
44
+ return connection[:http] if connection
45
+
46
+ # Create new connection if under limit
47
+ if @pool.size < @max_connections
48
+ http = create_connection(uri)
49
+ @pool << { http: http, in_use: true, host: uri.host, port: uri.port }
50
+ return http
51
+ end
52
+
53
+ # Wait and retry if pool is full
54
+ nil
55
+ end || create_temporary_connection(uri)
56
+ end
57
+
58
+ def find_available_connection(uri)
59
+ connection = @pool.find do |conn|
60
+ !conn[:in_use] &&
61
+ conn[:host] == uri.host &&
62
+ conn[:port] == uri.port &&
63
+ conn[:http].started?
64
+ rescue StandardError
65
+ # Connection is invalid
66
+ @pool.delete(conn)
67
+ nil
68
+ end
69
+
70
+ connection[:in_use] = true if connection
71
+ connection
72
+ end
73
+
74
+ def release_connection(http)
75
+ @mutex.synchronize do
76
+ connection = @pool.find { |conn| conn[:http] == http }
77
+ connection[:in_use] = false if connection
78
+ end
79
+ end
80
+
81
+ def create_connection(uri)
82
+ http = Net::HTTP.new(uri.host, uri.port)
83
+ http.use_ssl = true
84
+ http.read_timeout = READ_TIMEOUT
85
+ http.open_timeout = OPEN_TIMEOUT
86
+ http.keep_alive_timeout = 30
87
+ http.start
88
+ http
89
+ end
90
+
91
+ def create_temporary_connection(uri)
92
+ # Fallback: create a temporary connection if pool is exhausted
93
+ create_connection(uri)
94
+ end
95
+ end
96
+ end
97
+ end
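
In this release, `Http::Client` routes every request through `ConnectionPool#with_connection` (see the `client.rb` hunk above). Below is a minimal standalone sketch of the same pattern; the base URL and path are illustrative, no authentication headers are set, and it assumes `require 'sumologic'` loads the pool via `http/client`:

```ruby
require 'net/http'
require 'uri'
require 'sumologic' # assumption: pulls in Sumologic::Http::ConnectionPool

# Illustrative endpoint; real deployments use their own Sumo Logic API base URL.
base_url = 'https://api.us2.sumologic.com/api/v1'
pool = Sumologic::Http::ConnectionPool.new(base_url: base_url, max_connections: 10)

uri = URI.parse("#{base_url}/collectors")
response = pool.with_connection(uri) do |http|
  # `http` is a started, keep-alive Net::HTTP connection, reused across
  # calls to the same host and released back to the pool afterwards.
  http.request(Net::HTTP::Get.new(uri.request_uri))
end

puts response.code
pool.close_all
```
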
@@ -0,0 +1,187 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'json'
4
+ require 'tempfile'
5
+ require 'time'
6
+ require 'open3'
7
+ require 'shellwords'
8
+
9
+ module Sumologic
10
+ module Interactive
11
+ class FzfViewer
12
+ DELIMITER = '||'
13
+
14
+ def initialize(results)
15
+ @results = results
16
+ @messages = results['messages'] || []
17
+ end
18
+
19
+ def run
20
+ return if @messages.empty?
21
+
22
+ Dir.mktmpdir('sumo-interactive') do |tmpdir|
23
+ input_file = File.join(tmpdir, 'input.txt')
24
+ preview_file = File.join(tmpdir, 'preview.jsonl')
25
+
26
+ prepare_data(input_file, preview_file)
27
+ selected = execute_fzf(input_file, preview_file)
28
+ handle_selection(selected) unless selected.empty?
29
+ end
30
+ end
31
+
32
+ private
33
+
34
+ def prepare_data(input_file, preview_file)
35
+ # Write data lines only (no header in file - handled by FZF --header)
36
+ File.open(input_file, 'w') do |f|
37
+ @messages.each do |msg|
38
+ f.puts format_line(msg)
39
+ end
40
+ end
41
+
42
+ # Write JSONL for preview (one JSON per line, line numbers match input)
43
+ File.open(preview_file, 'w') do |f|
44
+ @messages.each do |msg|
45
+ f.puts JSON.generate(msg['map'])
46
+ end
47
+ end
48
+ end
49
+
50
+ def format_line(msg)
51
+ map = msg['map']
52
+
53
+ time = format_time(map['_messagetime'])
54
+ level = format_level(map['level'] || map['severity'] || 'INFO')
55
+ source = truncate(map['_sourceCategory'] || '-', 25)
56
+ message = truncate(sanitize(map['_raw'] || map['message'] || ''), 80)
57
+
58
+ # No index in display - use FZF line number instead
59
+ "#{time} #{level} #{source.ljust(25)} #{message}"
60
+ end
61
+
62
+ def format_time(timestamp_ms)
63
+ return 'N/A' unless timestamp_ms
64
+
65
+ Time.at(timestamp_ms.to_i / 1000).strftime('%H:%M:%S')
66
+ end
67
+
68
+ def format_level(level)
69
+ level_str = level.to_s.upcase.ljust(7)
70
+
71
+ case level_str.strip
72
+ when 'ERROR', 'FATAL', 'CRITICAL'
73
+ "\e[31m#{level_str}\e[0m" # Red
74
+ when 'WARN', 'WARNING'
75
+ "\e[33m#{level_str}\e[0m" # Yellow
76
+ when 'INFO'
77
+ "\e[36m#{level_str}\e[0m" # Cyan
78
+ when 'DEBUG', 'TRACE'
79
+ "\e[90m#{level_str}\e[0m" # Gray
80
+ else
81
+ level_str
82
+ end
83
+ end
84
+
85
+ def sanitize(text)
86
+ text.to_s.gsub(/[\n\r\t]/, ' ').squeeze(' ')
87
+ end
88
+
89
+ def truncate(text, length)
90
+ text = text.to_s
91
+ text.length > length ? "#{text[0...(length - 3)]}..." : text
92
+ end
93
+
94
+ def colorize_json(data)
95
+ JSON.pretty_generate(data)
96
+ end
97
+
98
+ def execute_fzf(input_path, preview_path)
99
+ fzf_args = build_fzf_args(input_path, preview_path)
100
+
101
+ # Use IO.popen with array to avoid shell escaping issues
102
+ result = IO.popen(fzf_args, 'r+') do |io|
103
+ File.readlines(input_path).each { |line| io.puts line }
104
+ io.close_write
105
+ io.read
106
+ end
107
+
108
+ result.strip
109
+ end
110
+
111
+ def build_fzf_args(input_path, preview_path)
112
+ preview_cmd = build_preview_command(preview_path)
113
+ view_cmd = build_view_command(preview_path)
114
+ header_text = build_header_text
115
+
116
+ [
117
+ 'fzf',
118
+ '--ansi',
119
+ '--multi',
120
+ "--header=#{header_text}",
121
+ "--preview=#{preview_cmd}",
122
+ '--preview-window=right:60%:wrap:follow',
123
+ '--bind=enter:toggle',
124
+ "--bind=tab:execute(#{view_cmd})",
125
+ '--bind=ctrl-a:select-all',
126
+ '--bind=ctrl-d:deselect-all',
127
+ '--bind=ctrl-s:execute-silent(echo {+} > sumo-selected.txt)+abort',
128
+ '--bind=ctrl-y:execute-silent(echo {+} | pbcopy || echo {+} | xclip -selection clipboard 2>/dev/null)+abort',
129
+ '--bind=ctrl-e:execute-silent(echo {+} > sumo-export.jsonl)+abort',
130
+ '--bind=ctrl-/:toggle-preview',
131
+ "--bind=ctrl-r:reload(cat #{input_path})",
132
+ '--bind=ctrl-q:abort',
133
+ '--height=100%'
134
+ ]
135
+ end
136
+
137
+ def build_view_command(preview_path)
138
+ # FZF {n} is 0-indexed! Add 1 to get sed line number (1-indexed)
139
+ 'LINE=$(({n} + 1)); ' \
140
+ "sed -n \"$LINE\"p #{Shellwords.escape(preview_path)} | jq -C . | less -R"
141
+ end
142
+
143
+ def build_preview_command(preview_path)
144
+ # FZF {n} is 0-indexed! Add 1 to get JSONL line number (1-indexed)
145
+ escaped_path = Shellwords.escape(preview_path)
146
+ calc = "LINE=$(({n} + 1)); TOTAL=$(wc -l < #{escaped_path}); "
147
+ display = 'echo "Message $LINE of $TOTAL"; echo ""; '
148
+ extract = "sed -n \"$LINE\"p #{escaped_path}"
149
+
150
+ calc + display + "#{extract} | jq -C . || #{extract}"
151
+ end
152
+
153
+ def build_header_text
154
+ query = @results['query'] || 'N/A'
155
+ count = @messages.size
156
+ sources = @messages.map { |m| m['map']['_sourceCategory'] }.compact.uniq.size
157
+
158
+ # Column headers
159
+ columns = "#{pad('TIME', 8)} #{pad('LEVEL', 7)} #{pad('SOURCE', 25)} MESSAGE"
160
+ # Info and keys on second line
161
+ info = "#{count} msgs | #{sources} sources | Query: #{truncate(query, 40)}"
162
+ keys = 'Enter=select Tab=view Ctrl-S=save Ctrl-Y=copy Ctrl-E=export Ctrl-Q=quit'
163
+
164
+ "#{columns}\n#{info} | #{keys}"
165
+ end
166
+
167
+ def pad(text, width)
168
+ text.ljust(width)
169
+ end
170
+
171
+ def handle_selection(selected)
172
+ # Selected contains the actual display lines (no index field)
173
+ # We don't show them since user already saw in FZF
174
+ # The keybindings (Ctrl-S, Ctrl-Y, Ctrl-E) handle the export
175
+ return if selected.empty?
176
+
177
+ puts "\n#{'═' * 80}"
178
+ puts '📋 Exited interactive mode'
179
+ puts '═' * 80
180
+ puts "\n💡 Your selected messages were:"
181
+ puts ' • Saved to file (if you pressed Ctrl-S)'
182
+ puts ' • Copied to clipboard (if you pressed Ctrl-Y)'
183
+ puts ' • Exported to JSONL (if you pressed Ctrl-E)'
184
+ end
185
+ end
186
+ end
187
+ end
@@ -0,0 +1,42 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Sumologic
4
+ module Interactive
5
+ class Error < StandardError; end
6
+
7
+ class << self
8
+ def launch(results)
9
+ raise Error, fzf_install_message unless fzf_available?
10
+
11
+ require_relative 'interactive/fzf_viewer'
12
+ FzfViewer.new(results).run
13
+ end
14
+
15
+ private
16
+
17
+ def fzf_available?
18
+ system('which fzf > /dev/null 2>&1')
19
+ end
20
+
21
+ def fzf_install_message
22
+ <<~MSG
23
+
24
+ ╔════════════════════════════════════════════════════════════╗
25
+ ║ Interactive mode requires FZF to be installed ║
26
+ ╚════════════════════════════════════════════════════════════╝
27
+
28
+ 📦 Install FZF:
29
+
30
+ macOS: brew install fzf
31
+ Ubuntu: sudo apt-get install fzf
32
+ Fedora: sudo dnf install fzf
33
+ Arch: sudo pacman -S fzf
34
+
35
+ 🔗 Or visit: https://github.com/junegunn/fzf#installation
36
+
37
+ After installing, run your command again with -i flag.
38
+ MSG
39
+ end
40
+ end
41
+ end
42
+ end
@@ -0,0 +1,41 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative '../utils/worker'
4
+
5
+ module Sumologic
6
+ module Metadata
7
+ # Fetches sources from multiple collectors efficiently
8
+ # Uses Worker utility for concurrent fetching
9
+ class CollectorSourceFetcher
10
+ def initialize
11
+ @worker = Utils::Worker.new
12
+ end
13
+
14
+ # Fetch sources for collectors concurrently
15
+ # Returns array of results with collector info and sources
16
+ def fetch_all(collectors, &block)
17
+ @worker.execute(collectors, callbacks: {
18
+ start: ->(workers, total) { log_start(workers, total) },
19
+ progress: ->(done, total) { log_progress(done, total) },
20
+ finish: ->(results, duration) { log_finish(results.size, duration) }
21
+ }, &block)
22
+ end
23
+
24
+ private
25
+
26
+ def log_start(workers, total)
27
+ warn " Created #{workers} workers for #{total} collectors" if ENV['SUMO_DEBUG'] || $DEBUG
28
+ end
29
+
30
+ def log_progress(done, total)
31
+ return unless ENV['SUMO_DEBUG'] || $DEBUG
32
+
33
+ warn " Progress: #{done}/#{total} collectors processed" if (done % 10).zero? || done == total
34
+ end
35
+
36
+ def log_finish(count, duration)
37
+ warn " Fetched sources from #{count} collectors in #{duration.round(2)}s" if ENV['SUMO_DEBUG'] || $DEBUG
38
+ end
39
+ end
40
+ end
41
+ end
@@ -1,6 +1,6 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require_relative 'parallel_fetcher'
3
+ require_relative 'collector_source_fetcher'
4
4
 
5
5
  module Sumologic
6
6
  module Metadata
@@ -9,7 +9,7 @@ module Sumologic
9
9
  def initialize(http_client:, collector_client:)
10
10
  @http = http_client
11
11
  @collector_client = collector_client
12
- @parallel_fetcher = ParallelFetcher.new(max_threads: 10)
12
+ @fetcher = CollectorSourceFetcher.new
13
13
  end
14
14
 
15
15
  # List sources for a specific collector
@@ -36,7 +36,7 @@ module Sumologic
36
36
 
37
37
  log_info "Fetching sources for #{active_collectors.size} active collectors in parallel..."
38
38
 
39
- result = @parallel_fetcher.fetch_all(active_collectors) do |collector|
39
+ result = @fetcher.fetch_all(active_collectors) do |collector|
40
40
  fetch_collector_sources(collector)
41
41
  end
42
42
 
@@ -1,5 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require_relative 'message_fetcher'
4
+
3
5
  module Sumologic
4
6
  module Search
5
7
  # Manages search job lifecycle: create, poll, fetch, delete
@@ -8,7 +10,7 @@ module Sumologic
8
10
  @http = http_client
9
11
  @config = config
10
12
  @poller = Poller.new(http_client: http_client, config: config)
11
- @paginator = Paginator.new(http_client: http_client, config: config)
13
+ @message_fetcher = MessageFetcher.new(http_client: http_client, config: config)
12
14
  end
13
15
 
14
16
  # Execute a complete search workflow
@@ -16,7 +18,7 @@ module Sumologic
16
18
  def execute(query:, from_time:, to_time:, time_zone: 'UTC', limit: nil)
17
19
  job_id = create(query, from_time, to_time, time_zone)
18
20
  @poller.poll(job_id)
19
- messages = @paginator.fetch_all(job_id, limit: limit)
21
+ messages = @message_fetcher.fetch_all(job_id, limit: limit)
20
22
  delete(job_id)
21
23
  messages
22
24
  rescue StandardError => e
@@ -57,7 +59,12 @@ module Sumologic
57
59
  end
58
60
 
59
61
  def log_info(message)
60
- warn "[Sumologic::Search::Job] #{message}" if ENV['SUMO_DEBUG'] || $DEBUG
62
+ # Always show job creation (with ID) for user reference
63
+ if message.start_with?('Created search job:')
64
+ warn " #{message}"
65
+ elsif ENV['SUMO_DEBUG'] || $DEBUG
66
+ warn "[Sumologic::Search::Job] #{message}"
67
+ end
61
68
  end
62
69
 
63
70
  def log_error(message)
@@ -0,0 +1,122 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative '../utils/worker'
4
+
5
+ module Sumologic
6
+ module Search
7
+ # Fetches search messages with automatic pagination
8
+ # Uses Worker utility for concurrent page fetching when beneficial
9
+ class MessageFetcher
10
+ PAGE_SIZE = 10_000
11
+
12
+ def initialize(http_client:, config:)
13
+ @http = http_client
14
+ @config = config
15
+ @worker = Utils::Worker.new
16
+ end
17
+
18
+ # Fetch all messages for a job with automatic pagination
19
+ # Single page: fetches directly
20
+ # Multiple pages: uses Worker for concurrent fetching
21
+ def fetch_all(job_id, limit: nil)
22
+ # Fetch first page to check size
23
+ first_batch_limit = calculate_batch_limit(limit, 0)
24
+ return [] if first_batch_limit <= 0
25
+
26
+ first_batch = fetch_page(job_id, 0, first_batch_limit)
27
+ return [] if first_batch.empty?
28
+
29
+ # Single page result? Return immediately
30
+ return first_batch if first_batch.size < first_batch_limit || (limit && first_batch.size >= limit)
31
+
32
+ # Multi-page result: calculate remaining pages and fetch in parallel
33
+ fetch_all_pages(job_id, first_batch, limit)
34
+ end
35
+
36
+ private
37
+
38
+ def fetch_all_pages(job_id, first_batch, limit)
39
+ messages = first_batch.dup
40
+ offset = first_batch.size
41
+
42
+ # Calculate remaining pages to fetch
43
+ pages = calculate_remaining_pages(job_id, offset, limit)
44
+ return messages if pages.empty?
45
+
46
+ total_pages = pages.size + 1 # +1 for first page already fetched
47
+
48
+ # Fetch remaining pages in parallel using Worker with progress callbacks
49
+ additional_messages = @worker.execute(pages, callbacks: {
50
+ start: lambda { |workers, _total|
51
+ warn " Created #{workers} workers for #{total_pages} pages"
52
+ },
53
+ progress: lambda { |done, _total|
54
+ warn " Progress: #{done + 1}/#{total_pages} pages fetched"
55
+ },
56
+ finish: lambda { |_results, duration|
57
+ warn " All workers completed in #{duration.round(2)}s"
58
+ }
59
+ }) do |page|
60
+ fetch_page(page[:job_id], page[:offset], page[:limit])
61
+ end
62
+
63
+ # Flatten and combine results
64
+ additional_messages.each { |batch| messages.concat(batch) }
65
+
66
+ # Respect limit if specified
67
+ limit ? messages.first(limit) : messages
68
+ end
69
+
70
+ def calculate_remaining_pages(job_id, offset, limit)
71
+ pages = []
72
+ total_fetched = offset
73
+
74
+ loop do
75
+ batch_limit = calculate_batch_limit(limit, total_fetched)
76
+ break if batch_limit <= 0
77
+
78
+ pages << { job_id: job_id, offset: offset, limit: batch_limit }
79
+ total_fetched += batch_limit
80
+ offset += batch_limit
81
+
82
+ # Stop estimating if we've planned enough
83
+ break if pages.size >= 9 # First page + 9 more = 10 parallel fetches
84
+ break if limit && total_fetched >= limit
85
+ end
86
+
87
+ pages
88
+ end
89
+
90
+ def calculate_batch_limit(user_limit, total_fetched)
91
+ return PAGE_SIZE unless user_limit
92
+
93
+ remaining = user_limit - total_fetched
94
+ [PAGE_SIZE, remaining].min
95
+ end
96
+
97
+ def fetch_page(job_id, offset, limit)
98
+ data = @http.request(
99
+ method: :get,
100
+ path: "/search/jobs/#{job_id}/messages",
101
+ query_params: { offset: offset, limit: limit }
102
+ )
103
+
104
+ messages = data['messages'] || []
105
+ log_progress(messages.size, offset) if messages.any?
106
+ messages
107
+ end
108
+
109
+ def log_progress(batch_size, offset)
110
+ total = offset + batch_size
111
+ warn " Fetched #{batch_size} messages (total: #{total})"
112
+
113
+ # Detailed info in debug mode
114
+ log_debug " [Offset: #{offset}, batch: #{batch_size}]" if ENV['SUMO_DEBUG'] || $DEBUG
115
+ end
116
+
117
+ def log_debug(message)
118
+ warn "[Sumologic::Search::MessageFetcher] #{message}"
119
+ end
120
+ end
121
+ end
122
+ end
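
The page-planning arithmetic in `calculate_remaining_pages`/`calculate_batch_limit` is easiest to see with a concrete limit. A small worked sketch, assuming the first page came back full at `PAGE_SIZE` messages (the numbers are illustrative, not from a real job):

```ruby
# Worked example of the pagination planning for limit: 25_000.
PAGE_SIZE = 10_000
limit     = 25_000

# The first page is fetched up front with min(PAGE_SIZE, limit) = 10_000 messages.
offset = 10_000

# Remaining pages planned before handing them to the Worker pool:
#   { offset: 10_000, limit: 10_000 }  -> messages 10_000..19_999
#   { offset: 20_000, limit:  5_000 }  -> messages 20_000..24_999
pages = []
until (batch = [PAGE_SIZE, limit - offset].min) <= 0
  pages << { offset: offset, limit: batch }
  offset += batch
end

p pages
# => two pages: offsets 10_000 and 20_000 with limits 10_000 and 5_000
```
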
@@ -63,18 +63,24 @@ module Sumologic
63
63
  end
64
64
 
65
65
  def log_poll_status(state, data, interval, count)
66
- msg_count = data['messageCount']
67
- rec_count = data['recordCount']
68
- log_info "Job state: #{state} (#{msg_count} messages, #{rec_count} records) " \
69
- "[interval: #{interval}s, poll: #{count}]"
66
+ msg_count = data['messageCount'] || 0
67
+ rec_count = data['recordCount'] || 0
68
+
69
+ # Always show progress to user (not just in debug mode)
70
+ warn " Status: #{state} | Messages: #{msg_count} | Records: #{rec_count}"
71
+
72
+ # Detailed info in debug mode
73
+ log_debug " [Poll #{count + 1}, interval: #{interval}s]"
70
74
  end
71
75
 
72
- def log_completion(start_time, poll_count)
76
+ def log_completion(start_time, _poll_count)
73
77
  elapsed = Time.now - start_time
74
- log_info "Job completed in #{elapsed.round(1)} seconds after #{poll_count + 1} polls"
78
+ warn "Search job completed in #{elapsed.round(1)}s"
79
+ warn 'Fetching messages...'
80
+ $stderr.puts
75
81
  end
76
82
 
77
- def log_info(message)
83
+ def log_debug(message)
78
84
  warn "[Sumologic::Search::Poller] #{message}" if ENV['SUMO_DEBUG'] || $DEBUG
79
85
  end
80
86
  end
@@ -0,0 +1,106 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Sumologic
4
+ module Utils
5
+ # Generic worker pool for parallel execution of tasks
6
+ # Uses Queue + Mutex pattern for thread-safe concurrent processing
7
+ #
8
+ # This utility abstracts the parallel execution pattern used across the codebase
9
+ # (metadata fetching, search pagination, etc.) into a reusable component.
10
+ #
11
+ # Example:
12
+ # worker = Worker.new
13
+ # results = worker.execute(items) do |item|
14
+ # fetch_data(item)
15
+ # end
16
+ class Worker
17
+ MAX_THREADS = 10
18
+
19
+ # Execute work items using a thread pool
20
+ # Returns array of results from the block execution
21
+ #
22
+ # @param items [Array] Work items to process
23
+ # @param callbacks [Hash] Optional callbacks for progress tracking:
24
+ # - :start => ->(worker_count, total_items) { }
25
+ # - :progress => ->(completed_count, total_items) { }
26
+ # - :finish => ->(results, duration) { }
27
+ # @yield [item] Block to execute for each item
28
+ # @return [Array] Results from block executions (nil results are filtered out)
29
+ def execute(items, callbacks: {}, &block)
30
+ return [] if items.empty?
31
+
32
+ start_time = Time.now
33
+ context = {
34
+ result: [],
35
+ completed: { count: 0 },
36
+ mutex: Mutex.new,
37
+ total_items: items.size,
38
+ callbacks: callbacks
39
+ }
40
+
41
+ queue = create_work_queue(items)
42
+ worker_count = [MAX_THREADS, queue.size].min
43
+
44
+ # Callback: start
45
+ callbacks[:start]&.call(worker_count, items.size)
46
+
47
+ threads = create_workers(queue, context, &block)
48
+
49
+ threads.each(&:join)
50
+
51
+ # Callback: finish
52
+ duration = Time.now - start_time
53
+ callbacks[:finish]&.call(context[:result], duration)
54
+
55
+ context[:result]
56
+ end
57
+
58
+ private
59
+
60
+ def create_work_queue(items)
61
+ queue = Queue.new
62
+ items.each { |item| queue << item }
63
+ queue
64
+ end
65
+
66
+ def create_workers(queue, context, &block)
67
+ worker_count = [MAX_THREADS, queue.size].min
68
+
69
+ Array.new(worker_count) do
70
+ Thread.new { process_queue(queue, context, &block) }
71
+ end
72
+ end
73
+
74
+ def process_queue(queue, context, &block)
75
+ until queue.empty?
76
+ item = pop_safely(queue)
77
+ break unless item
78
+
79
+ process_item(item, context[:result], context[:mutex], &block)
80
+
81
+ # Callback: progress (thread-safe)
82
+ next unless context[:callbacks][:progress]
83
+
84
+ context[:mutex].synchronize do
85
+ context[:completed][:count] += 1
86
+ context[:callbacks][:progress].call(context[:completed][:count], context[:total_items])
87
+ end
88
+ end
89
+ end
90
+
91
+ def pop_safely(queue)
92
+ queue.pop(true)
93
+ rescue ThreadError
94
+ nil
95
+ end
96
+
97
+ def process_item(item, result, mutex, &block)
98
+ item_result = block.call(item)
99
+
100
+ mutex.synchronize do
101
+ result << item_result if item_result
102
+ end
103
+ end
104
+ end
105
+ end
106
+ end
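
The `Worker` class documents its basic call shape in its own comment; the sketch below just adds the optional callbacks, mirroring how `CollectorSourceFetcher` and `MessageFetcher` wire them up. The `fetch_data` lambda is a hypothetical stand-in for real per-item work:

```ruby
require 'sumologic' # assumption: loads Sumologic::Utils::Worker

# Hypothetical per-item task; replace with an API call or similar.
fetch_data = ->(item) { item * 2 }

worker = Sumologic::Utils::Worker.new
results = worker.execute(
  (1..25).to_a,
  callbacks: {
    start:    ->(workers, total) { warn "#{workers} workers for #{total} items" },
    progress: ->(done, total)    { warn "#{done}/#{total} done" if (done % 10).zero? },
    finish:   ->(res, duration)  { warn "#{res.size} results in #{duration.round(2)}s" }
  }
) { |item| fetch_data.call(item) }

p results.size # => 25 (order is not guaranteed; nil results are dropped)
```
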
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Sumologic
4
- VERSION = '1.2.0'
4
+ VERSION = '1.3.0'
5
5
  end
data/lib/sumologic.rb CHANGED
@@ -20,13 +20,17 @@ require_relative 'sumologic/configuration'
20
20
  require_relative 'sumologic/http/authenticator'
21
21
  require_relative 'sumologic/http/client'
22
22
 
23
+ # Load utilities
24
+ require_relative 'sumologic/utils/worker'
25
+
23
26
  # Load search domain
24
27
  require_relative 'sumologic/search/poller'
25
- require_relative 'sumologic/search/paginator'
28
+ require_relative 'sumologic/search/message_fetcher'
26
29
  require_relative 'sumologic/search/job'
27
30
 
28
31
  # Load metadata domain
29
32
  require_relative 'sumologic/metadata/collector'
33
+ require_relative 'sumologic/metadata/collector_source_fetcher'
30
34
  require_relative 'sumologic/metadata/source'
31
35
 
32
36
  # Load main client (facade)
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: sumologic-query
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.2.0
4
+ version: 1.3.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - patrick204nqh
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2025-11-14 00:00:00.000000000 Z
11
+ date: 2025-11-15 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: base64
@@ -101,12 +101,16 @@ files:
101
101
  - lib/sumologic/configuration.rb
102
102
  - lib/sumologic/http/authenticator.rb
103
103
  - lib/sumologic/http/client.rb
104
+ - lib/sumologic/http/connection_pool.rb
105
+ - lib/sumologic/interactive.rb
106
+ - lib/sumologic/interactive/fzf_viewer.rb
104
107
  - lib/sumologic/metadata/collector.rb
105
- - lib/sumologic/metadata/parallel_fetcher.rb
108
+ - lib/sumologic/metadata/collector_source_fetcher.rb
106
109
  - lib/sumologic/metadata/source.rb
107
110
  - lib/sumologic/search/job.rb
108
- - lib/sumologic/search/paginator.rb
111
+ - lib/sumologic/search/message_fetcher.rb
109
112
  - lib/sumologic/search/poller.rb
113
+ - lib/sumologic/utils/worker.rb
110
114
  - lib/sumologic/version.rb
111
115
  homepage: https://github.com/patrick204nqh/sumologic-query
112
116
  licenses:
@@ -1,63 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module Sumologic
4
- module Metadata
5
- # Handles parallel fetching of sources from multiple collectors
6
- class ParallelFetcher
7
- def initialize(max_threads: 10)
8
- @max_threads = max_threads
9
- end
10
-
11
- # Fetch sources for collectors in parallel
12
- # Returns array of results with collector info and sources
13
- def fetch_all(collectors, &block)
14
- result = []
15
- mutex = Mutex.new
16
- queue = create_work_queue(collectors)
17
- threads = create_workers(queue, result, mutex, &block)
18
-
19
- threads.each(&:join)
20
- result
21
- end
22
-
23
- private
24
-
25
- def create_work_queue(collectors)
26
- queue = Queue.new
27
- collectors.each { |collector| queue << collector }
28
- queue
29
- end
30
-
31
- def create_workers(queue, result, mutex, &block)
32
- worker_count = [@max_threads, queue.size].min
33
-
34
- Array.new(worker_count) do
35
- Thread.new { process_queue(queue, result, mutex, &block) }
36
- end
37
- end
38
-
39
- def process_queue(queue, result, mutex, &block)
40
- until queue.empty?
41
- collector = pop_safely(queue)
42
- break unless collector
43
-
44
- process_collector(collector, result, mutex, &block)
45
- end
46
- end
47
-
48
- def pop_safely(queue)
49
- queue.pop(true)
50
- rescue ThreadError
51
- nil
52
- end
53
-
54
- def process_collector(collector, result, mutex, &block)
55
- collector_result = block.call(collector)
56
-
57
- mutex.synchronize do
58
- result << collector_result if collector_result
59
- end
60
- end
61
- end
62
- end
63
- end
@@ -1,67 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module Sumologic
4
- module Search
5
- # Handles paginated fetching of search job messages
6
- class Paginator
7
- def initialize(http_client:, config:)
8
- @http = http_client
9
- @config = config
10
- end
11
-
12
- # Fetch all messages for a job with automatic pagination
13
- # Returns array of message objects
14
- def fetch_all(job_id, limit: nil)
15
- messages = []
16
- offset = 0
17
- total_fetched = 0
18
-
19
- loop do
20
- batch_limit = calculate_batch_limit(limit, total_fetched)
21
- break if batch_limit <= 0
22
-
23
- batch = fetch_batch(job_id, offset, batch_limit)
24
- messages.concat(batch)
25
- total_fetched += batch.size
26
-
27
- log_progress(batch.size, total_fetched)
28
-
29
- break if batch.size < batch_limit # No more messages
30
- break if limit && total_fetched >= limit
31
-
32
- offset += batch.size
33
- end
34
-
35
- messages
36
- end
37
-
38
- private
39
-
40
- def calculate_batch_limit(user_limit, total_fetched)
41
- if user_limit
42
- [@config.max_messages_per_request, user_limit - total_fetched].min
43
- else
44
- @config.max_messages_per_request
45
- end
46
- end
47
-
48
- def fetch_batch(job_id, offset, limit)
49
- data = @http.request(
50
- method: :get,
51
- path: "/search/jobs/#{job_id}/messages",
52
- query_params: { offset: offset, limit: limit }
53
- )
54
-
55
- data['messages'] || []
56
- end
57
-
58
- def log_progress(batch_size, total)
59
- log_info "Fetched #{batch_size} messages (total: #{total})"
60
- end
61
-
62
- def log_info(message)
63
- warn "[Sumologic::Search::Paginator] #{message}" if ENV['SUMO_DEBUG'] || $DEBUG
64
- end
65
- end
66
- end
67
- end