splunk-sdk-ruby 0.1.0 → 0.8.1

@@ -1,6 +1,8 @@
  require_relative "test_helper"
  require "splunk-sdk-ruby"

+ require 'json'
+
  include Splunk

  # URI's classes compare by object identity, which is exactly what we
@@ -37,33 +39,42 @@ class TestAtomFeed < Test::Unit::TestCase
  puts "Nokogiri not installed. Skipping."
  end

- test_cases = eval(open("test/atom_test_data.rb").read())
+ test_cases = JSON::parse(open("test/data/atom_test_data.json").read())

  xml_libraries.each do |xml_library|
  test_cases.each_entry do |filename, expected|
  define_method("test_#{xml_library}_#{filename}".intern()) do
  file = File.open("test/data/atom/#{filename}.xml")
+ Splunk::require_xml_library(xml_library)
  feed = Splunk::AtomFeed.new(file)

+ # In the assert statements below, the output of the code is first,
+ # the expected data second, which is breaking convention, but has
+ # to be this way since we need to match URLs which are URI objects
+ # in the output of the code to URLs which are strings in the expected
+ # data from the JSON file. URI has been patched to make this
+ # work...but only if the == method is called on the URI object,
+ # not the string. == is not commutative.
+
  # To make debugging easy, test the metadata a key at
  # a time, since Test::Unit doesn't display diffs.
  # Then test the whole thing at the end to make sure it all matches.
- expected[:metadata].each_entry do |key, value|
- assert_equal([filename, key, value],
- [filename, key, feed.metadata[key]])
+ expected["metadata"].each_entry do |key, value|
+ assert_equal([filename, key, feed.metadata[key]],
+ [filename, key, value])
  end
- assert_equal(expected[:metadata], feed.metadata)
+ assert_equal(feed.metadata, expected["metadata"])

  # To make debugging easy, test each key of each entry
  # separately, since Test::Unit doesn't display diffs.
  # Then test the whole thing at the end to make sure it all matches.
- expected[:entries].each_with_index do |entry, index|
+ expected["entries"].each_with_index do |entry, index|
  entry.each_entry do |key, value|
- assert_equal([filename, index, key, value],
- [filename, index, key, feed.entries[index][key]])
+ assert_equal([filename, index, key, feed.entries[index][key]],
+ [filename, index, key, value])
  end
  end
- assert_equal(expected[:entries], feed.entries)
+ assert_equal(feed.entries, expected["entries"])
  end
  end
  end
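
Note on the reversed assert_equal arguments above: the test helper patches URI so that a URI object compares equal to a plain string, but only when == is called on the URI side, which is why the feed's output (the part containing URI objects) has to be passed first. A minimal sketch of that kind of patch, with an alias name of my own; the gem's actual helper may differ:

    require "uri"

    class URI::Generic
      alias_method :original_equals, :==   # hypothetical alias, for illustration only
      # Allow comparison against plain strings by string value; keep the
      # original behavior for everything else.
      def ==(other)
        other.is_a?(String) ? to_s() == other : original_equals(other)
      end
    end

    uri = URI("https://localhost:8089/services")
    uri == "https://localhost:8089/services"  # => true  (patched URI#==)
    "https://localhost:8089/services" == uri  # => false (String#== is untouched)
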
@@ -68,6 +68,28 @@ class ConfigurationFileTestCase < TestCaseWithSplunkConnection
  assert_equal(created_conf.name, bracket_fetched_conf.name)
  end

+ ##
+ # Check that fetching nonexistent configuration files returns nil.
+ #
+ def test_fetch_nonexistent
+ nonexistent_name = temporary_name()
+
+ assert_nil(@confs[nonexistent_name])
+ assert_nil(@confs.fetch(nonexistent_name))
+ end
+
+ ##
+ # Check that server errors during fetch are encapsulated sensibly.
+ #
+ def test_error_from_fetch
+ new_service = Splunk::Service.new(@splunkrc)
+ new_confs = new_service.confs()
+ new_service.logout()
+ assert_raise(SplunkHTTPError) do
+ new_confs[temporary_name()]
+ end
+ end
+
  def test_each_and_values
  each_names = []
  @confs.each() { |entity| each_names << entity.name }
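
The two tests added in this hunk pin down the collection's fetch semantics: a configuration file that does not exist comes back as nil, while a request the server rejects (here, after logging out) surfaces as a SplunkHTTPError. A rough usage sketch under those assumptions; the connection settings are placeholders:

    require "splunk-sdk-ruby"

    service = Splunk::connect(:username => "admin", :password => "changeme")
    confs = service.confs()

    confs["no_such_conf_file"]   # => nil; missing files do not raise

    service.logout()
    begin
      confs["props"]             # the server now refuses the request
    rescue Splunk::SplunkHTTPError => err
      puts "fetch failed with HTTP #{err.code}"
    end
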
@@ -97,7 +97,7 @@ class TestContext < TestCaseWithSplunkConnection

  def test_server_accepting_connections?
  values = @splunkrc.clone()
- values[:port] = 10253
+ values[:port] = 8000
  service = Context.new(values)
  assert_false(service.server_accepting_connections?)

@@ -79,14 +79,24 @@ class TestCaseWithSplunkConnection < Test::Unit::TestCase
  fail("Test left server in a state requiring restart.")
  end

+ # Are we on Windows or Unix? We need this below.
+ splunk_home = @service.settings["SPLUNK_HOME"]
+ is_windows = splunk_home.include?("\\") == true
+
  if @service.splunk_version[0..1] != [4,2]
  @installed_apps.each() do |app_name|
- @service.apps.delete(app_name)
- assert_eventually_true() do
- !@service.apps.has_key?(app_name)
- end
- if @service.server_requires_restart?
- clear_restart_message(@service)
+ # There is a bug in Python on Windows which results in the
+ # sleep_command not deleting properly because there are still
+ # hung jobs that haven't terminated when we reach this point.
+ # We skip deleting the sleep_command app on Windows.
+ if app_name != 'sleep_command' or !is_windows
+ @service.apps.delete(app_name)
+ assert_eventually_true() do
+ !@service.apps.has_key?(app_name)
+ end
+ if @service.server_requires_restart?
+ clear_restart_message(@service)
+ end
  end
  end
  end
@@ -113,13 +123,15 @@ class TestCaseWithSplunkConnection < Test::Unit::TestCase
  end

  def assert_not_logged_in(service)
- begin
- service.request(:method=>:GET,
- :resource=>["data", "indexes"])
- rescue SplunkHTTPError => err
- assert_equal(401, err.code, "Expected HTTP status code 401, found: #{err.code}")
- else
- fail("Context is logged in.")
+ if service.server_accepting_connections?
+ begin
+ service.request(:method=>:GET,
+ :resource=>["data", "indexes"])
+ rescue SplunkHTTPError => err
+ assert_equal(401, err.code, "Expected HTTP status code 401, found: #{err.code}")
+ else
+ fail("Context is logged in.")
+ end
  end
  end

@@ -143,7 +155,7 @@ class TestCaseWithSplunkConnection < Test::Unit::TestCase
  end
  end

- def has_app_collection?(service)
+ def has_test_data?(service)
  collection_name = 'sdk-app-collection'
  return service.apps.has_key?(collection_name)
  end
@@ -247,4 +259,4 @@ class TestCaseWithSplunkConnection < Test::Unit::TestCase
  def unchecked_restart(service)
  service.restart(DEFAULT_RESTART_TIMEOUT)
  end
- end
+ end
@@ -74,9 +74,8 @@ class IndexTestCase < TestCaseWithSplunkConnection
  end

  def test_upload
- if !has_app_collection?(@service)
- print "Test requires sdk-app-collection. Skipping."
- return
+ if not has_test_data?(@service)
+ fail("Install the SDK test data to test uploads.")
  end

  install_app_from_collection("file_to_upload")
@@ -0,0 +1,211 @@
+ require_relative 'test_helper'
+ require 'splunk-sdk-ruby'
+
+ include Splunk
+
+ class InputsTest < TestCaseWithSplunkConnection
+ def setup
+ super
+ @ports_to_delete = []
+ end
+
+ def teardown
+ @ports_to_delete.each do |spec|
+ resource, name = spec
+ inputs = @service.inputs
+ resource.each do |r|
+ inputs = inputs[r]
+ end
+ if inputs.has_key?(name)
+ inputs.delete(name)
+ end
+ assert !inputs.has_key?(name)
+ end
+ end
+
+ def get_free_port(input_collection)
+ port_names = input_collection.map() {|ent| ent.name}
+ proper_port_names = port_names.select() {|name| name != nil}
+ ports = proper_port_names.map() do |name|
+ if name.include?(":")
+ name.split(":")[1]
+ else
+ name
+ end
+ end
+ highest_existing_port = ports.map() {|p| Integer(p)}.max()
+
+ if highest_existing_port == nil
+ port = "10000"
+ else
+ port = (highest_existing_port + 1).to_s()
+ end
+ end
+
+ def test_create_and_delete_tcp_raw
+ tcp_inputs = @service.inputs["tcp"]["raw"]
+ all_inputs = @service.inputs["all"]
+
+ port = get_free_port(tcp_inputs)
+ @ports_to_delete << [["tcp", "raw"], port]
+
+ input = tcp_inputs.create(port)
+ assert_equal(port, input.name)
+ assert_true(tcp_inputs.has_key?(port))
+ if @service.splunk_version[0] >= 5
+ assert_true(all_inputs.has_key?(port))
+ end
+
+ tcp_inputs.delete(port)
+ assert_false(tcp_inputs.has_key?(port))
+ if @service.splunk_version[0] >= 5
+ assert_false(all_inputs.has_key?(port))
+ end
+ end
+
+ ##
+ # Check that fetching with namespaces provided works.
+ #
+ def test_fetch_with_namespaces
+ user_ns = Splunk::namespace(
+ :sharing => "user",
+ :app => "search",
+ :owner => @splunkrc[:username]
+ )
+ begin
+ user_udp_inputs = @service.inputs.fetch("udp", namespace=user_ns)
+ port = get_free_port(user_udp_inputs)
+ user_udp_inputs.create(port.to_s, :namespace => user_ns)
+ user_udp_inputs.fetch(port.to_s, namespace=user_ns)
+ ensure
+ @service.inputs.fetch("udp", namespace=user_ns).delete(port.to_s)
+ end
+ end
+
+ ##
+ # Check that fetching a nonexistent input kind returns nil.
+ #
+ def test_fetch_nonexistent_input_kind
+ assert_nil(@service.inputs[temporary_name()])
+ end
+
+ ##
+ # Test that fetch resulting in server error raises SplunkHTTPError.
+ def test_fetch_with_server_error
+ new_service = Splunk::Service.new(@splunkrc)
+ new_inputs = new_service.inputs()
+ new_service.logout()
+ assert_raise(SplunkHTTPError) do
+ new_inputs["tcp"]
+ end
+ end
+
+ def test_create_and_delete_tcp_raw_with_restrictToHost
+ tcp_inputs = @service.inputs["tcp"]["raw"]
+
+ port = get_free_port(tcp_inputs)
+ name = "localhost:" + port
+ @ports_to_delete << [["tcp", "raw"], name]
+
+ input = tcp_inputs.create(port, :restrictToHost => "localhost")
+ assert_equal(name, input.name)
+ assert_equal("localhost", input["restrictToHost"])
+ assert_true(tcp_inputs.has_key?(name))
+ assert_false(tcp_inputs.has_key?(port))
+
+ if @service.splunk_version[0] >= 5
+ all_inputs = @service.inputs["all"]
+ assert_true(all_inputs.has_key?(name))
+ assert_false(all_inputs.has_key?(port))
+
+ end
+
+ tcp_inputs.delete(name)
+
+ assert_false(tcp_inputs.has_key?(name))
+ if @service.splunk_version[0] >= 5
+ assert_false(all_inputs.has_key?(name))
+ end
+ end
+
+ def test_update_on_restrictToHost_does_not_clear
+ tcp_inputs = @service.inputs["tcp"]["raw"]
+
+ port = get_free_port(tcp_inputs)
+ name = "localhost:" + port
+ @ports_to_delete << [["tcp", "raw"], name]
+
+ input = tcp_inputs.create(port, :restrictToHost => "localhost")
+
+ input.update({:sourcetype => "boris"})
+ input.refresh()
+ assert_equal("localhost", input["restrictToHost"])
+ assert_true(tcp_inputs.has_key?(name))
+
+ if @service.splunk_version[0] >= 5
+ all_inputs = @service.inputs["all"]
+ assert_true(all_inputs.has_key?(name))
+ end
+ end
+
+ def test_create_and_delete_udp
+ udp_inputs = @service.inputs["udp"]
+
+ port = get_free_port(udp_inputs)
+ @ports_to_delete << [["udp"], port]
+
+ input = udp_inputs.create(port)
+ assert_equal(port, input.name)
+ assert_true(udp_inputs.has_key?(port))
+
+ if @service.splunk_version[0] >= 5
+ all_inputs = @service.inputs["all"]
+ assert_true(all_inputs.has_key?(port))
+ end
+
+ udp_inputs.delete(port)
+ assert_false(udp_inputs.has_key?(port))
+
+ if @service.splunk_version[0] >= 5
+ assert_false(all_inputs.has_key?(port))
+ end
+ end
+
+ def test_oneshot_input
+ if not has_test_data?(@service)
+ fail("Install the SDK test data to test oneshot inputs.")
+ return
+ end
+
+ install_app_from_collection("file_to_upload")
+
+ index_name = temporary_name()
+ index = @service.indexes.create(index_name)
+ begin
+ assert_eventually_true do
+ index.refresh()
+ index["disabled"] == "0"
+ end
+
+ event_count = Integer(index['totalEventCount'])
+ path = path_in_app("file_to_upload", ["log.txt"])
+ @service.inputs["oneshot"].create(path, :index => index_name)
+
+ assert_eventually_true do
+ index.refresh()
+ Integer(index['totalEventCount']) == event_count + 4
+ end
+ ensure
+ if @service.splunk_version[0] >= 5
+ index.delete()
+ end
+ end
+ end
+
+ def test_oneshot_on_nonexistant_file
+ name = temporary_name()
+ assert_raises(Splunk::SplunkHTTPError) do
+ @service.inputs["oneshot"].create(name)
+ end
+ end
+ end
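
The restrictToHost tests in this new file depend on Splunk qualifying an input's name with the host once the restriction is set. Roughly, the lifecycle they exercise looks like this, assuming service is a logged-in Splunk::Service and the port is a free one (both placeholders):

    tcp_inputs = service.inputs["tcp"]["raw"]

    input = tcp_inputs.create("10010", :restrictToHost => "localhost")
    input.name                              # => "localhost:10010", not "10010"
    tcp_inputs.has_key?("localhost:10010")  # => true
    tcp_inputs.has_key?("10010")            # => false

    tcp_inputs.delete("localhost:10010")    # delete by the host-qualified name
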
@@ -27,9 +27,9 @@ class JobsTestCase < TestCaseWithSplunkConnection
  # There is a convenience method on service to create an asynchronous
  # search job. Test it the same way.
  #
- def service_create_and_idempotent_cancel
+ def test_service_create_and_idempotent_cancel
  jobs = @service.jobs
- job = @service.create(QUERY)
+ job = @service.create_search(QUERY)
  assert_true(jobs.has_key?(job.sid))
  job.cancel()
  assert_eventually_true() { !jobs.has_key?(job.sid) }
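
Besides gaining the test_ prefix (without it, Test::Unit never ran this test), the method now calls @service.create_search, the convenience wrapper that starts an asynchronous job; create_oneshot and create_export, exercised in the segmentation tests below, return results directly instead. A quick sketch, with service standing in for a logged-in Splunk::Service:

    job = service.create_search("search index=_internal | head 3")
    job.sid       # the job runs asynchronously; poll it until it is done
    job.cancel()

    results = service.create_oneshot("search index=_internal | head 3")
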
@@ -102,6 +102,77 @@ class JobsTestCase < TestCaseWithSplunkConnection
  assert_true(3 >= results.length())
  end

+ ##
+ # Test that oneshot jobs have no <sg> elements in the XML they return
+ # by default.
+ #
+ def test_oneshot_has_no_segmentation_by_default
+ omit_if(@service.splunk_version[0] == 4)
+ stream = @service.create_oneshot("search index=_internal GET | head 3")
+ assert_false(stream.include?("<sg"))
+ end
+
+ ##
+ # Are <sg> elements returned in the XML from a oneshot job when we pass
+ # the option segmentation=raw?
+ #
+ def test_oneshot_has_forced_segmentation
+ omit_if(@service.splunk_version[0] == 4)
+ stream = @service.create_oneshot("search index=_internal GET | head 3",
+ :segmentation => "raw")
+ assert_true(stream.include?("<sg"))
+ end
+
+ ##
+ # Test that export jobs have no <sg> elements in the XML they return by
+ # default.
+ #
+ def test_export_has_no_segmentation_by_default
+ omit_if(@service.splunk_version[0] == 4)
+ stream = @service.create_export("search index=_internal GET | head 3")
+ assert_false(stream.include?("<sg"))
+ end
+
+ ##
+ # Export jobs should have <sg> elements in the XML they return when a
+ # value is passed to the segmentation argument to make it so.
+ #
+ def test_export_has_forced_segmentation
+ omit_if(@service.splunk_version[0] == 4)
+
+ stream = @service.create_export("search index=_internal GET | head 3",
+ :segmentation => "raw")
+ assert_true(stream.include?("<sg"))
+ end
+
+ ##
+ # Results and preview on a search job should have no segmentation
+ # by default.
+ #
+ def test_asynchronous_job_has_no_segmentation_by_default
+ omit_if(@service.splunk_version[0] == 4)
+ job = @service.jobs.create("search index=_internal GET | head 3")
+ until job.is_done?()
+ sleep(0.1)
+ end
+ assert_false(job.events().include?("<sg"))
+ assert_false(job.preview().include?("<sg"))
+ end
+
+ ##
+ # Results and preview on a search job should have segmentation when
+ # it is forced.
+ #
+ def test_asynchronous_job_has_segmentation_when_forced
+ omit_if(@service.splunk_version[0] == 4)
+ job = @service.jobs.create("search index=_internal GET | head 3")
+ until job.is_done?()
+ sleep(0.1)
+ end
+ assert_true(job.events(:segmentation => "raw").include?("<sg"))
+ assert_true(job.preview(:segmentation => "raw").include?("<sg"))
+ end
+
  def test_each_and_values
  jobs = Jobs.new(@service)