fluent-plugin-juniper-telemetry_tech-mocha 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/cpu_memory_utilization.pb.rb +53 -0
- data/lib/fabric.pb.rb +85 -0
- data/lib/firewall.pb.rb +83 -0
- data/lib/fluent/plugin/parser_juniper_analyticsd.rb +83 -0
- data/lib/fluent/plugin/parser_juniper_jti.rb +1145 -0
- data/lib/fluent/plugin/parser_juniper_na.rb +147 -0
- data/lib/google/protobuf/descriptor.pb.rb +290 -0
- data/lib/inline_jflow.pb.rb +117 -0
- data/lib/juniper_telemetry_lib.rb +114 -0
- data/lib/logical_port.pb.rb +98 -0
- data/lib/lsp_mon.pb.rb +115 -0
- data/lib/lsp_stats.pb.rb +46 -0
- data/lib/npu_memory_utilization.pb.rb +59 -0
- data/lib/npu_utilization.pb.rb +63 -0
- data/lib/optics.pb.rb +93 -0
- data/lib/packet_stats.pb.rb +56 -0
- data/lib/pbj.pb.rb +71 -0
- data/lib/port.pb.rb +105 -0
- data/lib/port_exp.pb.rb +41 -0
- data/lib/qmon.pb.rb +69 -0
- data/lib/telemetry_top.pb.rb +59 -0
- metadata +109 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 23a6189ba3255c16f3d40b3b7935d0cbeb1e25761fc61f2b8ead06430d25379a
  data.tar.gz: 16871c15cfd83777766c7badd596c9257758676420659b89a0033d8aab58ca36
SHA512:
  metadata.gz: e0e77bf2c27b5df8a46fab27f240cfc137829eee14f599d3e8e79510838fc49d0b83509f3e98e1248efea51d7e62c2022f58ce2f48032d09773c03d135edfe1a
  data.tar.gz: ee1dd76f80feb7d1bd5544ceeb4ef14f7aeaf53315fc3a170fb0d6053851dd91eb36691b2df906140ce91c3150c15f73b4bc35004fdef402be7fbd7ae1f42576
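The digests above can be verified locally before trusting a downloaded copy of the gem. A minimal sketch, assuming metadata.gz and data.tar.gz have already been extracted from the .gem archive (a .gem file is a tar containing them) into the same directory as checksums.yaml; the file locations are an assumption for illustration:

require 'digest'
require 'yaml'

# Compare the local archives against the digests published in checksums.yaml.
checks = YAML.load_file('checksums.yaml')

%w[metadata.gz data.tar.gz].each do |name|
  sha256 = Digest::SHA256.file(name).hexdigest
  sha512 = Digest::SHA512.file(name).hexdigest
  ok = sha256 == checks['SHA256'][name] && sha512 == checks['SHA512'][name]
  puts "#{name}: #{ok ? 'checksums match' : 'CHECKSUM MISMATCH'}"
end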
data/lib/cpu_memory_utilization.pb.rb
ADDED
@@ -0,0 +1,53 @@
# encoding: utf-8

##
# This file is auto-generated. DO NOT EDIT!
#
require 'protobuf'


##
# Imports
#
require 'telemetry_top.pb'


##
# Message Classes
#
class CpuMemoryUtilization < ::Protobuf::Message; end
class CpuMemoryUtilizationSummary < ::Protobuf::Message; end
class CpuMemoryUtilizationPerApplication < ::Protobuf::Message; end


##
# Message Fields
#
class CpuMemoryUtilization
  repeated ::CpuMemoryUtilizationSummary, :utilization, 1
end

class CpuMemoryUtilizationSummary
  optional :string, :name, 1
  optional :uint64, :size, 2
  optional :uint64, :bytes_allocated, 3
  optional :int32, :utilization, 4
  repeated ::CpuMemoryUtilizationPerApplication, :application_utilization, 5
end

class CpuMemoryUtilizationPerApplication
  optional :string, :name, 1
  optional :uint64, :bytes_allocated, 2
  optional :uint64, :allocations, 3
  optional :uint64, :frees, 4
  optional :uint64, :allocations_failed, 5
end


##
# Extended Message Fields
#
class ::JuniperNetworksSensors < ::Protobuf::Message
  optional ::CpuMemoryUtilization, :".cpu_memory_util_ext", 1, :extension => true
end

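As with the other generated .pb.rb files in this gem, the classes above are plain ::Protobuf::Message subclasses, so they expose the ruby-protobuf surface the parsers rely on (class-level decode, instance-level to_json). A minimal sketch of traversing a decoded payload, assuming raw_bytes holds serialized CpuMemoryUtilization data (the input is hypothetical):

require 'json'
require 'protobuf'
require 'cpu_memory_utilization.pb'

# Decode serialized bytes and walk the per-partition summaries, mirroring the
# JSON/hash traversal style used by parser_juniper_jti.rb.
def summarize_cpu_memory(raw_bytes)
  msg  = CpuMemoryUtilization.decode(raw_bytes)
  data = JSON.parse(msg.to_json)

  (data['utilization'] || []).each do |summary|
    puts "#{summary['name']}: #{summary['utilization']}% used of #{summary['size']} bytes"
    (summary['application_utilization'] || []).each do |app|
      puts "  #{app['name']}: #{app['bytes_allocated']} bytes across #{app['allocations']} allocations"
    end
  end
end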
data/lib/fabric.pb.rb
ADDED
@@ -0,0 +1,85 @@
# encoding: utf-8

##
# This file is auto-generated. DO NOT EDIT!
#
require 'protobuf'


##
# Imports
#
require 'telemetry_top.pb'


##
# Message Classes
#
class Fabric_message < ::Protobuf::Message
  class Sensor_location < ::Protobuf::Enum
    define :Linecard, 1
    define :Switch_Fabric, 2
  end

end

class Edge_stats < ::Protobuf::Message
  class Identifier_type < ::Protobuf::Enum
    define :Switch_Fabric, 1
    define :Linecard, 2
  end

end

class Class_stats < ::Protobuf::Message; end
class Counters < ::Protobuf::Message; end


##
# Message Fields
#
class Fabric_message
  repeated ::Edge_stats, :edges, 1
  optional ::Fabric_message::Sensor_location, :location, 2, :".telemetry_options" => { :is_key => true }
end

class Edge_stats
  optional ::Edge_stats::Identifier_type, :source_type, 1, :".telemetry_options" => { :is_key => true }
  optional :uint32, :source_slot, 2, :".telemetry_options" => { :is_key => true }
  optional :uint32, :source_pfe, 3, :".telemetry_options" => { :is_key => true }
  optional ::Edge_stats::Identifier_type, :destination_type, 4, :".telemetry_options" => { :is_key => true }
  optional :uint32, :destination_slot, 5, :".telemetry_options" => { :is_key => true }
  optional :uint32, :destination_pfe, 6, :".telemetry_options" => { :is_key => true }
  repeated ::Class_stats, :class_stats, 7
end

class Class_stats
  optional :string, :priority, 1, :".telemetry_options" => { :is_key => true }
  optional ::Counters, :transmit_counts, 2
end

class Counters
  optional :uint64, :packets, 1, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :bytes, 2, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :packets_per_second, 3, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :bytes_per_second, 4, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :drop_packets, 5, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :drop_bytes, 6, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :drop_packets_per_second, 7, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :drop_bytes_per_second, 8, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :queue_depth_average, 9, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :queue_depth_current, 10, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :queue_depth_peak, 11, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :queue_depth_maximum, 12, :".telemetry_options" => { :is_gauge => true }
  optional :uint64, :error_packets, 13, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :error_packets_per_second, 14, :".telemetry_options" => { :is_gauge => true }
end


##
# Extended Message Fields
#
class ::JuniperNetworksSensors < ::Protobuf::Message
  optional ::Fabric_message, :".fabricMessageExt", 2, :extension => true
end

data/lib/firewall.pb.rb
ADDED
@@ -0,0 +1,83 @@
# encoding: utf-8

##
# This file is auto-generated. DO NOT EDIT!
#
require 'protobuf'


##
# Imports
#
require 'telemetry_top.pb'


##
# Message Classes
#
class Firewall < ::Protobuf::Message; end
class FirewallStats < ::Protobuf::Message; end
class MemoryUsage < ::Protobuf::Message; end
class CounterStats < ::Protobuf::Message; end
class PolicerStats < ::Protobuf::Message; end
class ExtendedPolicerStats < ::Protobuf::Message; end
class HierarchicalPolicerStats < ::Protobuf::Message; end


##
# Message Fields
#
class Firewall
  repeated ::FirewallStats, :firewall_stats, 1
end

class FirewallStats
  required :string, :filter_name, 1, :".telemetry_options" => { :is_key => true }
  optional :uint64, :timestamp, 2, :".telemetry_options" => { :is_timestamp => true }
  repeated ::MemoryUsage, :memory_usage, 3
  repeated ::CounterStats, :counter_stats, 4
  repeated ::PolicerStats, :policer_stats, 5
  repeated ::HierarchicalPolicerStats, :hierarchical_policer_stats, 6
end

class MemoryUsage
  required :string, :name, 1, :".telemetry_options" => { :is_key => true }
  optional :uint64, :allocated, 2, :".telemetry_options" => { :is_gauge => true }
end

class CounterStats
  required :string, :name, 1, :".telemetry_options" => { :is_key => true }
  optional :uint64, :packets, 2, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :bytes, 3, :".telemetry_options" => { :is_counter => true }
end

class PolicerStats
  required :string, :name, 1, :".telemetry_options" => { :is_key => true }
  optional :uint64, :out_of_spec_packets, 2, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :out_of_spec_bytes, 3, :".telemetry_options" => { :is_counter => true }
  optional ::ExtendedPolicerStats, :extended_policer_stats, 4
end

class ExtendedPolicerStats
  optional :uint64, :offered_packets, 1, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :offered_bytes, 2, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :transmitted_packets, 3, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :transmitted_bytes, 4, :".telemetry_options" => { :is_counter => true }
end

class HierarchicalPolicerStats
  required :string, :name, 1, :".telemetry_options" => { :is_key => true }
  optional :uint64, :premium_packets, 2, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :premium_bytes, 3, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :aggregate_packets, 4, :".telemetry_options" => { :is_counter => true }
  optional :uint64, :aggregate_bytes, 5, :".telemetry_options" => { :is_counter => true }
end


##
# Extended Message Fields
#
class ::JuniperNetworksSensors < ::Protobuf::Message
  optional ::Firewall, :".jnpr_firewall_ext", 6, :extension => true
end

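Note the pattern shared by the three .pb.rb files above: each one reopens ::JuniperNetworksSensors and registers its top-level message as an extension field with a distinct tag (1 for .cpu_memory_util_ext, 2 for .fabricMessageExt, 6 for .jnpr_firewall_ext). After TelemetryStream.decode, whichever extension a device populated appears under enterprise → juniperNetworks keyed by that extension name, which is exactly the key parser_juniper_jti.rb switches on. A hedged sketch of listing the sensors carried by one packet, with raw_bytes standing in for a received payload:

require 'json'
require 'protobuf'
require 'telemetry_top.pb'
require 'cpu_memory_utilization.pb'
require 'fabric.pb'
require 'firewall.pb'

# Return the Juniper sensor extensions present in a single JTI packet.
def present_sensors(raw_bytes)
  stream  = TelemetryStream.decode(raw_bytes)
  sensors = JSON.parse(stream.to_json).dig('enterprise', 'juniperNetworks') || {}
  sensors.keys   # e.g. ["jnpr_firewall_ext"]
end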
data/lib/fluent/plugin/parser_juniper_analyticsd.rb
ADDED
@@ -0,0 +1,83 @@
require 'juniper_telemetry_lib.rb'

module Fluent
  class TextParser
    class JuniperAnalyticsdParser < Parser

      Plugin.register_parser("juniper_analyticsd", self)

      config_param :output_format, :string, :default => 'structured'

      # This method is called after config_params have read configuration parameters
      def configure(conf)
        super

        ## Check if "output_format" has a valid value
        unless @output_format.to_s == "structured" ||
               @output_format.to_s == "flat" ||
               @output_format.to_s == "statsd"

          raise ConfigError, "output_format value '#{@output_format}' is not valid. Must be : structured, flat or statsd"
        end
      end

      def parse(text)

        payload = JSON.parse(text)

        ## Extract contextual info
        record_type = payload["record-type"]
        record_time = payload["time"]
        device_name = payload["router-id"]
        port_name = payload["port"]

        ## Record time is in microsecond and until 0.14 Fluentd do not support lower than 1s
        ## We need to trim record time for now to fit fluentd
        json_time = (record_time/1000000).to_i

        if record_type == 'traffic-stats'

          ## Delete contextual info
          payload.delete("record-type")
          payload.delete("time")
          payload.delete("router-id")
          payload.delete("port")

          payload.each do |key, value|

            # Save all info extracted on a list
            sensor_data = []
            sensor_data.push({ 'device' => device_name })
            sensor_data.push({ 'interface' => port_name })
            sensor_data.push({ 'type' => record_type + '.' + key })
            sensor_data.push({ 'value' => value })

            record = build_record(output_format, sensor_data)
            yield json_time, record
          end
        elsif record_type == 'queue-stats'

          ## Delete contextual info
          payload.delete("record-type")
          payload.delete("time")
          payload.delete("router-id")
          payload.delete("port")

          payload.each do |key, value|

            sensor_data = []
            sensor_data.push({ 'device' => device_name })
            sensor_data.push({ 'interface' => port_name })
            sensor_data.push({ 'type' => record_type + '.' + key })
            sensor_data.push({ 'value' => value })

            record = build_record(output_format, sensor_data)
            yield json_time, record
          end
        else
          $log.warn "Recard type '#{record_type}' not supported"
        end
      end
    end
  end
end
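The parser above is registered as juniper_analyticsd and simply re-keys the JSON documents emitted by Junos analyticsd: the contextual fields become tags and every remaining key/value pair becomes one event of type "<record-type>.<key>". A standalone sketch of that extraction for a hypothetical 'traffic-stats' payload (the counter names after "port" are invented for illustration; build_record itself lives in juniper_telemetry_lib.rb and is not shown in this diff):

require 'json'

# Hypothetical analyticsd document; only record-type/time/router-id/port are
# field names taken from the parser above.
payload = JSON.parse(<<~JSON)
  {"record-type":"traffic-stats","time":1511326161000000,
   "router-id":"mx1","port":"xe-0/0/0","rx-pkts":120,"tx-pkts":98}
JSON

record_type = payload.delete('record-type')
json_time   = (payload.delete('time') / 1_000_000).to_i   # microseconds -> seconds
device_name = payload.delete('router-id')
port_name   = payload.delete('port')

payload.each do |key, value|
  sensor_data = [
    { 'device' => device_name }, { 'interface' => port_name },
    { 'type' => "#{record_type}.#{key}" }, { 'value' => value }
  ]
  puts "#{json_time} #{sensor_data.inspect}"
end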
data/lib/fluent/plugin/parser_juniper_jti.rb
ADDED
@@ -0,0 +1,1145 @@
require 'cpu_memory_utilization.pb'
require 'fabric.pb'
require 'firewall.pb'
require 'fluent/parser'
require 'inline_jflow.pb'
require 'juniper_telemetry_lib.rb'
require 'lsp_mon.pb'
require 'lsp_stats.pb'
require 'npu_memory_utilization.pb'
require 'npu_utilization.pb'
require 'optics.pb'
require 'packet_stats.pb'
require 'pbj.pb'
require 'port.pb'
require 'port_exp.pb'
require 'protobuf'
require 'qmon.pb'
require 'logical_port.pb'
require 'telemetry_top.pb'


module Fluent
  class TextParser
    class JuniperJtiParser < Parser

      # Register this parser as "juniper_jti"
      Plugin.register_parser("juniper_jti", self)

      config_param :output_format, :string, :default => 'structured'



      # This method is called after config_params have read configuration parameters
      def configure(conf)
        super

        ## Check if "output_format" has a valid value
        unless @output_format.to_s == "structured" ||
               @output_format.to_s == "flat" ||
               @output_format.to_s == "statsd"

          raise ConfigError, "output_format value '#{@output_format}' is not valid. Must be : structured, flat or statsd"
        end
      end



      # This is the main method. The input "text" is the unit of data to be parsed.

      # The JTI sensor data that we get from the device will have the following high-level structure ...
      #
      # system_id: "nanostring:3.3.3.2"
      # component_id: 1
      # sensor_name: "SENSOR1:/junos/system/linecard/interface/:/junos/system/linecard/interface/:PFE"
      # sequence_number: 97
      # timestamp: 1510774932270
      # version_major: 1
      # version_minor: 1
      # enterprise {
      #   [juniperNetworks] {
      #     [jnpr_interface_ext] {
      #       interface_stats {
      #         if_name: "xe-1/0/0"
      #         init_time: 1510755787
      #         snmp_if_index: 17555
      #         egress_queue_info {
      #           queue_number: 0
      #           packets: 0
      #           bytes: 0
      #           tail_drop_packets: 0
      #           rl_drop_packets: 0
      #           rl_drop_bytes: 0
      #           red_drop_packets: 0
      #           red_drop_bytes: 0
      #           avg_buffer_occupancy: 0
      #           cur_buffer_occupancy: 0
      #           peak_buffer_occupancy: 0
      #           allocated_buffer_size: 120061952
      #         }
      # ...
      def parse(text)

        # Decode GBP packet.
        jti_msg = TelemetryStream.decode(text)
        #$log.debug "Value of 'jti_msg': '#{jti_msg}'"

        resource = ""

        # Extract device name & timestamp from the JTI sensor data.
        device_name = jti_msg.system_id
        gpb_time = epoc_to_sec(jti_msg.timestamp)
        $log.debug "Received JTI sensor data from device '#{device_name}' at time '#{gpb_time}'"

        # Convert the JTI message into JSON format and parse it with JSON.parse() to convert it to a hash so we can access values.
        # Extract the sensor type and sensor data from the incoming JTI data.
        begin
          jti_msg_json = JSON.parse(jti_msg.to_json)
          $log.debug "Value of 'jti_msg_json': '#{jti_msg_json}'"

          datas_sensors = jti_msg_json["enterprise"]["juniperNetworks"]
          $log.debug "Extracted the following sensor data from device '#{device_name}': #{datas_sensors}"
        rescue => e
          $log.warn "Unable to extract sensor data sensor from jti_msg.enterprise.juniperNetworks, Error during processing: #{$!}"
          $log.debug "Unable to extract sensor data sensor from jti_msg.enterprise.juniperNetworks, Data Dump : " + jti_msg.inspect.to_s
          return
        end



        # Iterate over each sensor ...
        # At this point in the code, 'datas_sensors' has the following value:
        # {"jnpr_interface_ext"=>{"interface_stats"=>[{"if_name"=>"xe-7/2/0", ... }]}}
        #
        # The ".each" iterator below has the format ".each do |key, value|", which means that
        # 'sensor' is the key, eg. 'jnpr_interface_ext', 'jnpr_qmon_ext', etc. and that
        # 's_data' is the rest of the sensor data, eg. '{"interface_stats"=>[{"if_name"=>"xe-7/2/0", ... }]}'

        datas_sensors.each do |sensor, s_data|


          ############################################################
          ## SENSOR: /junos/services/label-switched-path/usage/ ##
          ############################################################
          if sensor == "jnpr_lsp_statistics_ext"

            resource = "/junos/services/label-switched-path/usage/"
            $log.debug "Processing sensor '#{sensor}' with resource '#{resource}'"

            # At this point in the code, 'data_sensors' has the following value:
=begin
NOTE: DATA UNAVAILABLE AT THE TIME OF CODING ... CODE BELOW WRITTEN DIRECTLY FROM VISUAL ANALYSIS OF ASSOCIATED .PROTO FILE
TODO: VERIFY THAT THE FOLLOWING CODE WORKS!!
=end
            # Iterate over each LSP stats record contained within the 'lsp_stats_records' array ...
            # Note that each LSP's associated data is stored in 'datas'.
            datas_sensors[sensor]['lsp_stats_records'].each do |datas|

              # Save all extracted sensor data in a list.
              sensor_data = []

              # Block to catch exceptions during sensor data parsing.
              begin

                # Add the device name to "sensor_data" for correlation purposes.
                sensor_data.push({ 'device' => device_name })

                # According to the LSP_Stats.proto file, each of the child elements under "lsp_stats_records" is going to be a
                # "leaf" node (eg. Integer, String, Float, etc.). These values can be written directly to "sensor_data".
                datas.each do |level_1_key, level_1_value|

                  if level_1_key == "name"
                    sensor_data.push({ 'lsp_name' => level_1_value })
                  elsif level_1_key == "instance_identifier"
                    sensor_data.push({ 'instance_id' => level_1_value })
                  elsif level_1_key == "counter_name"
                    sensor_data.push({ 'counter_name' => level_1_value })
                  else
                    # By default, InfluxDB assigns the type of a field based on the type of the first value inserted.
                    # So, in the "value" field, if an Integer is inserted, then the "value" field will only accept Integer
                    # values hereon after ... so, a String value insertion will result in an error.
                    # To alleviate this, we will have "value" as the default field for Integers, so as not to break existing code.
                    # We will add additional "value_string", "value_float", fields to support different value types. This way,
                    # we can persist all the various telemetry sensor parameters in InfluxDB, not just the Integer values.

                    # Create local copy of 'sensor_data' variable.
                    local_sensor_data = sensor_data.dup
                    local_sensor_data = process_value(local_sensor_data, level_1_key, level_1_value, '')

                    record = build_record(output_format, local_sensor_data)
                    ## For debug only ...
                    #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                    #$log.debug "Value of 'record': '#{record}'"
                    yield gpb_time, record
                  end
                end

              rescue => e
                $log.warn "Unable to parse '" + sensor + "' sensor, Error during processing: #{$!}"
                $log.debug "Unable to parse '" + sensor + "' sensor, Data Dump: " + datas.inspect.to_s
              end
            end




          ####################################################
          ## SENSOR: /junos/system/linecard/cpu/memory/ ##
          ####################################################
          elsif sensor == "cpu_memory_util_ext"

            resource = "/junos/system/linecard/cpu/memory/"
            $log.debug "Processing sensor '#{sensor}' with resource '#{resource}'"

            # At this point in the code, 'data_sensors' has the following value:
=begin
{
  "cpu_memory_util_ext": {
    "utilization": [
      {
        "name": "Kernel",
        "size": 3288330216,
        "bytes_allocated": 581290432,
        "utilization": 17,
        "application_utilization": [
          {
            "name": "ifd",
            "bytes_allocated": 11336,
            "allocations": 109,
            "frees": 0,
            "allocations_failed": 0
          },
          {
            "name": "ifl",
            "bytes_allocated": 47832,
            "allocations": 115,
            "frees": 0,
            "allocations_failed": 0
          },
          ...
          {
            "name": "inline ka",
            "bytes_allocated": 1104,
            "allocations": 36,
            "frees": 4,
            "allocations_failed": 0
          }
        ]
      },
      {
        "name": "DMA",
        "size": 268435456,
        "bytes_allocated": 60600272,
        "utilization": 22
      },
      {
        "name": "Turbotx",
        "size": 21221376,
        "bytes_allocated": 368,
        "utilization": 1
      }
    ]
  }
}
=end
            # Iterate over each record contained within the 'utilization' array ...
            datas_sensors[sensor]['utilization'].each do |datas|

              # Save all extracted sensor data in a list.
              sensor_data = []

              # Block to catch exceptions during sensor data parsing.
              begin

                # Add the device name to "sensor_data" for correlation purposes.
                sensor_data.push({ 'device' => device_name })

                # Each of the child elements under "utilization" is going to be either a "leaf" node (eg. Integer, String, Float, etc.)
                # or a "branch" node (eg. Array or Hash), in which case these branch sections need additional level of processing.
                # For the leaf nodes, these values can be written directly to "sensor_data"

                datas.each do |level_1_key, level_1_value|

                  # If the node currently being processed is a "branch node" (ie. it has child nodes)
                  if level_1_value.is_a?(Hash) || level_1_value.is_a?(Array)

                    # From the proto file, we know that the level_1 branch nodes are all Hash values, so we can ignore the conditional
                    # below testing for an array
                    if level_1_value.is_a?(Array)

                      level_1_value.each do |level_2|

                        # Create local copy of 'sensor_data' variable.
                        local_sensor_data = sensor_data.dup

                        level_2.each do |level_2_key, level_2_value|
                          ## For debug only ...
                          #$log.debug "Value of 'level_2_key': '#{level_2_key}'"
                          #$log.debug "Value of 'level_2_value': '#{level_2_value}'"

                          if level_2_key == "name"
                            local_sensor_data.push({ 'cpu_mem_app_name' => level_2_value })
                          else
                            local_sensor_data = process_value(local_sensor_data, level_2_key, level_2_value, level_1_key)

                            record = build_record(output_format, local_sensor_data)
                            ## For debug only ...
                            #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                            yield gpb_time, record
                          end
                        end
                      end

                    # If the branch node is not an Array, then we can simply write the key/value pairs straight to "sensor_data". The exception is
                    # "application_utilization", which is an array of "CpuMemoryUtilizationPerApplication", which in turn is a collection of leaf nodes.
                    else
                      # Do nothing, as per reasons cited above.
                    end

                  # If the node currently being processed is a "leaf node" (ie. it has NO child nodes)
                  else
                    ## For debug only ...
                    #$log.debug "Value of 'level_2_key': '#{level_2_key}'"
                    #$log.debug "Value of 'level_2_value': '#{level_2_value}'"

                    # Create local copy of 'sensor_data' variable.
                    local_sensor_data = sensor_data.dup

                    if level_1_key == "name"
                      sensor_data.push({ 'cpu_mem_partition_name' => level_1_value })
                    else
                      # By default, InfluxDB assigns the type of a field based on the type of the first value inserted.
                      # So, in the "value" field, if an Integer is inserted, then the "value" field will only accept Integer
                      # values hereon after ... so, a String value insertion will result in an error.
                      # To alleviate this, we will have "value" as the default field for Integers, so as not to break existing code.
                      # We will add additional "value_string", "value_float", fields to support different value types. This way,
                      # we can persist all the various telemetry sensor parameters in InfluxDB, not just the Integer values.

                      # Create local copy of 'sensor_data' variable.
                      local_sensor_data = sensor_data.dup
                      local_sensor_data = process_value(local_sensor_data, level_1_key, level_1_value, '')

                      record = build_record(output_format, local_sensor_data)
                      ## For debug only ...
                      #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                      #$log.debug "Value of 'record': '#{record}'"
                      yield gpb_time, record
                    end
                  end
                end

              rescue => e
                $log.warn "Unable to parse '" + sensor + "' sensor, Error during processing: #{$!}"
                $log.debug "Unable to parse '" + sensor + "' sensor, Data Dump: " + datas.inspect.to_s
              end
            end




          ##################################################
          ## SENSOR: /junos/system/linecard/fabric/ ##
          ##################################################
          elsif sensor == "fabricMessageExt"

            resource = "/junos/system/linecard/fabric/"
            $log.debug "Processing sensor '#{sensor}' with resource '#{resource}'"

            # At this point in the code, 'data_sensors' has the following value:
=begin

=end
            # Iterate over each record contained within the 'edges' array ...
            datas_sensors[sensor]['edges'].each do |datas|

              # Save all extracted sensor data in a list.
              sensor_data = []

              # Block to catch exceptions during sensor data parsing.
              begin




              rescue => e
                $log.warn "Unable to parse '" + sensor + "' sensor, Error during processing: #{$!}"
                $log.debug "Unable to parse '" + sensor + "' sensor, Data Dump: " + datas.inspect.to_s
              end
            end




          ##################################################
          ## SENSOR: /junos/system/linecard/firewall/ ##
          ##################################################
          elsif sensor == "jnpr_firewall_ext"

            resource = "/junos/system/linecard/firewall/"
            $log.debug "Processing sensor '#{sensor}' with resource '#{resource}'"

            # At this point in the code, 'data_sensors' has the following value:
=begin
{
  "jnpr_firewall_ext": {
    "firewall_stats": [
      {
        "filter_name": "FILTER1",
        "timestamp": 1511326161,
        "memory_usage": [
          {
            "name": "HEAP",
            "allocated": 4076
          },
          ...
        ],
        "counter_stats": [
          {
            "name": "COUNTER1",
            "packets": 4,
            "bytes": 1068
          },
          ...
        ]
      },
      ...
    ]
  }
}
=end
            # Iterate over each firewall filter contained within the 'firewall_stats' array ...
            # Note that each interface's associated data is stored in 'datas'.
            datas_sensors[sensor]['firewall_stats'].each do |datas|

              # Save all extracted sensor data in a list.
              sensor_data = []

              # Block to catch exceptions during sensor data parsing.
              begin

                # Add the device name to "sensor_data" for correlation purposes.
                sensor_data.push({ 'device' => device_name })

                # Each of the child elements under "firewall_stats" is going to be either a "leaf" node (eg. Integer, String, Float, etc.)
                # or a "branch" node (eg. Array or Hash), in which case these branch sections need additional level of processing.
                # For the leaf nodes, these values can be written directly to "sensor_data"
                datas.each do |level_1_key, level_1_value|

                  # If the node currently being processed is a "branch node" (ie. it has child nodes)
                  if level_1_value.is_a?(Hash) || level_1_value.is_a?(Array)

                    # From the Firewall.proto file, we know that the level_1 branch nodes are all Array values, ie. "memory_usage",
                    # "counter_stats", "policer_stats", "hierarchical_policer_stats".

                    # We need to treat separately the cases where the branch node is an Array or not.
                    # If the branch node is an Array, then we must iterate through each element of the Array and then write the key/value
                    # pairs straight to "sensor_data".
                    if level_1_value.is_a?(Array)

                      # Iterate through each element in the Array ...
                      level_1_value.each do |level_2|

                        # Process the "memory_usage" array separately to avoid adding an unnecessary column to the table.
                        if level_1_key == "memory_usage"
                          # Create local copy of 'sensor_data' variable.
                          local_sensor_data = sensor_data.dup
                          local_sensor_data = process_value(local_sensor_data, level_2['name'], level_2['allocated'], level_1_key)

                          record = build_record(output_format, local_sensor_data)
                          ## For debug only ...
                          #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                          yield gpb_time, record

                        # Process the remaining arrays, namely "counter_stats", "policer_stats", "hierarchical_policer_stats".
                        else

                          level_2.each do |level_2_key, level_2_value|
                            ## For debug only ...
                            #$log.debug "Value of 'level_2_key': '#{level_2_key}'"
                            #$log.debug "Value of 'level_2_value': '#{level_2_value}'"

                            # Create local copy of 'sensor_data' variable.
                            local_sensor_data = sensor_data.dup

                            if level_1_key == "counter_stats"
                              local_sensor_data.push({ 'filter_counter_name' => level_2['name'] })

                            elsif level_1_key == "policer_stats"
                              local_sensor_data.push({ 'filter_policer_name' => level_2['name'] })

                            elsif level_1_key == "hierarchical_policer_stats"
                              local_sensor_data.push({ 'filter_hierachical_policer_name' => level_2['name'] })

                            end

                            local_sensor_data = process_value(local_sensor_data, level_2_key, level_2_value, level_1_key)

                            record = build_record(output_format, local_sensor_data)
                            ## For debug only ...
                            #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                            yield gpb_time, record
                          end
                        end
                      end

                    # If the branch node is not an Array, then we can simply write the key/value pairs straight to "sensor_data".
                    # However, we know from the Firewall.proto file that there are no Hash values at level1.
                    else
                      # Do nothing, as per reasons cited above.
                    end

                  # If the node currently being processed is a "leaf node" (ie. it has NO child nodes)
                  else

                    ## For debug only ...
                    #$log.debug "Value of 'level_1_key': '#{level_1_key}'"
                    #$log.debug "Value of 'level_1_value': '#{level_1_value}'"

                    # We know from the Firewall.proto file that the only two level_1 leaf nodes are "filter_name" and "timestamp"
                    if level_1_key == "filter_name"
                      sensor_data.push({ 'filter_name' => level_1_value })
                    elsif level_1_key == "timestamp"
                      sensor_data.push({ 'filter_timestamp' => level_1_value })
                    end
                  end
                end

              rescue => e
                $log.warn "Unable to parse '" + sensor + "' sensor, Error during processing: #{$!}"
                $log.debug "Unable to parse '" + sensor + "' sensor, Data Dump: " + datas.inspect.to_s
              end
            end




          ###################################################
          ## SENSOR: /junos/system/linecard/interface/ ##
          ###################################################
          elsif sensor == "jnpr_interface_ext"

            resource = "/junos/system/linecard/interface/"
            $log.debug "Processing sensor '#{sensor}' with resource '#{resource}'"

            # At this point in the code, 'datas_sensors' has the following value:
=begin
{
  "jnpr_interface_ext": {
    "interface_stats": [
      {
        "if_name": "xe-7/2/0",
        "init_time": 1510755828,
        "snmp_if_index": 17897,
        "egress_queue_info": [
          {
            "queue_number": 0,
            "packets": 0,
            "bytes": 0,
            "tail_drop_packets": 0,
            "rl_drop_packets": 0,
            "rl_drop_bytes": 0,
            "red_drop_packets": 0,
            "red_drop_bytes": 0,
            "avg_buffer_occupancy": 0,
            "cur_buffer_occupancy": 0,
            "peak_buffer_occupancy": 0,
            "allocated_buffer_size": 123207680
          },
          ...
          {
            "queue_number": 7,
            "packets": 0,
            "bytes": 0,
            "tail_drop_packets": 0,
            "rl_drop_packets": 0,
            "rl_drop_bytes": 0,
            "red_drop_packets": 0,
            "red_drop_bytes": 0,
            "avg_buffer_occupancy": 0,
            "cur_buffer_occupancy": 0,
            "peak_buffer_occupancy": 0,
            "allocated_buffer_size": 123207680
          }
        ],
        "ingress_stats": {
          "if_pkts": 0,
          "if_octets": 0,
          "if_1sec_pkts": 0,
          "if_1sec_octets": 0,
          "if_uc_pkts": 0,
          "if_mc_pkts": 0,
          "if_bc_pkts": 0,
          "if_error": 0,
          "if_pause_pkts": 0
        },
        "egress_stats": {
          "if_pkts": 0,
          "if_octets": 0,
          "if_1sec_pkts": 0,
          "if_1sec_octets": 0,
          "if_uc_pkts": 0,
          "if_mc_pkts": 0,
          "if_bc_pkts": 0,
          "if_error": 0,
          "if_pause_pkts": 0
        },
        "ingress_errors": {
          "if_errors": 0,
          "if_in_qdrops": 0,
          "if_in_frame_errors": 0,
          "if_discards": 0,
          "if_in_runts": 0,
          "if_in_l3_incompletes": 0,
          "if_in_l2chan_errors": 0,
          "if_in_l2_mismatch_timeouts": 0,
          "if_in_fifo_errors": 0,
          "if_in_resource_errors": 0
        },
        "if_operational_status": "UP",
        "if_transitions": 1,
        "ifLastChange": 0,
        "ifHighSpeed": 10000,
        "egress_errors": {
          "if_errors": 0,
          "if_discards": 0
        }
      },
      ...
    ]
  }
}
=end
            # Iterate over each interface contained within the 'interface_stats' array ...
            # Note that each interface's associated data is stored in 'datas'.
            datas_sensors[sensor]['interface_stats'].each do |datas|

              # Save all extracted sensor data in a list.
              sensor_data = []

              # Block to catch exceptions during sensor data parsing.
              begin

                # Add the device name to "sensor_data" for correlation purposes.
                sensor_data.push({ 'device' => device_name })

                # Each of the child elements under "interface_stats" is going to be either a "leaf" node (eg. Integer, String, Float, etc.)
                # or a "branch" node (eg. Array or Hash), in which case these branch sections need additional level of processing.
                # For the leaf nodes, these values can be written directly to "sensor_data"
                datas.each do |level_1_key, level_1_value|

                  # If the node currently being processed is a "branch node" (ie. it has child nodes)
                  if level_1_value.is_a?(Hash) || level_1_value.is_a?(Array)

                    # We need to treat separately the cases where the branch node is an Array or not.
                    # If the branch node is an Array, then we must iterate through each element of the Array and then write the key/value
                    # pairs straight to "sensor_data".
                    if level_1_value.is_a?(Array)

                      # Iterate through each element in the Array ...
                      level_1_value.each do |level_2|

                        level_2.each do |level_2_key, level_2_value|
                          ## For debug only ...
                          #$log.debug "Value of 'level_2_key': '#{level_2_key}'"
                          #$log.debug "Value of 'level_2_value': '#{level_2_value}'"

                          # Create local copy of 'sensor_data' variable.
                          local_sensor_data = sensor_data.dup

                          # According the Port.proto file, QueueStats should be the only type of data that results in an Array branch node
                          # for the /junos/system/linecard/interface/ sensor. For queue stats, we need to correlate the stats with the
                          # queue number, so we process this separately. The proto file states that we can have egress or ingress queues.
                          if level_1_key == "egress_queue_info"
                            local_sensor_data.push({ 'egress_queue' => level_2['queue_number'] })
                          elsif level_1_key == "ingress_queue_info"
                            local_sensor_data.push({ 'ingress_queue' => level_2['queue_number'] })
                          end

                          local_sensor_data = process_value(local_sensor_data, level_2_key, level_2_value, level_1_key)

                          record = build_record(output_format, local_sensor_data)
                          ## For debug only ...
                          #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                          yield gpb_time, record
                        end
                      end

                    # If the branch node is not an Array, then we can simply write the key/value pairs straight to "sensor_data"
                    else
                      level_1_value.each do |level_2_key, level_2_value|
                        ## For debug only ...
                        #$log.debug "Value of 'level_2_key': '#{level_2_key}'"
                        #$log.debug "Value of 'level_2_value': '#{level_2_value}'"

                        # Create local copy of 'sensor_data' variable.
                        local_sensor_data = sensor_data.dup
                        local_sensor_data = process_value(local_sensor_data, level_2_key, level_2_value, level_1_key)

                        record = build_record(output_format, local_sensor_data)
                        ## For debug only ...
                        #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                        yield gpb_time, record
                      end
                    end

                  # If the node currently being processed is a "leaf node" (ie. it has NO child nodes)
                  else

                    ## For debug only ...
                    #$log.debug "Value of 'level_1_key': '#{level_1_key}'"
                    #$log.debug "Value of 'level_1_value': '#{level_1_value}'"

                    if level_1_key == "if_name"
                      sensor_data.push({ 'interface' => level_1_value })
                    elsif level_1_key == "init_time"
                      # do nothing.
                    else
                      # By default, InfluxDB assigns the type of a field based on the type of the first value inserted.
                      # So, in the "value" field, if an Integer is inserted, then the "value" field will only accept Integer
                      # values hereon after ... so, a String value insertion will result in an error.
                      # To alleviate this, we will have "value" as the default field for Integers, so as not to break existing code.
                      # We will add additional "value_string", "value_float", fields to support different value types. This way,
                      # we can persist all the various telemetry sensor parameters in InfluxDB, not just the Integer values.

                      # Create local copy of 'sensor_data' variable.
                      local_sensor_data = sensor_data.dup
                      local_sensor_data = process_value(local_sensor_data, level_1_key, level_1_value, '')

                      record = build_record(output_format, local_sensor_data)
                      ## For debug only ...
                      #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                      #$log.debug "Value of 'record': '#{record}'"
                      yield gpb_time, record
                    end
                  end
                end

              rescue => e
                $log.warn "Unable to parse '" + sensor + "' sensor, Error during processing: #{$!}"
                $log.debug "Unable to parse '" + sensor + "' sensor, Data Dump: " + datas.inspect.to_s
              end
            end



          #################################################################
          ## SENSOR: /junos/system/linecard/interface/logical/usage/ ##
          #################################################################
          elsif sensor == "jnprLogicalInterfaceExt"

            resource = "/junos/system/linecard/interface/logical/usage"
            $log.debug "Processing sensor '#{sensor}' with resource '#{resource}'"

            # At this point in the code, 'datas_sensors' has the following value:
=begin
{
  "jnprLogicalInterfaceExt": {
    "interface_info": [
      {
        "if_name": "xe-8/0/3:0.0",
        "init_time": 1511187519,
        "snmp_if_index": 19630,
        "ingress_stats": {
          "if_packets": 48510,
          "if_octets": 10347612,
          "if_ucast_packets": 43858,
          "if_mcast_packets": 4652
        },
        "egress_stats": {
          "if_packets": 71474,
          "if_octets": 89157457,
          "if_ucast_packets": 71474,
          "if_mcast_packets": 0
        },
        "op_state": {
          "operational_status": "up"
        }
      },
      ...
    ]
  }
}
=end
            # Iterate over each interface contained within the 'interface_info' array ...
            # Note that each interface's associated data is stored in 'datas'.
            datas_sensors[sensor]['interface_info'].each do |datas|

              # Save all extracted sensor data in a list.
              sensor_data = []

              # Block to catch exceptions during sensor data parsing.
              begin

                # Add the device name to "sensor_data" for correlation purposes.
                sensor_data.push({ 'device' => device_name })

                # Each of the child elements under "queue_monitor_element_info" is going to be either a "leaf" node (eg. Integer, String, Float, etc.)
                # or a "branch" node (eg. Array or Hash), in which case these branch sections need additional level of processing.
                # For the leaf nodes, these values can be written directly to "sensor_data"
                datas.each do |level_1_key, level_1_value|

                  # If the node currently being processed is a "branch node" (ie. it has child nodes)
                  if level_1_value.is_a?(Hash) || level_1_value.is_a?(Array)

                    # According the Logical_Port.proto file, logicalInterfaceQueueStats should be the only type of data that results in an Array branch node
                    # for the /junos/system/linecard/interface/logical/usage sensor. For queue stats, we need to correlate the stats with the
                    # queue number, so we process this separately. The proto file states that we can have egress or ingress queues.
                    if level_1_value.is_a?(Array)

                      # Iterate through each element in the Array ...
                      level_1_value.each do |level_2|

                        level_2.each do |level_2_key, level_2_value|
                          ## For debug only ...
                          #$log.debug "Value of 'level_2_key': '#{level_2_key}'"
                          #$log.debug "Value of 'level_2_value': '#{level_2_value}'"

                          # Create local copy of 'sensor_data' variable.
                          local_sensor_data = sensor_data.dup

                          if level_1_key == "egress_queue_info"
                            local_sensor_data.push({ 'egress_queue' => level_2['queue_number'] })
                          elsif level_1_key == "ingress_queue_info"
                            local_sensor_data.push({ 'ingress_queue' => level_2['queue_number'] })
                          end

                          local_sensor_data = process_value(local_sensor_data, level_2_key, level_2_value, level_1_key)

                          record = build_record(output_format, local_sensor_data)
                          ## For debug only ...
                          #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                          yield gpb_time, record
                        end
                      end

                    # If the branch node is not an Array, then we can simply write the key/value pairs straight to "sensor_data". We can do this for
                    # "EgressInterfaceStats" and "OperationalState" since these are just collections of leaf nodes. The exception is "IngressInterfaceStats",
                    # which contains an array of "ForwardingClassAccounting", which in turn is a collection of leaf nodes.
                    else
                      level_1_value.each do |level_2_key, level_2_value|
                        if level_2_value.is_a?(Array)
                          level_2_value.each do |level_3|

                            level_3.each do |level_3_key, level_3_value|
                              ## For debug only ...
                              #$log.debug "Value of 'level_3_key': '#{level_3_key}'"
                              #$log.debug "Value of 'level_3_value': '#{level_3_value}'"

                              # Create local copy of 'sensor_data' variable.
                              local_sensor_data = sensor_data.dup

                              # For ForwardingClassAccounting stats, we need to correlate the stats with the forwarding class 'fc_number', so we process this separately.
                              local_sensor_data.push({ 'family' => level_3['if_family'] })
                              local_sensor_data.push({ 'forwarding_class' => level_3['fc_number'] })

                              local_sensor_data = process_value(local_sensor_data, level_3_key, level_3_value, level_2_key)

                              record = build_record(output_format, local_sensor_data)
                              ## For debug only ...
                              #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                              yield gpb_time, record
                            end
                          end
                        else
                          ## For debug only ...
                          #$log.debug "Value of 'level_2_key': '#{level_2_key}'"
                          #$log.debug "Value of 'level_2_value': '#{level_2_value}'"

                          # Create local copy of 'sensor_data' variable.
                          local_sensor_data = sensor_data.dup
                          local_sensor_data = process_value(local_sensor_data, level_2_key, level_2_value, level_1_key)

                          record = build_record(output_format, local_sensor_data)
                          ## For debug only ...
                          #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                          yield gpb_time, record
                        end
                      end
                    end


                  # If the node currently being processed is a "leaf node" (ie. it has NO child nodes)
                  else
                    ## For debug only ...
                    $log.debug "Value of 'level_1_key': '#{level_1_key}'"
                    $log.debug "Value of 'level_1_value': '#{level_1_value}'"

                    if level_1_key == "if_name"
                      sensor_data.push({ 'interface' => level_1_value })
                    elsif level_1_key == "init_time"
                      # do nothing.
                    else
                      # By default, InfluxDB assigns the type of a field based on the type of the first value inserted.
                      # So, in the "value" field, if an Integer is inserted, then the "value" field will only accept Integer
                      # values hereon after ... so, a String value insertion will result in an error.
                      # To alleviate this, we will have "value" as the default field for Integers, so as not to break existing code.
                      # We will add additional "value_string", "value_float", fields to support different value types. This way,
                      # we can persist all the various telemetry sensor parameters in InfluxDB, not just the Integer values.

                      # Create local copy of 'sensor_data' variable.
                      local_sensor_data = sensor_data.dup
                      local_sensor_data = process_value(local_sensor_data, level_1_key, level_1_value, '')

                      record = build_record(output_format, local_sensor_data)
                      ## For debug only ...
                      #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                      #$log.debug "Value of 'record': '#{record}'"
                      yield gpb_time, record
                    end
                  end
                end

              rescue => e
                $log.warn "Unable to parse '" + sensor + "' sensor, Error during processing: #{$!}"
                $log.debug "Unable to parse '" + sensor + "' sensor, Data Dump: " + datas.inspect.to_s
              end

            end



          ##############################################
          ## SENSOR: /junos/system/linecard/qmon/ ##
          ##############################################
          elsif sensor == "jnpr_qmon_ext"

            resource = "/junos/system/linecard/qmon/"
            $log.debug "Processing sensor '#{sensor}' with resource '#{resource}'"

            # At this point in the code, 'datas_sensors' has the following value:
=begin
{
  "jnpr_qmon_ext": {
    "queue_monitor_element_info": [
      {
        "if_name": "xe-8/0/0:0",
        "queue_monitor_stats_egress": {
          "queue_monitor_stats_info": [
            {
              "queue_number": 0,
              "queue_id": 8,
              "peak_buffer_occupancy_bytes": 0,
              "peak_buffer_occupancy_percent": 0,
              "packets": 0,
              "octets": 0,
              "tail_drop_packets": 0,
              "tail_drop_octets": 0,
              "red_drop_packets_color_0": 0,
              "red_drop_octets_color_0": 0,
              "red_drop_packets_color_1": 0,
              "red_drop_octets_color_1": 0,
              "red_drop_packets_color_2": 0,
              "red_drop_octets_color_2": 0,
              "red_drop_packets_color_3": 0,
              "red_drop_octets_color_3": 0
            },
            {
              "queue_number": 1,
              "queue_id": 9,
              "peak_buffer_occupancy_bytes": 0,
              "peak_buffer_occupancy_percent": 0,
              "packets": 0,
              "octets": 0,
              "tail_drop_packets": 0,
              "tail_drop_octets": 0,
              "red_drop_packets_color_0": 0,
              "red_drop_octets_color_0": 0,
              "red_drop_packets_color_1": 0,
              "red_drop_octets_color_1": 0,
              "red_drop_packets_color_2": 0,
              "red_drop_octets_color_2": 0,
              "red_drop_packets_color_3": 0,
              "red_drop_octets_color_3": 0
            },
            ...
            {
              "queue_number": 7,
              "queue_id": 15,
              "peak_buffer_occupancy_bytes": 0,
              "peak_buffer_occupancy_percent": 0,
              "packets": 0,
              "octets": 0,
              "tail_drop_packets": 0,
              "tail_drop_octets": 0,
              "red_drop_packets_color_0": 0,
              "red_drop_octets_color_0": 0,
              "red_drop_packets_color_1": 0,
              "red_drop_octets_color_1": 0,
              "red_drop_packets_color_2": 0,
              "red_drop_octets_color_2": 0,
              "red_drop_packets_color_3": 0,
              "red_drop_octets_color_3": 0
            }
          ]
        }
      },
      {
        "if_name": "xe-8/0/0:1",
        "queue_monitor_stats_egress": {
          "queue_monitor_stats_info": [
            {
              "queue_number": 0,
              "queue_id": 16,
              ...
          ]
        }
      }

=end
            # Iterate over each interface contained within the 'queue_monitor_element_info' array ...
            # Note that each interface's associated data is stored in 'datas'.
            datas_sensors[sensor]['queue_monitor_element_info'].each do |datas|

              # Save all extracted sensor data in a list.
              sensor_data = []

              # Block to catch exceptions during sensor data parsing.
              begin

                # Add the device name to "sensor_data" for correlation purposes.
                sensor_data.push({ 'device' => device_name })

                # Each of the child elements under "queue_monitor_element_info" is going to be either a "leaf" node (eg. Integer, String, Float, etc.)
                # or a "branch" node (eg. Array or Hash), in which case these branch sections need additional level of processing.
                # For the leaf nodes, these values can be written directly to "sensor_data"
                datas.each do |level_1_key, level_1_value|

                  # If the node currently being processed is a "branch node" (ie. it has child nodes)
                  if level_1_value.is_a?(Hash) || level_1_value.is_a?(Array)

                    # According the qmon.proto file, the level_1 branch nodes are the Hash elements "queue_monitor_stats_egress" or "queue_monitor_stats_ingress",
                    # each of which contains an Array called "queue_monitor_stats_info" which is an array of "QueueMonitorStats" instances.
                    if level_1_value.is_a?(Array)
                      # Do nothing, for the reasons cited above.
                    else
                      # Level_2_key will be either "queue_monitor_stats_egress" or "queue_monitor_stats_ingress", each of which contains an Array of leaf node collections.
                      level_1_value.each do |level_2_key, level_2_value|
                        if level_2_value.is_a?(Array)
                          ## For debug only ...
                          $log.debug "Value of 'level_2_key': '#{level_2_key}'"
                          $log.debug "Value of 'level_2_value': '#{level_2_value}'"

                          level_2_value.each do |level_3|
                            # 'level_3' will look something like this:
                            #{"queue_number"=>6, "queue_id"=>102, "peak_buffer_occupancy_bytes"=>0, "peak_buffer_occupancy_percent"=>0, "packets"=>0, "octets"=>0, "tail_drop_packets"=>0, "tail_drop_octets"=>0, "red_drop_packets_color_0"=>0, "red_drop_octets_color_0"=>0, "red_drop_packets_color_1"=>0, "red_drop_octets_color_1"=>0, "red_drop_packets_color_2"=>0, "red_drop_octets_color_2"=>0, "red_drop_packets_color_3"=>0, "red_drop_octets_color_3"=>0}
                            ## For debug only ...
                            $log.debug "Value of 'level_3': '#{level_3}'"

                            level_3.each do |level_3_key, level_3_value|
                              # Debug only
                              #$log.debug "Value of 'level_3_key': '#{level_3_key}'"
                              #$log.debug "Value of 'level_3_value': '#{level_3_value}'"

                              # Create local copy of 'sensor_data' variable.
                              local_sensor_data = sensor_data.dup

                              # For queue stats, we need to correlate the stats with the queue number, so we process this separately.
                              # The proto file states that we can have egress or ingress queues.
                              if level_1_key == "queue_monitor_stats_egress"
                                local_sensor_data.push({ 'egress_queue' => level_3['queue_number'] })
                              elsif level_1_key == "queue_monitor_stats_ingress"
                                local_sensor_data.push({ 'ingress_queue' => level_3['queue_number'] })
                              end

                              local_sensor_data = process_value(local_sensor_data, level_3_key, level_3_value, level_2_key)

                              record = build_record(output_format, local_sensor_data)
                              ## For debug only ...
                              #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                              yield gpb_time, record
                            end
                          end
                        else
                          # Do nothing, as per reasons cited above.
                        end
                      end
                    end

                  # If the node currently being processed is a "leaf node" (ie. it has NO child nodes)
                  else
                    ## For debug only ...
                    #$log.debug "Value of 'level_1_key': '#{level_1_key}'"
                    #$log.debug "Value of 'level_1_value': '#{level_1_value}'"

                    if level_1_key == "if_name"
                      sensor_data.push({ 'interface' => level_1_value })
                    elsif level_1_key == "init_time"
                      # do nothing.
                    else
                      # By default, InfluxDB assigns the type of a field based on the type of the first value inserted.
                      # So, in the "value" field, if an Integer is inserted, then the "value" field will only accept Integer
                      # values hereon after ... so, a String value insertion will result in an error.
                      # To alleviate this, we will have "value" as the default field for Integers, so as not to break existing code.
                      # We will add additional "value_string", "value_float", fields to support different value types. This way,
                      # we can persist all the various telemetry sensor parameters in InfluxDB, not just the Integer values.

                      # Create local copy of 'sensor_data' variable.
                      local_sensor_data = sensor_data.dup
                      local_sensor_data = process_value(local_sensor_data, level_1_key, level_1_value, '')

                      record = build_record(output_format, local_sensor_data)
                      ## For debug only ...
                      #$log.debug "Value of 'local_sensor_data': '#{local_sensor_data}'"
                      #$log.debug "Value of 'record': '#{record}'"
                      yield gpb_time, record
                    end
                  end
                end

              rescue => e
                $log.warn "Unable to parse '" + sensor + "' sensor, Error during processing: #{$!}"
                $log.debug "Unable to parse '" + sensor + "' sensor, Data Dump: " + datas.inspect.to_s
              end

            end



          end
        end
      end



      def process_value(local_sensor_data, key, value, parent_key)

        if value.is_a?(Integer)
          if parent_key == ''
            local_sensor_data.push({ 'type' => key })
          elsif
            local_sensor_data.push({ 'type' => parent_key + '.' + key })
          end
          local_sensor_data.push({ 'value' => value })
          local_sensor_data.push({ 'value_string' => '' })
          local_sensor_data.push({ 'value_float' => -0.0 })

        elsif value.is_a?(String)
          if parent_key == ''
            local_sensor_data.push({ 'type' => key })
          elsif
            local_sensor_data.push({ 'type' => parent_key + '.' + key })
          end
          local_sensor_data.push({ 'value' => -1 })
          local_sensor_data.push({ 'value_string' => value })
          local_sensor_data.push({ 'value_float' => -0.0 })

        elsif value.is_a?(Float)
          if parent_key == ''
            local_sensor_data.push({ 'type' => key })
          elsif
            local_sensor_data.push({ 'type' => parent_key + '.' + key })
          end
          local_sensor_data.push({ 'value' => -1 })
          local_sensor_data.push({ 'value_string' => '' })
          local_sensor_data.push({ 'value_float' => value })
        end

        return local_sensor_data
      end


    end
  end
end
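The process_value helper at the end of the file is what keeps the InfluxDB field types stable: every leaf is emitted with a 'type' plus all three of 'value', 'value_string' and 'value_float', and the two fields that do not match the leaf's Ruby type carry sentinel values (-1, '' and -0.0). (The bare elsif in the original appears to rely on Ruby evaluating the following push expression as the elsif condition, so the prefixed "parent_key.key" type still gets pushed when parent_key is non-empty.) A standalone restatement of that dispatch with illustrative inputs:

# Re-statement of the value/value_string/value_float convention used above;
# the inputs below are examples, not data from a device.
def sketch_process_value(key, value, parent_key = '')
  type = parent_key.empty? ? key : "#{parent_key}.#{key}"
  case value
  when Integer then [{ 'type' => type }, { 'value' => value }, { 'value_string' => '' },    { 'value_float' => -0.0 }]
  when Float   then [{ 'type' => type }, { 'value' => -1 },    { 'value_string' => '' },    { 'value_float' => value }]
  when String  then [{ 'type' => type }, { 'value' => -1 },    { 'value_string' => value }, { 'value_float' => -0.0 }]
  end
end

p sketch_process_value('if_octets', 10_347_612, 'ingress_stats')
# type "ingress_stats.if_octets", value 10347612, empty string and -0.0 sentinels
p sketch_process_value('if_operational_status', 'UP')
# type "if_operational_status", value -1, value_string "UP", value_float -0.0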