superinstance-equipment-consensus-engine 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +21 -0
- data/README.md +448 -0
- data/lib/equipment/consensus_engine/conflict_resolution.rb +507 -0
- data/lib/equipment/consensus_engine/consensus_engine.rb +451 -0
- data/lib/equipment/consensus_engine/tripartite_deliberation.rb +645 -0
- data/lib/equipment/consensus_engine/types.rb +229 -0
- data/lib/equipment/consensus_engine/version.rb +9 -0
- data/lib/equipment/consensus_engine/weight_calculator.rb +438 -0
- data/lib/equipment/consensus_engine.rb +17 -0
- metadata +65 -0
data/lib/equipment/consensus_engine/consensus_engine.rb
@@ -0,0 +1,451 @@
# frozen_string_literal: true

require_relative 'types'
require_relative 'weight_calculator'
require_relative 'tripartite_deliberation'
require_relative 'conflict_resolution'

module SuperInstance
  module Equipment
    module ConsensusEngine
      # ConsensusEngine - Main equipment class for multi-agent deliberation
      #
      # Coordinates the tripartite deliberation process across Pathos (intent/emotion),
      # Logos (logic/reason), and Ethos (truth/ethics) perspectives to build consensus.
      #
      # @example
      #   engine = SuperInstance::Equipment::ConsensusEngine::ConsensusEngine.new(
      #     max_rounds: 5,
      #     confidence_threshold: 0.7,
      #     domain: :balanced,
      #     enable_audit: true
      #   )
      #
      #   result = engine.deliberate(
      #     proposition: 'Should we implement feature X?',
      #     context: 'Given our current resources and timeline...'
      #   )
      class ConsensusEngine
        # Creates a new ConsensusEngine instance
        # @param config [Hash] Configuration options for the engine
        def initialize(config = {})
          @config = {
            max_rounds: config[:max_rounds] || 5,
            confidence_threshold: config[:confidence_threshold] || 0.7,
            include_dissent: config.fetch(:include_dissent, true),
            domain: config[:domain] || :balanced,
            custom_weights: config[:custom_weights],
            enable_audit: config.fetch(:enable_audit, true),
            timeout: config[:timeout] || 30000
          }

          @deliberation = TripartiteDeliberation.new
          @weight_calculator = WeightCalculator.new(@config[:custom_weights])
          @conflict_resolution = ConflictResolution.new
          @audit_trail = []
          @audit_id_counter = 0

          add_audit_entry(:deliberation_start, 'ConsensusEngine initialized', { config: @config })
        end

        # Conducts a deliberation on a proposition to reach consensus
        # @param input [Hash] The deliberation input containing proposition and context
        # @return [Hash] The consensus result
        def deliberate(input)
          start_time = Time.now.to_f * 1000
          domain = input[:domain_override] || @config[:domain]

          add_audit_entry(
            :deliberation_start,
            "Starting deliberation on: #{input[:proposition]}",
            { domain: domain, context: input[:context] }
          )

          rounds = []
          resolved_conflicts = []
          consensus_reached = false
          final_opinions = []

          begin
            # Conduct deliberation rounds
            round_num = 1
            while round_num <= @config[:max_rounds] && !consensus_reached
              round = conduct_round(round_num, input, domain, final_opinions)
              rounds << round

              # Check for conflicts
              conflicts = detect_conflicts(round[:opinions])
              if conflicts.any?
                add_audit_entry(
                  :conflict_detected,
                  "Detected #{conflicts.length} conflicts in round #{round_num}",
                  { conflicts: conflicts.map { |c| { type: c[:type], perspectives: c[:perspectives] } } }
                )

                conflicts.each do |conflict|
                  resolution = @conflict_resolution.resolve(conflict, round[:opinions])
                  resolved_conflicts << resolution

                  add_audit_entry(
                    :conflict_resolved,
                    "Resolved conflict: #{conflict[:type]}",
                    { strategy: resolution[:strategy], outcome: resolution[:outcome] }
                  )
                end
              end

              # Check for consensus
              consensus_reached = evaluate_consensus(round[:opinions])

              if consensus_reached
                add_audit_entry(
                  :consensus_reached,
                  "Consensus reached in round #{round_num}",
                  { consensus_score: round[:interim_score] }
                )
              end

              final_opinions = round[:opinions]
              round_num += 1
            end

            # Calculate final confidence and verdict
            verdict = synthesize_verdict(final_opinions, consensus_reached)
            confidence = calculate_overall_confidence(final_opinions, consensus_reached)
            dissenting_opinions = @config[:include_dissent] ?
              final_opinions.select { |op| op[:confidence] < @config[:confidence_threshold] } :
              nil

            duration_ms = (Time.now.to_f * 1000 - start_time).to_i

            add_audit_entry(:deliberation_complete, 'Deliberation completed', {
              consensus_reached: consensus_reached,
              verdict: verdict,
              confidence: confidence,
              duration_ms: duration_ms
            })

            {
              consensus: consensus_reached,
              verdict: verdict,
              confidence: confidence,
              perspectives: final_opinions,
              rounds: rounds,
              resolved_conflicts: resolved_conflicts,
              dissenting_opinions: dissenting_opinions,
              audit_trail: @audit_trail.dup,
              metadata: {
                duration_ms: duration_ms,
                rounds_completed: rounds.length,
                forced_consensus: !consensus_reached && rounds.length >= @config[:max_rounds],
                domain: domain,
                weight_profile: @weight_calculator.get_profile(domain),
                conflicts_resolved: resolved_conflicts.length
              }
            }
          rescue StandardError => e
            error_message = e.message
            add_audit_entry(:error, "Deliberation failed: #{error_message}", { error: error_message })
            raise
          end
        end

        # Clears the audit trail
        def clear_audit_trail
          @audit_trail.clear
          @audit_id_counter = 0
        end

        # Gets the current audit trail
        # @return [Array<Hash>] Audit trail entries
        def get_audit_trail
          @audit_trail.dup
        end

        # Updates the weight profile for a specific domain
        # @param domain [Symbol] The domain to update
        # @param weights [Hash] The weights to apply
        def update_domain_weights(domain, weights)
          @weight_calculator.set_profile(domain, weights)
        end

        # Gets the current configuration
        # @return [Hash] Configuration
        def get_config
          @config.dup
        end

        private

        # Conducts a single round of deliberation
        def conduct_round(round_number, input, domain, previous_opinions)
          weight_profile = @weight_calculator.get_profile(domain)
          opinions = []
          cross_examinations = []

          # Gather opinions from each perspective
          perspectives = PerspectiveType::ALL.to_a

          perspectives.each do |perspective_type|
            weight = get_perspective_weight(perspective_type, weight_profile)
            analysis = @deliberation.analyze(
              perspective_type,
              input[:proposition],
              input[:context],
              previous_opinions
            )

            opinions << {
              perspective: perspective_type,
              verdict: analysis[:verdict],
              confidence: analysis[:confidence],
              arguments: analysis[:arguments],
              concerns: analysis[:concerns],
              weight: weight,
              timestamp: Time.now
            }
          end

          # Conduct cross-examinations between perspectives
          perspectives.each_with_index do |challenger, i|
            perspectives.drop(i + 1).each do |responder|
              cross_exam = conduct_cross_examination(challenger, responder, opinions)
              cross_examinations << cross_exam
            end
          end

          # Apply cross-examination impacts to opinions
          apply_cross_examination_impacts(opinions, cross_examinations)

          interim_score = calculate_interim_score(opinions)
          consensus_reached = evaluate_consensus(opinions)

          {
            round_number: round_number,
            opinions: opinions,
            cross_examinations: cross_examinations,
            consensus_reached: consensus_reached,
            interim_score: interim_score,
            timestamp: Time.now
          }
        end

        # Conducts cross-examination between two perspectives
        def conduct_cross_examination(challenger, responder, opinions)
          challenger_opinion = opinions.find { |op| op[:perspective] == challenger }
          responder_opinion = opinions.find { |op| op[:perspective] == responder }

          if !challenger_opinion || !responder_opinion
            raise "Missing opinion for cross-examination: #{challenger} vs #{responder}"
          end

          challenge = generate_challenge(challenger, responder_opinion)
          response = generate_response(responder, challenge)
          satisfactory = evaluate_response(challenge, response, challenger)
          impact = satisfactory ? 0 : -0.1

          {
            challenger: challenger,
            responder: responder,
            challenge: challenge,
            response: response,
            satisfactory: satisfactory,
            impact: impact
          }
        end

        # Generates a challenge from one perspective to another
        def generate_challenge(challenger, target_opinion)
          challenge_templates = {
            pathos: [
              'Does this decision truly serve the emotional needs of those affected?',
              'Have we considered how this will make people feel?',
              'Is there sufficient passion and commitment behind this choice?'
            ],
            logos: [
              'What is the logical basis for this conclusion?',
              'Are there any logical fallacies in the reasoning?',
              'How does this conclusion follow from the premises?'
            ],
            ethos: [
              'Is this decision ethically justifiable?',
              'Does this align with our moral principles?',
              'What are the broader ethical implications?'
            ]
          }

          templates = challenge_templates[challenger] || []
          templates[rand(templates.length)] || ''
        end

        # Generates a response to a challenge
        def generate_response(responder, challenge)
          response_templates = {
            pathos: [
              'The emotional impact has been carefully considered through empathy analysis.',
              'We\'ve accounted for how stakeholders will emotionally respond to this.',
              'The passion driving this decision is grounded in genuine care for outcomes.'
            ],
            logos: [
              'The logical chain of reasoning has been validated through systematic analysis.',
              'Each premise has been examined for consistency and soundness.',
              'The conclusion follows deductively from well-established facts.'
            ],
            ethos: [
              'This decision was evaluated against our core ethical framework.',
              'Moral principles have been applied consistently throughout the analysis.',
              'The ethical implications align with widely accepted standards of conduct.'
            ]
          }

          templates = response_templates[responder] || []
          templates[rand(templates.length)] || ''
        end

        # Evaluates whether a response is satisfactory
        def evaluate_response(_challenge, response, _challenger)
          # Simplified evaluation - in practice this would be more sophisticated
          response.length > 20 && response.include?('considered')
        end

        # Applies cross-examination impacts to opinions
        def apply_cross_examination_impacts(opinions, cross_examinations)
          cross_examinations.each do |cross_exam|
            responder_opinion = opinions.find { |op| op[:perspective] == cross_exam[:responder] }
            if responder_opinion
              responder_opinion[:confidence] = [
                [responder_opinion[:confidence] + cross_exam[:impact], 0].max, 1
              ].min
            end
          end
        end

        # Gets the weight for a specific perspective from a weight profile
        def get_perspective_weight(perspective, profile)
          case perspective
          when :pathos
            profile[:pathos_weight]
          when :logos
            profile[:logos_weight]
          when :ethos
            profile[:ethos_weight]
          else
            1.0 / 3.0
          end
        end

        # Detects conflicts between perspective opinions
        def detect_conflicts(opinions)
          conflicts = []

          # Check for high-confidence disagreements
          high_confidence_opinions = opinions.select { |op| op[:confidence] >= @config[:confidence_threshold] }

          if high_confidence_opinions.length >= 2
            # Check if verdicts are contradictory
            verdicts = high_confidence_opinions.map { |op| op[:verdict].downcase }
            has_positive = verdicts.any? { |v| VerdictIndicators::POSITIVE.any? { |p| v.include?(p) } }
            has_negative = verdicts.any? { |v| VerdictIndicators::NEGATIVE.any? { |n| v.include?(n) } }

            if has_positive && has_negative
              conflicts << {
                type: :fundamental_disagreement,
                perspectives: high_confidence_opinions.map { |op| op[:perspective] },
                severity: :high,
                description: 'Perspectives have fundamentally opposing views on the proposition',
                context: {
                  conflicting_verdicts: high_confidence_opinions.map do |op|
                    { perspective: op[:perspective], verdict: op[:verdict] }
                  end
                }
              }
            end
          end

          # Check for low confidence across all perspectives
          if opinions.all? { |op| op[:confidence] < @config[:confidence_threshold] }
            conflicts << {
              type: :uncertainty,
              perspectives: opinions.map { |op| op[:perspective] },
              severity: :medium,
              description: 'All perspectives have low confidence in their assessments',
              context: {
                confidence_levels: opinions.map do |op|
                  { perspective: op[:perspective], confidence: op[:confidence] }
                end
              }
            }
          end

          conflicts
        end

        # Evaluates whether consensus has been reached
        def evaluate_consensus(opinions)
          # All opinions must meet confidence threshold
          all_confident = opinions.all? { |op| op[:confidence] >= @config[:confidence_threshold] }

          # Opinions should be aligned (simplified check)
          verdicts = opinions.map { |op| op[:verdict].downcase }
          is_aligned = are_verdicts_aligned(verdicts)

          all_confident && is_aligned
        end

        # Checks if verdicts are aligned (not contradictory)
        def are_verdicts_aligned(verdicts)
          has_positive = verdicts.any? { |v| VerdictIndicators::POSITIVE.any? { |p| v.include?(p) } }
          has_negative = verdicts.any? { |v| VerdictIndicators::NEGATIVE.any? { |n| v.include?(n) } }

          !(has_positive && has_negative)
        end

        # Calculates interim consensus score
        def calculate_interim_score(opinions)
          total_weight = opinions.sum { |op| op[:weight] }
          weighted_confidence = opinions.sum { |op| op[:confidence] * op[:weight] }
          total_weight > 0 ? weighted_confidence / total_weight : 0
        end

        # Synthesizes a final verdict from all perspectives
        def synthesize_verdict(opinions, consensus_reached)
          if consensus_reached
            # All perspectives agree - use the most confident opinion as template
            sorted_opinions = opinions.sort_by { |op| -op[:confidence] }
            return sorted_opinions[0]&.dig(:verdict) || 'Consensus reached'
          end

          # No consensus - synthesize a balanced view
          parts = opinions.map do |opinion|
            "[#{opinion[:perspective]}]: #{opinion[:verdict]} (#{(opinion[:confidence] * 100).round(0)}% confidence)"
          end

          "No single consensus reached. Perspectives differ:\n#{parts.join("\n")}"
        end

        # Calculates overall confidence in the result
        def calculate_overall_confidence(opinions, consensus_reached)
          total_weight = opinions.sum { |op| op[:weight] }
          weighted_confidence = opinions.sum { |op| op[:confidence] * op[:weight] }
          base_confidence = total_weight > 0 ? weighted_confidence / total_weight : 0

          # Boost confidence if consensus was reached
          consensus_reached ? [base_confidence * 1.1, 1.0].min : base_confidence * 0.9
        end

        # Adds an entry to the audit trail
        def add_audit_entry(action, description, data = nil)
          return unless @config[:enable_audit]

          @audit_trail << {
            id: "audit-#{@audit_id_counter += 1}",
            timestamp: Time.now,
            action: action,
            description: description,
            data: data
          }
        end
      end
    end
  end
end
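
For orientation, here is a minimal usage sketch assembled from the `@example` block in the class documentation and the result hash built at the end of `#deliberate`. The `require` path is an assumption inferred from the `data/lib/equipment/consensus_engine.rb` entry in the file list above; the printed fields simply read back keys that `#deliberate` is shown returning, so treat this as illustrative rather than canonical.

# Minimal usage sketch (assumes the gem's lib directory is on the load path;
# the require path mirrors data/lib/equipment/consensus_engine.rb above).
require 'equipment/consensus_engine'

engine = SuperInstance::Equipment::ConsensusEngine::ConsensusEngine.new(
  max_rounds: 5,
  confidence_threshold: 0.7,
  domain: :balanced,
  enable_audit: true
)

result = engine.deliberate(
  proposition: 'Should we implement feature X?',
  context: 'Given our current resources and timeline...'
)

# Keys mirror the hash assembled at the end of #deliberate.
puts result[:consensus]                    # true only if all perspectives clear the confidence threshold and align
puts result[:verdict]                      # synthesized verdict string
puts result[:confidence]                   # weighted confidence, boosted 10% on consensus, dampened 10% otherwise
puts result[:metadata][:rounds_completed]  # rounds actually run (at most max_rounds)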