ollama-client 0.2.5 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +13 -0
  3. data/README.md +138 -76
  4. data/docs/EXAMPLE_REORGANIZATION.md +412 -0
  5. data/docs/GETTING_STARTED.md +361 -0
  6. data/docs/INTEGRATION_TESTING.md +170 -0
  7. data/docs/NEXT_STEPS_SUMMARY.md +114 -0
  8. data/docs/PERSONAS.md +383 -0
  9. data/docs/QUICK_START.md +195 -0
  10. data/docs/TESTING.md +392 -170
  11. data/docs/TEST_CHECKLIST.md +450 -0
  12. data/examples/README.md +51 -66
  13. data/examples/basic_chat.rb +33 -0
  14. data/examples/basic_generate.rb +29 -0
  15. data/examples/tool_calling_parsing.rb +59 -0
  16. data/exe/ollama-client +128 -1
  17. data/lib/ollama/agent/planner.rb +7 -2
  18. data/lib/ollama/chat_session.rb +101 -0
  19. data/lib/ollama/client.rb +41 -35
  20. data/lib/ollama/config.rb +4 -1
  21. data/lib/ollama/document_loader.rb +1 -1
  22. data/lib/ollama/embeddings.rb +41 -26
  23. data/lib/ollama/errors.rb +1 -0
  24. data/lib/ollama/personas.rb +287 -0
  25. data/lib/ollama/version.rb +1 -1
  26. data/lib/ollama_client.rb +7 -0
  27. metadata +14 -48
  28. data/examples/advanced_complex_schemas.rb +0 -366
  29. data/examples/advanced_edge_cases.rb +0 -241
  30. data/examples/advanced_error_handling.rb +0 -200
  31. data/examples/advanced_multi_step_agent.rb +0 -341
  32. data/examples/advanced_performance_testing.rb +0 -186
  33. data/examples/chat_console.rb +0 -143
  34. data/examples/complete_workflow.rb +0 -245
  35. data/examples/dhan_console.rb +0 -843
  36. data/examples/dhanhq/README.md +0 -236
  37. data/examples/dhanhq/agents/base_agent.rb +0 -74
  38. data/examples/dhanhq/agents/data_agent.rb +0 -66
  39. data/examples/dhanhq/agents/orchestrator_agent.rb +0 -120
  40. data/examples/dhanhq/agents/technical_analysis_agent.rb +0 -252
  41. data/examples/dhanhq/agents/trading_agent.rb +0 -81
  42. data/examples/dhanhq/analysis/market_structure.rb +0 -138
  43. data/examples/dhanhq/analysis/pattern_recognizer.rb +0 -192
  44. data/examples/dhanhq/analysis/trend_analyzer.rb +0 -88
  45. data/examples/dhanhq/builders/market_context_builder.rb +0 -67
  46. data/examples/dhanhq/dhanhq_agent.rb +0 -829
  47. data/examples/dhanhq/indicators/technical_indicators.rb +0 -158
  48. data/examples/dhanhq/scanners/intraday_options_scanner.rb +0 -492
  49. data/examples/dhanhq/scanners/swing_scanner.rb +0 -247
  50. data/examples/dhanhq/schemas/agent_schemas.rb +0 -61
  51. data/examples/dhanhq/services/base_service.rb +0 -46
  52. data/examples/dhanhq/services/data_service.rb +0 -118
  53. data/examples/dhanhq/services/trading_service.rb +0 -59
  54. data/examples/dhanhq/technical_analysis_agentic_runner.rb +0 -411
  55. data/examples/dhanhq/technical_analysis_runner.rb +0 -420
  56. data/examples/dhanhq/test_tool_calling.rb +0 -538
  57. data/examples/dhanhq/test_tool_calling_verbose.rb +0 -251
  58. data/examples/dhanhq/utils/instrument_helper.rb +0 -32
  59. data/examples/dhanhq/utils/parameter_cleaner.rb +0 -28
  60. data/examples/dhanhq/utils/parameter_normalizer.rb +0 -45
  61. data/examples/dhanhq/utils/rate_limiter.rb +0 -23
  62. data/examples/dhanhq/utils/trading_parameter_normalizer.rb +0 -72
  63. data/examples/dhanhq_agent.rb +0 -964
  64. data/examples/dhanhq_tools.rb +0 -1663
  65. data/examples/multi_step_agent_with_external_data.rb +0 -368
  66. data/examples/structured_outputs_chat.rb +0 -72
  67. data/examples/structured_tools.rb +0 -89
  68. data/examples/test_dhanhq_tool_calling.rb +0 -375
  69. data/examples/test_tool_calling.rb +0 -160
  70. data/examples/tool_calling_direct.rb +0 -124
  71. data/examples/tool_calling_pattern.rb +0 -269
  72. data/exe/dhan_console +0 -4
@@ -1,186 +0,0 @@
1
- #!/usr/bin/env ruby
2
- # frozen_string_literal: true
3
-
4
- # Advanced Example: Performance Testing and Observability
5
- # Demonstrates: Latency measurement, throughput testing, error rate tracking, concurrent requests
6
-
7
- require "json"
8
- require "benchmark"
9
- require "time"
10
- require_relative "../lib/ollama_client"
11
-
12
- class PerformanceMonitor
13
- def initialize(client:)
14
- @client = client
15
- @metrics = {
16
- calls: [],
17
- errors: [],
18
- latencies: []
19
- }
20
- end
21
-
22
- def measure_call(prompt:, schema:)
23
- start_time = Time.now
24
-
25
- begin
26
- result = @client.generate(prompt: prompt, schema: schema)
27
- latency = (Time.now - start_time) * 1000 # Convert to milliseconds
28
-
29
- @metrics[:calls] << {
30
- success: true,
31
- latency_ms: latency,
32
- timestamp: Time.now.iso8601
33
- }
34
- @metrics[:latencies] << latency
35
-
36
- { success: true, result: result, latency_ms: latency }
37
- rescue Ollama::Error => e
38
- latency = (Time.now - start_time) * 1000
39
- @metrics[:calls] << {
40
- success: false,
41
- latency_ms: latency,
42
- error: e.class.name,
43
- timestamp: Time.now.iso8601
44
- }
45
- @metrics[:errors] << { error: e.class.name, message: e.message, latency_ms: latency }
46
-
47
- { success: false, error: e, latency_ms: latency }
48
- end
49
- end
50
-
51
- def run_throughput_test(prompt:, schema:, iterations: 10)
52
- puts "🚀 Running throughput test (#{iterations} iterations)..."
53
- results = []
54
-
55
- total_time = Benchmark.realtime do
56
- iterations.times do |i|
57
- print " #{i + 1}/#{iterations}... "
58
- result = measure_call(prompt: prompt, schema: schema)
59
- results << result
60
- puts result[:success] ? "✓" : "✗"
61
- end
62
- end
63
-
64
- {
65
- total_time: total_time,
66
- iterations: iterations,
67
- throughput: iterations / total_time,
68
- results: results
69
- }
70
- end
71
-
72
- def run_latency_test(prompt:, schema:, iterations: 10)
73
- puts "⏱️ Running latency test (#{iterations} iterations)..."
74
- latencies = []
75
-
76
- iterations.times do |i|
77
- print " #{i + 1}/#{iterations}... "
78
- result = measure_call(prompt: prompt, schema: schema)
79
- if result[:success]
80
- latencies << result[:latency_ms]
81
- puts "#{result[:latency_ms].round(2)}ms"
82
- else
83
- puts "ERROR"
84
- end
85
- end
86
-
87
- {
88
- latencies: latencies,
89
- min: latencies.min,
90
- max: latencies.max,
91
- avg: latencies.sum / latencies.length,
92
- median: latencies.sort[latencies.length / 2],
93
- p95: latencies.sort[(latencies.length * 0.95).to_i],
94
- p99: latencies.sort[(latencies.length * 0.99).to_i]
95
- }
96
- end
97
-
98
- def display_metrics
99
- puts "\n" + "=" * 60
100
- puts "Performance Metrics"
101
- puts "=" * 60
102
-
103
- total_calls = @metrics[:calls].length
104
- successful = @metrics[:calls].count { |c| c[:success] }
105
- failed = total_calls - successful
106
-
107
- puts "Total calls: #{total_calls}"
108
- puts "Successful: #{successful} (#{(successful.to_f / total_calls * 100).round(2)}%)"
109
- puts "Failed: #{failed} (#{(failed.to_f / total_calls * 100).round(2)}%)"
110
-
111
- if @metrics[:latencies].any?
112
- latencies = @metrics[:latencies]
113
- puts "\nLatency Statistics (ms):"
114
- puts " Min: #{latencies.min.round(2)}"
115
- puts " Max: #{latencies.max.round(2)}"
116
- puts " Avg: #{(latencies.sum / latencies.length).round(2)}"
117
- puts " Median: #{latencies.sort[latencies.length / 2].round(2)}"
118
- puts " P95: #{latencies.sort[(latencies.length * 0.95).to_i].round(2)}"
119
- puts " P99: #{latencies.sort[(latencies.length * 0.99).to_i].round(2)}"
120
- end
121
-
122
- return unless @metrics[:errors].any?
123
-
124
- puts "\nErrors by type:"
125
- error_counts = @metrics[:errors].group_by { |e| e[:error] }
126
- error_counts.each do |error_type, errors|
127
- puts " #{error_type}: #{errors.length}"
128
- end
129
- end
130
-
131
- def export_metrics(filename: "metrics.json")
132
- File.write(filename, JSON.pretty_generate(@metrics))
133
- puts "\n📊 Metrics exported to #{filename}"
134
- end
135
- end
136
-
137
- # Run performance tests
138
- if __FILE__ == $PROGRAM_NAME
139
- # Use longer timeout for performance testing
140
- config = Ollama::Config.new
141
- config.timeout = 60 # 60 seconds for complex operations
142
- client = Ollama::Client.new(config: config)
143
- monitor = PerformanceMonitor.new(client: client)
144
-
145
- schema = {
146
- "type" => "object",
147
- "required" => ["response"],
148
- "properties" => {
149
- "response" => { "type" => "string" }
150
- }
151
- }
152
-
153
- puts "=" * 60
154
- puts "Performance Testing Suite"
155
- puts "=" * 60
156
-
157
- # Test 1: Latency
158
- latency_results = monitor.run_latency_test(
159
- prompt: "Respond with a simple acknowledgment",
160
- schema: schema,
161
- iterations: 5
162
- )
163
-
164
- puts "\nLatency Results:"
165
- puts " Average: #{latency_results[:avg].round(2)}ms"
166
- puts " P95: #{latency_results[:p95].round(2)}ms"
167
- puts " P99: #{latency_results[:p99].round(2)}ms"
168
-
169
- # Test 2: Throughput
170
- throughput_results = monitor.run_throughput_test(
171
- prompt: "Count to 5",
172
- schema: schema,
173
- iterations: 5
174
- )
175
-
176
- puts "\nThroughput Results:"
177
- puts " Total time: #{throughput_results[:total_time].round(2)}s"
178
- puts " Throughput: #{throughput_results[:throughput].round(2)} calls/sec"
179
-
180
- # Display all metrics
181
- monitor.display_metrics
182
-
183
- # Export metrics
184
- monitor.export_metrics
185
- end
186
-
@@ -1,143 +0,0 @@
1
- #!/usr/bin/env ruby
2
- # frozen_string_literal: true
3
-
4
- require_relative "../lib/ollama_client"
5
- require "tty-reader"
6
- require "tty-screen"
7
- require "tty-cursor"
8
-
9
- def build_config
10
- config = Ollama::Config.new
11
- config.base_url = ENV["OLLAMA_BASE_URL"] if ENV["OLLAMA_BASE_URL"]
12
- config.model = ENV["OLLAMA_MODEL"] if ENV["OLLAMA_MODEL"]
13
- config.temperature = ENV["OLLAMA_TEMPERATURE"].to_f if ENV["OLLAMA_TEMPERATURE"]
14
- config
15
- end
16
-
17
- def exit_command?(text)
18
- %w[/exit /quit exit quit].include?(text.downcase)
19
- end
20
-
21
- def add_system_message(messages)
22
- system_prompt = ENV.fetch("OLLAMA_SYSTEM", nil)
23
- return unless system_prompt && !system_prompt.strip.empty?
24
-
25
- messages << { role: "system", content: system_prompt }
26
- end
27
-
28
- def print_banner(config)
29
- puts "Ollama chat console"
30
- puts "Model: #{config.model}"
31
- puts "Base URL: #{config.base_url}"
32
- puts "Type /exit to quit."
33
- puts "Screen: #{TTY::Screen.width}x#{TTY::Screen.height}"
34
- puts
35
- end
36
-
37
- HISTORY_PATH = ".ollama_chat_history"
38
- MAX_HISTORY = 200
39
- COLOR_RESET = "\e[0m"
40
- COLOR_USER = "\e[32m"
41
- COLOR_LLM = "\e[36m"
42
- USER_PROMPT = "#{COLOR_USER}you>#{COLOR_RESET} ".freeze
43
- LLM_PROMPT = "#{COLOR_LLM}llm>#{COLOR_RESET} ".freeze
44
-
45
- def build_reader
46
- TTY::Reader.new
47
- end
48
-
49
- def read_input(reader)
50
- reader.read_line(USER_PROMPT)
51
- end
52
-
53
- def load_history(reader, path)
54
- history = load_history_list(path)
55
- history.reverse_each { |line| reader.add_to_history(line) }
56
- end
57
-
58
- def load_history_list(path)
59
- return [] unless File.exist?(path)
60
-
61
- unique_history(normalize_history(File.readlines(path, chomp: true)))
62
- end
63
-
64
- def normalize_history(lines)
65
- lines.map(&:strip).reject(&:empty?)
66
- end
67
-
68
- def unique_history(lines)
69
- seen = {}
70
- lines.each_with_object([]) do |line, unique|
71
- next if seen[line]
72
-
73
- unique << line
74
- seen[line] = true
75
- end
76
- end
77
-
78
- def update_history(path, text)
79
- history = load_history_list(path)
80
- history.delete(text)
81
- history.unshift(text)
82
- history = history.first(MAX_HISTORY)
83
-
84
- File.write(path, history.join("\n") + (history.empty? ? "" : "\n"))
85
- end
86
-
87
- def chat_response(client, messages, config)
88
- content = +""
89
- prompt_printed = false
90
-
91
- print "#{COLOR_LLM}...#{COLOR_RESET}"
92
- $stdout.flush
93
-
94
- client.chat_raw(
95
- messages: messages,
96
- allow_chat: true,
97
- options: { temperature: config.temperature },
98
- stream: true
99
- ) do |chunk|
100
- token = chunk.dig("message", "content").to_s
101
- next if token.empty?
102
-
103
- unless prompt_printed
104
- print "\r#{LLM_PROMPT}"
105
- prompt_printed = true
106
- end
107
-
108
- content << token
109
- print token
110
- $stdout.flush
111
- end
112
-
113
- puts
114
- content
115
- end
116
-
117
- def run_console(client, config)
118
- messages = []
119
- add_system_message(messages)
120
- print_banner(config)
121
- reader = build_reader
122
- load_history(reader, HISTORY_PATH)
123
-
124
- loop do
125
- input = read_input(reader)
126
- break unless input
127
-
128
- text = input.strip
129
- next if text.empty?
130
- break if exit_command?(text)
131
-
132
- update_history(HISTORY_PATH, text)
133
- messages << { role: "user", content: text }
134
- content = chat_response(client, messages, config)
135
- messages << { role: "assistant", content: content }
136
- end
137
- rescue Interrupt
138
- puts "\nExiting..."
139
- end
140
-
141
- config = build_config
142
- client = Ollama::Client.new(config: config)
143
- run_console(client, config)
@@ -1,245 +0,0 @@
1
- #!/usr/bin/env ruby
2
- # frozen_string_literal: true
3
-
4
- # Complete example showing how to use structured outputs in a real workflow
5
- # This demonstrates the full cycle: schema definition -> LLM call -> using the result
6
-
7
- require "json"
8
- require_relative "../lib/ollama_client"
9
-
10
- # Example: Task Planning Agent
11
- # The LLM decides what action to take, and we execute it
12
-
13
- class TaskPlanner
14
- def initialize(client:)
15
- @client = client
16
- @task_schema = {
17
- "type" => "object",
18
- "required" => ["action", "reasoning", "confidence", "next_step"],
19
- "properties" => {
20
- "action" => {
21
- "type" => "string",
22
- "description" => "The action to take",
23
- "enum" => ["search", "calculate", "store", "retrieve", "finish"]
24
- },
25
- "reasoning" => {
26
- "type" => "string",
27
- "description" => "Why this action was chosen"
28
- },
29
- "confidence" => {
30
- "type" => "number",
31
- "minimum" => 0,
32
- "maximum" => 1,
33
- "description" => "Confidence in this decision (0.0 to 1.0, where 1.0 is 100% confident)"
34
- },
35
- "next_step" => {
36
- "type" => "string",
37
- "description" => "What to do next"
38
- },
39
- "parameters" => {
40
- "type" => "object",
41
- "description" => "Parameters needed for the action"
42
- }
43
- }
44
- }
45
- end
46
-
47
- def plan(context:)
48
- puts "🤔 Planning next action..."
49
- puts "Context: #{context}\n\n"
50
-
51
- begin
52
- prompt = "Given this context: #{context}\n\n" \
53
- "Decide the next action to take.\n\n" \
54
- "IMPORTANT: Use decimal values for confidence " \
55
- "(e.g., 0.95 for 95% confident, 0.80 for 80% confident, 1.0 for 100% confident)."
56
-
57
- result = @client.generate(
58
- prompt: prompt,
59
- schema: @task_schema
60
- )
61
-
62
- # The result is guaranteed to match our schema
63
- display_decision(result)
64
- execute_action(result)
65
-
66
- result
67
- rescue Ollama::SchemaViolationError => e
68
- puts "❌ Invalid response structure: #{e.message}"
69
- puts " This shouldn't happen with format parameter, but we handle it gracefully"
70
- nil
71
- rescue Ollama::Error => e
72
- puts "❌ Error: #{e.message}"
73
- nil
74
- end
75
- end
76
-
77
- private
78
-
79
- def display_decision(result)
80
- puts "📋 Decision:"
81
- puts " Action: #{result['action']}"
82
- puts " Reasoning: #{result['reasoning']}"
83
- puts " Confidence: #{(result['confidence'] * 100).round}%"
84
- puts " Next Step: #{result['next_step']}"
85
- puts " Parameters: #{JSON.pretty_generate(result['parameters'] || {})}\n"
86
- end
87
-
88
- def execute_action(result)
89
- case result["action"]
90
- when "search"
91
- query = result.dig("parameters", "query") || "default"
92
- puts "🔍 Executing search: #{query}"
93
- # In real code, you'd call your search function here
94
- puts " → Search results would appear here\n"
95
-
96
- when "calculate"
97
- operation = result.dig("parameters", "operation") || "unknown"
98
- puts "🧮 Executing calculation: #{operation}"
99
- # In real code, you'd call your calculator here
100
- puts " → Calculation result would appear here\n"
101
-
102
- when "store"
103
- key = result.dig("parameters", "key") || "unknown"
104
- puts "💾 Storing data with key: #{key}"
105
- # In real code, you'd save to your storage
106
- puts " → Data stored successfully\n"
107
-
108
- when "retrieve"
109
- key = result.dig("parameters", "key") || "unknown"
110
- puts "📂 Retrieving data with key: #{key}"
111
- # In real code, you'd fetch from your storage
112
- puts " → Data retrieved successfully\n"
113
-
114
- when "finish"
115
- puts "✅ Task complete!\n"
116
-
117
- else
118
- puts "⚠️ Unknown action: #{result['action']}\n"
119
- end
120
- end
121
- end
122
-
123
- # Example: Data Analyzer
124
- # The LLM analyzes data and returns structured insights
125
-
126
- class DataAnalyzer
127
- def initialize(client:)
128
- @client = client
129
- @analysis_schema = {
130
- "type" => "object",
131
- "required" => ["summary", "confidence", "key_points"],
132
- "properties" => {
133
- "summary" => {
134
- "type" => "string",
135
- "description" => "Brief summary of the analysis"
136
- },
137
- "confidence" => {
138
- "type" => "number",
139
- "minimum" => 0,
140
- "maximum" => 1,
141
- "description" => "Confidence level (0.0 to 1.0, where 1.0 is 100% confident)"
142
- },
143
- "key_points" => {
144
- "type" => "array",
145
- "items" => { "type" => "string" },
146
- "minItems" => 1,
147
- "maxItems" => 5
148
- },
149
- "sentiment" => {
150
- "type" => "string",
151
- "enum" => ["positive", "neutral", "negative"]
152
- },
153
- "recommendations" => {
154
- "type" => "array",
155
- "items" => { "type" => "string" }
156
- }
157
- }
158
- }
159
- end
160
-
161
- def analyze(data:)
162
- puts "📊 Analyzing data..."
163
- puts "Data: #{data}\n\n"
164
-
165
- begin
166
- prompt = "Analyze this data and provide insights: #{data}\n\n" \
167
- "IMPORTANT: Express confidence as a decimal between 0.0 and 1.0 " \
168
- "(e.g., 0.85 for 85% confidence, not 85)."
169
-
170
- result = @client.generate(
171
- prompt: prompt,
172
- schema: @analysis_schema
173
- )
174
-
175
- display_analysis(result)
176
- make_recommendations(result)
177
-
178
- result
179
- rescue Ollama::Error => e
180
- puts "❌ Error: #{e.message}"
181
- nil
182
- end
183
- end
184
-
185
- private
186
-
187
- def display_analysis(result)
188
- puts "📈 Analysis Results:"
189
- puts " Summary: #{result['summary']}"
190
- puts " Confidence: #{(result['confidence'] * 100).round}%"
191
- puts " Sentiment: #{result['sentiment']}"
192
- puts "\n Key Points:"
193
- result["key_points"].each_with_index do |point, i|
194
- puts " #{i + 1}. #{point}"
195
- end
196
-
197
- if result["recommendations"] && !result["recommendations"].empty?
198
- puts "\n Recommendations:"
199
- result["recommendations"].each_with_index do |rec, i|
200
- puts " #{i + 1}. #{rec}"
201
- end
202
- end
203
- puts
204
- end
205
-
206
- def make_recommendations(result)
207
- if result["confidence"] > 0.8 && result["sentiment"] == "positive"
208
- puts "✅ High confidence positive analysis - safe to proceed"
209
- elsif result["confidence"] < 0.5
210
- puts "⚠️ Low confidence - manual review recommended"
211
- elsif result["sentiment"] == "negative"
212
- puts "⚠️ Negative sentiment detected - investigate further"
213
- end
214
- puts
215
- end
216
- end
217
-
218
- # Main execution
219
- if __FILE__ == $PROGRAM_NAME
220
- client = Ollama::Client.new
221
-
222
- puts "=" * 60
223
- puts "Example 1: Task Planning Agent"
224
- puts "=" * 60
225
- puts
226
-
227
- planner = TaskPlanner.new(client: client)
228
- planner.plan(context: "User wants to know the weather in Paris")
229
-
230
- puts "\n" + "=" * 60
231
- puts "Example 2: Data Analysis"
232
- puts "=" * 60
233
- puts
234
-
235
- analyzer = DataAnalyzer.new(client: client)
236
- analyzer.analyze(
237
- data: "Sales increased 25% this quarter. Customer satisfaction is at 4.8/5. " \
238
- "Revenue: $1.2M. New customers: 150."
239
- )
240
-
241
- puts "=" * 60
242
- puts "Examples complete!"
243
- puts "=" * 60
244
- end
245
-