soka 0.0.1.beta2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. checksums.yaml +7 -0
  2. data/.rspec +3 -0
  3. data/.rubocop.yml +365 -0
  4. data/CHANGELOG.md +31 -0
  5. data/CLAUDE.md +213 -0
  6. data/LICENSE +21 -0
  7. data/README.md +650 -0
  8. data/Rakefile +10 -0
  9. data/examples/1_basic.rb +94 -0
  10. data/examples/2_event_handling.rb +120 -0
  11. data/examples/3_memory.rb +182 -0
  12. data/examples/4_hooks.rb +140 -0
  13. data/examples/5_error_handling.rb +85 -0
  14. data/examples/6_retry.rb +164 -0
  15. data/examples/7_tool_conditional.rb +180 -0
  16. data/examples/8_multi_provider.rb +112 -0
  17. data/lib/soka/agent.rb +130 -0
  18. data/lib/soka/agent_tool.rb +146 -0
  19. data/lib/soka/agent_tools/params_validator.rb +139 -0
  20. data/lib/soka/agents/dsl_methods.rb +140 -0
  21. data/lib/soka/agents/hook_manager.rb +68 -0
  22. data/lib/soka/agents/llm_builder.rb +32 -0
  23. data/lib/soka/agents/retry_handler.rb +74 -0
  24. data/lib/soka/agents/tool_builder.rb +78 -0
  25. data/lib/soka/configuration.rb +60 -0
  26. data/lib/soka/engines/base.rb +67 -0
  27. data/lib/soka/engines/concerns/prompt_template.rb +130 -0
  28. data/lib/soka/engines/concerns/response_processor.rb +103 -0
  29. data/lib/soka/engines/react.rb +136 -0
  30. data/lib/soka/engines/reasoning_context.rb +92 -0
  31. data/lib/soka/llm.rb +85 -0
  32. data/lib/soka/llms/anthropic.rb +124 -0
  33. data/lib/soka/llms/base.rb +114 -0
  34. data/lib/soka/llms/concerns/response_parser.rb +47 -0
  35. data/lib/soka/llms/concerns/streaming_handler.rb +78 -0
  36. data/lib/soka/llms/gemini.rb +106 -0
  37. data/lib/soka/llms/openai.rb +97 -0
  38. data/lib/soka/memory.rb +83 -0
  39. data/lib/soka/result.rb +136 -0
  40. data/lib/soka/test_helpers.rb +162 -0
  41. data/lib/soka/thoughts_memory.rb +112 -0
  42. data/lib/soka/version.rb +5 -0
  43. data/lib/soka.rb +49 -0
  44. data/sig/soka.rbs +4 -0
  45. metadata +158 -0
data/lib/soka/llms/anthropic.rb
@@ -0,0 +1,124 @@
+# frozen_string_literal: true
+
+module Soka
+  module LLMs
+    # Anthropic (Claude) LLM provider implementation
+    class Anthropic < Base
+      include Concerns::ResponseParser
+
+      ENV_KEY = 'ANTHROPIC_API_KEY'
+
+      private
+
+      def default_model
+        'claude-4-sonnet'
+      end
+
+      def base_url
+        'https://api.anthropic.com'
+      end
+
+      def default_options
+        {
+          temperature: 0.7,
+          top_p: 1.0,
+          top_k: 1,
+          max_tokens: 2048,
+          anthropic_version: '2023-06-01'
+        }
+      end
+
+      public
+
+      def chat(messages, **params)
+        request_params = build_request_params(messages, params)
+
+        response = connection.post do |req|
+          req.url '/v1/messages'
+          req.headers['x-api-key'] = api_key
+          req.headers['anthropic-version'] = options[:anthropic_version]
+          req.body = request_params
+        end
+
+        parse_response(response)
+      rescue Faraday::Error => e
+        handle_error(e)
+      end
+
+      def supports_streaming?
+        true
+      end
+
+      def streaming_chat(messages, **params, &)
+        request_params = build_streaming_params(messages, params)
+        execute_streaming_request(request_params, &)
+      rescue Faraday::Error => e
+        handle_error(e)
+      end
+
+      def build_streaming_params(messages, params)
+        request_params = build_request_params(messages, params)
+        request_params[:stream] = true
+        request_params
+      end
+
+      def execute_streaming_request(request_params, &)
+        connection.post('/v1/messages') do |req|
+          req.headers['x-api-key'] = api_key
+          req.headers['anthropic-version'] = options[:anthropic_version]
+          req.body = request_params
+          req.options.on_data = proc do |chunk, _overall_received_bytes|
+            process_stream_chunk(chunk, &)
+          end
+        end
+      end
+
+      private
+
+      def build_request_params(messages, params)
+        formatted_messages, system_prompt = extract_system_prompt(messages)
+        request = build_base_request(formatted_messages, params)
+        request[:system] = system_prompt if system_prompt
+        request
+      end
+
+      def build_base_request(formatted_messages, params)
+        {
+          model: model,
+          messages: formatted_messages,
+          temperature: params[:temperature] || options[:temperature],
+          top_p: params[:top_p] || options[:top_p],
+          top_k: params[:top_k] || options[:top_k],
+          max_tokens: params[:max_tokens] || options[:max_tokens]
+        }
+      end
+
+      def extract_system_prompt(messages)
+        system_message = messages.find { |m| m[:role] == 'system' }
+        other_messages = messages.reject { |m| m[:role] == 'system' }
+
+        formatted_messages = other_messages.map do |message|
+          {
+            role: map_role(message[:role]),
+            content: message[:content]
+          }
+        end
+
+        [formatted_messages, system_message&.dig(:content)]
+      end
+
+      def map_role(role)
+        case role.to_s
+        when 'user'
+          'user'
+        when 'assistant'
+          'assistant'
+        else
+          role.to_s
+        end
+      end
+
+      # Response parsing methods are in ResponseParser module
+    end
+  end
+end
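
For orientation, a minimal, hypothetical usage sketch for this provider (not part of the package): it assumes ANTHROPIC_API_KEY is exported and that, per the ResponseParser module below, chat returns the response text as a String.

require 'soka'

# Hypothetical usage; assumes ANTHROPIC_API_KEY is set in the environment.
llm = Soka::LLMs::Anthropic.new # falls back to default_model ('claude-4-sonnet')

messages = [
  { role: 'system', content: 'You are terse.' },            # lifted into the :system field
  { role: 'user',   content: 'Name one Ruby HTTP client.' } # sent as a user turn
]

puts llm.chat(messages, max_tokens: 100)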
data/lib/soka/llms/base.rb
@@ -0,0 +1,114 @@
+# frozen_string_literal: true
+
+module Soka
+  module LLMs
+    # Result structure for LLM responses
+    Result = Struct.new(:model, :content, :input_tokens, :output_tokens, :finish_reason, :raw_response,
+                        keyword_init: true) do
+      def successful?
+        !content.nil? && !content.empty?
+      end
+    end
+
+    # Base class for LLM providers
+    class Base
+      attr_reader :model, :api_key, :options
+
+      def initialize(model: nil, api_key: nil, **options)
+        @model = model || default_model
+        @api_key = api_key || api_key_from_env
+        @options = default_options.merge(options)
+        validate_configuration!
+      end
+
+      def chat(messages, **params)
+        raise NotImplementedError, "#{self.class} must implement #chat method"
+      end
+
+      def streaming_chat(messages, **params, &)
+        raise NotImplementedError, "#{self.class} does not support streaming"
+      end
+
+      def supports_streaming?
+        false
+      end
+
+      private
+
+      def default_model
+        raise NotImplementedError, "#{self.class} must implement #default_model method"
+      end
+
+      def default_options
+        {}
+      end
+
+      def validate_configuration!
+        raise LLMError, 'API key is required' if api_key.nil? || api_key.empty?
+        raise LLMError, 'Model is required' if model.nil? || model.empty?
+      end
+
+      def api_key_from_env
+        ENV.fetch(self.class::ENV_KEY, nil)
+      end
+
+      def connection
+        @connection ||= Faraday.new(url: base_url) do |faraday|
+          faraday.request :json
+          faraday.response :json
+          faraday.adapter Faraday.default_adapter
+          faraday.options.timeout = options[:timeout] || 30
+          faraday.options.open_timeout = options[:open_timeout] || 10
+        end
+      end
+
+      def base_url
+        raise NotImplementedError, "#{self.class} must implement #base_url method"
+      end
+
+      def handle_error(error)
+        case error
+        when Faraday::TimeoutError
+          raise LLMError, 'Request timed out'
+        when Faraday::ConnectionFailed
+          raise LLMError, "Connection failed: #{error.message}"
+        when Faraday::ClientError
+          handle_client_error(error)
+        else
+          raise LLMError, "Unexpected error: #{error.message}"
+        end
+      end
+
+      def handle_client_error(error)
+        status = error.response[:status]
+        body = error.response[:body]
+        raise_error_for_status(status, body)
+      end
+
+      def raise_error_for_status(status, body)
+        error_message = build_error_message(status, body)
+        raise LLMError, error_message
+      end
+
+      def build_error_message(status, body)
+        case status
+        when 401 then 'Unauthorized: Invalid API key'
+        when 429 then 'Rate limit exceeded'
+        when 400..499 then extract_error_message(body) || "Client error: #{status}"
+        when 500..599 then "Server error: #{status}"
+        else "HTTP error: #{status}"
+        end
+      end
+
+      def extract_error_message(body)
+        return body if body.is_a?(String)
+
+        # Try common error message paths
+        body.dig('error', 'message') ||
+          body.dig('error', 'text') ||
+          body['message'] ||
+          body.to_s
+      end
+    end
+  end
+end
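
To illustrate the contract Base imposes, a sketch of a hypothetical provider (the class name, endpoint, and env var are invented here): subclasses supply ENV_KEY, default_model, base_url, and a chat implementation, and inherit connection handling, validation, and error mapping.

module Soka
  module LLMs
    # Hypothetical provider; illustrates the Base contract only.
    class Example < Base
      ENV_KEY = 'EXAMPLE_API_KEY' # read by Base#api_key_from_env

      def chat(messages, **params)
        response = connection.post('/v1/chat') do |req|
          req.headers['Authorization'] = "Bearer #{api_key}"
          req.body = { model: model, messages: messages }.merge(params)
        end
        Result.new(model: model, content: response.body['text'], raw_response: response.body)
      rescue Faraday::Error => e
        handle_error(e) # maps Faraday errors to LLMError
      end

      private

      def default_model = 'example-1'
      def base_url = 'https://api.example.com'
    end
  end
end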
data/lib/soka/llms/concerns/response_parser.rb
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+module Soka
+  module LLMs
+    module Concerns
+      # Module for parsing Anthropic API responses
+      module ResponseParser
+        private
+
+        # Parse API response
+        # @param response [Faraday::Response] The HTTP response
+        # @return [String] The parsed content
+        # @raise [LLMError] If response indicates an error
+        def parse_response(response)
+          handle_response_error(response) unless response.success?
+
+          # The :json response middleware may already have parsed the body into a Hash
+          data = response.body.is_a?(String) ? JSON.parse(response.body) : response.body
+          extract_content(data)
+        end
+
+        # Extract content from response data
+        # @param data [Hash] The parsed response data
+        # @return [String] The extracted content
+        # @raise [LLMError] If content is missing
+        def extract_content(data)
+          content = data.dig('content', 0, 'text')
+          raise LLMError, 'No content in response' unless content
+
+          content
+        end
+
+        # Handle API errors (named so it does not shadow Base#handle_error,
+        # which expects a Faraday::Error rather than a response)
+        # @param response [Faraday::Response] The HTTP response
+        # @raise [LLMError] Always raises with error details
+        def handle_response_error(response)
+          error_data = begin
+            response.body.is_a?(String) ? JSON.parse(response.body) : response.body
+          rescue StandardError
+            {}
+          end
+          error_message = error_data.dig('error', 'message') || "HTTP #{response.status}"
+          raise LLMError, "Anthropic API error: #{error_message}"
+        end
+      end
+    end
+  end
+end
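
For reference, a sketch of the Messages API body shape this parser expects (illustrative values only):

body = {
  'content' => [{ 'type' => 'text', 'text' => 'Hello!' }],
  'usage'   => { 'input_tokens' => 12, 'output_tokens' => 3 }
}

body.dig('content', 0, 'text') # => "Hello!" (what extract_content returns)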
data/lib/soka/llms/concerns/streaming_handler.rb
@@ -0,0 +1,78 @@
+# frozen_string_literal: true
+
+module Soka
+  module LLMs
+    module Concerns
+      # Module for handling streaming responses from OpenAI
+      module StreamingHandler
+        # Stream chat completion (public and named streaming_chat so it
+        # satisfies the interface declared in Base)
+        # @param messages [Array<Hash>] The messages to send
+        # @param params [Hash] Additional parameters
+        # @yield [String] Yields each chunk of the response
+        # @return [String] The complete response
+        def streaming_chat(messages, **params, &)
+          return regular_chat(messages, **params) unless block_given?
+
+          complete_response = +''
+          # Merge the stream flag in explicitly; build_request_params only copies known keys
+          request_params = build_request_params(messages, params).merge(stream: true)
+
+          response = connection.post('/v1/chat/completions') do |req|
+            req.headers['Authorization'] = "Bearer #{api_key}"
+            req.body = request_params.to_json
+          end
+
+          handle_streaming_response(response, complete_response, &)
+        end
+
+        private
+
+        # Handle streaming response
+        # @param response [Faraday::Response] The HTTP response
+        # @param complete_response [String] Buffer for complete response
+        # @yield [String] Yields each chunk
+        # @return [String] The complete response
+        def handle_streaming_response(response, complete_response)
+          response.body.each_line do |line|
+            chunk = process_streaming_line(line)
+            next unless chunk
+
+            complete_response << chunk
+            yield chunk
+          end
+          complete_response
+        end
+
+        # Process a single streaming line
+        # @param line [String] The line to process
+        # @return [String, nil] The parsed chunk or nil
+        def process_streaming_line(line)
+          return nil if line.strip.empty? || !line.start_with?('data: ')
+
+          data = line[6..].strip
+          return nil if data == '[DONE]'
+
+          parse_streaming_chunk(data)
+        end
+
+        # Parse a streaming chunk
+        # @param data [String] The chunk data
+        # @return [String, nil] The parsed content
+        def parse_streaming_chunk(data)
+          parsed = JSON.parse(data)
+          parsed.dig('choices', 0, 'delta', 'content')
+        rescue JSON::ParserError
+          nil
+        end
+
+        # Perform regular (non-streaming) chat
+        # @param messages [Array<Hash>] The messages
+        # @param params [Hash] Additional parameters
+        # @return [Result] The parsed response
+        def regular_chat(messages, **params)
+          request_params = build_request_params(messages, params)
+          response = connection.post('/v1/chat/completions', request_params.to_json,
+                                     'Authorization' => "Bearer #{api_key}")
+          parse_response(response)
+        end
+      end
+    end
+  end
+end
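
A hypothetical streaming call against this module, given the streaming_chat naming above (assumes OPENAI_API_KEY is set; the block receives each parsed SSE delta, and the full text is returned at the end):

llm = Soka::LLMs::OpenAI.new

full = llm.streaming_chat([{ role: 'user', content: 'Count to 3.' }]) do |chunk|
  print chunk # each delta content as it is parsed
end

puts "\n-- complete: #{full}"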
data/lib/soka/llms/gemini.rb
@@ -0,0 +1,106 @@
+# frozen_string_literal: true
+
+module Soka
+  module LLMs
+    # Google Gemini LLM provider implementation
+    class Gemini < Base
+      ENV_KEY = 'GEMINI_API_KEY'
+
+      private
+
+      def default_model
+        'gemini-2.5-flash-lite'
+      end
+
+      def base_url
+        'https://generativelanguage.googleapis.com'
+      end
+
+      def default_options
+        {
+          temperature: 0.7,
+          top_p: 1.0,
+          top_k: 1,
+          max_output_tokens: 2048
+        }
+      end
+
+      public
+
+      def chat(messages, **params)
+        request_params = build_request_params(messages, params)
+
+        response = connection.post do |req|
+          req.url "/v1beta/models/#{model}:generateContent"
+          req.params['key'] = api_key
+          req.body = request_params
+        end
+
+        parse_response(response)
+      rescue Faraday::Error => e
+        handle_error(e)
+      end
+
+      private
+
+      def build_request_params(messages, params)
+        {
+          contents: format_messages(messages),
+          generationConfig: {
+            temperature: params[:temperature] || options[:temperature],
+            topP: params[:top_p] || options[:top_p],
+            topK: params[:top_k] || options[:top_k],
+            maxOutputTokens: params[:max_output_tokens] || options[:max_output_tokens]
+          }
+        }
+      end
+
+      def format_messages(messages)
+        messages.map do |message|
+          {
+            role: map_role(message[:role]),
+            parts: [{ text: message[:content] }]
+          }
+        end
+      end
+
+      def map_role(role)
+        case role.to_s
+        when 'system', 'assistant'
+          'model'
+        when 'user'
+          'user'
+        else
+          role.to_s
+        end
+      end
+
+      def parse_response(response)
+        body = response.body
+        validate_response_status(response.status, body)
+        build_result_from_response(body)
+      end
+
+      def validate_response_status(status, body)
+        return if status == 200
+
+        error_message = body.dig('error', 'message') || 'Unknown error'
+        raise LLMError, "Gemini API error: #{error_message}"
+      end
+
+      def build_result_from_response(body)
+        candidate = body.dig('candidates', 0)
+        # Safe navigation guards against responses with no candidates (e.g. safety blocks)
+        content = candidate&.dig('content', 'parts', 0, 'text')
+
+        Result.new(
+          model: model,
+          content: content,
+          input_tokens: body.dig('usageMetadata', 'promptTokenCount'),
+          output_tokens: body.dig('usageMetadata', 'candidatesTokenCount'),
+          finish_reason: candidate&.dig('finishReason'),
+          raw_response: body
+        )
+      end
+    end
+  end
+end
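
A hypothetical usage sketch (assumes GEMINI_API_KEY is set); note that chat returns a Result struct rather than a bare string:

llm = Soka::LLMs::Gemini.new # defaults to 'gemini-2.5-flash-lite'

result = llm.chat([{ role: 'user', content: 'One fact about Ruby.' }])
puts result.content       # candidates[0].content.parts[0].text
puts result.output_tokens # usageMetadata.candidatesTokenCount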
data/lib/soka/llms/openai.rb
@@ -0,0 +1,97 @@
+# frozen_string_literal: true
+
+module Soka
+  module LLMs
+    # OpenAI (GPT) LLM provider implementation
+    class OpenAI < Base
+      include Concerns::StreamingHandler
+
+      ENV_KEY = 'OPENAI_API_KEY'
+
+      private
+
+      def default_model
+        'gpt-4.1-mini'
+      end
+
+      def base_url
+        'https://api.openai.com'
+      end
+
+      def default_options
+        {
+          temperature: 0.7,
+          top_p: 1.0,
+          frequency_penalty: 0,
+          presence_penalty: 0,
+          max_tokens: 2048
+        }
+      end
+
+      public
+
+      def chat(messages, **params)
+        request_params = build_request_params(messages, params)
+
+        response = connection.post do |req|
+          req.url '/v1/chat/completions'
+          req.headers['Authorization'] = "Bearer #{api_key}"
+          req.body = request_params
+        end
+
+        parse_response(response)
+      rescue Faraday::Error => e
+        handle_error(e)
+      end
+
+      def supports_streaming?
+        true
+      end
+
+      # Streaming methods are in StreamingHandler module
+
+      private
+
+      def build_request_params(messages, params)
+        {
+          model: model,
+          messages: messages,
+          temperature: params[:temperature] || options[:temperature],
+          top_p: params[:top_p] || options[:top_p],
+          frequency_penalty: params[:frequency_penalty] || options[:frequency_penalty],
+          presence_penalty: params[:presence_penalty] || options[:presence_penalty],
+          max_tokens: params[:max_tokens] || options[:max_tokens]
+        }
+      end
+
+      def parse_response(response)
+        body = response.body
+        validate_response_status(response.status, body)
+        build_result_from_response(body)
+      end
+
+      def validate_response_status(status, body)
+        return if status == 200
+
+        error_message = body.dig('error', 'message') || 'Unknown error'
+        raise LLMError, "OpenAI API error: #{error_message}"
+      end
+
+      def build_result_from_response(body)
+        choice = body.dig('choices', 0)
+        message = choice['message']
+
+        Result.new(
+          model: body['model'],
+          content: message['content'],
+          input_tokens: body.dig('usage', 'prompt_tokens'),
+          output_tokens: body.dig('usage', 'completion_tokens'),
+          finish_reason: choice['finish_reason'],
+          raw_response: body
+        )
+      end
+
+      # Stream processing methods are in StreamingHandler module
+    end
+  end
+end
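
A hypothetical usage sketch (assumes OPENAI_API_KEY is set); per-call params override default_options key by key:

llm = Soka::LLMs::OpenAI.new # defaults to 'gpt-4.1-mini'

result = llm.chat(
  [{ role: 'user', content: 'Say hi.' }],
  temperature: 0.2 # overrides the 0.7 default for this call only
)
puts result.content
puts result.finish_reason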
data/lib/soka/memory.rb
@@ -0,0 +1,83 @@
+# frozen_string_literal: true
+
+module Soka
+  # Manages conversation history for agents
+  class Memory
+    attr_reader :messages
+
+    def initialize(initial_messages = [])
+      @messages = []
+
+      # Add initial messages if provided
+      return unless initial_messages.is_a?(Array)
+
+      initial_messages.each { |msg| add(**msg) }
+    end
+
+    def add(role:, content:)
+      validate_role!(role)
+      validate_content!(content)
+
+      @messages << {
+        role: role.to_s,
+        content: content,
+        timestamp: Time.now
+      }
+    end
+
+    def to_messages
+      @messages.map { |msg| { role: msg[:role], content: msg[:content] } }
+    end
+
+    def clear
+      @messages.clear
+    end
+
+    def size
+      @messages.size
+    end
+
+    def empty?
+      @messages.empty?
+    end
+
+    def last
+      @messages.last
+    end
+
+    def to_s
+      return '<Soka::Memory> []' if empty?
+
+      formatted_messages = @messages.map do |msg|
+        "  { role: '#{msg[:role]}', content: '#{truncate(msg[:content])}' }"
+      end.join(",\n")
+
+      "<Soka::Memory> [\n#{formatted_messages}\n]"
+    end
+
+    def inspect
+      to_s
+    end
+
+    private
+
+    def validate_role!(role)
+      valid_roles = %w[system user assistant]
+      return if valid_roles.include?(role.to_s)
+
+      raise MemoryError, "Invalid role: #{role}. Must be one of: #{valid_roles.join(', ')}"
+    end
+
+    def validate_content!(content)
+      return unless content.nil? || content.to_s.strip.empty?
+
+      raise MemoryError, 'Content cannot be nil or empty'
+    end
+
+    def truncate(text, length = 50)
+      return text if text.length <= length
+
+      # text[0, length] takes exactly `length` characters (text[0..length] took one extra)
+      "#{text[0, length]}..."
+    end
+  end
+end
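
A short usage sketch of Memory (MemoryError is assumed to be defined elsewhere in the gem):

memory = Soka::Memory.new([{ role: 'system', content: 'Be brief.' }])
memory.add(role: :user, content: 'Hello') # roles may be symbols; stored as strings

memory.to_messages
# => [{ role: 'system', content: 'Be brief.' }, { role: 'user', content: 'Hello' }]

memory.add(role: :tool, content: 'x') # raises MemoryError: invalid role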