raif 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. checksums.yaml +4 -4
  2. data/README.md +6 -5
  3. data/app/assets/builds/raif.css +4 -1
  4. data/app/assets/builds/raif_admin.css +13 -1
  5. data/app/assets/javascript/raif/controllers/conversations_controller.js +1 -1
  6. data/app/assets/stylesheets/raif/admin/conversation.scss +16 -0
  7. data/app/assets/stylesheets/raif/conversations.scss +3 -0
  8. data/app/assets/stylesheets/raif.scss +2 -1
  9. data/app/controllers/raif/admin/application_controller.rb +16 -0
  10. data/app/controllers/raif/admin/configs_controller.rb +94 -0
  11. data/app/controllers/raif/admin/model_completions_controller.rb +18 -1
  12. data/app/controllers/raif/admin/model_tool_invocations_controller.rb +7 -1
  13. data/app/controllers/raif/admin/stats/model_tool_invocations_controller.rb +21 -0
  14. data/app/controllers/raif/admin/stats/tasks_controller.rb +15 -6
  15. data/app/controllers/raif/admin/stats_controller.rb +32 -3
  16. data/app/controllers/raif/conversation_entries_controller.rb +1 -0
  17. data/app/controllers/raif/conversations_controller.rb +10 -2
  18. data/app/jobs/raif/conversation_entry_job.rb +8 -6
  19. data/app/models/raif/admin/task_stat.rb +7 -0
  20. data/app/models/raif/agent.rb +63 -2
  21. data/app/models/raif/agents/native_tool_calling_agent.rb +101 -56
  22. data/app/models/raif/application_record.rb +18 -0
  23. data/app/models/raif/concerns/agent_inference_stats.rb +35 -0
  24. data/app/models/raif/concerns/json_schema_definition.rb +40 -5
  25. data/app/models/raif/concerns/llms/anthropic/message_formatting.rb +28 -0
  26. data/app/models/raif/concerns/llms/anthropic/response_tool_calls.rb +24 -0
  27. data/app/models/raif/concerns/llms/anthropic/tool_formatting.rb +4 -0
  28. data/app/models/raif/concerns/llms/bedrock/message_formatting.rb +36 -0
  29. data/app/models/raif/concerns/llms/bedrock/response_tool_calls.rb +26 -0
  30. data/app/models/raif/concerns/llms/bedrock/tool_formatting.rb +4 -0
  31. data/app/models/raif/concerns/llms/google/message_formatting.rb +109 -0
  32. data/app/models/raif/concerns/llms/google/response_tool_calls.rb +32 -0
  33. data/app/models/raif/concerns/llms/google/tool_formatting.rb +72 -0
  34. data/app/models/raif/concerns/llms/message_formatting.rb +11 -5
  35. data/app/models/raif/concerns/llms/open_ai/json_schema_validation.rb +3 -3
  36. data/app/models/raif/concerns/llms/open_ai_completions/message_formatting.rb +22 -0
  37. data/app/models/raif/concerns/llms/open_ai_completions/response_tool_calls.rb +22 -0
  38. data/app/models/raif/concerns/llms/open_ai_completions/tool_formatting.rb +4 -0
  39. data/app/models/raif/concerns/llms/open_ai_responses/message_formatting.rb +17 -0
  40. data/app/models/raif/concerns/llms/open_ai_responses/response_tool_calls.rb +26 -0
  41. data/app/models/raif/concerns/llms/open_ai_responses/tool_formatting.rb +4 -0
  42. data/app/models/raif/concerns/run_with.rb +127 -0
  43. data/app/models/raif/conversation.rb +91 -8
  44. data/app/models/raif/conversation_entry.rb +32 -1
  45. data/app/models/raif/embedding_model.rb +2 -1
  46. data/app/models/raif/embedding_models/open_ai.rb +1 -1
  47. data/app/models/raif/llm.rb +27 -2
  48. data/app/models/raif/llms/anthropic.rb +7 -19
  49. data/app/models/raif/llms/bedrock.rb +6 -20
  50. data/app/models/raif/llms/google.rb +140 -0
  51. data/app/models/raif/llms/open_ai_base.rb +19 -5
  52. data/app/models/raif/llms/open_ai_completions.rb +6 -11
  53. data/app/models/raif/llms/open_ai_responses.rb +6 -16
  54. data/app/models/raif/llms/open_router.rb +7 -13
  55. data/app/models/raif/model_completion.rb +61 -0
  56. data/app/models/raif/model_tool.rb +10 -2
  57. data/app/models/raif/model_tool_invocation.rb +38 -6
  58. data/app/models/raif/model_tools/agent_final_answer.rb +2 -7
  59. data/app/models/raif/model_tools/provider_managed/code_execution.rb +4 -0
  60. data/app/models/raif/model_tools/provider_managed/image_generation.rb +4 -0
  61. data/app/models/raif/model_tools/provider_managed/web_search.rb +4 -0
  62. data/app/models/raif/streaming_responses/google.rb +71 -0
  63. data/app/models/raif/task.rb +55 -12
  64. data/app/models/raif/user_tool_invocation.rb +19 -0
  65. data/app/views/layouts/raif/admin.html.erb +12 -1
  66. data/app/views/raif/admin/agents/_agent.html.erb +8 -0
  67. data/app/views/raif/admin/agents/_conversation_message.html.erb +28 -6
  68. data/app/views/raif/admin/agents/index.html.erb +2 -0
  69. data/app/views/raif/admin/agents/show.html.erb +46 -1
  70. data/app/views/raif/admin/configs/show.html.erb +117 -0
  71. data/app/views/raif/admin/conversations/_conversation_entry.html.erb +29 -34
  72. data/app/views/raif/admin/conversations/show.html.erb +2 -0
  73. data/app/views/raif/admin/model_completions/_model_completion.html.erb +9 -0
  74. data/app/views/raif/admin/model_completions/index.html.erb +26 -0
  75. data/app/views/raif/admin/model_completions/show.html.erb +124 -61
  76. data/app/views/raif/admin/model_tool_invocations/index.html.erb +22 -1
  77. data/app/views/raif/admin/model_tools/_list.html.erb +16 -0
  78. data/app/views/raif/admin/model_tools/_model_tool.html.erb +36 -0
  79. data/app/views/raif/admin/stats/_stats_tile.html.erb +34 -0
  80. data/app/views/raif/admin/stats/index.html.erb +71 -88
  81. data/app/views/raif/admin/stats/model_tool_invocations/index.html.erb +43 -0
  82. data/app/views/raif/admin/stats/tasks/index.html.erb +20 -6
  83. data/app/views/raif/admin/tasks/index.html.erb +6 -1
  84. data/app/views/raif/admin/tasks/show.html.erb +36 -3
  85. data/app/views/raif/conversation_entries/_form.html.erb +3 -0
  86. data/app/views/raif/conversations/_conversation.html.erb +10 -0
  87. data/app/views/raif/conversations/_entry_processed.turbo_stream.erb +12 -0
  88. data/app/views/raif/conversations/index.html.erb +23 -0
  89. data/config/locales/admin.en.yml +33 -1
  90. data/config/locales/en.yml +33 -4
  91. data/config/routes.rb +2 -0
  92. data/db/migrate/20250904194456_add_generating_entry_response_to_raif_conversations.rb +7 -0
  93. data/db/migrate/20250911125234_add_source_to_raif_tasks.rb +7 -0
  94. data/db/migrate/20251020005853_add_source_to_raif_agents.rb +7 -0
  95. data/db/migrate/20251020011346_rename_task_run_args_to_run_with.rb +7 -0
  96. data/db/migrate/20251020011405_add_run_with_to_raif_agents.rb +13 -0
  97. data/db/migrate/20251024160119_add_llm_messages_max_length_to_raif_conversations.rb +14 -0
  98. data/db/migrate/20251124185033_add_provider_tool_call_id_to_raif_model_tool_invocations.rb +7 -0
  99. data/db/migrate/20251128202941_add_tool_choice_to_raif_model_completions.rb +7 -0
  100. data/db/migrate/20260118144846_add_source_to_raif_conversations.rb +7 -0
  101. data/db/migrate/20260119000000_add_failure_tracking_to_raif_model_completions.rb +10 -0
  102. data/db/migrate/20260119000001_add_completed_at_to_raif_model_completions.rb +8 -0
  103. data/db/migrate/20260119000002_add_started_at_to_raif_model_completions.rb +8 -0
  104. data/lib/generators/raif/agent/templates/agent.rb.tt +1 -1
  105. data/lib/generators/raif/agent/templates/application_agent.rb.tt +1 -1
  106. data/lib/generators/raif/conversation/templates/conversation.rb.tt +6 -0
  107. data/lib/generators/raif/install/templates/initializer.rb +78 -10
  108. data/lib/generators/raif/task/templates/task.rb.tt +1 -1
  109. data/lib/raif/configuration.rb +37 -2
  110. data/lib/raif/engine.rb +8 -0
  111. data/lib/raif/errors/instance_dependent_schema_error.rb +8 -0
  112. data/lib/raif/errors/streaming_error.rb +6 -3
  113. data/lib/raif/errors.rb +1 -0
  114. data/lib/raif/evals/llm_judge.rb +2 -2
  115. data/lib/raif/evals/llm_judges/binary.rb +3 -3
  116. data/lib/raif/evals/llm_judges/comparative.rb +3 -3
  117. data/lib/raif/evals/llm_judges/scored.rb +1 -1
  118. data/lib/raif/evals/llm_judges/summarization.rb +2 -2
  119. data/lib/raif/evals/run.rb +1 -0
  120. data/lib/raif/json_schema_builder.rb +14 -0
  121. data/lib/raif/llm_registry.rb +207 -37
  122. data/lib/raif/messages.rb +180 -0
  123. data/lib/raif/version.rb +1 -1
  124. data/lib/raif.rb +9 -0
  125. data/lib/tasks/annotate_rb.rake +10 -0
  126. data/spec/support/rspec_helpers.rb +8 -8
  127. metadata +44 -9
  128. data/app/models/raif/agents/re_act_agent.rb +0 -127
  129. data/app/models/raif/agents/re_act_step.rb +0 -32
  130. data/app/models/raif/concerns/task_run_args.rb +0 -62
data/lib/generators/raif/install/templates/initializer.rb CHANGED
@@ -10,6 +10,22 @@ Raif.configure do |config|
  # Whether OpenAI embedding models are enabled.
  # config.open_ai_embedding_models_enabled = ENV["OPENAI_API_KEY"].present?

+ # The base URL for OpenAI API requests.
+ # Set this if you want to use the OpenAI adapter with a different provider (e.g. for using Azure instead of OpenAI)
+ # config.open_ai_base_url = "https://api.openai.com/v1"
+
+ # The base URL for OpenAI embedding API requests.
+ # Set this if you want to use a different provider for embeddings (e.g. Ollama, vLLM, or other OpenAI-compatible APIs)
+ # config.open_ai_embedding_base_url = "https://api.openai.com/v1"
+
+ # When set, this will be included as an api-version parameter in any OpenAI API requests (e.g. for using Azure instead of OpenAI)
+ # config.open_ai_api_version = nil
+
+ # The authentication header style for OpenAI API requests. Defaults to :bearer
+ # Use :bearer for standard OpenAI API (Authorization: Bearer <token>)
+ # Use :api_key for Azure OpenAI API (api-key: <token>)
+ # config.open_ai_auth_header_style = :bearer
+
  # Your Anthropic API key. Defaults to ENV["ANTHROPIC_API_KEY"]
  # config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]

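The four settings above make it possible to point Raif's OpenAI adapter at an Azure OpenAI deployment. A minimal sketch; only the config accessors are confirmed by this release, while the resource URL, deployment path, api-version value, and env var name are illustrative:

    # config/initializers/raif.rb -- hedged example
    Raif.configure do |config|
      config.open_ai_api_key = ENV["AZURE_OPENAI_API_KEY"]  # illustrative env var
      config.open_ai_base_url = "https://my-resource.openai.azure.com/openai/deployments/my-deployment"  # illustrative URL
      config.open_ai_api_version = "2024-10-21"              # illustrative api-version value
      config.open_ai_auth_header_style = :api_key            # sends "api-key: <token>" instead of "Authorization: Bearer <token>"
    end
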
@@ -40,24 +56,56 @@ Raif.configure do |config|
  # The site URL to include in OpenRouter API requests headers. Optional.
  # config.open_router_site_url = "https://myapp.com"

+ # Your Google AI API key. Defaults to ENV["GOOGLE_AI_API_KEY"].presence || ENV["GOOGLE_API_KEY"]
+ # config.google_api_key = ENV["GOOGLE_AI_API_KEY"].presence || ENV["GOOGLE_API_KEY"]
+
+ # Whether Google models are enabled.
+ # config.google_models_enabled = ENV["GOOGLE_API_KEY"].present?
+
  # The default LLM model to use. Defaults to "open_ai_gpt_4o"
  # Available keys:
- # open_ai_gpt_4_1
- # open_ai_gpt_4_1_mini
- # open_ai_gpt_4_1_nano
  # open_ai_gpt_4o_mini
  # open_ai_gpt_4o
  # open_ai_gpt_3_5_turbo
+ # open_ai_gpt_4_1
+ # open_ai_gpt_4_1_mini
+ # open_ai_gpt_4_1_nano
+ # open_ai_o1
+ # open_ai_o1_mini
+ # open_ai_o3
+ # open_ai_o3_mini
+ # open_ai_o4_mini
+ # open_ai_gpt_5
+ # open_ai_gpt_5_mini
+ # open_ai_gpt_5_nano
+ # open_ai_responses_gpt_4o_mini
+ # open_ai_responses_gpt_4o
+ # open_ai_responses_gpt_3_5_turbo
  # open_ai_responses_gpt_4_1
  # open_ai_responses_gpt_4_1_mini
  # open_ai_responses_gpt_4_1_nano
- # open_ai_responses_gpt_4o_mini
- # open_ai_responses_gpt_4o
- # open_ai_gpt_3_5_turbo
+ # open_ai_responses_o1
+ # open_ai_responses_o1_mini
+ # open_ai_responses_o3
+ # open_ai_responses_o3_mini
+ # open_ai_responses_o4_mini
+ # open_ai_responses_gpt_5
+ # open_ai_responses_gpt_5_mini
+ # open_ai_responses_gpt_5_nano
+ # open_ai_responses_o1_pro
+ # open_ai_responses_o3_pro
+ # anthropic_claude_4_sonnet
+ # anthropic_claude_4_5_sonnet
+ # anthropic_claude_4_opus
+ # anthropic_claude_4_1_opus
  # anthropic_claude_3_7_sonnet
  # anthropic_claude_3_5_sonnet
  # anthropic_claude_3_5_haiku
  # anthropic_claude_3_opus
+ # bedrock_claude_4_sonnet
+ # bedrock_claude_4_5_sonnet
+ # bedrock_claude_4_opus
+ # bedrock_claude_4_1_opus
  # bedrock_claude_3_5_sonnet
  # bedrock_claude_3_7_sonnet
  # bedrock_claude_3_5_haiku
@@ -66,10 +114,21 @@ Raif.configure do |config|
  # bedrock_amazon_nova_lite
  # bedrock_amazon_nova_pro
  # open_router_claude_3_7_sonnet
- # open_router_llama_3_3_70b_instruct
- # open_router_llama_3_1_8b_instruct
- # open_router_gemini_2_0_flash
  # open_router_deepseek_chat_v3
+ # open_router_deepseek_v3_1
+ # open_router_gemini_2_0_flash
+ # open_router_gemini_2_5_pro
+ # open_router_grok_4
+ # open_router_llama_3_1_8b_instruct
+ # open_router_llama_3_3_70b_instruct
+ # open_router_llama_4_maverick
+ # open_router_llama_4_scout
+ # open_router_open_ai_gpt_oss_120b
+ # open_router_open_ai_gpt_oss_20b
+ # google_gemini_2_5_pro
+ # google_gemini_2_5_flash
+ # google_gemini_3_0_pro
+ # google_gemini_3_0_flash
  #
  # config.default_llm_model_key = "open_ai_gpt_4o"

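With the expanded key list above, choosing a Google or Anthropic model as the default is a one-line change. A minimal sketch using only keys and accessors that appear in this diff:

    Raif.configure do |config|
      # Any key from the list above is valid here; the value is validated at boot
      # against Raif.available_llm_keys (see lib/raif/configuration.rb below).
      config.default_llm_model_key = ENV["RAIF_DEFAULT_LLM_MODEL_KEY"].presence || "google_gemini_2_5_flash"
    end
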
@@ -118,10 +177,14 @@ Raif.configure do |config|
  # If you want to use a custom controller that inherits from Raif::ConversationEntriesController, you can set it here.
  # config.conversation_entries_controller = "Raif::ConversationEntriesController"

+ # The default maximum number of conversation entries to include in LLM messages. Defaults to 50.
+ # Set to nil to include all entries. Each conversation can override this with its own llm_messages_max_length attribute.
+ # config.conversation_llm_messages_max_length_default = 50
+
  # The method to call to get the current user. Defaults to :current_user
  # config.current_user_method = :current_user

- # The agent types that are available. Defaults to Set.new(["Raif::Agents::ReActAgent", "Raif::Agents::NativeToolCallingAgent"])
+ # The agent types that are available. Defaults to Set.new(["Raif::Agents::NativeToolCallingAgent"])
  # If you want to use custom agent types that inherits from Raif::Agent, you can add them here.
  # config.agent_types += ["MyAgent"]

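The new conversation history cap can be set globally and then overridden per conversation via the llm_messages_max_length column added in this release (migration 20251024160119). A hedged sketch; the record lookup is purely illustrative:

    # Global default: only the 30 most recent entries go into the LLM messages
    Raif.configure do |config|
      config.conversation_llm_messages_max_length_default = 30
    end

    # Per-conversation override (standard ActiveRecord update; nil = include all entries)
    conversation = Raif::Conversation.find(conversation_id)  # illustrative lookup
    conversation.update!(llm_messages_max_length: nil)
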
@@ -138,6 +201,11 @@ Raif.configure do |config|
  # Use this to globally disable requests to LLM APIs.
  # config.llm_api_requests_enabled = true

+ # Timeout settings for LLM API requests (in seconds). All default to nil (use Faraday defaults).
+ # config.request_open_timeout = nil # Time to wait for a connection to be opened
+ # config.request_read_timeout = nil # Time to wait for data to be read
+ # config.request_write_timeout = nil # Time to wait for data to be written
+
  # The default LLM model to use for LLM-as-judge evaluations.
  # If not set, falls back to the default_llm_model_key.
  # config.evals_default_llm_judge_model_key = ENV["RAIF_EVALS_DEFAULT_LLM_JUDGE_MODEL_KEY"].presence
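A hedged sketch of tightening the new request timeouts; the values are illustrative, and nil keeps Faraday's defaults as the comments above note:

    Raif.configure do |config|
      config.request_open_timeout  = 10   # seconds to establish a connection
      config.request_read_timeout  = 120  # seconds to wait while reading the response
      config.request_write_timeout = 30   # seconds to wait while writing the request
    end
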
data/lib/generators/raif/task/templates/task.rb.tt CHANGED
@@ -17,7 +17,7 @@
  # Define any attributes that are needed for the task.
  # You can then pass them when running the task and they will be available in build_prompt:
  # Raif::Tasks::<%= class_name %>.run(your_attribute: "some value")
- # task_run_arg :your_attribute
+ # run_with :your_attribute
  <%- if options[:response_format] == "json" -%>

  # Define a JSON schema that the model's response should adhere to
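Putting the renamed macro in context, a hedged sketch of a generated task: the class name, superclass, and prompt are illustrative, while run_with, build_prompt, and the .run(keyword: ...) pattern come from the template above.

    # app/models/raif/tasks/summarize_article.rb -- illustrative
    class Raif::Tasks::SummarizeArticle < Raif::Task   # superclass assumed; generated apps may use an application task base class
      run_with :article_text   # formerly: task_run_arg :article_text

      def build_prompt
        "Summarize the following article:\n\n#{article_text}"
      end
    end

    # Raif::Tasks::SummarizeArticle.run(article_text: "...")
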
data/lib/raif/configuration.rb CHANGED
@@ -12,6 +12,7 @@ module Raif
  :aws_bedrock_region,
  :bedrock_embedding_models_enabled,
  :conversation_entries_controller,
+ :conversation_llm_messages_max_length_default,
  :conversation_system_prompt_intro,
  :conversation_types,
  :conversations_controller,
@@ -20,17 +21,26 @@ module Raif
  :default_llm_model_key,
  :evals_default_llm_judge_model_key,
  :evals_verbose_output,
+ :google_api_key,
+ :google_models_enabled,
  :llm_api_requests_enabled,
  :llm_request_max_retries,
  :llm_request_retriable_exceptions,
  :model_superclass,
  :open_ai_api_key,
+ :open_ai_api_version,
+ :open_ai_auth_header_style,
+ :open_ai_base_url,
+ :open_ai_embedding_base_url,
  :open_ai_embedding_models_enabled,
  :open_ai_models_enabled,
  :open_router_api_key,
  :open_router_models_enabled,
  :open_router_app_name,
  :open_router_site_url,
+ :request_open_timeout,
+ :request_read_timeout,
+ :request_write_timeout,
  :streaming_update_chunk_size_threshold,
  :task_creator_optional,
  :task_system_prompt_intro,
@@ -43,7 +53,7 @@ module Raif
  alias_method :aws_bedrock_titan_embedding_models_enabled=, :bedrock_embedding_models_enabled=

  def initialize
- @agent_types = Set.new(["Raif::Agents::ReActAgent", "Raif::Agents::NativeToolCallingAgent"])
+ @agent_types = Set.new(["Raif::Agents::NativeToolCallingAgent"])
  @anthropic_api_key = default_disable_llm_api_requests? ? "placeholder-anthropic-api-key" : ENV["ANTHROPIC_API_KEY"]
  @bedrock_models_enabled = false
  @anthropic_models_enabled = ENV["ANTHROPIC_API_KEY"].present?
@@ -54,6 +64,7 @@ module Raif
  @bedrock_embedding_models_enabled = false
  @task_system_prompt_intro = "You are a helpful assistant."
  @conversation_entries_controller = "Raif::ConversationEntriesController"
+ @conversation_llm_messages_max_length_default = 50
  @conversation_system_prompt_intro = "You are a helpful assistant who is collaborating with a teammate."
  @conversation_types = Set.new(["Raif::Conversation"])
  @conversations_controller = "Raif::ConversationsController"
@@ -62,15 +73,24 @@ module Raif
  @default_llm_model_key = default_disable_llm_api_requests? ? :raif_test_llm : (ENV["RAIF_DEFAULT_LLM_MODEL_KEY"].presence || "open_ai_gpt_4o")
  @evals_default_llm_judge_model_key = ENV["RAIF_EVALS_DEFAULT_LLM_JUDGE_MODEL_KEY"].presence
  @evals_verbose_output = false
+ google_api_key = ENV["GOOGLE_AI_API_KEY"].presence || ENV["GOOGLE_API_KEY"]
+ @google_api_key = default_disable_llm_api_requests? ? "placeholder-google-api-key" : google_api_key
+ @google_models_enabled = @google_api_key.present?
  @llm_api_requests_enabled = !default_disable_llm_api_requests?
  @llm_request_max_retries = 2
  @llm_request_retriable_exceptions = [
  Faraday::ConnectionFailed,
  Faraday::TimeoutError,
  Faraday::ServerError,
+ Net::ReadTimeout,
+ Net::OpenTimeout,
  ]
  @model_superclass = "ApplicationRecord"
  @open_ai_api_key = default_disable_llm_api_requests? ? "placeholder-open-ai-api-key" : ENV["OPENAI_API_KEY"]
+ @open_ai_api_version = nil
+ @open_ai_auth_header_style = :bearer
+ @open_ai_base_url = "https://api.openai.com/v1"
+ @open_ai_embedding_base_url = "https://api.openai.com/v1"
  @open_ai_embedding_models_enabled = ENV["OPENAI_API_KEY"].present?
  @open_ai_models_enabled = ENV["OPENAI_API_KEY"].present?
  open_router_api_key = ENV["OPEN_ROUTER_API_KEY"].presence || ENV["OPENROUTER_API_KEY"]
@@ -78,6 +98,9 @@ module Raif
  @open_router_models_enabled = @open_router_api_key.present?
  @open_router_app_name = nil
  @open_router_site_url = nil
+ @request_open_timeout = nil
+ @request_read_timeout = nil
+ @request_write_timeout = nil
  @streaming_update_chunk_size_threshold = 25
  @task_creator_optional = true
  @user_tool_types = []
@@ -103,7 +126,9 @@ module Raif
  "Raif.config.default_llm_model_key was set to #{default_llm_model_key}, but must be one of: #{Raif.available_llm_keys.join(", ")}"
  end

- if Raif.embedding_model_registry.present? && !Raif.available_embedding_model_keys.include?(default_embedding_model_key.to_sym)
+ if default_embedding_model_key.present? &&
+ Raif.embedding_model_registry.present? &&
+ !Raif.available_embedding_model_keys.include?(default_embedding_model_key.to_sym)
  raise Raif::Errors::InvalidConfigError,
  "Raif.config.default_embedding_model_key was set to #{default_embedding_model_key}, but must be one of: #{Raif.available_embedding_model_keys.join(", ")}" # rubocop:disable Layout/LineLength
  end
@@ -127,6 +152,11 @@ module Raif
  "Raif.config.open_ai_api_key is required when Raif.config.open_ai_models_enabled is true. Set it via Raif.config.open_ai_api_key or ENV[\"OPENAI_API_KEY\"]" # rubocop:disable Layout/LineLength
  end

+ if open_ai_models_enabled && ![:bearer, :api_key].include?(open_ai_auth_header_style)
+ raise Raif::Errors::InvalidConfigError,
+ "Raif.config.open_ai_auth_header_style must be either :bearer or :api_key"
+ end
+
  if open_ai_embedding_models_enabled && open_ai_api_key.blank?
  raise Raif::Errors::InvalidConfigError,
  "Raif.config.open_ai_api_key is required when Raif.config.open_ai_embedding_models_enabled is true. Set it via Raif.config.open_ai_api_key or ENV[\"OPENAI_API_KEY\"]" # rubocop:disable Layout/LineLength
@@ -141,6 +171,11 @@ module Raif
  raise Raif::Errors::InvalidConfigError,
  "Raif.config.open_router_api_key is required when Raif.config.open_router_models_enabled is true. Set it via Raif.config.open_router_api_key or ENV['OPEN_ROUTER_API_KEY']" # rubocop:disable Layout/LineLength
  end
+
+ if google_models_enabled && google_api_key.blank?
+ raise Raif::Errors::InvalidConfigError,
+ "Raif.config.google_api_key is required when Raif.config.google_models_enabled is true. Set it via Raif.config.google_api_key or ENV['GOOGLE_API_KEY']" # rubocop:disable Layout/LineLength
+ end
  end

  private
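Tying the validation above back to the initializer, a minimal sketch of enabling the new Google models; all accessors are confirmed by this diff, and the key must be present or InvalidConfigError is raised at boot:

    Raif.configure do |config|
      config.google_api_key = ENV["GOOGLE_AI_API_KEY"].presence || ENV["GOOGLE_API_KEY"]
      config.google_models_enabled = config.google_api_key.present?
    end
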
data/lib/raif/engine.rb CHANGED
@@ -72,6 +72,14 @@ module Raif
  end
  end

+ config.after_initialize do
+ next unless Raif.config.google_models_enabled
+
+ Raif.default_llms[Raif::Llms::Google].each do |llm_config|
+ Raif.register_llm(Raif::Llms::Google, **llm_config)
+ end
+ end
+
  config.after_initialize do
  next unless Raif.config.bedrock_embedding_models_enabled

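Once the engine hook above has run, the Google entries should be resolvable like any other registered model. A hedged console check; Raif.available_llm_keys is the helper used by the config validation, and the key name comes from the initializer list, but the symbol return type is an assumption:

    # In a Rails console, after boot, with google_models_enabled = true:
    Raif.available_llm_keys.include?(:google_gemini_2_5_pro)  # expected to be true (assuming keys are symbols)
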
data/lib/raif/errors/instance_dependent_schema_error.rb ADDED
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+
+ module Raif
+ module Errors
+ class InstanceDependentSchemaError < StandardError
+ end
+ end
+ end
data/lib/raif/errors/streaming_error.rb CHANGED
@@ -3,16 +3,19 @@
  module Raif
  module Errors
  class StreamingError < StandardError
- attr_reader :message, :type, :code, :event
+ attr_reader :type, :code, :event

  def initialize(message:, type:, event:, code: nil)
- super
+ super(message)

- @message = message
  @type = type
  @code = code
  @event = event
  end
+
+ def to_s
+ "[#{type}] #{super} (code=#{code}, event=#{event})"
+ end
  end
  end
  end
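The reworked error now carries its message through the standard exception mechanism and adds a formatted to_s. A hedged sketch of handling it; the rescue site, logged values, and retry helper are illustrative:

    begin
      # ... streaming chat call ...
    rescue Raif::Errors::StreamingError => e
      # e.message is the provider's message (passed to super above);
      # type, code, and event remain available as readers.
      Rails.logger.error(e.to_s)  # e.g. "[overloaded_error] Overloaded (code=529, event=...)" -- illustrative values
      retry_later if e.type == "overloaded_error"  # hypothetical handling
    end
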
data/lib/raif/errors.rb CHANGED
@@ -9,3 +9,4 @@ require "raif/errors/invalid_model_image_input_error"
  require "raif/errors/invalid_model_file_input_error"
  require "raif/errors/unsupported_feature_error"
  require "raif/errors/streaming_error"
+ require "raif/errors/instance_dependent_schema_error"
data/lib/raif/evals/llm_judge.rb CHANGED
@@ -9,8 +9,8 @@ module Raif
  # Default to JSON response format for structured output
  llm_response_format :json

- task_run_arg :content_to_judge # the content to judge
- task_run_arg :additional_context # additional context to be provided to the judge
+ run_with :content_to_judge # the content to judge
+ run_with :additional_context # additional context to be provided to the judge

  def default_llm_model_key
  Raif.config.evals_default_llm_judge_model_key || super
data/lib/raif/evals/llm_judges/binary.rb CHANGED
@@ -4,9 +4,9 @@ module Raif
  module Evals
  module LlmJudges
  class Binary < Raif::Evals::LlmJudge
- task_run_arg :criteria
- task_run_arg :examples
- task_run_arg :strict_mode
+ run_with :criteria
+ run_with :examples
+ run_with :strict_mode

  json_response_schema do
  boolean :passes, description: "Whether the content passes the criteria"
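Because judges inherit the task run_with machinery, their attributes are passed as keyword arguments when a judge is run. A hedged sketch; the .run entry point mirrors the Raif::Tasks pattern from the task template, and the input variable is illustrative:

    judge = Raif::Evals::LlmJudges::Binary.run(
      content_to_judge: candidate_answer,                   # inherited from Raif::Evals::LlmJudge
      criteria: "The answer cites at least one source",
      strict_mode: true
    )
    # The JSON response is expected to include the boolean "passes" defined in the schema above.
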
data/lib/raif/evals/llm_judges/comparative.rb CHANGED
@@ -4,9 +4,9 @@ module Raif
  module Evals
  module LlmJudges
  class Comparative < Raif::Evals::LlmJudge
- task_run_arg :over_content # the content to compare against
- task_run_arg :comparison_criteria # the criteria to use when comparing content_to_judge to over_content
- task_run_arg :allow_ties # whether to allow ties in the comparison
+ run_with :over_content # the content to compare against
+ run_with :comparison_criteria # the criteria to use when comparing content_to_judge to over_content
+ run_with :allow_ties # whether to allow ties in the comparison

  attr_accessor :content_a, :content_b, :expected_winner

data/lib/raif/evals/llm_judges/scored.rb CHANGED
@@ -4,7 +4,7 @@ module Raif
  module Evals
  module LlmJudges
  class Scored < Raif::Evals::LlmJudge
- task_run_arg :scoring_rubric # the scoring rubric to use when evaluating the content
+ run_with :scoring_rubric # the scoring rubric to use when evaluating the content

  json_response_schema do
  number :score, description: "Numerical score based on the rubric"
data/lib/raif/evals/llm_judges/summarization.rb CHANGED
@@ -4,8 +4,8 @@ module Raif
  module Evals
  module LlmJudges
  class Summarization < Raif::Evals::LlmJudge
- task_run_arg :original_content # the original content to evaluate the summary against
- task_run_arg :summary # the summary to evaluate against the original content
+ run_with :original_content # the original content to evaluate the summary against
+ run_with :summary # the summary to evaluate against the original content

  json_response_schema do
  object :coverage do
data/lib/raif/evals/run.rb CHANGED
@@ -33,6 +33,7 @@ module Raif
  output.puts "\nStarting Raif Eval Run"
  output.puts ""
  output.puts "Raif.config.default_llm_model_key: #{Raif.config.default_llm_model_key}"
+ output.puts "Raif.config.evals_default_llm_judge_model_key: #{Raif.config.evals_default_llm_judge_model_key}"
  output.puts ""
  output.puts "=" * 50

data/lib/raif/json_schema_builder.rb CHANGED
@@ -10,6 +10,20 @@ module Raif
  @items_schema = nil
  end

+ # Build schema with instance context for instance-dependent schemas
+ # The block receives the instance as a parameter and has access to the builder methods
+ #
+ # @param instance [Object] The instance to use as context
+ # @param block [Proc] The block to evaluate with instance context
+ # @return [JsonSchemaBuilder] self for chaining
+ def build_with_instance(instance, &block)
+ # Evaluate the block in the context of the builder, passing the instance as parameter
+ # This allows the block to use both builder methods (string, integer, etc.)
+ # and access the instance parameter for conditional logic
+ instance_exec(instance, &block)
+ self
+ end
+
  def string(name, options = {})
  add_property(name, "string", options)
  end
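A hedged sketch of what the new instance-dependent schema support enables: the block can branch on the passed instance while still using the builder DSL (string, number, etc.). The task object and its include_confidence? flag are hypothetical.

    builder = Raif::JsonSchemaBuilder.new
    builder.build_with_instance(task) do |task|
      string :summary, description: "A short summary of the result"
      number :confidence if task.include_confidence?   # hypothetical per-instance flag
    end
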