code_to_query 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,92 @@
1
# frozen_string_literal: true

# Configuration management

require 'singleton'
require 'logger'

module CodeToQuery
  # Centralized configuration with sensible defaults.
  #
  # A process-wide singleton (via the stdlib Singleton mixin) exposing
  # read/write knobs for query guardrails, limits, LLM transport, logging,
  # and static-analysis/RAG context gathering. Defaults are populated in
  # #initialize; a handful may be overridden via environment variables
  # (OPENAI_API_KEY, CODE_TO_QUERY_*).
  class Configuration
    include Singleton

    # Core adapter, limit, guardrail, provider and planner knobs.
    attr_accessor :adapter, :readonly_role, :default_limit, :max_limit, :max_joins,
                  :block_subqueries, :allow_seq_scans, :max_query_cost, :max_query_rows,
                  :query_timeout, :force_readonly_session, :reset_session_after_query,
                  :policy_adapter, :context_pack_path, :enable_explain_gate, :provider,
                  :openai_api_key, :openai_model, :stub_llm,
                  :auto_glossary_with_llm, :max_glossary_suggestions, :count_limit,
                  :aggregation_limit, :distinct_limit, :exists_limit,
                  :planner_max_attempts, :planner_feedback_mode, :prefer_static_scan,
                  :static_scan_dirs, :context_rag_top_k, :require_limit_by_default,
                  :explain_fail_open

    # Extended configuration knobs (added for LLM transport and logging).
    attr_accessor :logger, :llm_api_base, :llm_timeout, :llm_temperature,
                  :provider_options, :system_prompt_template, :llm_client

    # Assigns every knob its default value. Called once by Singleton on the
    # first Configuration.instance access.
    def initialize
      # Adapter and database guardrail defaults
      @adapter = :postgres
      @readonly_role = nil
      @default_limit = 100
      @max_limit = 10_000
      @max_joins = 3
      @block_subqueries = false
      @allow_seq_scans = false
      @max_query_cost = 10_000
      @max_query_rows = 100_000
      @query_timeout = 30
      @force_readonly_session = false
      @reset_session_after_query = false
      @policy_adapter = nil
      @context_pack_path = default_context_pack_path
      @enable_explain_gate = false
      @explain_fail_open = true

      # LLM provider selection
      @provider = :auto
      @openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
      @openai_model = 'gpt-4'
      @stub_llm = false

      # LLM-assisted glossary enrichment during bootstrap (on by default for better UX)
      @auto_glossary_with_llm = true
      @max_glossary_suggestions = 200

      # Query-type specific limits for flexibility; nil means no implicit limit
      @count_limit = nil         # COUNT operations
      @aggregation_limit = nil   # SUM/AVG/MAX/MIN operations
      @distinct_limit = 10_000   # higher ceiling for DISTINCT queries
      @exists_limit = 1          # LIMIT 1 for existence checks

      # Planner iteration; feedback modes: :none, :schema_strict, :adaptive
      @planner_max_attempts = Integer(ENV.fetch('CODE_TO_QUERY_PLANNER_MAX_ATTEMPTS', 2))
      @planner_feedback_mode = ENV.fetch('CODE_TO_QUERY_PLANNER_FEEDBACK_MODE', 'adaptive').to_sym

      # Logging and LLM transport knobs
      @logger = default_logger
      @llm_api_base = ENV.fetch('CODE_TO_QUERY_LLM_API_BASE', 'https://api.openai.com/v1')
      @llm_timeout = Integer(ENV.fetch('CODE_TO_QUERY_LLM_TIMEOUT', 30))
      @llm_temperature = Float(ENV.fetch('CODE_TO_QUERY_LLM_TEMPERATURE', 0.1))
      @provider_options = {}
      @system_prompt_template = nil
      @llm_client = nil

      # Static analysis and RAG context options
      @prefer_static_scan = true
      @static_scan_dirs = default_static_scan_dirs
      @context_rag_top_k = Integer(ENV.fetch('CODE_TO_QUERY_CONTEXT_RAG_TOP_K', 6))

      # Guardrail defaults
      @require_limit_by_default = true
    end

    private

    # Context pack location: under Rails.root when running inside Rails,
    # otherwise relative to the current working directory.
    def default_context_pack_path
      return Rails.root.join('db/code_to_query/context.json') if defined?(Rails)

      File.join(Dir.pwd, 'db/code_to_query/context.json')
    end

    # Rails.logger when available; otherwise a WARN-level logger on stdout.
    def default_logger
      return Rails.logger if defined?(Rails) && Rails.respond_to?(:logger)

      Logger.new($stdout).tap { |l| l.level = Logger::WARN }
    end

    # Model directories for static scanning: app/models under Rails.root
    # inside Rails, else app/models under the current working directory.
    def default_static_scan_dirs
      return [Rails.root.join('app/models').to_s] if defined?(Rails)

      [File.join(Dir.pwd, 'app/models')]
    end
  end
end