prescient 0.0.0 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,355 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

# Example: Custom context configurations with the Prescient gem
# Demonstrates configuring context formatting and embedding field extraction.

require_relative '../lib/prescient'

puts "=== Custom Context Configurations Example ==="
puts "This example shows how to define your own context types and configurations."

# Example 1: E-commerce Product Catalog
puts "\n--- Example 1: E-commerce Product Catalog ---"

Prescient.configure do |c|
  c.add_provider(:ecommerce, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b',
    # Context types are entirely user-defined - nothing is hardcoded.
    context_configs: {
      'product' => {
        fields: %w[name description price category brand stock_count],
        format: '%{name} by %{brand}: %{description} - $%{price} (%{category}) [Stock: %{stock_count}]',
        # Only these fields contribute to the embedding text.
        embedding_fields: %w[name description category brand]
      },
      'review' => {
        fields: %w[product_name rating review_text reviewer_name date],
        format: '%{product_name} - %{rating}/5 stars by %{reviewer_name}: "%{review_text}"',
        # Rating, reviewer and date are deliberately excluded from embeddings.
        embedding_fields: %w[product_name review_text]
      }
    },
    prompt_templates: {
      system_prompt: 'You are a helpful e-commerce assistant. Help customers find products and understand reviews.',
      with_context_template: <<~TEMPLATE.strip
        %{system_prompt}

        Product Catalog:
        %{context}

        Customer Question: %{query}

        Based on our product catalog above, provide helpful recommendations.
      TEMPLATE
    }
  )
end

begin
  shop_client = Prescient.client(:ecommerce)

  if shop_client.available?
    # Sample catalog context: one product record and one customer review.
    catalog_items = [
      {
        'type' => 'product',
        'name' => 'UltraBook Pro',
        'description' => 'High-performance laptop with 16GB RAM and 512GB SSD',
        'price' => '1299.99',
        'category' => 'Laptops',
        'brand' => 'TechCorp',
        'stock_count' => '15'
      },
      {
        'type' => 'review',
        'product_name' => 'UltraBook Pro',
        'rating' => '5',
        'review_text' => 'Amazing performance and battery life. Perfect for development work.',
        'reviewer_name' => 'John D.',
        'date' => '2024-01-15'
      }
    ]

    answer = shop_client.generate_response("I need a laptop for programming work", catalog_items)
    puts "šŸ›’ E-commerce Assistant Response:"
    puts answer[:response]
  else
    puts "āŒ E-commerce provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Example 2: Healthcare Patient Records
puts "\n--- Example 2: Healthcare Patient Records ---"

Prescient.configure do |config|
  config.add_provider(:healthcare, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b',
    context_configs: {
      'patient' => {
        fields: %w[name age gender medical_conditions medications],
        format: 'Patient: %{name} (Age: %{age}, Gender: %{gender}) - Conditions: %{medical_conditions}, Medications: %{medications}',
        embedding_fields: %w[medical_conditions medications]
      },
      'appointment' => {
        # FIX: the field was previously named 'type', which collided with the
        # 'type' discriminator key used for context detection. The appointment
        # data hash then defined 'type' twice (duplicate hash-literal key:
        # Ruby warns and keeps only the last value, 'Follow-up'), so context
        # detection for 'appointment' silently failed. The field is renamed
        # to 'appointment_type' to remove the collision.
        fields: %w[patient_name date appointment_type notes doctor],
        format: 'Appointment for %{patient_name} on %{date} - %{appointment_type} with Dr. %{doctor}: %{notes}',
        embedding_fields: %w[appointment_type notes]
      }
    },
    prompt_templates: {
      system_prompt: 'You are a medical assistant AI. Provide helpful information while emphasizing the importance of consulting healthcare professionals.',
      with_context_template: <<~TEMPLATE.strip
        %{system_prompt}

        Patient Information:
        %{context}

        Medical Query: %{query}

        Based on the patient information, provide helpful guidance while emphasizing professional medical consultation.
      TEMPLATE
    }
  )
end

begin
  client = Prescient.client(:healthcare)

  if client.available?
    # Patient records context (anonymized example data)
    patient_data = [
      {
        'type' => 'patient',
        'name' => 'Patient A',
        'age' => '45',
        'gender' => 'Female',
        'medical_conditions' => 'Type 2 Diabetes, Hypertension',
        'medications' => 'Metformin, Lisinopril'
      },
      {
        'type' => 'appointment',
        'patient_name' => 'Patient A',
        'date' => '2024-01-20',
        # Renamed from 'type' so it no longer clobbers the discriminator above.
        'appointment_type' => 'Follow-up',
        'notes' => 'Blood sugar levels improving with current treatment',
        'doctor' => 'Johnson'
      }
    ]

    response = client.generate_response("What dietary considerations should be noted?", patient_data)
    puts "šŸ„ Healthcare Assistant Response:"
    puts response[:response]
  else
    puts "āŒ Healthcare provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Example 3: Software Project Management
puts "\n--- Example 3: Software Project Management ---"

Prescient.configure do |cfg|
  cfg.add_provider(:project_mgmt, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b',
    context_configs: {
      'issue' => {
        fields: %w[title description status priority assignee labels created_date],
        format: '#%{title} (%{status}) - Priority: %{priority}, Assigned: %{assignee} - %{description}',
        embedding_fields: %w[title description labels]
      },
      'pull_request' => {
        fields: %w[title description author status files_changed],
        format: 'PR: %{title} by %{author} (%{status}) - %{files_changed} files: %{description}',
        embedding_fields: %w[title description]
      },
      'team_member' => {
        fields: %w[name role skills experience projects],
        format: '%{name} - %{role} with %{experience} experience in %{skills}, working on: %{projects}',
        embedding_fields: %w[role skills projects]
      }
    },
    prompt_templates: {
      system_prompt: 'You are a software project management assistant. Help with planning, issue tracking, and team coordination.',
      with_context_template: <<~TEMPLATE.strip
        %{system_prompt}

        Project Context:
        %{context}

        Management Question: %{query}

        Based on the project information above, provide actionable project management advice.
      TEMPLATE
    }
  )
end

begin
  pm_client = Prescient.client(:project_mgmt)

  if pm_client.available?
    # Project context: an open issue plus the team member assigned to it.
    board_items = [
      {
        'type' => 'issue',
        'title' => 'API Performance Optimization',
        'description' => 'Database queries are slow in user dashboard endpoint',
        'status' => 'In Progress',
        'priority' => 'High',
        'assignee' => 'Sarah Chen',
        'labels' => 'performance database api',
        'created_date' => '2024-01-18'
      },
      {
        'type' => 'team_member',
        'name' => 'Sarah Chen',
        'role' => 'Senior Backend Developer',
        'skills' => 'Python, PostgreSQL, Redis, Docker',
        'experience' => '5 years',
        'projects' => 'API Optimization, User Dashboard'
      }
    ]

    answer = pm_client.generate_response("What's the status of our performance issues?", board_items)
    puts "šŸ“Š Project Management Response:"
    puts answer[:response]
  else
    puts "āŒ Project management provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Example 4: Embedding Text Extraction
puts "\n--- Example 4: Embedding Text Extraction ---"

begin
  # Register a provider whose context config restricts the embedding fields.
  Prescient.configure do |cfg|
    cfg.add_provider(:embedding_demo, Prescient::Provider::Ollama,
      url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
      embedding_model: 'nomic-embed-text',
      chat_model: 'llama3.1:8b',
      context_configs: {
        'blog_post' => {
          fields: %w[title content author tags category publish_date],
          format: '%{title} by %{author} in %{category}: %{content}',
          # Only these fields are used when building embedding text.
          embedding_fields: %w[title content tags]
        }
      }
    )
  end

  demo_client = Prescient.client(:embedding_demo)

  if demo_client.available?
    # Sample item used to exercise embedding text extraction.
    sample_post = {
      'type' => 'blog_post',
      'title' => 'Getting Started with AI',
      'content' => 'Artificial Intelligence is revolutionizing how we solve complex problems...',
      'author' => 'Dr. Smith',
      'tags' => 'AI machine-learning tutorial',
      'category' => 'Technology',
      'publish_date' => '2024-01-15'
    }

    # extract_embedding_text honors embedding_fields, so only title,
    # content and tags end up in the extracted text.
    text_for_embedding = demo_client.provider.send(:extract_embedding_text, sample_post)
    puts "šŸ“Š Embedding Text Extracted:"
    puts "\"#{text_for_embedding}\""
    puts "\n(Notice how only title, content, and tags are included - not author, category, or date)"

    # Now turn that text into an actual vector.
    puts "\nšŸ”¢ Generating embedding..."
    vector = demo_client.generate_embedding(text_for_embedding)
    puts "Generated embedding with #{vector.size} dimensions"
  else
    puts "āŒ Embedding demo provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Example 5: No Context Configuration (Pure Default Behavior)
puts "\n--- Example 5: No Context Configuration ---"
puts "Shows how the system works without any context_configs defined."

Prescient.configure do |cfg|
  cfg.add_provider(:no_config, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b'
    # Intentionally no context_configs: exercises the default behavior.
  )
end

begin
  plain_client = Prescient.client(:no_config)

  if plain_client.available?
    # Ad-hoc data with no predefined context type.
    samples = [
      {
        'title' => 'Meeting Notes',
        'content' => 'Discussed project timeline and deliverables',
        'author' => 'Jane Smith',
        'created_at' => '2024-01-20',
        'priority' => 'high'
      },
      {
        'name' => 'Server Performance',
        'description' => 'CPU usage spiked to 90% during peak hours',
        'severity' => 'warning',
        'timestamp' => '2024-01-20T10:30:00Z'
      }
    ]

    puts "šŸ”§ Raw data (no context config):"
    samples.each do |record|
      puts " #{record}"
    end

    puts "\nšŸ“„ How items are formatted without context config:"
    samples.each do |record|
      rendered = plain_client.provider.send(:format_context_item, record)
      puts " #{rendered}"
    end

    puts "\nšŸ”¤ Embedding text extraction (automatic field filtering):"
    samples.each do |record|
      extracted = plain_client.provider.send(:extract_embedding_text, record)
      puts " \"#{extracted}\""
      puts " (Notice: excludes 'created_at', 'timestamp' - common metadata fields)"
    end

    answer = plain_client.generate_response("Summarize the key issues", samples)
    puts "\nšŸ¤– AI Response (using default formatting):"
    puts answer[:response]
  else
    puts "āŒ No config provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Closing summary: recap of the features shown above.
[
  "\nšŸŽ‰ Custom context configurations completed!",
  "\nšŸ’” Key Features Demonstrated:",
  " āœ… User-defined context types (no hardcoded assumptions)",
  " āœ… Automatic context detection based on YOUR field configurations",
  " āœ… Custom field formatting with templates",
  " āœ… Selective embedding field extraction",
  " āœ… Fallback formatting for unconfigured data",
  " āœ… Works without any context configuration (pure default behavior)",
  "\nšŸŽÆ Best Practices:",
  " - Define context_configs for your specific domain",
  " - Use explicit 'type' field when context detection isn't reliable",
  " - Exclude sensitive/metadata fields from embedding_fields",
  " - Test with and without context configs to see the difference"
].each { |line| puts line }
@@ -0,0 +1,212 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

# Example: Custom prompt templates with the Prescient gem
# Demonstrates customizing AI assistant behavior with custom prompts.

require_relative '../lib/prescient'

puts "=== Custom Prompt Templates Example ==="

# Example 1: Customer Service Assistant
puts "\n--- Example 1: Customer Service Assistant ---"

Prescient.configure do |c|
  c.add_provider(:customer_service, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b',
    prompt_templates: {
      system_prompt: 'You are a friendly customer service representative. Be helpful, empathetic, and professional.',
      no_context_template: <<~TEMPLATE.strip,
        %{system_prompt}

        Customer Question: %{query}

        Please provide a helpful and professional response.
      TEMPLATE
      with_context_template: <<~TEMPLATE.strip
        %{system_prompt} Use the following company information to help answer the customer's question.

        Company Information:
        %{context}

        Customer Question: %{query}

        Please provide a helpful response based on our company policies and information above.
      TEMPLATE
    }
  )
end

begin
  cs_client = Prescient.client(:customer_service)

  if cs_client.available?
    # First ask without any supporting context.
    reply = cs_client.generate_response("What's your return policy?")
    puts "šŸŽ§ Customer Service Response:"
    puts reply[:response]

    # Then ask again, grounded in a company policy document.
    policy_docs = [
      {
        'title' => 'Return Policy',
        'content' => 'We offer 30-day returns on all items in original condition with receipt.'
      }
    ]

    reply = cs_client.generate_response("What's your return policy?", policy_docs)
    puts "\nšŸŽ§ With Policy Context:"
    puts reply[:response]
  else
    puts "āŒ Customer service provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Example 2: Technical Documentation Assistant
puts "\n--- Example 2: Technical Documentation Assistant ---"

Prescient.configure do |c|
  c.add_provider(:tech_docs, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b',
    prompt_templates: {
      system_prompt: 'You are a technical documentation assistant. Provide clear, accurate, and detailed technical explanations with code examples when relevant.',
      no_context_template: <<~TEMPLATE.strip,
        %{system_prompt}

        Technical Question: %{query}

        Please provide a detailed technical explanation with examples if applicable.
      TEMPLATE
      with_context_template: <<~TEMPLATE.strip
        %{system_prompt}

        Documentation Context:
        %{context}

        Technical Question: %{query}

        Based on the documentation above, provide a comprehensive technical answer with relevant code examples.
      TEMPLATE
    }
  )
end

begin
  docs_client = Prescient.client(:tech_docs)

  if docs_client.available?
    # Documentation snippet supplied as grounding context.
    doc_snippets = [
      {
        'title' => 'API Authentication',
        'content' => 'Use Bearer tokens in the Authorization header: Authorization: Bearer your_token_here'
      }
    ]

    answer = docs_client.generate_response("How do I authenticate with the API?", doc_snippets)
    puts "šŸ’» Technical Documentation Response:"
    puts answer[:response]
  else
    puts "āŒ Technical docs provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Example 3: Creative Writing Assistant
puts "\n--- Example 3: Creative Writing Assistant ---"

Prescient.configure do |c|
  c.add_provider(:creative, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b',
    prompt_templates: {
      system_prompt: 'You are a creative writing assistant. Help with storytelling, character development, and creative inspiration. Be imaginative and encouraging.',
      no_context_template: <<~TEMPLATE.strip,
        %{system_prompt}

        Writing Request: %{query}

        Let's create something amazing together!
      TEMPLATE
      with_context_template: <<~TEMPLATE.strip
        %{system_prompt}

        Story Elements:
        %{context}

        Writing Request: %{query}

        Use the story elements above to craft your creative response.
      TEMPLATE
    }
  )
end

begin
  writer_client = Prescient.client(:creative)

  if writer_client.available?
    # Story elements handed to the assistant as context.
    story_elements = [
      {
        'title' => 'Setting',
        'content' => 'A mysterious library that exists between dimensions, where books write themselves'
      },
      {
        'title' => 'Character',
        'content' => 'Maya, a young librarian who can read the thoughts of books'
      }
    ]

    draft = writer_client.generate_response("Write an opening paragraph for this story", story_elements)
    puts "āœļø Creative Writing Response:"
    puts draft[:response]
  else
    puts "āŒ Creative provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Example 4: Default prompt override
puts "\n--- Example 4: Override Default Prompts ---"

# Only the system prompt is overridden here; the default no_context and
# with_context templates remain in effect.
Prescient.configure do |c|
  c.add_provider(:custom_default, Prescient::Provider::Ollama,
    url: ENV.fetch('OLLAMA_URL', 'http://localhost:11434'),
    embedding_model: 'nomic-embed-text',
    chat_model: 'llama3.1:8b',
    prompt_templates: {
      system_prompt: 'You are Sherlock Holmes. Approach every question with deductive reasoning and attention to detail.'
    }
  )
end

begin
  holmes = Prescient.client(:custom_default)

  if holmes.available?
    deduction = holmes.generate_response("How should I approach solving a complex problem?")
    puts "šŸ” Sherlock Holmes Response:"
    puts deduction[:response]
  else
    puts "āŒ Custom default provider not available"
  end
rescue Prescient::Error => e
  puts "āŒ Error: #{e.message}"
end

# Closing summary: quick reference for template customization.
[
  "\nšŸŽ‰ Custom prompt examples completed!",
  "\nšŸ’” Tips:",
  " - Use %{system_prompt}, %{query}, and %{context} placeholders in templates",
  " - Templates use Ruby's % string formatting",
  " - Override any or all template parts (system_prompt, no_context_template, with_context_template)",
  " - Each provider can have completely different prompt behavior"
].each { |line| puts line }