ragaai-catalyst 2.1.4.1b0__py3-none-any.whl → 2.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. ragaai_catalyst/__init__.py +23 -2
  2. ragaai_catalyst/dataset.py +462 -1
  3. ragaai_catalyst/evaluation.py +76 -7
  4. ragaai_catalyst/ragaai_catalyst.py +52 -10
  5. ragaai_catalyst/redteaming/__init__.py +7 -0
  6. ragaai_catalyst/redteaming/config/detectors.toml +13 -0
  7. ragaai_catalyst/redteaming/data_generator/scenario_generator.py +95 -0
  8. ragaai_catalyst/redteaming/data_generator/test_case_generator.py +120 -0
  9. ragaai_catalyst/redteaming/evaluator.py +125 -0
  10. ragaai_catalyst/redteaming/llm_generator.py +136 -0
  11. ragaai_catalyst/redteaming/llm_generator_old.py +83 -0
  12. ragaai_catalyst/redteaming/red_teaming.py +331 -0
  13. ragaai_catalyst/redteaming/requirements.txt +4 -0
  14. ragaai_catalyst/redteaming/tests/grok.ipynb +97 -0
  15. ragaai_catalyst/redteaming/tests/stereotype.ipynb +2258 -0
  16. ragaai_catalyst/redteaming/upload_result.py +38 -0
  17. ragaai_catalyst/redteaming/utils/issue_description.py +114 -0
  18. ragaai_catalyst/redteaming/utils/rt.png +0 -0
  19. ragaai_catalyst/redteaming_old.py +171 -0
  20. ragaai_catalyst/synthetic_data_generation.py +400 -22
  21. ragaai_catalyst/tracers/__init__.py +17 -1
  22. ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +4 -2
  23. ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +212 -148
  24. ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +657 -247
  25. ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +50 -19
  26. ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +588 -177
  27. ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +99 -100
  28. ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +3 -3
  29. ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +230 -29
  30. ragaai_catalyst/tracers/agentic_tracing/upload/trace_uploader.py +358 -0
  31. ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +75 -20
  32. ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py +55 -11
  33. ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +74 -0
  34. ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +47 -16
  35. ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +4 -2
  36. ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py +26 -3
  37. ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +182 -17
  38. ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +1233 -497
  39. ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +81 -10
  40. ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml +34 -0
  41. ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py +215 -0
  42. ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +0 -32
  43. ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py +3 -1
  44. ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +73 -47
  45. ragaai_catalyst/tracers/distributed.py +300 -0
  46. ragaai_catalyst/tracers/exporters/__init__.py +3 -1
  47. ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py +160 -0
  48. ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +129 -0
  49. ragaai_catalyst/tracers/langchain_callback.py +809 -0
  50. ragaai_catalyst/tracers/llamaindex_instrumentation.py +424 -0
  51. ragaai_catalyst/tracers/tracer.py +301 -55
  52. ragaai_catalyst/tracers/upload_traces.py +24 -7
  53. ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +61 -0
  54. ragaai_catalyst/tracers/utils/convert_llama_instru_callback.py +69 -0
  55. ragaai_catalyst/tracers/utils/extraction_logic_llama_index.py +74 -0
  56. ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +82 -0
  57. ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json +9365 -0
  58. ragaai_catalyst/tracers/utils/trace_json_converter.py +269 -0
  59. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/METADATA +367 -45
  60. ragaai_catalyst-2.1.5.dist-info/RECORD +97 -0
  61. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/WHEEL +1 -1
  62. ragaai_catalyst-2.1.4.1b0.dist-info/RECORD +0 -67
  63. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/LICENSE +0 -0
  64. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/top_level.txt +0 -0
ragaai_catalyst/redteaming/llm_generator_old.py
@@ -0,0 +1,83 @@
+ from typing import Dict, Any, Optional, Literal
+ import os
+ import json
+ from openai import OpenAI
+
+ class LLMGenerator:
+     # Models that support JSON mode
+     JSON_MODELS = {"gpt-4-1106-preview", "gpt-3.5-turbo-1106"}
+
+     def __init__(self, api_key: str, model_name: str = "gpt-4-1106-preview", temperature: float = 0.7,
+                  provider: Literal["openai", "xai"] = "openai"):
+         """
+         Initialize the LLM generator with the specified provider client.
+
+         Args:
+             api_key: The API key for the provider
+             model_name: The model to use (e.g., "gpt-4-1106-preview" for OpenAI, "grok-2-latest" for X.AI)
+             temperature: The sampling temperature to use for generation (default: 0.7)
+             provider: The LLM provider to use, either "openai" or "xai" (default: "openai")
+         """
+         self.model_name = model_name
+         self.temperature = temperature
+         self.provider = provider
+         self.api_key = api_key
+
+         # Initialize the client based on the provider
+         if provider.lower() == "openai":
+             self.client = OpenAI(api_key=self.api_key)
+         elif provider.lower() == "xai":
+             self.client = OpenAI(
+                 api_key=self.api_key,
+                 base_url="https://api.x.ai/v1"
+             )
+
+     def generate_response(self, system_prompt: str, user_prompt: str, max_tokens: int = 1000) -> Dict[str, Any]:
+         """
+         Generate a response using the configured provider's chat completions API.
+
+         Args:
+             system_prompt: The system prompt to guide the model's behavior
+             user_prompt: The user's input prompt
+
+         Returns:
+             Dict parsed from the model's JSON output
+         """
+         try:
+             # Configure the API call
+             kwargs = {
+                 "model": self.model_name,
+                 "messages": [
+                     {"role": "system", "content": system_prompt},
+                     {"role": "user", "content": user_prompt}
+                 ],
+                 "temperature": self.temperature,
+                 "max_tokens": max_tokens
+             }
+
+             # Add response_format for JSON-capable models
+             if self.model_name in self.JSON_MODELS:
+                 kwargs["response_format"] = {"type": "json_object"}
+
+             response = self.client.chat.completions.create(**kwargs)
+             content = response.choices[0].message.content
+
+             if isinstance(content, str):
+                 # Remove code block markers if present
+                 content = content.strip()
+                 if content.startswith("```"):
+                     # Remove the language identifier if present (e.g., ```json)
+                     content = content.split("\n", 1)[1] if content.startswith("```json") else content[3:]
+                     # Find the last code block marker and remove everything after it
+                     if "```" in content:
+                         content = content[:content.rfind("```")].strip()
+                     else:
+                         # If no closing marker is found, use the content as is
+                         content = content.strip()
+
+             content = json.loads(content)
+
+             return content
+
+         except Exception as e:
+             raise Exception(f"Error generating LLM response: {str(e)}")
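For orientation, a minimal usage sketch of the LLMGenerator added above (not part of the diff): the key, prompts, and printed field are illustrative, and it assumes OPENAI_API_KEY is exported in the environment. Because generate_response strips code fences and then calls json.loads on the output, the system prompt must ask the model for JSON.

import os

from ragaai_catalyst.redteaming.llm_generator_old import LLMGenerator

# Assumes OPENAI_API_KEY is set in the environment (illustrative).
generator = LLMGenerator(
    api_key=os.environ["OPENAI_API_KEY"],
    model_name="gpt-4-1106-preview",  # listed in JSON_MODELS, so JSON mode is enabled
    provider="openai",
)

# The system prompt requests JSON, since the return value is json.loads output.
result = generator.generate_response(
    system_prompt="Respond with a JSON object containing a single key 'summary'.",
    user_prompt="Summarize red teaming for LLM applications in one sentence.",
)
print(result["summary"])  # hypothetical key, defined by the system prompt above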
ragaai_catalyst/redteaming/red_teaming.py
@@ -0,0 +1,331 @@
+ from datetime import datetime
+ import json
+ import os
+ from typing import Dict, List, Any, Tuple, Literal, Optional
+
+ import pandas as pd
+ import tomli
+ from tqdm import tqdm
+
+ from .data_generator.scenario_generator import ScenarioGenerator, ScenarioInput
+ from .data_generator.test_case_generator import TestCaseGenerator, TestCaseInput
+ from .evaluator import Evaluator, EvaluationInput, Conversation
+ from .utils.issue_description import get_issue_description
+ from .upload_result import UploadResult
+ from rich import print
+
+ class RedTeaming:
+     def __init__(
+         self,
+         model_name: Literal["gpt-4-1106-preview", "grok-2-latest"] = "grok-2-latest",
+         provider: Literal["openai", "xai"] = "xai",
+         api_key: str = "",
+         api_base: str = "",
+         api_version: str = "",
+         scenario_temperature: float = 0.7,
+         test_temperature: float = 0.8,
+         eval_temperature: float = 0.3,
+     ):
+         """
+         Initialize the red teaming pipeline.
+
+         Args:
+             model_name: The model to use for generation and evaluation
+             api_key: API key for the chosen provider ("openai" or "xai")
+             scenario_temperature: Temperature for scenario generation
+             test_temperature: Temperature for test case generation
+             eval_temperature: Temperature for evaluation (lower for consistency)
+         """
+         if api_key == "" or api_key is None:
+             raise ValueError("API key is required")
+
+         # Load supported detectors configuration
+         self._load_supported_detectors()
+
+         # Initialize generators and evaluator
+         self.scenario_generator = ScenarioGenerator(api_key=api_key, api_base=api_base, api_version=api_version, model_name=model_name, temperature=scenario_temperature, provider=provider)
+         self.test_generator = TestCaseGenerator(api_key=api_key, api_base=api_base, api_version=api_version, model_name=model_name, temperature=test_temperature, provider=provider)
+         self.evaluator = Evaluator(api_key=api_key, api_base=api_base, api_version=api_version, model_name=model_name, temperature=eval_temperature, provider=provider)
+
+         self.save_path = None
+
+     def upload_result(self, project_name, dataset_name):
+         upload_result = UploadResult(project_name)
+         if self.save_path is None:
+             print('Please execute the RedTeaming run() method before uploading the result')
+             return
+         upload_result.upload_result(csv_path=self.save_path, dataset_name=dataset_name)
+
+
+     def _load_supported_detectors(self) -> None:
+         """Load supported detectors from the TOML configuration file."""
+         config_path = os.path.join(os.path.dirname(__file__), "config", "detectors.toml")
+         try:
+             with open(config_path, "rb") as f:
+                 config = tomli.load(f)
+                 self.supported_detectors = set(config.get("detectors", {}).get("detector_names", []))
+         except FileNotFoundError:
+             print(f"Warning: Detectors configuration file not found at {config_path}")
+             self.supported_detectors = set()
+         except Exception as e:
+             print(f"Error loading detectors configuration: {e}")
+             self.supported_detectors = set()
+
+     def validate_detectors(self, detectors: List[str]) -> None:
+         """Validate that all provided detectors are supported.
+
+         Args:
+             detectors: List of detector IDs to validate
+
+         Raises:
+             ValueError: If any detector is not supported
+         """
+         unsupported = [d for d in detectors if d not in self.supported_detectors]
+         if unsupported:
+             raise ValueError(
+                 f"Unsupported detectors: {unsupported}\n"
+                 f"Supported detectors are: {sorted(self.supported_detectors)}"
+             )
+
+     def get_supported_detectors(self) -> List[str]:
+         """Get the list of supported detectors."""
+         return sorted(self.supported_detectors)
+
+     def _get_save_path(self, description: str) -> str:
+         """Generate a path for saving the final DataFrame."""
+         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+         output_dir = os.path.join(os.path.dirname(__file__), "results")
+         os.makedirs(output_dir, exist_ok=True)
+
+         # Create a short slug from the description
+         slug = description.lower()[:30].replace(" ", "_")
+         return os.path.join(output_dir, f"red_teaming_{slug}_{timestamp}.csv")
+
+     def _save_results_to_csv(self, result_df: pd.DataFrame, description: str) -> str:
+         # Save the DataFrame
+         save_path = self._get_save_path(description)
+         result_df.to_csv(save_path, index=False)
+         print(f"\nResults saved to: {save_path}")
+         return save_path
+
+     def _run_with_examples(self, description: str, detectors: List[str], response_model: Any, examples: List[str], scenarios_per_detector: int) -> Tuple[pd.DataFrame, str]:
+         results = []
+         # Process each detector
+         for detector in detectors:
+             print('='*50)
+             print(f"Running detector: [yellow2]{detector}[/yellow2]")
+             print('='*50)
+
+             if isinstance(detector, str):
+                 # Get the issue description for this detector
+                 issue_description = get_issue_description(detector)
+             else:
+                 issue_description = detector.get("custom", "")
+
+             # Generate scenarios for this detector
+             scenario_input = ScenarioInput(
+                 description=description,
+                 category=issue_description,
+                 scenarios_per_detector=scenarios_per_detector
+             )
+             scenarios = self.scenario_generator.generate_scenarios(scenario_input)
+
+             # Process each scenario
+             for r, scenario in enumerate(scenarios):
+
+                 if isinstance(examples[0], str):
+                     test_examples = examples
+                     test_detectors = [detectors] * len(examples)
+                 elif isinstance(examples[0], dict):
+                     test_examples = [example["input"] for example in examples]
+                     test_detectors = [example["detectors"] for example in examples]
+
+                 # Evaluate test cases
+                 failed_tests = 0
+                 total_tests = 0
+                 for test_example, test_detector in tqdm(zip(test_examples, test_detectors), desc=f"Running {detector} scenario {r+1}/{len(scenarios)}", total=len(test_examples)):
+                     if detector in test_detector:
+                         user_message = test_example
+                         app_response = response_model(user_message)
+
+                         # Evaluate the conversation
+                         eval_input = EvaluationInput(
+                             description=description,
+                             conversation=Conversation(
+                                 user_message=user_message,
+                                 app_response=app_response
+                             ),
+                             scenarios=[scenario]
+                         )
+                         evaluation = self.evaluator.evaluate_conversation(eval_input)
+
+                         # Store results
+                         results.append({
+                             "detector": detector,
+                             "scenario": scenario,
+                             "user_message": test_example,
+                             "app_response": app_response,
+                             "evaluation_score": "pass" if evaluation["eval_passed"] else "fail",
+                             "evaluation_reason": evaluation["reason"]
+                         })
+
+                         if not evaluation["eval_passed"]:
+                             failed_tests += 1
+
+                         total_tests += 1
+
+                 # Report results for this scenario
+                 if failed_tests > 0:
+                     print(f"{detector} scenario {r+1}: [bright_red]{failed_tests}/{total_tests} examples failed[/bright_red]")
+                 elif total_tests > 0:
+                     print(f"{detector} scenario {r+1}: [green]All {total_tests} examples passed[/green]")
+                 else:
+                     print(f"No examples provided to test {detector} scenario {r+1}")
+                 print('-'*100)
+
+         # Save results to a CSV file
+         results_df = pd.DataFrame(results)
+         save_path = self._save_results_to_csv(results_df, description)
+         self.save_path = save_path
+
+         return results_df, save_path
+
+     def _run_without_examples(self, description: str, detectors: List[str], response_model: Any, model_input_format: Dict[str, Any], scenarios_per_detector: int, test_cases_per_scenario: int) -> Tuple[pd.DataFrame, str]:
+         results = []
+         # Process each detector
+         for detector in detectors:
+             print('='*50)
+             print(f"Running detector: [yellow2]{detector}[/yellow2]")
+             print('='*50)
+
+             if isinstance(detector, str):
+                 # Get the issue description for this detector
+                 issue_description = get_issue_description(detector)
+             else:
+                 issue_description = detector.get("custom", "")
+
+             # Generate scenarios for this detector
+             scenario_input = ScenarioInput(
+                 description=description,
+                 category=issue_description,
+                 scenarios_per_detector=scenarios_per_detector
+             )
+             scenarios = self.scenario_generator.generate_scenarios(scenario_input)
+
+             # Process each scenario
+             for r, scenario in enumerate(scenarios):
+                 # Generate test cases
+                 test_input = TestCaseInput(
+                     description=description,
+                     category=issue_description,
+                     scenario=scenario,
+                     format_example=model_input_format,
+                     languages=["English"],
+                     num_inputs=test_cases_per_scenario
+                 )
+                 test_cases = self.test_generator.generate_test_cases(test_input)
+
+                 # Evaluate test cases
+                 failed_tests = 0
+                 with tqdm(test_cases["inputs"],
+                           desc=f"Evaluating {detector} scenario {r+1}/{len(scenarios)}") as pbar:
+                     for test_case in pbar:
+                         user_message = test_case["user_input"]
+                         app_response = response_model(user_message)
+
+                         # Evaluate the conversation
+                         eval_input = EvaluationInput(
+                             description=description,
+                             conversation=Conversation(
+                                 user_message=user_message,
+                                 app_response=app_response
+                             ),
+                             scenarios=[scenario]
+                         )
+                         evaluation = self.evaluator.evaluate_conversation(eval_input)
+
+                         # Store results
+                         results.append({
+                             "detector": detector,
+                             "scenario": scenario,
+                             "user_message": user_message,
+                             "app_response": app_response,
+                             "evaluation_score": "pass" if evaluation["eval_passed"] else "fail",
+                             "evaluation_reason": evaluation["reason"]
+                         })
+
+                         if not evaluation["eval_passed"]:
+                             failed_tests += 1
+
+                 # Report results for this scenario
+                 total_tests = len(test_cases["inputs"])
+                 if failed_tests > 0:
+                     print(f"{detector} scenario {r+1}: [bright_red]{failed_tests}/{total_tests} tests failed[/bright_red]")
+                 else:
+                     print(f"{detector} scenario {r+1}: [green]All {total_tests} tests passed[/green]")
+                 print('-'*100)
+
+         # Save results to a CSV file
+         results_df = pd.DataFrame(results)
+         save_path = self._save_results_to_csv(results_df, description)
+         self.save_path = save_path
+
+         return results_df, save_path
+
+
+     def run(
+         self,
+         description: str,
+         detectors: List[str],
+         response_model: Any,
+         examples: Optional[List[str]] = None,
+         model_input_format: Optional[Dict[str, Any]] = None,
+         scenarios_per_detector: int = 4,
+         examples_per_scenario: int = 5  # used only if examples are not provided
+     ) -> Tuple[pd.DataFrame, str]:
+         """
+         Run the complete red teaming pipeline.
+
+         Args:
+             description: Description of the app being tested
+             detectors: List of detector names to test against (e.g., ["stereotypes", "harmful_content"])
+             response_model: Function that takes a user message and returns the app's response
+             examples: List of example inputs to test; if provided, these are used instead of generated test cases
+             model_input_format: Format example for test case generation (used only when no examples are provided)
+             scenarios_per_detector: Number of test scenarios to generate per detector
+             examples_per_scenario: Number of test cases to generate per scenario
+
+         Returns:
+             A tuple of (results DataFrame, saved CSV path); the DataFrame has columns:
+             - detector: The detector being tested
+             - scenario: The scenario being tested
+             - user_message: The test input
+             - app_response: The model's response
+             - evaluation_score and evaluation_reason: "pass" or "fail", with the reason
+         """
+
+         if examples is None:
+             examples = []
+         if model_input_format is None:
+             model_input_format = {
+                 "user_input": "Hi, I am looking for job recommendations",
+                 "user_name": "John"
+             }
+
+         # Validate detectors
+         inbuild_detector = []
+         for detector in detectors:
+             if isinstance(detector, str):
+                 inbuild_detector.append(detector)
+             elif isinstance(detector, dict):
+                 if 'custom' not in detector.keys() or len(detector.keys()) != 1:
+                     raise ValueError('The custom detector must be a dictionary with the single key "custom" and a string value')
+             else:
+                 raise ValueError('Detector must be a string or a dictionary with the single key "custom" and a string value')
+
+         self.validate_detectors(inbuild_detector)
+
+         if examples:
+             return self._run_with_examples(description, detectors, response_model, examples, scenarios_per_detector)
+
+         return self._run_without_examples(description, detectors, response_model, model_input_format, scenarios_per_detector, examples_per_scenario)
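For orientation, a minimal end-to-end sketch of the RedTeaming pipeline added above (not part of the diff). The API key, detector name, project/dataset names, and the stub response_model are placeholders; detector names must match entries in config/detectors.toml, which get_supported_detectors() lists at runtime.

import os

from ragaai_catalyst.redteaming.red_teaming import RedTeaming

# Assumes XAI_API_KEY is set in the environment (illustrative).
rt = RedTeaming(
    model_name="grok-2-latest",
    provider="xai",
    api_key=os.environ["XAI_API_KEY"],
)
print(rt.get_supported_detectors())  # detector names configured in detectors.toml

def response_model(user_message: str) -> str:
    # Stand-in for the application under test; replace with a call to your app.
    return f"I can only help with job recommendations. You said: {user_message}"

# "stereotypes" is an assumed detector name; pick one from the list printed above.
results_df, csv_path = rt.run(
    description="A job recommendation chatbot",
    detectors=["stereotypes"],
    response_model=response_model,
    scenarios_per_detector=1,
    examples_per_scenario=2,
)

# Optionally push the saved CSV to a Catalyst project (names are placeholders).
rt.upload_result(project_name="my_project", dataset_name="red_team_run_1")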
ragaai_catalyst/redteaming/requirements.txt
@@ -0,0 +1,4 @@
+ openai>=1.0.0
+ pandas>=2.0.0
+ tomli>=2.0.0
+ tqdm>=4.65.0
ragaai_catalyst/redteaming/tests/grok.ipynb
@@ -0,0 +1,97 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 11,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "True"
+       ]
+      },
+      "execution_count": 11,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "from dotenv import load_dotenv\n",
+     "\n",
+     "load_dotenv()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 12,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "The answer to the ultimate question of life, the universe, and everything is 42. However, the actual question itself remains unknown. It's a bit of a cosmic joke, really. But hey, who needs a definitive answer when you can enjoy the journey of figuring it out? Just remember, the answer is out there, and it's 42. Now, go forth and explore the vastness of existence!\n"
+      ]
+     }
+    ],
+    "source": [
+     "# In your terminal, first run:\n",
+     "# pip install openai\n",
+     "\n",
+     "import os\n",
+     "from openai import OpenAI\n",
+     "\n",
+     "XAI_API_KEY = os.getenv('XAI_API_KEY')\n",
+     "client = OpenAI(\n",
+     "    api_key=XAI_API_KEY,\n",
+     "    base_url=\"https://api.x.ai/v1\",\n",
+     ")\n",
+     "\n",
+     "completion = client.chat.completions.create(\n",
+     "    model=\"grok-2-latest\",\n",
+     "    messages=[\n",
+     "        {\n",
+     "            \"role\": \"system\",\n",
+     "            \"content\": \"You are Grok, a chatbot inspired by the Hitchhikers Guide to the Galaxy.\"\n",
+     "        },\n",
+     "        {\n",
+     "            \"role\": \"user\",\n",
+     "            \"content\": \"What is the meaning of life, the universe, and everything?\"\n",
+     "        },\n",
+     "    ],\n",
+     ")\n",
+     "\n",
+     "print(completion.choices[0].message.content)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "base",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.12.2"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }