rapidfireai 0.9.10__py3-none-any.whl → 0.9.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rapidfireai might be problematic.
- rapidfireai/cli.py +23 -3
- rapidfireai/experiment.py +5 -1
- rapidfireai/start.sh +152 -139
- rapidfireai/utils/constants.py +1 -2
- rapidfireai/utils/ping.py +29 -0
- rapidfireai/utils/shm_manager.py +15 -11
- rapidfireai/version.py +2 -2
- {rapidfireai-0.9.10.dist-info → rapidfireai-0.9.11.dist-info}/METADATA +31 -7
- {rapidfireai-0.9.10.dist-info → rapidfireai-0.9.11.dist-info}/RECORD +19 -12
- tutorial_notebooks/rf-tutorial-dpo-alignment-lite.ipynb +412 -0
- tutorial_notebooks/rf-tutorial-dpo-alignment.ipynb +427 -0
- tutorial_notebooks/rf-tutorial-grpo-mathreasoning-lite.ipynb +358 -0
- tutorial_notebooks/rf-tutorial-grpo-mathreasoning.ipynb +371 -0
- tutorial_notebooks/rf-tutorial-sft-chatqa-lite.ipynb +329 -0
- tutorial_notebooks/rf-tutorial-sft-chatqa.ipynb +331 -0
- {rapidfireai-0.9.10.dist-info → rapidfireai-0.9.11.dist-info}/WHEEL +0 -0
- {rapidfireai-0.9.10.dist-info → rapidfireai-0.9.11.dist-info}/entry_points.txt +0 -0
- {rapidfireai-0.9.10.dist-info → rapidfireai-0.9.11.dist-info}/licenses/LICENSE +0 -0
- {rapidfireai-0.9.10.dist-info → rapidfireai-0.9.11.dist-info}/top_level.txt +0 -0
tutorial_notebooks/rf-tutorial-dpo-alignment-lite.ipynb
@@ -0,0 +1,412 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "668ecb93",
+   "metadata": {},
+   "source": [
+    "### RapidFire AI Tutorial Use Case: DPO for Alignment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bb9cb2b3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from rapidfireai import Experiment\n",
+    "from rapidfireai.automl import List, RFGridSearch, RFModelConfig, RFLoraConfig, RFDPOConfig"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9bb751f2",
+   "metadata": {},
+   "source": [
+    "### Load Dataset and Sample Train Subset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e4c3655f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from datasets import load_dataset\n",
+    "\n",
+    "# Select a subset of the dataset for demo purposes\n",
+    "train_dataset = load_dataset(\n",
+    "    \"trl-lib/ultrafeedback_binarized\", \n",
+    "    split=\"train\").select(range(100))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2485d399",
+   "metadata": {},
+   "source": [
+    "### Initialize Experiment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cf47412f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Every experiment instance must be uniquely named\n",
+    "experiment = Experiment(experiment_name=\"exp1-sft-dpo-lite\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3378be4c",
+   "metadata": {},
+   "source": [
+    "#### Direct Preference Optimization (DPO) Training \n",
+    "\n",
+    "The first step is to train a **Supervised Fine-Tuning (SFT)** model to ensure the data we train on is in-distribution for the DPO algorithm.\n",
+    "\n",
+    "Fine-tuning a language model via DPO consists of **two steps** and is significantly easier than PPO:\n",
+    "\n",
+    "1. Data Collection: Gather a preference dataset containing:\n",
+    "   - **Positive examples**: High-quality generations given a prompt\n",
+    "   - **Negative examples**: Lower-quality generations for the same prompt\n",
+    "   - **Paired comparisons**: Each prompt has both chosen and rejected responses\n",
+    "\n",
+    "2. Optimization: Maximize the log-likelihood of the DPO loss directly.\n",
+    "\n",
+    "Since our goal is to demonstrate DPO, we will assume you already have a **pretrained model or adapters from SFT training** and use them as the starting point."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "51df13d9",
+   "metadata": {},
+   "source": [
+    "### Model"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "abbbd1f3",
+   "metadata": {},
+   "source": [
+    "##### Quantization and LoRA Config"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c46f0565",
+   "metadata": {},
+   "source": [
+    "We supervised fine-tuned (QLoRA) the `mistralai/Mistral-7B-Instruct-v0.3` base model using the preferred completions from the `trl-lib/ultrafeedback_binarized` dataset for supervision. This ensures that the output paths sampled during alignment training already exist in the model's distribution. We do not want the model to have to learn a new task from the ground up during alignment training. The goal of DPO training is to achieve alignment with human preferences by contrastively learning which outputs are considered preferred and which ones are dispreferred.\n",
+    "\n",
+    "The quantization config for `rapidfire-ai-inc/mistral-7b-sft-bnb-4bit` is below:\n",
+    "```python\n",
+    "bnb_config = BitsAndBytesConfig(\n",
+    "    load_in_4bit=True,\n",
+    "    bnb_4bit_compute_dtype=torch.bfloat16,\n",
+    "    bnb_4bit_use_double_quant=True,\n",
+    "    bnb_4bit_quant_type=\"nf4\",\n",
+    ")\n",
+    "```\n",
+    "We will use QLoRA to further fine-tune the SFT model for alignment with DPO."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0ac53010",
+   "metadata": {},
+   "source": [
+    "### Define Multi-Config Knobs for Model, LoRA, and DPO Trainer using RapidFire AI Wrapper APIs"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "11960e7a",
+   "metadata": {},
+   "source": [
+    "We create a common base config, since most of the knobs will remain the same across the configs in our experiment."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "af5aab6b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from peft import TaskType\n",
+    "\n",
+    "MODEL_NAME_OR_PATH = \"rapidfire-ai-inc/mistral-7b-sft-bnb-4bit\"\n",
+    "\n",
+    "base_lora_config_lite = RFLoraConfig(\n",
+    "    task_type=TaskType.CAUSAL_LM,\n",
+    "    r=64, \n",
+    "    lora_alpha=64, \n",
+    "    lora_dropout=0.1, \n",
+    "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n",
+    "    bias=\"none\", \n",
+    ")\n",
+    "\n",
+    "base_dpo_config_lite = RFDPOConfig(\n",
+    "    gradient_checkpointing=True,\n",
+    "    gradient_checkpointing_kwargs={\"use_reentrant\": True},\n",
+    "    model_adapter_name=\"default\", # LoRA Adapter from original SFT trained model, if any. This adapter will be trained using DPO.\n",
+    "    ref_adapter_name=\"reference\", # LoRA Adapter from original SFT trained model, if any. This adapter will remain unchanged and will be used for the reference model.\n",
+    "    force_use_ref_model=False, \n",
+    "    loss_type=\"sigmoid\", # Uses Bradley-Terry model to calculate the loss.\n",
+    "    beta=0.1, \n",
+    "    max_prompt_length=512,\n",
+    "    max_completion_length=512,\n",
+    "    max_length=1024, # Prompt + completion\n",
+    "    per_device_train_batch_size=2,\n",
+    "    gradient_accumulation_steps=2,\n",
+    "    learning_rate=5e-4,\n",
+    "    warmup_ratio=0.1,\n",
+    "    weight_decay=0,\n",
+    "    lr_scheduler_type=\"linear\",\n",
+    "    num_train_epochs=1, \n",
+    "    optim=\"adamw_8bit\",\n",
+    "    bf16=True,\n",
+    "    save_strategy=\"epoch\",\n",
+    "    logging_strategy=\"steps\",\n",
+    "    logging_steps=1\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0d8d056c",
+   "metadata": {},
+   "source": [
+    "Now we create 3 different configs for experimentation by cloning the base config and only modifying the knobs we need."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3f7c2957",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Config 1 - Basic Bradley-Terry model with medium LoRA and large beta\n",
+    "lora_config_1 = base_lora_config_lite.copy()\n",
+    "lora_config_1.r = 16\n",
+    "lora_config_1.lora_alpha = 16\n",
+    "\n",
+    "dpo_config_1 = base_dpo_config_lite.copy()\n",
+    "dpo_config_1.loss_type = \"sigmoid\"\n",
+    "dpo_config_1.beta = 0.1\n",
+    "\n",
+    "# Config 2 - Assumes noisy preference data, use label smoothing to handle it with Robust loss\n",
+    "lora_config_2 = base_lora_config_lite.copy()\n",
+    "lora_config_2.r = 16\n",
+    "lora_config_2.lora_alpha = 16\n",
+    "\n",
+    "dpo_config_2 = base_dpo_config_lite.copy()\n",
+    "dpo_config_2.loss_type = \"robust\"\n",
+    "dpo_config_2.beta = 0.1\n",
+    "dpo_config_2.label_smoothing = 0.5\n",
+    "\n",
+    "# Config 3 - Use a combined loss function with weighted sum\n",
+    "lora_config_3 = base_lora_config_lite.copy()\n",
+    "lora_config_3.r = 16\n",
+    "lora_config_3.lora_alpha = 16\n",
+    "\n",
+    "dpo_config_3 = base_dpo_config_lite.copy()\n",
+    "dpo_config_3.loss_type = [\"sigmoid\", \"bco_pair\", \"sft\"]\n",
+    "dpo_config_3.loss_weights = [0.8, 0.2, 1.0]\n",
+    "dpo_config_3.beta = 0.1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5992c776",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "\n",
+    "# List of 3 separate configs\n",
+    "config_set = List([\n",
+    "    RFModelConfig(\n",
+    "        model_name=MODEL_NAME_OR_PATH,\n",
+    "        ref_model_name=None,\n",
+    "        peft_config=lora_config_1,\n",
+    "        training_args=dpo_config_1,\n",
+    "        model_kwargs={\"device_map\": \"auto\", \"torch_dtype\": torch.bfloat16},\n",
+    "        tokenizer_kwargs={\"model_max_length\": 1024, \"padding_side\": \"left\", \"truncation\": True}\n",
+    "    ),\n",
+    "    RFModelConfig(\n",
+    "        model_name=MODEL_NAME_OR_PATH,\n",
+    "        ref_model_name=None,\n",
+    "        peft_config=lora_config_2,\n",
+    "        training_args=dpo_config_2,\n",
+    "        model_kwargs={ \"device_map\": \"auto\", \"torch_dtype\": torch.bfloat16},\n",
+    "        tokenizer_kwargs={\"model_max_length\": 1024, \"padding_side\": \"left\", \"truncation\": True}\n",
+    "    ),\n",
+    "    RFModelConfig(\n",
+    "        model_name=MODEL_NAME_OR_PATH,\n",
+    "        ref_model_name=None,\n",
+    "        peft_config=lora_config_3,\n",
+    "        training_args=dpo_config_3,\n",
+    "        model_kwargs={ \"device_map\": \"auto\", \"torch_dtype\": torch.bfloat16},\n",
+    "        tokenizer_kwargs={\"model_max_length\": 1024, \"padding_side\": \"left\", \"truncation\": True}\n",
+    "    )\n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b02f3152",
+   "metadata": {},
+   "source": [
+    "#### Define Model Creation Function"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1854ddf0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def sample_create_model(model_config): \n",
+    "    \"\"\"Function to create model object for any given config; must return tuple of (model, tokenizer)\"\"\"\n",
+    "    from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+    "\n",
+    "    model = AutoModelForCausalLM.from_pretrained(\n",
+    "        model_config[\"model_name\"], \n",
+    "        **model_config[\"model_kwargs\"]\n",
+    "    )\n",
+    "    model.config.use_cache = False # Disable caching to save memory and improve stability during training\n",
+    "\n",
+    "    tokenizer = AutoTokenizer.from_pretrained(\n",
+    "        model_config[\"model_name\"], \n",
+    "        **model_config[\"tokenizer_kwargs\"]\n",
+    "    )\n",
+    "    if tokenizer.pad_token is None:\n",
+    "        tokenizer.pad_token = tokenizer.eos_token\n",
+    "\n",
+    "    return (model, tokenizer)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "abf1feb4",
+   "metadata": {},
+   "source": [
+    "#### Generate Config Group"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7c2402aa",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Simple grid search across all sets of config knob values = 3 combinations in total\n",
+    "config_group = RFGridSearch(\n",
+    "    configs=config_set,\n",
+    "    trainer_type=\"DPO\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "d0f3d1ab",
+   "metadata": {},
+   "source": [
+    "### Run Multi-Config Training"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b2cc2c0d",
+   "metadata": {},
+   "source": [
+    "##### DPO Training Metrics\n",
+    "\n",
+    "During training and evaluation, we track the following reward metrics:\n",
+    "\n",
+    "| Metric | Description |\n",
+    "|--------|-------------|\n",
+    "| `rewards/chosen` | Mean difference between policy and reference model log probabilities for chosen responses (scaled by β) |\n",
+    "| `rewards/rejected` | Mean difference between policy and reference model log probabilities for rejected responses (scaled by β) |\n",
+    "| `rewards/accuracies` | Percentage of cases where chosen rewards > rejected rewards |\n",
+    "| `rewards/margins` | Mean difference between chosen and rejected rewards |\n",
+    "\n",
+    "##### Mathematical Representation\n",
+    "\n",
+    "- **Chosen Rewards**: $\\beta * (\\log \\pi_\\theta(y_w|x) - \\log \\pi_{ref}(y_w|x))$\n",
+    "- **Rejected Rewards**: $\\beta * (\\log \\pi_\\theta(y_l|x) - \\log \\pi_{ref}(y_l|x))$\n",
+    "- **Accuracy**: $\\mathbb{E}[\\mathbf{1}[r_{chosen} > r_{rejected}]]$\n",
+    "- **Margin**: $\\mathbb{E}[r_{chosen} - r_{rejected}]$\n",
+    "\n",
+    "Where:\n",
+    "- $\\pi_\\theta$ = policy model\n",
+    "- $\\pi_{ref}$ = reference model \n",
+    "- $y_w$ = chosen (winning) response\n",
+    "- $y_l$ = rejected (losing) response\n",
+    "- $\\beta$ = temperature parameter"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e8c2dcad",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Launch training of all configs in the config_group with swap granularity of 3 chunks\n",
+    "experiment.run_fit(config_group, sample_create_model, train_dataset, eval_dataset=None, num_chunks=3, seed=42)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4b0a81b9",
+   "metadata": {},
+   "source": [
+    "### End Current Experiment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e85f16ee",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "experiment.end()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "lite",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
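
Note on the metrics described in the notebook's final markdown cell: the four `rewards/*` values follow directly from the formulas shown there. The snippet below is a minimal, self-contained sketch of how the Bradley-Terry ("sigmoid") DPO loss and those metrics can be computed from summed per-sequence log-probabilities; it is illustrative only, not the rapidfireai or trl implementation, and the function name and toy inputs are invented for this sketch.

```python
# Illustrative sketch of the "sigmoid" (Bradley-Terry) DPO loss and the
# rewards/* metrics listed in the notebook's metrics table.
# Inputs are per-sequence log-probabilities (summed over completion tokens)
# under the policy and the frozen reference model for the chosen (y_w) and
# rejected (y_l) responses of each prompt.
import torch
import torch.nn.functional as F


def dpo_sigmoid_loss_and_metrics(
    policy_chosen_logps: torch.Tensor,    # log pi_theta(y_w | x), shape (batch,)
    policy_rejected_logps: torch.Tensor,  # log pi_theta(y_l | x), shape (batch,)
    ref_chosen_logps: torch.Tensor,       # log pi_ref(y_w | x), shape (batch,)
    ref_rejected_logps: torch.Tensor,     # log pi_ref(y_l | x), shape (batch,)
    beta: float = 0.1,
):
    # Implicit rewards: beta * (log pi_theta - log pi_ref)
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)

    # Bradley-Terry / sigmoid DPO loss: -log sigmoid(reward margin)
    margins = chosen_rewards - rejected_rewards
    loss = -F.logsigmoid(margins).mean()

    metrics = {
        "rewards/chosen": chosen_rewards.mean().item(),
        "rewards/rejected": rejected_rewards.mean().item(),
        "rewards/accuracies": (chosen_rewards > rejected_rewards).float().mean().item(),
        "rewards/margins": margins.mean().item(),
    }
    return loss, metrics


# Toy usage with random log-probabilities (hypothetical values, batch of 4)
torch.manual_seed(0)
lp = {k: -torch.rand(4) * 10 for k in ("pc", "pr", "rc", "rr")}
loss, metrics = dpo_sigmoid_loss_and_metrics(lp["pc"], lp["pr"], lp["rc"], lp["rr"], beta=0.1)
print(loss, metrics)
```

With `loss_type="sigmoid"` and `beta=0.1` as in the base config above, the loss is simply the negative log-sigmoid of the reward margin, which is why `rewards/margins` trending upward and `rewards/accuracies` approaching 1.0 indicate the policy is learning to separate chosen from rejected responses.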