gitarsenal-cli 1.9.76 → 1.9.77

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/.venv_status.json +1 -1
  2. package/kill_claude/prompts/claude-code-tool-prompts.md +9 -0
  3. package/package.json +1 -1
  4. package/python/test_modalSandboxScript.py +13 -43
  5. package/kill_claude/nanoGPT/.gitattributes +0 -3
  6. package/kill_claude/nanoGPT/LICENSE +0 -21
  7. package/kill_claude/nanoGPT/README.md +0 -227
  8. package/kill_claude/nanoGPT/assets/gpt2_124M_loss.png +0 -0
  9. package/kill_claude/nanoGPT/assets/nanogpt.jpg +0 -0
  10. package/kill_claude/nanoGPT/bench.py +0 -117
  11. package/kill_claude/nanoGPT/config/eval_gpt2.py +0 -8
  12. package/kill_claude/nanoGPT/config/eval_gpt2_large.py +0 -8
  13. package/kill_claude/nanoGPT/config/eval_gpt2_medium.py +0 -8
  14. package/kill_claude/nanoGPT/config/eval_gpt2_xl.py +0 -8
  15. package/kill_claude/nanoGPT/config/finetune_shakespeare.py +0 -25
  16. package/kill_claude/nanoGPT/config/train_gpt2.py +0 -25
  17. package/kill_claude/nanoGPT/config/train_shakespeare_char.py +0 -37
  18. package/kill_claude/nanoGPT/configurator.py +0 -47
  19. package/kill_claude/nanoGPT/data/openwebtext/prepare.py +0 -81
  20. package/kill_claude/nanoGPT/data/openwebtext/readme.md +0 -15
  21. package/kill_claude/nanoGPT/data/shakespeare/prepare.py +0 -33
  22. package/kill_claude/nanoGPT/data/shakespeare/readme.md +0 -9
  23. package/kill_claude/nanoGPT/data/shakespeare_char/prepare.py +0 -68
  24. package/kill_claude/nanoGPT/data/shakespeare_char/readme.md +0 -9
  25. package/kill_claude/nanoGPT/model.py +0 -330
  26. package/kill_claude/nanoGPT/sample.py +0 -89
  27. package/kill_claude/nanoGPT/scaling_laws.ipynb +0 -792
  28. package/kill_claude/nanoGPT/train.py +0 -336
  29. package/kill_claude/nanoGPT/transformer_sizing.ipynb +0 -402
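The diff below reflects what the registry's diff view reports for these two published versions. As a hedged sketch (not part of this release), an equivalent patch can usually be regenerated locally with `npm diff` (npm 7+) or by unpacking both tarballs and comparing them; the file names and flags below are illustrative:

```sh
# Reproduce this diff locally (requires npm 7 or newer).
npm diff --diff=gitarsenal-cli@1.9.76 --diff=gitarsenal-cli@1.9.77

# Or fetch both tarballs and compare them directly.
npm pack gitarsenal-cli@1.9.76 gitarsenal-cli@1.9.77
tar -xzf gitarsenal-cli-1.9.76.tgz && mv package v1.9.76
tar -xzf gitarsenal-cli-1.9.77.tgz && mv package v1.9.77
diff -ruN v1.9.76 v1.9.77 > gitarsenal-cli-1.9.76-1.9.77.patch
```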
package/.venv_status.json CHANGED
@@ -1 +1 @@
- {"created":"2025-08-17T05:50:53.850Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
+ {"created":"2025-08-17T08:32:36.269Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/kill_claude/prompts/claude-code-tool-prompts.md CHANGED
@@ -56,6 +56,15 @@ Usage notes:
  - VERY IMPORTANT: You MUST avoid using search commands like `find` and `grep`. Instead use Grep, Glob, or Task to search. You MUST avoid read tools like `cat`, `head`, `tail`, and `ls`, and use Read and LS to read files.
  - If you _still_ need to run `grep`, STOP. ALWAYS USE ripgrep at `rg` first, which all Claude Code users have pre-installed.
  - IMPORTANT: Always use `uv pip` instead of just `pip` when running pip commands. For example, use `uv pip install package_name` instead of `pip install package_name`.
+ - TESTING MODE: For ML training and inference commands, always use minimal/quick parameters to enable fast testing:
+ - FIRST: Read and examine the training script/config files to understand which parameters actually control training speed and duration
+ - Add `--max_steps 10` or `--epochs 1` for training commands
+ - Add `--batch_size 1` or `--batch_size 2` for memory efficiency
+ - Add `--eval_steps 5` for quick evaluation
+ - Use `--fast_dev_run` flag when available (PyTorch Lightning)
+ - Check for GPU availability with `nvidia-smi` or `python -c "import torch; print(torch.cuda.is_available())"` and adjust device settings accordingly
+ - Use `--device cuda` or `--gpu` flags when GPU is available, `--device cpu` when not
+ - Example: `python train.py --epochs 1 --batch_size 2 --max_steps 10 --device cuda`
  - When issuing multiple commands, use the ';' or '&&' operator to separate them. DO NOT use newlines (newlines are ok in quoted strings).
  - Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of `cd`. You may use `cd` if the User explicitly requests it.
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "gitarsenal-cli",
- "version": "1.9.76",
+ "version": "1.9.77",
  "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
  "main": "index.js",
  "bin": {
package/python/test_modalSandboxScript.py CHANGED
@@ -46,21 +46,6 @@ def ssh_container_function(ssh_password=None, repo_url=None, repo_name=None, set
  import os
  import json
  import sys
- import modal
-
- print("🐳 SSH Container Function Started!")
- print(f"šŸ“‹ Parameters received:")
- print(f" - SSH Password: {'***' if ssh_password else 'None'}")
- print(f" - Repo URL: {repo_url or 'None'}")
- print(f" - Repo Name: {repo_name or 'None'}")
- print(f" - Setup Commands: {len(setup_commands) if setup_commands else 0} commands")
- print(f" - OpenAI API Key: {'Set' if openai_api_key else 'Not set'}")
- print(f" - Anthropic API Key: {'Set' if anthropic_api_key else 'Not set'}")
- print(f" - Stored Credentials: {len(stored_credentials) if stored_credentials else 0} items")
-
- # Import only the modules we actually need (none currently for Agent-based approach)
- # Note: CommandListManager and llm_debugging functions are not used in the Agent-based approach
- print("āœ… Container setup complete - using Agent-based repository setup")
 
  # Set root password
  subprocess.run(["bash", "-c", f"echo 'root:{ssh_password}' | chpasswd"], check=True)
@@ -113,7 +98,6 @@ def ssh_container_function(ssh_password=None, repo_url=None, repo_name=None, set
  os.environ['ANTHROPIC_API_KEY'] = anthropic_api_key
 
  # Set up Anthropic API key from stored credentials
- anthropic_api_key = None
  if stored_credentials:
  # Look for Anthropic API key in various possible names
  for key_name in ['ANTHROPIC_API_KEY', 'anthropic_api_key', 'anthropic-api-key']:
@@ -566,35 +550,21 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
  try:
  # Wait for the function to start and print connection info (with timeout)
  print("ā³ Waiting for container to initialize...")
-
- # Use a timeout to see if the container is starting properly
- print("šŸ” Checking container status with 30-second timeout...")
  try:
- result = function_call.get(timeout=30)
+ print("\nā³ Monitoring container (press Ctrl+C to stop monitoring)...")
+ result = function_call.get() # Wait indefinitely
  print(f"šŸ”š Container function completed with result: {result}")
- except TimeoutError:
- print("ā° Container is still running after 30 seconds - this is expected!")
- print("šŸŽÆ The container should be accessible via SSH now.")
- print("šŸ’” The function will continue running until manually stopped.")
- print("šŸ”— Use Ctrl+C to stop monitoring, but the container will keep running.")
- print("šŸ”’ Keeping tokens active since container is still running.")
-
- # Continue waiting for user interrupt
- try:
- print("\nā³ Monitoring container (press Ctrl+C to stop monitoring)...")
- result = function_call.get() # Wait indefinitely
- print(f"šŸ”š Container function completed with result: {result}")
- except KeyboardInterrupt:
- print("\nšŸ›‘ Stopped monitoring. Container is still running remotely.")
- print("šŸ’” Use Modal's web UI or CLI to stop the container when done.")
- print("šŸ”’ Keeping tokens active since container is still running.")
- return {
- "app_name": app_name,
- "ssh_password": ssh_password,
- "volume_name": volume_name,
- "status": "monitoring_stopped",
- "function_call_id": function_call.object_id
- }
+ except KeyboardInterrupt:
+ print("\nšŸ›‘ Stopped monitoring. Container is still running remotely.")
+ print("šŸ’” Use Modal's web UI or CLI to stop the container when done.")
+ print("šŸ”’ Keeping tokens active since container is still running.")
+ return {
+ "app_name": app_name,
+ "ssh_password": ssh_password,
+ "volume_name": volume_name,
+ "status": "monitoring_stopped",
+ "function_call_id": function_call.object_id
+ }
 
  except KeyboardInterrupt:
  print("\nšŸ›‘ Interrupted by user. Container may still be running remotely.")
package/kill_claude/nanoGPT/.gitattributes DELETED
@@ -1,3 +0,0 @@
- # Override jupyter in Github language stats for more accurate estimate of repo code languages
- # reference: https://github.com/github/linguist/blob/master/docs/overrides.md#generated-code
- *.ipynb linguist-generated
package/kill_claude/nanoGPT/LICENSE DELETED
@@ -1,21 +0,0 @@
- MIT License
-
- Copyright (c) 2022 Andrej Karpathy
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
package/kill_claude/nanoGPT/README.md DELETED
@@ -1,227 +0,0 @@
-
- # nanoGPT
-
- ![nanoGPT](assets/nanogpt.jpg)
-
- The simplest, fastest repository for training/finetuning medium-sized GPTs. It is a rewrite of [minGPT](https://github.com/karpathy/minGPT) that prioritizes teeth over education. Still under active development, but currently the file `train.py` reproduces GPT-2 (124M) on OpenWebText, running on a single 8XA100 40GB node in about 4 days of training. The code itself is plain and readable: `train.py` is a ~300-line boilerplate training loop and `model.py` a ~300-line GPT model definition, which can optionally load the GPT-2 weights from OpenAI. That's it.
-
- ![repro124m](assets/gpt2_124M_loss.png)
-
- Because the code is so simple, it is very easy to hack to your needs, train new models from scratch, or finetune pretrained checkpoints (e.g. biggest one currently available as a starting point would be the GPT-2 1.3B model from OpenAI).
-
- ## install
-
- ```
- pip install torch numpy transformers datasets tiktoken wandb tqdm
- ```
-
- Dependencies:
-
- - [pytorch](https://pytorch.org) <3
- - [numpy](https://numpy.org/install/) <3
- - `transformers` for huggingface transformers <3 (to load GPT-2 checkpoints)
- - `datasets` for huggingface datasets <3 (if you want to download + preprocess OpenWebText)
- - `tiktoken` for OpenAI's fast BPE code <3
- - `wandb` for optional logging <3
- - `tqdm` for progress bars <3
-
- ## quick start
-
- If you are not a deep learning professional and you just want to feel the magic and get your feet wet, the fastest way to get started is to train a character-level GPT on the works of Shakespeare. First, we download it as a single (1MB) file and turn it from raw text into one large stream of integers:
-
- ```sh
- python data/shakespeare_char/prepare.py
- ```
-
- This creates a `train.bin` and `val.bin` in that data directory. Now it is time to train your GPT. The size of it very much depends on the computational resources of your system:
-
- **I have a GPU**. Great, we can quickly train a baby GPT with the settings provided in the [config/train_shakespeare_char.py](config/train_shakespeare_char.py) config file:
-
- ```sh
- python train.py config/train_shakespeare_char.py
- ```
-
- If you peek inside it, you'll see that we're training a GPT with a context size of up to 256 characters, 384 feature channels, and it is a 6-layer Transformer with 6 heads in each layer. On one A100 GPU this training run takes about 3 minutes and the best validation loss is 1.4697. Based on the configuration, the model checkpoints are being written into the `--out_dir` directory `out-shakespeare-char`. So once the training finishes we can sample from the best model by pointing the sampling script at this directory:
-
- ```sh
- python sample.py --out_dir=out-shakespeare-char
- ```
-
- This generates a few samples, for example:
-
- ```
- ANGELO:
- And cowards it be strawn to my bed,
- And thrust the gates of my threats,
- Because he that ale away, and hang'd
- An one with him.
-
- DUKE VINCENTIO:
- I thank your eyes against it.
-
- DUKE VINCENTIO:
- Then will answer him to save the malm:
- And what have you tyrannous shall do this?
-
- DUKE VINCENTIO:
- If you have done evils of all disposition
- To end his power, the day of thrust for a common men
- That I leave, to fight with over-liking
- Hasting in a roseman.
- ```
-
- lol `¯\_(ツ)_/¯`. Not bad for a character-level model after 3 minutes of training on a GPU. Better results are quite likely obtainable by instead finetuning a pretrained GPT-2 model on this dataset (see finetuning section later).
-
- **I only have a macbook** (or other cheap computer). No worries, we can still train a GPT but we want to dial things down a notch. I recommend getting the bleeding edge PyTorch nightly ([select it here](https://pytorch.org/get-started/locally/) when installing) as it is currently quite likely to make your code more efficient. But even without it, a simple train run could look as follows:
-
- ```sh
- python train.py config/train_shakespeare_char.py --device=cpu --compile=False --eval_iters=20 --log_interval=1 --block_size=64 --batch_size=12 --n_layer=4 --n_head=4 --n_embd=128 --max_iters=2000 --lr_decay_iters=2000 --dropout=0.0
- ```
-
- Here, since we are running on CPU instead of GPU we must set both `--device=cpu` and also turn off PyTorch 2.0 compile with `--compile=False`. Then when we evaluate we get a bit more noisy but faster estimate (`--eval_iters=20`, down from 200), our context size is only 64 characters instead of 256, and the batch size only 12 examples per iteration, not 64. We'll also use a much smaller Transformer (4 layers, 4 heads, 128 embedding size), and decrease the number of iterations to 2000 (and correspondingly usually decay the learning rate to around max_iters with `--lr_decay_iters`). Because our network is so small we also ease down on regularization (`--dropout=0.0`). This still runs in about ~3 minutes, but gets us a loss of only 1.88 and therefore also worse samples, but it's still good fun:
-
- ```sh
- python sample.py --out_dir=out-shakespeare-char --device=cpu
- ```
- Generates samples like this:
-
- ```
- GLEORKEN VINGHARD III:
- Whell's the couse, the came light gacks,
- And the for mought you in Aut fries the not high shee
- bot thou the sought bechive in that to doth groan you,
- No relving thee post mose the wear
- ```
-
- Not bad for ~3 minutes on a CPU, for a hint of the right character gestalt. If you're willing to wait longer, feel free to tune the hyperparameters, increase the size of the network, the context length (`--block_size`), the length of training, etc.
-
- Finally, on Apple Silicon Macbooks and with a recent PyTorch version make sure to add `--device=mps` (short for "Metal Performance Shaders"); PyTorch then uses the on-chip GPU that can *significantly* accelerate training (2-3X) and allow you to use larger networks. See [Issue 28](https://github.com/karpathy/nanoGPT/issues/28) for more.
-
- ## reproducing GPT-2
-
- A more serious deep learning professional may be more interested in reproducing GPT-2 results. So here we go - we first tokenize the dataset, in this case the [OpenWebText](https://openwebtext2.readthedocs.io/en/latest/), an open reproduction of OpenAI's (private) WebText:
-
- ```sh
- python data/openwebtext/prepare.py
- ```
-
- This downloads and tokenizes the [OpenWebText](https://huggingface.co/datasets/openwebtext) dataset. It will create a `train.bin` and `val.bin` which holds the GPT2 BPE token ids in one sequence, stored as raw uint16 bytes. Then we're ready to kick off training. To reproduce GPT-2 (124M) you'll want at least an 8X A100 40GB node and run:
-
- ```sh
- torchrun --standalone --nproc_per_node=8 train.py config/train_gpt2.py
- ```
-
- This will run for about 4 days using PyTorch Distributed Data Parallel (DDP) and go down to loss of ~2.85. Now, a GPT-2 model just evaluated on OWT gets a val loss of about 3.11, but if you finetune it it will come down to ~2.85 territory (due to an apparent domain gap), making the two models ~match.
-
- If you're in a cluster environment and you are blessed with multiple GPU nodes you can make GPU go brrrr e.g. across 2 nodes like:
-
- ```sh
- # Run on the first (master) node with example IP 123.456.123.456:
- torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=123.456.123.456 --master_port=1234 train.py
- # Run on the worker node:
- torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=123.456.123.456 --master_port=1234 train.py
- ```
-
- It is a good idea to benchmark your interconnect (e.g. iperf3). In particular, if you don't have Infiniband then also prepend `NCCL_IB_DISABLE=1` to the above launches. Your multinode training will work, but most likely _crawl_. By default checkpoints are periodically written to the `--out_dir`. We can sample from the model by simply `python sample.py`.
-
- Finally, to train on a single GPU simply run the `python train.py` script. Have a look at all of its args, the script tries to be very readable, hackable and transparent. You'll most likely want to tune a number of those variables depending on your needs.
-
- ## baselines
-
- OpenAI GPT-2 checkpoints allow us to get some baselines in place for openwebtext. We can get the numbers as follows:
-
- ```sh
- $ python train.py config/eval_gpt2.py
- $ python train.py config/eval_gpt2_medium.py
- $ python train.py config/eval_gpt2_large.py
- $ python train.py config/eval_gpt2_xl.py
- ```
-
- and observe the following losses on train and val:
-
- | model | params | train loss | val loss |
- | ------| ------ | ---------- | -------- |
- | gpt2 | 124M | 3.11 | 3.12 |
- | gpt2-medium | 350M | 2.85 | 2.84 |
- | gpt2-large | 774M | 2.66 | 2.67 |
- | gpt2-xl | 1558M | 2.56 | 2.54 |
-
- However, we have to note that GPT-2 was trained on (closed, never released) WebText, while OpenWebText is just a best-effort open reproduction of this dataset. This means there is a dataset domain gap. Indeed, taking the GPT-2 (124M) checkpoint and finetuning on OWT directly for a while reaches loss down to ~2.85. This then becomes the more appropriate baseline w.r.t. reproduction.
-
- ## finetuning
-
- Finetuning is no different than training, we just make sure to initialize from a pretrained model and train with a smaller learning rate. For an example of how to finetune a GPT on new text go to `data/shakespeare` and run `prepare.py` to download the tiny shakespeare dataset and render it into a `train.bin` and `val.bin`, using the OpenAI BPE tokenizer from GPT-2. Unlike OpenWebText this will run in seconds. Finetuning can take very little time, e.g. on a single GPU just a few minutes. Run an example finetuning like:
-
- ```sh
- python train.py config/finetune_shakespeare.py
- ```
-
- This will load the config parameter overrides in `config/finetune_shakespeare.py` (I didn't tune them much though). Basically, we initialize from a GPT2 checkpoint with `init_from` and train as normal, except shorter and with a small learning rate. If you're running out of memory try decreasing the model size (they are `{'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}`) or possibly decreasing the `block_size` (context length). The best checkpoint (lowest validation loss) will be in the `out_dir` directory, e.g. in `out-shakespeare` by default, per the config file. You can then run the code in `sample.py --out_dir=out-shakespeare`:
-
- ```
- THEODORE:
- Thou shalt sell me to the highest bidder: if I die,
- I sell thee to the first; if I go mad,
- I sell thee to the second; if I
- lie, I sell thee to the third; if I slay,
- I sell thee to the fourth: so buy or sell,
- I tell thee again, thou shalt not sell my
- possession.
-
- JULIET:
- And if thou steal, thou shalt not sell thyself.
-
- THEODORE:
- I do not steal; I sell the stolen goods.
-
- THEODORE:
- Thou know'st not what thou sell'st; thou, a woman,
- Thou art ever a victim, a thing of no worth:
- Thou hast no right, no right, but to be sold.
- ```
-
- Whoa there, GPT, entering some dark place over there. I didn't really tune the hyperparameters in the config too much, feel free to try!
-
- ## sampling / inference
-
- Use the script `sample.py` to sample either from pre-trained GPT-2 models released by OpenAI, or from a model you trained yourself. For example, here is a way to sample from the largest available `gpt2-xl` model:
-
- ```sh
- python sample.py \
- --init_from=gpt2-xl \
- --start="What is the answer to life, the universe, and everything?" \
- --num_samples=5 --max_new_tokens=100
- ```
-
- If you'd like to sample from a model you trained, use the `--out_dir` to point the code appropriately. You can also prompt the model with some text from a file, e.g. ```python sample.py --start=FILE:prompt.txt```.
-
- ## efficiency notes
-
- For simple model benchmarking and profiling, `bench.py` might be useful. It's identical to what happens in the meat of the training loop of `train.py`, but omits much of the other complexities.
-
- Note that the code by default uses [PyTorch 2.0](https://pytorch.org/get-started/pytorch-2.0/). At the time of writing (Dec 29, 2022) this makes `torch.compile()` available in the nightly release. The improvement from the one line of code is noticeable, e.g. cutting down iteration time from ~250ms / iter to 135ms / iter. Nice work PyTorch team!
-
- ## todos
-
- - Investigate and add FSDP instead of DDP
- - Eval zero-shot perplexities on standard evals (e.g. LAMBADA? HELM? etc.)
- - Finetune the finetuning script, I think the hyperparams are not great
- - Schedule for linear batch size increase during training
- - Incorporate other embeddings (rotary, alibi)
- - Separate out the optim buffers from model params in checkpoints I think
- - Additional logging around network health (e.g. gradient clip events, magnitudes)
- - Few more investigations around better init etc.
-
- ## troubleshooting
-
- Note that by default this repo uses PyTorch 2.0 (i.e. `torch.compile`). This is fairly new and experimental, and not yet available on all platforms (e.g. Windows). If you're running into related error messages try to disable this by adding `--compile=False` flag. This will slow down the code but at least it will run.
-
- For some context on this repository, GPT, and language modeling it might be helpful to watch my [Zero To Hero series](https://karpathy.ai/zero-to-hero.html). Specifically, the [GPT video](https://www.youtube.com/watch?v=kCc8FmEb1nY) is popular if you have some prior language modeling context.
-
- For more questions/discussions feel free to stop by **#nanoGPT** on Discord:
-
- [![](https://dcbadge.vercel.app/api/server/3zy8kqD9Cp?compact=true&style=flat)](https://discord.gg/3zy8kqD9Cp)
-
- ## acknowledgements
-
- All nanoGPT experiments are powered by GPUs on [Lambda labs](https://lambdalabs.com), my favorite Cloud GPU provider. Thank you Lambda labs for sponsoring nanoGPT!
package/kill_claude/nanoGPT/bench.py DELETED
@@ -1,117 +0,0 @@
- """
- A much shorter version of train.py for benchmarking
- """
- import os
- from contextlib import nullcontext
- import numpy as np
- import time
- import torch
- from model import GPTConfig, GPT
-
- # -----------------------------------------------------------------------------
- batch_size = 12
- block_size = 1024
- bias = False
- real_data = True
- seed = 1337
- device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
- dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32' or 'bfloat16' or 'float16'
- compile = True # use PyTorch 2.0 to compile the model to be faster
- profile = False # use pytorch profiler, or just simple benchmarking?
- exec(open('configurator.py').read()) # overrides from command line or config file
- # -----------------------------------------------------------------------------
-
- torch.manual_seed(seed)
- torch.cuda.manual_seed(seed)
- torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
- torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
- device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
- ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
- ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
-
- # data loading init
- if real_data:
- dataset = 'openwebtext'
- data_dir = os.path.join('data', dataset)
- train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
- def get_batch(split):
- data = train_data # note ignore split in benchmarking script
- ix = torch.randint(len(data) - block_size, (batch_size,))
- x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix])
- y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix])
- x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True)
- return x, y
- else:
- # alternatively, if fixed data is desired to not care about data loading
- x = torch.randint(50304, (batch_size, block_size), device=device)
- y = torch.randint(50304, (batch_size, block_size), device=device)
- get_batch = lambda split: (x, y)
-
- # model init
- gptconf = GPTConfig(
- block_size = block_size, # how far back does the model look? i.e. context size
- n_layer = 12, n_head = 12, n_embd = 768, # size of the model
- dropout = 0, # for determinism
- bias = bias,
- )
- model = GPT(gptconf)
- model.to(device)
-
- optimizer = model.configure_optimizers(weight_decay=1e-2, learning_rate=1e-4, betas=(0.9, 0.95), device_type=device_type)
-
- if compile:
- print("Compiling model...")
- model = torch.compile(model) # pytorch 2.0
-
- if profile:
- # useful docs on pytorch profiler:
- # - tutorial https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html
- # - api https://pytorch.org/docs/stable/profiler.html#torch.profiler.profile
- wait, warmup, active = 5, 5, 5
- num_steps = wait + warmup + active
- with torch.profiler.profile(
- activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
- schedule=torch.profiler.schedule(wait=wait, warmup=warmup, active=active, repeat=1),
- on_trace_ready=torch.profiler.tensorboard_trace_handler('./bench_log'),
- record_shapes=False,
- profile_memory=False,
- with_stack=False, # incurs an additional overhead, disable if not needed
- with_flops=True,
- with_modules=False, # only for torchscript models atm
- ) as prof:
-
- X, Y = get_batch('train')
- for k in range(num_steps):
- with ctx:
- logits, loss = model(X, Y)
- X, Y = get_batch('train')
- optimizer.zero_grad(set_to_none=True)
- loss.backward()
- optimizer.step()
- lossf = loss.item()
- print(f"{k}/{num_steps} loss: {lossf:.4f}")
-
- prof.step() # notify the profiler at end of each step
-
- else:
-
- # simple benchmarking
- torch.cuda.synchronize()
- for stage, num_steps in enumerate([10, 20]): # burnin, then benchmark
- t0 = time.time()
- X, Y = get_batch('train')
- for k in range(num_steps):
- with ctx:
- logits, loss = model(X, Y)
- X, Y = get_batch('train')
- optimizer.zero_grad(set_to_none=True)
- loss.backward()
- optimizer.step()
- lossf = loss.item()
- print(f"{k}/{num_steps} loss: {lossf:.4f}")
- torch.cuda.synchronize()
- t1 = time.time()
- dt = t1-t0
- mfu = model.estimate_mfu(batch_size * 1 * num_steps, dt)
- if stage == 1:
- print(f"time per iteration: {dt/num_steps*1000:.4f}ms, MFU: {mfu*100:.2f}%")
package/kill_claude/nanoGPT/config/eval_gpt2.py DELETED
@@ -1,8 +0,0 @@
- # evaluate the base gpt2
- # n_layer=12, n_head=12, n_embd=768
- # 124M parameters
- batch_size = 8
- eval_iters = 500 # use more iterations to get good estimate
- eval_only = True
- wandb_log = False
- init_from = 'gpt2'
package/kill_claude/nanoGPT/config/eval_gpt2_large.py DELETED
@@ -1,8 +0,0 @@
- # evaluate the base gpt2
- # n_layer=36, n_head=20, n_embd=1280
- # 774M parameters
- batch_size = 8
- eval_iters = 500 # use more iterations to get good estimate
- eval_only = True
- wandb_log = False
- init_from = 'gpt2-large'
package/kill_claude/nanoGPT/config/eval_gpt2_medium.py DELETED
@@ -1,8 +0,0 @@
- # evaluate the base gpt2
- # n_layer=24, n_head=16, n_embd=1024
- # 350M parameters
- batch_size = 8
- eval_iters = 500 # use more iterations to get good estimate
- eval_only = True
- wandb_log = False
- init_from = 'gpt2-medium'
package/kill_claude/nanoGPT/config/eval_gpt2_xl.py DELETED
@@ -1,8 +0,0 @@
- # evaluate the base gpt2
- # n_layer=48, n_head=25, n_embd=1600
- # 1558M parameters
- batch_size = 8
- eval_iters = 500 # use more iterations to get good estimate
- eval_only = True
- wandb_log = False
- init_from = 'gpt2-xl'
package/kill_claude/nanoGPT/config/finetune_shakespeare.py DELETED
@@ -1,25 +0,0 @@
- import time
-
- out_dir = 'out-shakespeare'
- eval_interval = 5
- eval_iters = 40
- wandb_log = False # feel free to turn on
- wandb_project = 'shakespeare'
- wandb_run_name = 'ft-' + str(time.time())
-
- dataset = 'shakespeare'
- init_from = 'gpt2-xl' # this is the largest GPT-2 model
-
- # only save checkpoints if the validation loss improves
- always_save_checkpoint = False
-
- # the number of examples per iter:
- # 1 batch_size * 32 grad_accum * 1024 tokens = 32,768 tokens/iter
- # shakespeare has 301,966 tokens, so 1 epoch ~= 9.2 iters
- batch_size = 1
- gradient_accumulation_steps = 32
- max_iters = 20
-
- # finetune at constant LR
- learning_rate = 3e-5
- decay_lr = False
package/kill_claude/nanoGPT/config/train_gpt2.py DELETED
@@ -1,25 +0,0 @@
- # config for training GPT-2 (124M) down to very nice loss of ~2.85 on 1 node of 8X A100 40GB
- # launch as the following (e.g. in a screen session) and wait ~5 days:
- # $ torchrun --standalone --nproc_per_node=8 train.py config/train_gpt2.py
-
- wandb_log = True
- wandb_project = 'owt'
- wandb_run_name='gpt2-124M'
-
- # these make the total batch size be ~0.5M
- # 12 batch size * 1024 block size * 5 gradaccum * 8 GPUs = 491,520
- batch_size = 12
- block_size = 1024
- gradient_accumulation_steps = 5 * 8
-
- # this makes total number of tokens be 300B
- max_iters = 600000
- lr_decay_iters = 600000
-
- # eval stuff
- eval_interval = 1000
- eval_iters = 200
- log_interval = 10
-
- # weight decay
- weight_decay = 1e-1
package/kill_claude/nanoGPT/config/train_shakespeare_char.py DELETED
@@ -1,37 +0,0 @@
- # train a miniature character-level shakespeare model
- # good for debugging and playing on macbooks and such
-
- out_dir = 'out-shakespeare-char'
- eval_interval = 250 # keep frequent because we'll overfit
- eval_iters = 200
- log_interval = 10 # don't print too too often
-
- # we expect to overfit on this small dataset, so only save when val improves
- always_save_checkpoint = False
-
- wandb_log = False # override via command line if you like
- wandb_project = 'shakespeare-char'
- wandb_run_name = 'mini-gpt'
-
- dataset = 'shakespeare_char'
- gradient_accumulation_steps = 1
- batch_size = 64
- block_size = 256 # context of up to 256 previous characters
-
- # baby GPT model :)
- n_layer = 6
- n_head = 6
- n_embd = 384
- dropout = 0.2
-
- learning_rate = 1e-3 # with baby networks can afford to go a bit higher
- max_iters = 5000
- lr_decay_iters = 5000 # make equal to max_iters usually
- min_lr = 1e-4 # learning_rate / 10 usually
- beta2 = 0.99 # make a bit bigger because number of tokens per iter is small
-
- warmup_iters = 100 # not super necessary potentially
-
- # on macbook also add
- # device = 'cpu' # run on cpu only
- # compile = False # do not torch compile the model