isa-model 0.2.0__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. isa_model/__init__.py +1 -1
  2. isa_model/core/storage/hf_storage.py +419 -0
  3. isa_model/deployment/__init__.py +52 -0
  4. isa_model/deployment/core/__init__.py +34 -0
  5. isa_model/deployment/core/deployment_config.py +356 -0
  6. isa_model/deployment/core/deployment_manager.py +549 -0
  7. isa_model/deployment/core/isa_deployment_service.py +401 -0
  8. isa_model/eval/factory.py +381 -140
  9. isa_model/inference/ai_factory.py +142 -240
  10. isa_model/inference/providers/ml_provider.py +50 -0
  11. isa_model/inference/services/audio/openai_tts_service.py +104 -3
  12. isa_model/inference/services/embedding/base_embed_service.py +112 -0
  13. isa_model/inference/services/embedding/ollama_embed_service.py +28 -2
  14. isa_model/inference/services/llm/__init__.py +2 -0
  15. isa_model/inference/services/llm/base_llm_service.py +111 -1
  16. isa_model/inference/services/llm/ollama_llm_service.py +234 -26
  17. isa_model/inference/services/llm/openai_llm_service.py +243 -28
  18. isa_model/inference/services/llm/triton_llm_service.py +481 -0
  19. isa_model/inference/services/ml/base_ml_service.py +78 -0
  20. isa_model/inference/services/ml/sklearn_ml_service.py +140 -0
  21. isa_model/inference/services/vision/__init__.py +3 -3
  22. isa_model/inference/services/vision/base_image_gen_service.py +161 -0
  23. isa_model/inference/services/vision/base_vision_service.py +177 -0
  24. isa_model/inference/services/vision/ollama_vision_service.py +143 -17
  25. isa_model/inference/services/vision/replicate_image_gen_service.py +139 -7
  26. isa_model/training/__init__.py +62 -32
  27. isa_model/training/cloud/__init__.py +22 -0
  28. isa_model/training/cloud/job_orchestrator.py +402 -0
  29. isa_model/training/cloud/runpod_trainer.py +454 -0
  30. isa_model/training/cloud/storage_manager.py +482 -0
  31. isa_model/training/core/__init__.py +23 -0
  32. isa_model/training/core/config.py +181 -0
  33. isa_model/training/core/dataset.py +222 -0
  34. isa_model/training/core/trainer.py +720 -0
  35. isa_model/training/core/utils.py +213 -0
  36. isa_model/training/factory.py +229 -198
  37. isa_model-0.2.9.dist-info/METADATA +465 -0
  38. isa_model-0.2.9.dist-info/RECORD +86 -0
  39. isa_model/core/model_router.py +0 -226
  40. isa_model/core/model_version.py +0 -0
  41. isa_model/core/resource_manager.py +0 -202
  42. isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py +0 -120
  43. isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py +0 -18
  44. isa_model/training/engine/llama_factory/__init__.py +0 -39
  45. isa_model/training/engine/llama_factory/config.py +0 -115
  46. isa_model/training/engine/llama_factory/data_adapter.py +0 -284
  47. isa_model/training/engine/llama_factory/examples/__init__.py +0 -6
  48. isa_model/training/engine/llama_factory/examples/finetune_with_tracking.py +0 -185
  49. isa_model/training/engine/llama_factory/examples/rlhf_with_tracking.py +0 -163
  50. isa_model/training/engine/llama_factory/factory.py +0 -331
  51. isa_model/training/engine/llama_factory/rl.py +0 -254
  52. isa_model/training/engine/llama_factory/trainer.py +0 -171
  53. isa_model/training/image_model/configs/create_config.py +0 -37
  54. isa_model/training/image_model/configs/create_flux_config.py +0 -26
  55. isa_model/training/image_model/configs/create_lora_config.py +0 -21
  56. isa_model/training/image_model/prepare_massed_compute.py +0 -97
  57. isa_model/training/image_model/prepare_upload.py +0 -17
  58. isa_model/training/image_model/raw_data/create_captions.py +0 -16
  59. isa_model/training/image_model/raw_data/create_lora_captions.py +0 -20
  60. isa_model/training/image_model/raw_data/pre_processing.py +0 -200
  61. isa_model/training/image_model/train/train.py +0 -42
  62. isa_model/training/image_model/train/train_flux.py +0 -41
  63. isa_model/training/image_model/train/train_lora.py +0 -57
  64. isa_model/training/image_model/train_main.py +0 -25
  65. isa_model-0.2.0.dist-info/METADATA +0 -327
  66. isa_model-0.2.0.dist-info/RECORD +0 -92
  67. isa_model-0.2.0.dist-info/licenses/LICENSE +0 -21
  68. /isa_model/training/{llm_model/annotation → annotation}/annotation_schema.py +0 -0
  69. /isa_model/training/{llm_model/annotation → annotation}/processors/annotation_processor.py +0 -0
  70. /isa_model/training/{llm_model/annotation → annotation}/storage/dataset_manager.py +0 -0
  71. /isa_model/training/{llm_model/annotation → annotation}/storage/dataset_schema.py +0 -0
  72. /isa_model/training/{llm_model/annotation → annotation}/tests/test_annotation_flow.py +0 -0
  73. /isa_model/training/{llm_model/annotation → annotation}/tests/test_minio copy.py +0 -0
  74. /isa_model/training/{llm_model/annotation → annotation}/tests/test_minio_upload.py +0 -0
  75. /isa_model/training/{llm_model/annotation → annotation}/views/annotation_controller.py +0 -0
  76. {isa_model-0.2.0.dist-info → isa_model-0.2.9.dist-info}/WHEEL +0 -0
  77. {isa_model-0.2.0.dist-info → isa_model-0.2.9.dist-info}/top_level.txt +0 -0
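Entries in braces follow git's rename notation: {old → new}/file means the file moved from old/file to new/file, and +0 -0 marks a pure rename with no content change. Deletion hunks for nine of the removed training files follow.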
--- a/isa_model/training/engine/llama_factory/rl.py
+++ /dev/null
@@ -1,254 +0,0 @@
-"""
-LlamaFactory reinforcement learning implementation.
-"""
-
-import os
-import json
-import logging
-import subprocess
-from typing import Dict, List, Optional, Union, Any, Tuple
-
-from .config import (
-    LlamaFactoryConfig,
-    RLConfig,
-    DPOConfig,
-    TrainingStrategy
-)
-
-logger = logging.getLogger(__name__)
-
-
-class LlamaFactoryRL:
-    """
-    Reinforcement Learning class for LlamaFactory.
-
-    This class provides methods to train language models using reinforcement
-    learning approaches such as RLHF (PPO) and DPO.
-
-    Example:
-        ```python
-        # Create RL configuration
-        config = RLConfig(
-            model_path="meta-llama/Llama-2-7b-hf",
-            reward_model="reward-model-path",
-            train_file="path/to/data.json",
-            batch_size=8
-        )
-
-        # Initialize and run RL training
-        rl_trainer = LlamaFactoryRL(config)
-        rl_trainer.train()
-        ```
-
-    For DPO:
-        ```python
-        # Create DPO configuration
-        config = DPOConfig(
-            model_path="meta-llama/Llama-2-7b-hf",
-            train_file="path/to/preferences.json",
-            reference_model="meta-llama/Llama-2-7b-hf",  # Optional
-            batch_size=4
-        )
-
-        # Initialize and run DPO training
-        dpo_trainer = LlamaFactoryRL(config)
-        dpo_trainer.train()
-        ```
-    """
-
-    def __init__(self, config: Union[RLConfig, DPOConfig]):
-        """
-        Initialize the LlamaFactory RL trainer.
-
-        Args:
-            config: Configuration for RL training
-        """
-        self.config = config
-        self._validate_config()
-        self.output_dir = config.output_dir
-        os.makedirs(self.output_dir, exist_ok=True)
-
-    def _validate_config(self) -> None:
-        """Validate configuration parameters."""
-        if not self.config.model_path:
-            raise ValueError("Model path must be specified")
-
-        if isinstance(self.config, RLConfig):
-            if not self.config.reward_model:
-                raise ValueError("Reward model must be specified for RLHF")
-            if not self.config.train_file:
-                raise ValueError("Training file must be specified for RLHF")
-
-        if isinstance(self.config, DPOConfig) and not self.config.train_file:
-            raise ValueError("Training file must be specified for DPO")
-
-    def _prepare_training_args(self) -> Dict[str, Any]:
-        """
-        Prepare training arguments for LlamaFactory CLI.
-
-        Returns:
-            Dictionary of training arguments
-        """
-        base_args = self.config.to_dict()
-
-        # Add stage-specific args
-        if self.config.strategy == TrainingStrategy.REINFORCEMENT_LEARNING:
-            base_args["stage"] = "rm" if self._is_reward_model_training() else "ppo"
-        elif self.config.strategy == TrainingStrategy.PREFERENCE_OPTIMIZATION:
-            base_args["stage"] = "dpo"
-
-        # Handle LoRA settings
-        if self.config.use_lora:
-            base_args["lora_target"] = "q_proj,v_proj"
-
-        return base_args
-
-    def _is_reward_model_training(self) -> bool:
-        """
-        Check if we're training a reward model for RLHF.
-
-        Returns:
-            True if reward model training, False otherwise
-        """
-        # This is a placeholder. In a real implementation,
-        # you'd have a separate flag in the config
-        return False
-
-    def _save_training_args(self, args: Dict[str, Any]) -> str:
-        """
-        Save training arguments to a JSON file.
-
-        Args:
-            args: Training arguments
-
-        Returns:
-            Path to the saved JSON file
-        """
-        args_file = os.path.join(self.output_dir, "train_args.json")
-        with open(args_file, "w") as f:
-            json.dump(args, f, indent=2)
-        return args_file
-
-    def _build_command(self, args_file: str) -> List[str]:
-        """
-        Build the command to run LlamaFactory.
-
-        Args:
-            args_file: Path to the arguments file
-
-        Returns:
-            Command list for subprocess
-        """
-        cmd_module = "llmtuner.cli.ppo"
-        if self.config.strategy == TrainingStrategy.PREFERENCE_OPTIMIZATION:
-            cmd_module = "llmtuner.cli.dpo"
-        elif self._is_reward_model_training():
-            cmd_module = "llmtuner.cli.rm"
-
-        return [
-            "python", "-m", cmd_module,
-            "--cfg_file", args_file
-        ]
-
-    def train(self) -> str:
-        """
-        Run the RL training process.
-
-        Returns:
-            Path to the output directory with trained model
-        """
-        args = self._prepare_training_args()
-        args_file = self._save_training_args(args)
-
-        cmd = self._build_command(args_file)
-        logger.info(f"Running command: {' '.join(cmd)}")
-
-        try:
-            subprocess.run(
-                cmd,
-                check=True,
-                text=True,
-                stderr=subprocess.STDOUT
-            )
-            logger.info(f"RL training completed successfully. Model saved to {self.output_dir}")
-            return self.output_dir
-        except subprocess.CalledProcessError as e:
-            logger.error(f"RL training failed with error: {e}")
-            raise RuntimeError(f"LlamaFactory RL training failed: {e}")
-
-    def train_reward_model(self) -> str:
-        """
-        Train a reward model for RLHF.
-
-        Returns:
-            Path to the trained reward model
-        """
-        # Create temporary config for reward model training
-        reward_config = RLConfig(
-            model_path=self.config.model_path,
-            output_dir=os.path.join(self.output_dir, "reward_model"),
-            train_file=self.config.train_file,
-            batch_size=self.config.batch_size,
-            num_epochs=self.config.num_epochs,
-            learning_rate=self.config.learning_rate
-        )
-
-        # Set as reward model training
-        # In a real implementation, you'd have a proper flag in the config
-        self.config = reward_config
-
-        args = self._prepare_training_args()
-        args_file = self._save_training_args(args)
-
-        cmd = [
-            "python", "-m", "llmtuner.cli.rm",
-            "--cfg_file", args_file
-        ]
-
-        logger.info(f"Running command: {' '.join(cmd)}")
-
-        try:
-            subprocess.run(
-                cmd,
-                check=True,
-                text=True,
-                stderr=subprocess.STDOUT
-            )
-            logger.info(f"Reward model training completed successfully. Model saved to {reward_config.output_dir}")
-            return reward_config.output_dir
-        except subprocess.CalledProcessError as e:
-            logger.error(f"Reward model training failed with error: {e}")
-            raise RuntimeError(f"LlamaFactory reward model training failed: {e}")
-
-    def export_model(self, output_path: Optional[str] = None) -> str:
-        """
-        Export the trained model.
-
-        Args:
-            output_path: Path to save the exported model
-
-        Returns:
-            Path to the exported model
-        """
-        if output_path is None:
-            output_path = os.path.join(self.output_dir, "exported")
-
-        os.makedirs(output_path, exist_ok=True)
-
-        # If using LoRA, need to merge weights
-        if self.config.use_lora:
-            cmd = [
-                "python", "-m", "llmtuner.cli.merge",
-                "--model_name_or_path", self.config.model_path,
-                "--adapter_name_or_path", self.output_dir,
-                "--output_dir", output_path
-            ]
-
-            subprocess.run(cmd, check=True, text=True)
-            logger.info(f"Model exported successfully to {output_path}")
-        else:
-            # Just copy the model
-            import shutil
-            shutil.copytree(self.output_dir, output_path, dirs_exist_ok=True)
-
-        return output_path
--- a/isa_model/training/engine/llama_factory/trainer.py
+++ /dev/null
@@ -1,171 +0,0 @@
-"""
-LlamaFactory training implementation.
-"""
-
-import os
-import json
-import logging
-import subprocess
-from typing import Dict, List, Optional, Union, Any
-
-from .config import (
-    LlamaFactoryConfig,
-    SFTConfig,
-    TrainingStrategy,
-    DatasetFormat
-)
-
-logger = logging.getLogger(__name__)
-
-
-class LlamaFactoryTrainer:
-    """
-    Trainer class for LlamaFactory fine-tuning operations.
-
-    This class provides methods to fine-tune language models using LlamaFactory.
-    It supports supervised fine-tuning (SFT) and manages the execution of the
-    training process.
-
-    Example:
-        ```python
-        # Create configuration
-        config = SFTConfig(
-            model_path="meta-llama/Llama-2-7b-hf",
-            train_file="path/to/data.json",
-            batch_size=8,
-            num_epochs=3
-        )
-
-        # Initialize and run trainer
-        trainer = LlamaFactoryTrainer(config)
-        trainer.train()
-        ```
-    """
-
-    def __init__(self, config: LlamaFactoryConfig):
-        """
-        Initialize the LlamaFactory trainer.
-
-        Args:
-            config: Configuration for training
-        """
-        self.config = config
-        self._validate_config()
-        self.output_dir = config.output_dir
-        os.makedirs(self.output_dir, exist_ok=True)
-
-    def _validate_config(self) -> None:
-        """Validate configuration parameters."""
-        if not self.config.model_path:
-            raise ValueError("Model path must be specified")
-
-        if isinstance(self.config, SFTConfig) and not self.config.train_file:
-            raise ValueError("Training file must be specified for SFT")
-
-    def _prepare_training_args(self) -> Dict[str, Any]:
-        """
-        Prepare training arguments for LlamaFactory CLI.
-
-        Returns:
-            Dictionary of training arguments
-        """
-        base_args = self.config.to_dict()
-
-        # Add stage-specific args
-        if self.config.strategy == TrainingStrategy.SUPERVISED_FINETUNING:
-            base_args["stage"] = "sft"
-
-        # Handle LoRA settings
-        if self.config.use_lora:
-            base_args["lora_target"] = "q_proj,v_proj"
-
-        return base_args
-
-    def _save_training_args(self, args: Dict[str, Any]) -> str:
-        """
-        Save training arguments to a JSON file.
-
-        Args:
-            args: Training arguments
-
-        Returns:
-            Path to the saved JSON file
-        """
-        args_file = os.path.join(self.output_dir, "train_args.json")
-        with open(args_file, "w") as f:
-            json.dump(args, f, indent=2)
-        return args_file
-
-    def _build_command(self, args_file: str) -> List[str]:
-        """
-        Build the command to run LlamaFactory.
-
-        Args:
-            args_file: Path to the arguments file
-
-        Returns:
-            Command list for subprocess
-        """
-        return [
-            "python", "-m", "llmtuner.cli.sft",
-            "--cfg_file", args_file
-        ]
-
-    def train(self) -> str:
-        """
-        Run the training process.
-
-        Returns:
-            Path to the output directory with trained model
-        """
-        args = self._prepare_training_args()
-        args_file = self._save_training_args(args)
-
-        cmd = self._build_command(args_file)
-        logger.info(f"Running command: {' '.join(cmd)}")
-
-        try:
-            subprocess.run(
-                cmd,
-                check=True,
-                text=True,
-                stderr=subprocess.STDOUT
-            )
-            logger.info(f"Training completed successfully. Model saved to {self.output_dir}")
-            return self.output_dir
-        except subprocess.CalledProcessError as e:
-            logger.error(f"Training failed with error: {e}")
-            raise RuntimeError(f"LlamaFactory training failed: {e}")
-
-    def export_model(self, output_path: Optional[str] = None) -> str:
-        """
-        Export the trained model.
-
-        Args:
-            output_path: Path to save the exported model
-
-        Returns:
-            Path to the exported model
-        """
-        if output_path is None:
-            output_path = os.path.join(self.output_dir, "exported")
-
-        os.makedirs(output_path, exist_ok=True)
-
-        # If using LoRA, need to merge weights
-        if self.config.use_lora:
-            cmd = [
-                "python", "-m", "llmtuner.cli.merge",
-                "--model_name_or_path", self.config.model_path,
-                "--adapter_name_or_path", self.output_dir,
-                "--output_dir", output_path
-            ]
-
-            subprocess.run(cmd, check=True, text=True)
-            logger.info(f"Model exported successfully to {output_path}")
-        else:
-            # Just copy the model
-            import shutil
-            shutil.copytree(self.output_dir, output_path, dirs_exist_ok=True)
-
-        return output_path
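Both wrappers above (rl.py and trainer.py) drive LlamaFactory the same way: serialize the config to a train_args.json file, then shell out to an llmtuner CLI module for the chosen stage. A minimal sketch of that launch pattern, distilled from the deleted code; the llmtuner module names and the --cfg_file flag are taken as-is from the 0.2.0 sources and are not verified against current llmtuner releases:

```python
import json
import os
import subprocess

def launch_llamafactory(stage_module: str, train_args: dict, output_dir: str) -> str:
    """Serialize training args to JSON and hand them to an llmtuner CLI stage.

    stage_module is e.g. "llmtuner.cli.sft", "llmtuner.cli.ppo",
    "llmtuner.cli.dpo", or "llmtuner.cli.rm", as used by the removed wrappers.
    """
    os.makedirs(output_dir, exist_ok=True)
    args_file = os.path.join(output_dir, "train_args.json")
    with open(args_file, "w") as f:
        json.dump(train_args, f, indent=2)
    # check=True raises CalledProcessError on a non-zero exit, which the
    # removed wrappers converted into a RuntimeError.
    subprocess.run(
        ["python", "-m", stage_module, "--cfg_file", args_file],
        check=True,
        text=True,
        stderr=subprocess.STDOUT,
    )
    return output_dir
```

Writing the arguments to disk rather than passing them as CLI flags keeps the full training configuration reproducible alongside the run outputs.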
--- a/isa_model/training/image_model/configs/create_config.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import json
-
-config = {
-    "general": {
-        "enable_bucket": True,
-        "min_bucket_reso": 256,
-        "max_bucket_reso": 1024,
-        "batch_size_per_device": 4,
-        "train_batch_size": 4,
-        "epoch": 100,
-        "save_every_n_epochs": 10,
-        "save_model_as": "safetensors",
-        "mixed_precision": "fp16",
-        "seed": 42,
-        "num_cpu_threads_per_process": 8
-    },
-    "model": {
-        "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
-        "v2": False,
-        "v_parameterization": False
-    },
-    "optimizer": {
-        "learning_rate": 1e-5,
-        "lr_scheduler": "cosine_with_restarts",
-        "lr_warmup_steps": 100,
-        "optimizer_type": "AdamW8bit"
-    },
-    "dataset": {
-        "resolution": 512,
-        "center_crop": False,
-        "random_crop": False,
-        "flip_aug": True
-    }
-}
-
-with open("training_config.json", "w") as f:
-    json.dump(config, f, indent=2)
--- a/isa_model/training/image_model/configs/create_flux_config.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import json
-from pathlib import Path
-
-# Get the project root directory
-project_root = Path(__file__).parents[4]  # Go up 4 levels from configs folder
-
-flux_config = {
-    "pretrained_model_name_or_path": "/home/Ubuntu/Downloads/flux1-dev.safetensors",
-    # Update train_data_dir to your processed images
-    "train_data_dir": str(project_root / "app/services/training/image_model/raw_data/training_images_processed"),
-    "output_dir": "/home/Ubuntu/apps/StableSwarmUI/Models/diffusion_models",
-    "output_name": "demi_flux_v1",
-    "save_model_as": "safetensors",
-    "learning_rate": 4e-6,
-    "train_batch_size": 1,
-    "epoch": 200,
-    "save_every_n_epochs": 25,
-    "mixed_precision": "bf16",
-    "num_cpu_threads_per_process": 2,
-    "flux1_t5xxl": "/home/Ubuntu/Downloads/t5xxl_fp16.safetensors",
-    "flux1_clip_l": "/home/Ubuntu/Downloads/clip_l.safetensors",
-}
-
-config_path = Path(__file__).parent / "flux_config.json"
-with open(config_path, "w") as f:
-    json.dump(flux_config, f, indent=2)
--- a/isa_model/training/image_model/configs/create_lora_config.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import json
-
-lora_config = {
-    "pretrained_model_name_or_path": "/home/Ubuntu/Downloads/flux1-dev.safetensors",
-    "train_data_dir": "/home/Ubuntu/Downloads/training_imgs",
-    "output_dir": "/home/Ubuntu/apps/StableSwarmUI/Models/lora",
-    "output_name": "demi_lora_v1",
-    "save_model_as": "safetensors",
-    "learning_rate": 1e-4,
-    "train_batch_size": 1,
-    "epoch": 100,
-    "save_every_n_epochs": 10,
-    "mixed_precision": "bf16",
-    "num_cpu_threads_per_process": 2,
-    "flux1_checkbox": True,
-    "flux1_t5xxl": "/home/Ubuntu/Downloads/t5xxl_fp16.safetensors",
-    "flux1_clip_l": "/home/Ubuntu/Downloads/clip_l.safetensors",
-}
-
-with open("lora_config.json", "w") as f:
-    json.dump(lora_config, f, indent=2)
--- a/isa_model/training/image_model/prepare_massed_compute.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import os
-import shutil
-from pathlib import Path
-
-def prepare_massed_package():
-    # Get current directory and create desktop folder structure
-    project_root = Path(__file__).parent
-    desktop_dir = Path.home() / "Desktop" / "massed_training"
-
-    # Create necessary directories
-    desktop_dir.mkdir(parents=True, exist_ok=True)
-    training_dir = desktop_dir / "training_imgs"
-    training_dir.mkdir(exist_ok=True)
-
-    # Copy processed images and captions
-    processed_images_dir = project_root / "raw_data/training_images_processed"
-    for img_file in processed_images_dir.glob("*.jpg"):
-        shutil.copy2(img_file, training_dir)
-        # Copy or create corresponding caption file
-        caption_file = img_file.with_suffix(".txt")
-        if caption_file.exists():
-            shutil.copy2(caption_file, training_dir)
-        else:
-            with open(training_dir / f"{img_file.stem}.txt", "w") as f:
-                f.write("a photo of demi person, (high quality, photorealistic:1.2), professional portrait")
-
-    # Create Kohya FLUX setup script
-    kohya_script = """#!/bin/bash
-cd /home/Ubuntu/apps/kohya_ss
-
-git pull
-
-git checkout sd3-flux.1
-
-source venv/bin/activate
-
-./setup.sh
-
-git submodule update --init --recursive
-
-pip uninstall xformers --yes
-
-pip install torch==2.5.1+cu124 torchvision --index-url https://download.pytorch.org/whl/cu124
-
-pip install xformers==0.0.28.post3 --index-url https://download.pytorch.org/whl/cu124
-
-./gui.sh --listen=0.0.0.0 --inbrowser --noverify
-
-# Keep the terminal open
-read -p "Press Enter to continue..."
-"""
-
-    # Create Models download script
-    models_script = """#!/bin/bash
-pip install huggingface_hub
-
-pip install ipywidgets
-pip install hf_transfer
-export HF_HUB_ENABLE_HF_TRANSFER=1
-
-python3 Download_Train_Models.py --dir /home/Ubuntu/Downloads
-"""
-
-    # Write scripts
-    with open(desktop_dir / "Massed_Compute_Kohya_FLUX.sh", "w", newline='\n') as f:
-        f.write(kohya_script)
-
-    with open(desktop_dir / "Massed_Compute_Download_Models.sh", "w", newline='\n') as f:
-        f.write(models_script)
-
-    # Make scripts executable
-    os.chmod(desktop_dir / "Massed_Compute_Kohya_FLUX.sh", 0o755)
-    os.chmod(desktop_dir / "Massed_Compute_Download_Models.sh", 0o755)
-
-    print(f"""
-Package prepared in: {desktop_dir}
-
-Next steps:
-1. Upload the entire '{desktop_dir.name}' folder to your Massed Compute instance
-2. In Massed Compute terminal:
-   cd ~/Desktop/massed_training
-   chmod +x Massed_Compute_Kohya_FLUX.sh
-   ./Massed_Compute_Kohya_FLUX.sh
-
-3. In a new terminal:
-   cd ~/Desktop/massed_training
-   chmod +x Massed_Compute_Download_Models.sh
-   ./Massed_Compute_Download_Models.sh
-
-4. When Kohya GUI opens (http://0.0.0.0:7860/):
-   - Go to the Training tab
-   - Set training data directory to: /home/Ubuntu/Desktop/massed_training/training_imgs
-   - Use the settings from the Flux tutorial
-""")
-
-if __name__ == "__main__":
-    prepare_massed_package()
--- a/isa_model/training/image_model/prepare_upload.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import shutil
-import os
-
-def prepare_training_package():
-    # Create a directory for all training materials
-    os.makedirs("training_package", exist_ok=True)
-
-    # Copy training data
-    shutil.copytree("training_data", "training_package/training_data", dirs_exist_ok=True)
-
-    # Copy config
-    shutil.copy("training_config.json", "training_package/training_config.json")
-
-    # Create zip file
-    shutil.make_archive("demi_training", "zip", "training_package")
-
-prepare_training_package()
--- a/isa_model/training/image_model/raw_data/create_captions.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import os
-from pathlib import Path
-
-def create_captions(image_dir):
-    """Create a caption file for each image with a basic description"""
-    image_dir = Path(image_dir)
-
-    for image_file in image_dir.glob("*.jpg"):
-        caption_file = image_dir / f"{image_file.stem}.txt"
-
-        # Create a basic caption - you can modify this
-        with open(caption_file, "w") as f:
-            f.write("a photo of demi")
-
-# Use the function
-create_captions("training_data/demi")
--- a/isa_model/training/image_model/raw_data/create_lora_captions.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from pathlib import Path
-
-def create_lora_captions(image_dir):
-    """Create detailed captions for LoRA training"""
-    image_dir = Path(image_dir)
-
-    # LoRA-specific caption with trigger word
-    caption_text = (
-        "a photo of demi person, (high quality, photorealistic:1.2), "
-        "professional portrait, detailed facial features, "
-        "natural lighting, sharp focus, clear skin texture"
-    )
-
-    for image_file in image_dir.glob("*.jpg"):
-        caption_file = image_dir / f"{image_file.stem}.txt"
-        with open(caption_file, "w") as f:
-            f.write(caption_text)
-
-# Use the function
-create_lora_captions("/home/Ubuntu/Downloads/training_imgs")