optimum-rbln 0.1.9__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. optimum/rbln/__init__.py +47 -9
  2. optimum/rbln/__version__.py +1 -1
  3. optimum/rbln/diffusers/models/autoencoder_kl.py +36 -31
  4. optimum/rbln/diffusers/models/controlnet.py +53 -43
  5. optimum/rbln/diffusers/models/unet_2d_condition.py +40 -31
  6. optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +4 -0
  7. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +28 -23
  8. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +28 -23
  9. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +28 -37
  10. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +30 -39
  11. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +24 -14
  12. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +24 -15
  13. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +26 -17
  14. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +26 -17
  15. optimum/rbln/modeling_alias.py +6 -11
  16. optimum/rbln/modeling_base.py +467 -261
  17. optimum/rbln/modeling_config.py +199 -73
  18. optimum/rbln/transformers/__init__.py +43 -1
  19. optimum/rbln/transformers/models/__init__.py +23 -1
  20. optimum/rbln/transformers/models/auto/__init__.py +14 -0
  21. optimum/rbln/transformers/models/auto/auto_factory.py +84 -0
  22. optimum/rbln/transformers/models/auto/modeling_auto.py +95 -0
  23. optimum/rbln/transformers/models/bart/__init__.py +1 -0
  24. optimum/rbln/transformers/models/bart/bart_architecture.py +203 -58
  25. optimum/rbln/transformers/models/bart/modeling_bart.py +125 -0
  26. optimum/rbln/transformers/models/bert/__init__.py +24 -0
  27. optimum/rbln/transformers/models/bert/modeling_bert.py +101 -0
  28. optimum/rbln/transformers/models/clip/__init__.py +1 -1
  29. optimum/rbln/transformers/models/clip/modeling_clip.py +127 -26
  30. optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +28 -4
  31. optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +409 -150
  32. optimum/rbln/transformers/models/dpt/modeling_dpt.py +21 -8
  33. optimum/rbln/transformers/models/exaone/__init__.py +32 -0
  34. optimum/rbln/transformers/models/exaone/exaone_architecture.py +72 -0
  35. optimum/rbln/transformers/models/exaone/hf_hub_cached/configuration_exaone.py +181 -0
  36. optimum/rbln/transformers/models/exaone/hf_hub_cached/modeling_exaone.py +1725 -0
  37. optimum/rbln/transformers/models/exaone/modeling_exaone.py +78 -0
  38. optimum/rbln/transformers/models/gemma/modeling_gemma.py +1 -1
  39. optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +4 -1
  40. optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +1 -1
  41. optimum/rbln/transformers/models/llama/modeling_llama.py +1 -1
  42. optimum/rbln/transformers/models/llava_next/__init__.py +24 -0
  43. optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +662 -0
  44. optimum/rbln/transformers/models/midm/midm_architecture.py +5 -1
  45. optimum/rbln/transformers/models/midm/modeling_midm.py +6 -1
  46. optimum/rbln/transformers/models/mistral/modeling_mistral.py +1 -1
  47. optimum/rbln/transformers/models/phi/__init__.py +24 -0
  48. optimum/rbln/transformers/models/phi/modeling_phi.py +69 -0
  49. optimum/rbln/transformers/models/phi/phi_architecture.py +406 -0
  50. optimum/rbln/transformers/models/qwen2/__init__.py +24 -0
  51. optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +67 -0
  52. optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +29 -0
  53. optimum/rbln/transformers/models/seq2seq/__init__.py +24 -0
  54. optimum/rbln/{modeling_seq2seq.py → transformers/models/seq2seq/modeling_seq2seq.py} +198 -168
  55. optimum/rbln/transformers/models/t5/__init__.py +1 -0
  56. optimum/rbln/transformers/models/t5/modeling_t5.py +55 -0
  57. optimum/rbln/transformers/models/t5/t5_architecture.py +122 -47
  58. optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +17 -12
  59. optimum/rbln/transformers/models/whisper/generation_whisper.py +68 -0
  60. optimum/rbln/transformers/models/whisper/modeling_whisper.py +172 -111
  61. optimum/rbln/transformers/models/whisper/whisper_architecture.py +44 -17
  62. optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +18 -16
  63. optimum/rbln/transformers/utils/rbln_quantization.py +48 -60
  64. optimum/rbln/utils/import_utils.py +50 -1
  65. optimum/rbln/utils/logging.py +82 -0
  66. optimum/rbln/utils/runtime_utils.py +33 -0
  67. optimum/rbln/utils/timer_utils.py +43 -0
  68. {optimum_rbln-0.1.9.dist-info → optimum_rbln-0.1.12.dist-info}/METADATA +9 -7
  69. optimum_rbln-0.1.12.dist-info/RECORD +103 -0
  70. {optimum_rbln-0.1.9.dist-info → optimum_rbln-0.1.12.dist-info}/WHEEL +1 -1
  71. optimum_rbln-0.1.12.dist-info/entry_points.txt +4 -0
  72. optimum_rbln-0.1.9.dist-info/RECORD +0 -78
  73. {optimum_rbln-0.1.9.dist-info → optimum_rbln-0.1.12.dist-info}/licenses/LICENSE +0 -0
optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
@@ -24,13 +24,15 @@
 
 from diffusers import StableDiffusionPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel
+from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 
 class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for text-to-image generation using Stable Diffusion.
@@ -50,14 +52,27 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
         """
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
-        rbln_config_kwargs, rbln_constructor_kwargs = RBLNBaseModel.pop_rbln_kwargs_from_kwargs(kwargs)
-        model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
+        rbln_config = kwargs.pop("rbln_config", None)
+        rbln_config = {} if rbln_config is None else rbln_config
+
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
+
+        with ContextRblnConfig(
+            device=device,
+            device_map=device_map,
+            create_runtimes=create_runtimes,
+            optimze_host_mem=optimize_host_memory,
+        ):
+            model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
 
         if export is None or export is False:
             return model
 
         do_classifier_free_guidance = (
-            rbln_config_kwargs.pop("rbln_guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         vae = RBLNAutoencoderKL.from_pretrained(
@@ -67,19 +82,17 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
             model_save_dir=model_save_dir,
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=False,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
-        batch_size = rbln_config_kwargs.pop("rbln_batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -91,8 +104,7 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
             rbln_batch_size=unet_batch_size,
             rbln_use_encode=False,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -120,9 +132,7 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
             # overwrite to replace incorrect config
             model.save_config(model_save_dir)
 
-        model.models = [vae.model[0], text_encoder.model[0], unet.model[0]]
-
-        if rbln_constructor_kwargs.pop("rbln_optimize_host_memory", None) is False:
+        if optimize_host_memory is False:
             model.compiled_models = [vae.compiled_models[0], text_encoder.compiled_models[0], unet.compiled_models[0]]
 
         return model
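
The hunks above capture the central API change in 0.1.12: the flat rbln_* keyword arguments, previously split out by pop_rbln_kwargs_from_kwargs, are replaced by a single rbln_config dict that is threaded through the new @use_rbln_config decorator and the ContextRblnConfig context manager. A minimal usage sketch of the new entry point follows; the checkpoint id, device index, and output directory are illustrative, not taken from the diff:

    from optimum.rbln import RBLNStableDiffusionPipeline

    # Compile the VAE, text encoder, and UNet for RBLN hardware in one call.
    # The rbln_config keys mirror the ones read in the hunks above: device,
    # device_map, create_runtimes, optimize_host_memory, batch_size, and
    # guidance_scale (values above 1.0 double the UNet batch for CFG).
    pipe = RBLNStableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",  # illustrative checkpoint id
        export=True,                       # compile rather than load precompiled artifacts
        model_save_dir="./sd15-rbln",      # illustrative output directory
        rbln_config={
            "device": 0,                   # illustrative device index
            "create_runtimes": True,
            "optimize_host_memory": True,
            "batch_size": 1,
            "guidance_scale": 7.5,
        },
    )
    image = pipe("a watercolor painting of a lighthouse").images[0]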
optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
@@ -24,13 +24,15 @@
 
 from diffusers import StableDiffusionImg2ImgPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel
+from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 
 class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for image-to-image generation using Stable Diffusion.
@@ -50,14 +52,27 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         """
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
-        rbln_config_kwargs, rbln_constructor_kwargs = RBLNBaseModel.pop_rbln_kwargs_from_kwargs(kwargs)
-        model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
+        rbln_config = kwargs.pop("rbln_config", None)
+        rbln_config = {} if rbln_config is None else rbln_config
+
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
+
+        with ContextRblnConfig(
+            device=device,
+            device_map=device_map,
+            create_runtimes=create_runtimes,
+            optimze_host_mem=optimize_host_memory,
+        ):
+            model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
 
         if export is None or export is False:
             return model
 
         do_classifier_free_guidance = (
-            rbln_config_kwargs.pop("rbln_guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         # compile model, create runtime
@@ -69,19 +84,17 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
-        batch_size = rbln_config_kwargs.pop("rbln_batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -94,8 +107,7 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -123,10 +135,7 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
             # overwrite to replace incorrect config
             model.save_config(model_save_dir)
 
-        # vae encoder, vae decoder, text_encoder, unet
-        model.models = [vae.model[0], vae.model[1], text_encoder.model[0], unet.model[0]]
-
-        if rbln_constructor_kwargs.pop("rbln_optimize_host_memory", None) is False:
+        if optimize_host_memory is False:
             model.compiled_models = [
                 vae.compiled_models[0],
                 vae.compiled_models[1],
optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py
@@ -16,13 +16,15 @@
 
 from diffusers import StableDiffusionXLPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection
+from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 
 class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for text-to-image generation using Stable Diffusion XL.
@@ -42,14 +44,27 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
         """
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
-        rbln_config_kwargs, rbln_constructor_kwargs = RBLNBaseModel.pop_rbln_kwargs_from_kwargs(kwargs)
-        model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
+        rbln_config = kwargs.pop("rbln_config", None)
+        rbln_config = {} if rbln_config is None else rbln_config
+
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
+
+        with ContextRblnConfig(
+            device=device,
+            device_map=device_map,
+            create_runtimes=create_runtimes,
+            optimze_host_mem=optimize_host_memory,
+        ):
+            model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
 
         if export is None or export is False:
             return model
 
         do_classifier_free_guidance = (
-            rbln_config_kwargs.pop("rbln_guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         vae = RBLNAutoencoderKL.from_pretrained(
@@ -59,27 +74,24 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
             model_save_dir=model_save_dir,
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=False,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
        )
         text_encoder_2 = RBLNCLIPTextModelWithProjection.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder_2",
             export=True,
             model_save_dir=model_save_dir,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
-        batch_size = rbln_config_kwargs.pop("rbln_batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -92,8 +104,7 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
             rbln_batch_size=unet_batch_size,
             rbln_use_encode=False,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -112,7 +123,7 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
                 "vae": ("optimum.rbln", "RBLNAutoencoderKL"),
                 "text_encoder": ("optimum.rbln", "RBLNCLIPTextModel"),
                 "unet": ("optimum.rbln", "RBLNUNet2DConditionModel"),
-                "text_encoder_2": ("optimum.rbln", "RBLNCLIPTextModel"),
+                "text_encoder_2": ("optimum.rbln", "RBLNCLIPTextModelWithProjection"),
             }
             model.register_to_config(**update_dict)
 
@@ -120,9 +131,7 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
             # overwrite to replace incorrect config
             model.save_config(model_save_dir)
 
-        model.models = [vae.model[0], unet.model[0], text_encoder.model[0], text_encoder_2.model[0]]
-
-        if rbln_constructor_kwargs.pop("rbln_optimize_host_memory", None) is False:
+        if optimize_host_memory is False:
             model.compiled_models = [
                 vae.compiled_models[0],
                 unet.compiled_models[0],
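
Beyond the rbln_config migration, the XL hunks fix a real registration bug: the saved model_index.json previously mapped text_encoder_2 to RBLNCLIPTextModel, so a compiled pipeline reloaded from disk resolved the wrong class. With the corrected mapping, a compile-then-reload round trip behaves as expected. A short sketch under the same illustrative assumptions as the earlier example:

    from optimum.rbln import RBLNStableDiffusionXLPipeline

    # Compile once; model_save_dir receives the artifacts plus the corrected
    # model_index.json written by register_to_config/save_config above.
    pipe = RBLNStableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",  # illustrative checkpoint id
        export=True,
        model_save_dir="./sdxl-rbln",                # illustrative output directory
        rbln_config={"batch_size": 1},
    )

    # Reload: with export left unset, from_pretrained returns the saved pipeline,
    # and text_encoder_2 now resolves to RBLNCLIPTextModelWithProjection.
    pipe = RBLNStableDiffusionXLPipeline.from_pretrained("./sdxl-rbln")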
optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
@@ -16,13 +16,15 @@
 
 from diffusers import StableDiffusionXLImg2ImgPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection
+from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 
 class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for image-to-image generation using Stable Diffusion XL.
@@ -42,14 +44,27 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
         """
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
-        rbln_config_kwargs, rbln_constructor_kwargs = RBLNBaseModel.pop_rbln_kwargs_from_kwargs(kwargs)
-        model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
+        rbln_config = kwargs.pop("rbln_config", None)
+        rbln_config = {} if rbln_config is None else rbln_config
+
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
+
+        with ContextRblnConfig(
+            device=device,
+            device_map=device_map,
+            create_runtimes=create_runtimes,
+            optimze_host_mem=optimize_host_memory,
+        ):
+            model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
 
         if export is None or export is False:
             return model
 
         do_classifier_free_guidance = (
-            rbln_config_kwargs.pop("rbln_guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         vae = RBLNAutoencoderKL.from_pretrained(
@@ -60,27 +75,24 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
         text_encoder_2 = RBLNCLIPTextModelWithProjection.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder_2",
             export=True,
             model_save_dir=model_save_dir,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
-        batch_size = rbln_config_kwargs.pop("rbln_batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -94,8 +106,7 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            **rbln_config_kwargs,
-            **rbln_constructor_kwargs,
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -114,7 +125,7 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
                 "vae": ("optimum.rbln", "RBLNAutoencoderKL"),
                 "text_encoder": ("optimum.rbln", "RBLNCLIPTextModel"),
                 "unet": ("optimum.rbln", "RBLNUNet2DConditionModel"),
-                "text_encoder_2": ("optimum.rbln", "RBLNCLIPTextModel"),
+                "text_encoder_2": ("optimum.rbln", "RBLNCLIPTextModelWithProjection"),
             }
             model.register_to_config(**update_dict)
 
@@ -122,9 +133,7 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
             # overwrite to replace incorrect config
             model.save_config(model_save_dir)
 
-        model.models = [vae.model[0], vae.model[1], unet.model[0], text_encoder.model[0], text_encoder_2.model[0]]
-
-        if rbln_constructor_kwargs.pop("rbln_optimize_host_memory", None) is False:
+        if optimize_host_memory is False:
             model.compiled_models = [
                 vae.compiled_models[0],
                 vae.compiled_models[1],
optimum/rbln/modeling_alias.py
@@ -28,7 +28,6 @@ from .modeling_base import (
     RBLNModelForQuestionAnswering,
     RBLNModelForSequenceClassification,
 )
-from .modeling_seq2seq import RBLNModelForSeq2SeqLM
 
 
 class RBLNASTForAudioClassification(RBLNModelForAudioClassification):
@@ -47,21 +46,17 @@ class RBLNResNetForImageClassification(RBLNModelForImageClassification):
     pass
 
 
-class RBLNT5ForConditionalGeneration(RBLNModelForSeq2SeqLM):
-    pass
-
-
-class RBLNBartForConditionalGeneration(RBLNModelForSeq2SeqLM):
-    pass
-
-
 class RBLNXLMRobertaForSequenceClassification(RBLNModelForSequenceClassification):
-    pass
+    rbln_model_input_names = ["input_ids", "attention_mask"]
 
 
 class RBLNRobertaForSequenceClassification(RBLNModelForSequenceClassification):
-    pass
+    rbln_model_input_names = ["input_ids", "attention_mask"]
 
 
 class RBLNRobertaForMaskedLM(RBLNModelForMaskedLM):
+    rbln_model_input_names = ["input_ids", "attention_mask"]
+
+
+class RBLNViTForImageClassification(RBLNModelForImageClassification):
     pass
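
The alias cleanup tracks the file moves in the list above: RBLNT5ForConditionalGeneration and RBLNBartForConditionalGeneration now live in dedicated modules (items 25 and 56) rather than as thin RBLNModelForSeq2SeqLM aliases, and the remaining sequence-classification aliases pin rbln_model_input_names explicitly instead of relying on tokenizer introspection. A hedged sketch of the classifier path; the checkpoint id, sequence length, and the rbln_max_seq_len kwarg are illustrative assumptions, not taken from the diff:

    from transformers import AutoTokenizer
    from optimum.rbln import RBLNXLMRobertaForSequenceClassification

    model_id = "cardiffnlp/twitter-xlm-roberta-base-sentiment"  # illustrative checkpoint
    model = RBLNXLMRobertaForSequenceClassification.from_pretrained(
        model_id,
        export=True,           # compile for RBLN hardware
        rbln_max_seq_len=512,  # assumed kwarg: compiled input shapes are static
    )

    # rbln_model_input_names (added in this diff) fixes the traced inputs to
    # input_ids and attention_mask, so pad to the compiled sequence length.
    tok = AutoTokenizer.from_pretrained(model_id)
    inputs = tok("optimum-rbln 0.1.12", padding="max_length", max_length=512, return_tensors="pt")
    outputs = model(inputs.input_ids, inputs.attention_mask)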