optimum-rbln 0.1.11__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
Files changed (54)
  1. optimum/rbln/__init__.py +10 -7
  2. optimum/rbln/__version__.py +1 -1
  3. optimum/rbln/diffusers/models/autoencoder_kl.py +0 -2
  4. optimum/rbln/diffusers/models/controlnet.py +0 -6
  5. optimum/rbln/diffusers/models/unet_2d_condition.py +0 -3
  6. optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +4 -0
  7. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +18 -20
  8. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +18 -20
  9. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +19 -34
  10. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +20 -35
  11. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +12 -13
  12. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +12 -14
  13. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +13 -14
  14. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +13 -14
  15. optimum/rbln/modeling_alias.py +4 -9
  16. optimum/rbln/modeling_base.py +105 -139
  17. optimum/rbln/modeling_config.py +51 -0
  18. optimum/rbln/transformers/__init__.py +8 -0
  19. optimum/rbln/transformers/models/__init__.py +4 -1
  20. optimum/rbln/transformers/models/auto/modeling_auto.py +1 -0
  21. optimum/rbln/transformers/models/bart/__init__.py +1 -1
  22. optimum/rbln/transformers/models/bart/bart_architecture.py +18 -12
  23. optimum/rbln/transformers/models/bart/modeling_bart.py +25 -6
  24. optimum/rbln/transformers/models/bert/modeling_bert.py +1 -2
  25. optimum/rbln/transformers/models/clip/modeling_clip.py +0 -1
  26. optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +172 -100
  27. optimum/rbln/transformers/models/dpt/modeling_dpt.py +0 -1
  28. optimum/rbln/transformers/models/exaone/__init__.py +32 -0
  29. optimum/rbln/transformers/models/exaone/exaone_architecture.py +72 -0
  30. optimum/rbln/transformers/models/exaone/hf_hub_cached/configuration_exaone.py +181 -0
  31. optimum/rbln/transformers/models/exaone/hf_hub_cached/modeling_exaone.py +1725 -0
  32. optimum/rbln/transformers/models/exaone/modeling_exaone.py +78 -0
  33. optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +148 -152
  34. optimum/rbln/transformers/models/midm/modeling_midm.py +5 -0
  35. optimum/rbln/transformers/models/qwen2/__init__.py +24 -0
  36. optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +67 -0
  37. optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +29 -0
  38. optimum/rbln/transformers/models/seq2seq/__init__.py +24 -0
  39. optimum/rbln/{modeling_seq2seq.py → transformers/models/seq2seq/modeling_seq2seq.py} +107 -166
  40. optimum/rbln/transformers/models/t5/__init__.py +1 -0
  41. optimum/rbln/transformers/models/t5/modeling_t5.py +55 -0
  42. optimum/rbln/transformers/models/t5/t5_architecture.py +46 -32
  43. optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +0 -1
  44. optimum/rbln/transformers/models/whisper/modeling_whisper.py +37 -12
  45. optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +1 -2
  46. optimum/rbln/utils/import_utils.py +14 -0
  47. optimum/rbln/utils/logging.py +1 -1
  48. optimum/rbln/utils/runtime_utils.py +1 -1
  49. optimum/rbln/utils/timer_utils.py +26 -2
  50. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/METADATA +4 -3
  51. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/RECORD +54 -44
  52. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/WHEEL +1 -1
  53. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/entry_points.txt +0 -0
  54. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/licenses/LICENSE +0 -0
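
A note on items 21-23 and 38-41 before the hunks below: modeling_seq2seq.py moves under transformers/models/seq2seq, and T5/BART gain dedicated modeling modules (their aliases are dropped from modeling_alias.py at the end of this diff). A hedged import sketch, assuming the per-model classes are re-exported from the package root as the +10 -7 change to optimum/rbln/__init__.py suggests:

    # Assumed re-export paths in 0.1.12; class names are taken from the
    # aliases removed from modeling_alias.py in this diff.
    from optimum.rbln import RBLNBartForConditionalGeneration, RBLNT5ForConditionalGeneration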
optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
@@ -24,7 +24,7 @@
 
 from diffusers import StableDiffusionPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel
 from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
@@ -32,6 +32,7 @@ from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for text-to-image generation using Stable Diffusion.
@@ -52,12 +53,12 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
         rbln_config = kwargs.pop("rbln_config", None)
-        rbln_kwargs, _ = RBLNBaseModel.resolve_rbln_config(rbln_config, kwargs)
+        rbln_config = {} if rbln_config is None else rbln_config
 
-        device = rbln_kwargs.get("device", None)
-        device_map = rbln_kwargs.get("device_map", None)
-        create_runtimes = rbln_kwargs.get("create_runtimes", None)
-        optimize_host_memory = rbln_kwargs.get("optimize_host_memory", None)
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
 
         with ContextRblnConfig(
             device=device,
@@ -71,7 +72,7 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
             return model
 
         do_classifier_free_guidance = (
-            rbln_kwargs.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         vae = RBLNAutoencoderKL.from_pretrained(
@@ -81,17 +82,17 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
             model_save_dir=model_save_dir,
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=False,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
 
-        batch_size = rbln_kwargs.pop("batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -103,7 +104,7 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
             rbln_batch_size=unet_batch_size,
             rbln_use_encode=False,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -131,8 +132,6 @@ class RBLNStableDiffusionPipeline(StableDiffusionPipeline):
         # overwrite to replace incorrect config
         model.save_config(model_save_dir)
 
-        model.models = [vae.model[0], text_encoder.model[0], unet.model[0]]
-
         if optimize_host_memory is False:
             model.compiled_models = [vae.compiled_models[0], text_encoder.compiled_models[0], unet.compiled_models[0]]
 
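Across these pipeline hunks, the RBLNBaseModel.resolve_rbln_config call is replaced by a use_rbln_config decorator imported from modeling_config.py, and the method body then works on the rbln_config dict directly. The decorator's implementation is not shown in this diff; the following is a hypothetical sketch of the normalization it plausibly performs, inferred only from the call sites above (names and behavior are assumptions, not the shipped code):

    import functools

    def use_rbln_config(func):
        # Hypothetical sketch: fold loose rbln_-prefixed keyword arguments
        # into a single rbln_config dict so the wrapped from_pretrained
        # only has to handle one dictionary.
        @functools.wraps(func)
        def wrapper(cls, model_id, **kwargs):
            rbln_config = kwargs.pop("rbln_config", None) or {}
            for key in [k for k in list(kwargs) if k.startswith("rbln_")]:
                rbln_config[key[len("rbln_"):]] = kwargs.pop(key)
            return func(cls, model_id, rbln_config=rbln_config, **kwargs)
        return wrapper

Under this reading, the in-body line `rbln_config = {} if rbln_config is None else rbln_config` remains a defensive default for callers that bypass the decorator.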
optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
@@ -24,7 +24,7 @@
 
 from diffusers import StableDiffusionImg2ImgPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel
 from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
@@ -32,6 +32,7 @@ from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for image-to-image generation using Stable Diffusion.
@@ -52,12 +53,12 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
         rbln_config = kwargs.pop("rbln_config", None)
-        rbln_kwargs, _ = RBLNBaseModel.resolve_rbln_config(rbln_config, kwargs)
+        rbln_config = {} if rbln_config is None else rbln_config
 
-        device = rbln_kwargs.get("device", None)
-        device_map = rbln_kwargs.get("device_map", None)
-        create_runtimes = rbln_kwargs.get("create_runtimes", None)
-        optimize_host_memory = rbln_kwargs.get("optimize_host_memory", None)
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
 
         with ContextRblnConfig(
             device=device,
@@ -71,7 +72,7 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
             return model
 
         do_classifier_free_guidance = (
-            rbln_kwargs.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         # compile model, create runtime
@@ -83,17 +84,17 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
 
-        batch_size = rbln_kwargs.pop("batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -106,7 +107,7 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -134,9 +135,6 @@ class RBLNStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         # overwrite to replace incorrect config
         model.save_config(model_save_dir)
 
-        # vae encoder, vae decoder, text_encoder, unet
-        model.models = [vae.model[0], vae.model[1], text_encoder.model[0], unet.model[0]]
-
         if optimize_host_memory is False:
             model.compiled_models = [
                 vae.compiled_models[0],
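
The batch-size arithmetic repeated in each of these pipelines is worth spelling out: with classifier-free guidance enabled, every denoising step runs the UNet on the unconditional and text-conditioned latents together, so the UNet is compiled for twice the user-facing batch size. A worked instance using the defaults visible in the hunks:

    guidance_scale = 5.0          # default popped from rbln_config above
    time_cond_proj_dim = None     # typical SD/SDXL UNet config value
    do_classifier_free_guidance = guidance_scale > 1.0 and time_cond_proj_dim is None

    batch_size = 1                # default popped from rbln_config above
    unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
    assert unet_batch_size == 2   # UNet compiled for cond + uncond inputs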
optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py
@@ -16,7 +16,7 @@
 
 from diffusers import StableDiffusionXLPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection
 from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
@@ -24,6 +24,7 @@ from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for text-to-image generation using Stable Diffusion XL.
@@ -44,12 +45,12 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
         rbln_config = kwargs.pop("rbln_config", None)
-        rbln_kwargs, _ = RBLNBaseModel.resolve_rbln_config(rbln_config, kwargs)
+        rbln_config = {} if rbln_config is None else rbln_config
 
-        device = rbln_kwargs.get("device", None)
-        device_map = rbln_kwargs.get("device_map", None)
-        create_runtimes = rbln_kwargs.get("create_runtimes", None)
-        optimize_host_memory = rbln_kwargs.get("optimize_host_memory", None)
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
 
         with ContextRblnConfig(
             device=device,
@@ -63,7 +64,7 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
             return model
 
         do_classifier_free_guidance = (
-            rbln_kwargs.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         vae = RBLNAutoencoderKL.from_pretrained(
@@ -73,24 +74,24 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
             model_save_dir=model_save_dir,
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=False,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
         text_encoder_2 = RBLNCLIPTextModelWithProjection.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder_2",
             export=True,
             model_save_dir=model_save_dir,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
        )
 
-        batch_size = rbln_kwargs.pop("batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -103,7 +104,7 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
             rbln_batch_size=unet_batch_size,
             rbln_use_encode=False,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -130,8 +131,6 @@ class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
         # overwrite to replace incorrect config
         model.save_config(model_save_dir)
 
-        model.models = [vae.model[0], unet.model[0], text_encoder.model[0], text_encoder_2.model[0]]
-
         if optimize_host_memory is False:
             model.compiled_models = [
                 vae.compiled_models[0],
optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
@@ -16,7 +16,7 @@
 
 from diffusers import StableDiffusionXLImg2ImgPipeline
 
-from ....modeling_base import RBLNBaseModel
+from ....modeling_config import use_rbln_config
 from ....transformers import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection
 from ....utils.runtime_utils import ContextRblnConfig
 from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
@@ -24,6 +24,7 @@ from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
 
 class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
     @classmethod
+    @use_rbln_config
     def from_pretrained(cls, model_id, **kwargs):
         """
         Pipeline for image-to-image generation using Stable Diffusion XL.
@@ -44,12 +45,12 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
         export = kwargs.pop("export", None)
         model_save_dir = kwargs.pop("model_save_dir", None)
         rbln_config = kwargs.pop("rbln_config", None)
-        rbln_kwargs, _ = RBLNBaseModel.resolve_rbln_config(rbln_config, kwargs)
+        rbln_config = {} if rbln_config is None else rbln_config
 
-        device = rbln_kwargs.get("device", None)
-        device_map = rbln_kwargs.get("device_map", None)
-        create_runtimes = rbln_kwargs.get("create_runtimes", None)
-        optimize_host_memory = rbln_kwargs.get("optimize_host_memory", None)
+        device = rbln_config.get("device", None)
+        device_map = rbln_config.get("device_map", None)
+        create_runtimes = rbln_config.get("create_runtimes", None)
+        optimize_host_memory = rbln_config.get("optimize_host_memory", None)
 
         with ContextRblnConfig(
             device=device,
@@ -63,7 +64,7 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
             return model
 
         do_classifier_free_guidance = (
-            rbln_kwargs.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
+            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
         )
 
         vae = RBLNAutoencoderKL.from_pretrained(
@@ -74,24 +75,24 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
             rbln_unet_sample_size=model.unet.config.sample_size,
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
         text_encoder = RBLNCLIPTextModel.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder",
             export=True,
             model_save_dir=model_save_dir,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
         text_encoder_2 = RBLNCLIPTextModelWithProjection.from_pretrained(
             model_id=model_id,
             subfolder="text_encoder_2",
             export=True,
             model_save_dir=model_save_dir,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
 
-        batch_size = rbln_kwargs.pop("batch_size", 1)
+        batch_size = rbln_config.pop("batch_size", 1)
         unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
 
         unet = RBLNUNet2DConditionModel.from_pretrained(
@@ -105,7 +106,7 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
             rbln_use_encode=True,
             rbln_vae_scale_factor=model.vae_scale_factor,
             rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            rbln_config={**rbln_kwargs},
+            rbln_config={**rbln_config},
         )
 
         if model_save_dir is not None:
@@ -132,8 +133,6 @@ class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
         # overwrite to replace incorrect config
        model.save_config(model_save_dir)
 
-        model.models = [vae.model[0], vae.model[1], unet.model[0], text_encoder.model[0], text_encoder_2.model[0]]
-
         if optimize_host_memory is False:
             model.compiled_models = [
                 vae.compiled_models[0],
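
Taken together, the four pipeline changes mean callers route runtime options through a single rbln_config dict rather than loose keyword arguments. A hedged usage sketch: the class name comes from this diff, but the model id and the exact set of accepted keys are illustrative:

    from optimum.rbln import RBLNStableDiffusionXLImg2ImgPipeline

    pipe = RBLNStableDiffusionXLImg2ImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",  # illustrative model id
        export=True,
        rbln_config={
            "device": 0,                 # read via rbln_config.get(...) above
            "create_runtimes": True,
            "optimize_host_memory": True,
            "guidance_scale": 7.5,       # popped before compiling the UNet
            "batch_size": 1,             # doubled for CFG at compile time
        },
    )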
optimum/rbln/modeling_alias.py
@@ -28,7 +28,6 @@ from .modeling_base import (
     RBLNModelForQuestionAnswering,
     RBLNModelForSequenceClassification,
 )
-from .modeling_seq2seq import RBLNModelForSeq2SeqLM
 
 
 class RBLNASTForAudioClassification(RBLNModelForAudioClassification):
@@ -47,14 +46,6 @@ class RBLNResNetForImageClassification(RBLNModelForImageClassification):
     pass
 
 
-class RBLNT5ForConditionalGeneration(RBLNModelForSeq2SeqLM):
-    pass
-
-
-class RBLNBartForConditionalGeneration(RBLNModelForSeq2SeqLM):
-    pass
-
-
 class RBLNXLMRobertaForSequenceClassification(RBLNModelForSequenceClassification):
     rbln_model_input_names = ["input_ids", "attention_mask"]
 
@@ -65,3 +56,7 @@ class RBLNRobertaForSequenceClassification(RBLNModelForSequenceClassification):
 
 class RBLNRobertaForMaskedLM(RBLNModelForMaskedLM):
     rbln_model_input_names = ["input_ids", "attention_mask"]
+
+
+class RBLNViTForImageClassification(RBLNModelForImageClassification):
+    pass
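
The new RBLNViTForImageClassification alias is a bare subclass of RBLNModelForImageClassification, so it should follow the same export flow as the other aliases in this file. A minimal sketch, assuming the standard from_pretrained(export=True) entry point; the checkpoint id is illustrative:

    from optimum.rbln.modeling_alias import RBLNViTForImageClassification

    model = RBLNViTForImageClassification.from_pretrained(
        "google/vit-base-patch16-224",  # illustrative HF checkpoint
        export=True,                    # compile for the RBLN NPU on load
    )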