thestage-0.5.37-py3-none-any.whl → thestage-0.5.39-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. thestage/__init__.py +1 -1
  2. thestage/controllers/config_controller.py +3 -4
  3. thestage/controllers/container_controller.py +12 -16
  4. thestage/controllers/project_controller.py +145 -3
  5. thestage/controllers/utils_controller.py +2 -3
  6. thestage/entities/file_item.py +27 -0
  7. thestage/entities/project_inference_simulator_model.py +1 -0
  8. thestage/exceptions/file_system_exception.py +6 -0
  9. thestage/helpers/error_handler.py +2 -2
  10. thestage/helpers/logger/app_logger.py +3 -4
  11. thestage/services/abstract_service.py +1 -2
  12. thestage/services/app_config_service.py +2 -3
  13. thestage/services/clients/git/git_client.py +3 -3
  14. thestage/services/clients/thestage_api/api_client.py +58 -61
  15. thestage/services/clients/thestage_api/core/api_client_abstract.py +91 -0
  16. thestage/services/clients/thestage_api/core/api_client_core.py +25 -0
  17. thestage/services/clients/thestage_api/core/http_client_exception.py +12 -0
  18. thestage/services/clients/thestage_api/dtos/inference_controller/deploy_inference_model_to_instance_request.py +14 -0
  19. thestage/services/clients/thestage_api/dtos/inference_controller/deploy_inference_model_to_instance_response.py +13 -0
  20. thestage/services/clients/thestage_api/dtos/inference_controller/deploy_inference_model_to_sagemaker_request.py +12 -0
  21. thestage/services/clients/thestage_api/dtos/inference_controller/deploy_inference_model_to_sagemaker_response.py +12 -0
  22. thestage/services/clients/thestage_api/dtos/logging_controller/log_polling_request.py +1 -1
  23. thestage/services/clients/thestage_api/dtos/sftp_path_helper.py +3 -2
  24. thestage/services/config_provider/config_provider.py +98 -44
  25. thestage/services/connect/connect_service.py +1 -1
  26. thestage/services/container/container_service.py +2 -8
  27. thestage/services/core_files/config_entity.py +25 -0
  28. thestage/services/filesystem_service.py +115 -0
  29. thestage/services/instance/instance_service.py +1 -2
  30. thestage/services/logging/logging_service.py +76 -95
  31. thestage/services/project/dto/inference_simulator_model_dto.py +1 -0
  32. thestage/services/project/mapper/project_inference_simulator_model_mapper.py +1 -0
  33. thestage/services/project/project_service.py +184 -7
  34. thestage/services/remote_server_service.py +3 -3
  35. thestage/services/service_factory.py +1 -2
  36. thestage/services/validation_service.py +26 -10
  37. {thestage-0.5.37.dist-info → thestage-0.5.39.dist-info}/METADATA +2 -2
  38. {thestage-0.5.37.dist-info → thestage-0.5.39.dist-info}/RECORD +41 -30
  39. {thestage-0.5.37.dist-info → thestage-0.5.39.dist-info}/LICENSE.txt +0 -0
  40. {thestage-0.5.37.dist-info → thestage-0.5.39.dist-info}/WHEEL +0 -0
  41. {thestage-0.5.37.dist-info → thestage-0.5.39.dist-info}/entry_points.txt +0 -0
@@ -8,8 +8,9 @@ import aioconsole
8
8
  import typer
9
9
  from httpx import ReadTimeout, ConnectError, ConnectTimeout
10
10
  from requests.exceptions import ChunkedEncodingError
11
- from thestage_core.entities.config_entity import ConfigEntity
12
11
 
12
+ from thestage.services.clients.thestage_api.dtos.enums.container_status import DockerContainerStatus
13
+ from thestage.services.core_files.config_entity import ConfigEntity
13
14
  from thestage.services.clients.thestage_api.dtos.enums.inference_simulator_status import InferenceSimulatorStatus
14
15
  from thestage.services.clients.thestage_api.dtos.enums.task_status import TaskStatus
15
16
  from thestage.services.clients.thestage_api.dtos.inference_controller.get_inference_simulator_response import \
@@ -69,25 +70,23 @@ class LoggingService(AbstractService):
69
70
 
70
71
 
71
72
  @error_handler()
72
- def stream_container_logs(self, config: ConfigEntity, container: DockerContainerDto):
73
- typer.echo(__(
74
- f"Log stream for docker container '%container_slug%' started",
75
- {
76
- 'container_slug': container.slug,
77
- }
78
- ))
79
- typer.echo(__("Press CTRL+C to stop"))
80
- try:
81
- for log_json in self.__thestage_api_client.get_container_log_stream(
82
- token=config.main.thestage_auth_token,
83
- container_id=container.id
84
- ):
85
- self.__print_log_line(log_json)
86
- except ChunkedEncodingError as e1: # handling server timeout
87
- typer.echo(__('Log stream disconnected (1)'))
73
+ def print_last_container_logs(self, config: ConfigEntity, container_uid: str, logs_number: Optional[int]):
74
+ container: Optional[DockerContainerDto] = self.__thestage_api_client.get_container(
75
+ token=config.main.thestage_auth_token,
76
+ container_slug=container_uid,
77
+ )
78
+
79
+ if not container:
80
+ typer.echo("Container was not found")
88
81
  raise typer.Exit(1)
89
82
 
90
- typer.echo(__('Log stream disconnected'))
83
+ logs = self.__thestage_api_client.query_user_logs(
84
+ token=config.main.thestage_auth_token,
85
+ container_id=container.id,
86
+ limit=logs_number
87
+ )
88
+ for log_message in reversed(logs.queryResult):
89
+ self.__print_log_line_object(log_message)
91
90
 
92
91
 
93
92
  @error_handler()
@@ -134,7 +133,7 @@ class LoggingService(AbstractService):
134
133
 
135
134
  typer.echo(__("CTRL+C to cancel the task. CTRL+D to disconnect from log stream."))
136
135
 
137
- print_logs_task = asyncio.create_task(self.print_task_or_inference_simulator_logs(config.main.thestage_auth_token, task.id))
136
+ print_logs_task = asyncio.create_task(self.print_realtime_logs(token=config.main.thestage_auth_token, task_id=task.id))
138
137
  input_task = asyncio.create_task(self.read_log_stream_input())
139
138
 
140
139
  def sigint_handler():
@@ -200,7 +199,7 @@ class LoggingService(AbstractService):
200
199
  typer.echo(__("CTRL+D to disconnect from log stream."))
201
200
 
202
201
  print_task_or_inference_simulator_logs = asyncio.create_task(
203
- self.print_task_or_inference_simulator_logs(config.main.thestage_auth_token, inference_simulator_id=inference_simulator.id)
202
+ self.print_realtime_logs(token=config.main.thestage_auth_token, inference_simulator_id=inference_simulator.id)
204
203
  )
205
204
  input_task = asyncio.create_task(self.read_log_stream_input())
206
205
 
@@ -212,6 +211,49 @@ class LoggingService(AbstractService):
212
211
  typer.echo(__(f"Disconnected from log stream. You can try to reconnect with 'thestage project inference-simulator logs {slug}'."))
213
212
 
214
213
 
214
+ @error_handler()
215
+ def stream_container_logs_with_controls(self, config: ConfigEntity, container_uid: str):
216
+ asyncio.run(
217
+ self.__stream_container_logs_with_controls_async(
218
+ config=config,
219
+ container_uid=container_uid
220
+ )
221
+ )
222
+
223
+
224
+ @error_handler()
225
+ async def __stream_container_logs_with_controls_async(self, config: ConfigEntity, container_uid: str):
226
+ container: Optional[DockerContainerDto] = self.__thestage_api_client.get_container(
227
+ token=config.main.thestage_auth_token,
228
+ container_slug=container_uid,
229
+ )
230
+
231
+ if container:
232
+ if container.frontend_status.status_key not in [DockerContainerStatus.RUNNING]:
233
+ typer.echo(f"Container status: '{container.frontend_status.status_translation}'")
234
+ else:
235
+ typer.echo("Container was not found")
236
+ raise typer.Exit(1)
237
+
238
+ typer.echo(f"Log stream for Docker container started")
239
+ typer.echo("CTRL+D to disconnect from log stream.")
240
+
241
+ print_logs_task = asyncio.create_task(self.print_realtime_logs(token=config.main.thestage_auth_token, docker_container_id=container.id))
242
+ input_task = asyncio.create_task(self.read_log_stream_input())
243
+
244
+ def sigint_handler():
245
+ input_task.cancel()
246
+
247
+ loop = asyncio.get_event_loop()
248
+ for signal_item in [SIGINT]: # SIGINT == CTRL+C
249
+ loop.add_signal_handler(signal_item, sigint_handler)
250
+
251
+ done, pending = await asyncio.wait([print_logs_task, input_task], return_when=asyncio.FIRST_COMPLETED)
252
+
253
+ if input_task in done:
254
+ print_logs_task.cancel()
255
+
256
+
215
257
  async def read_log_stream_input(self):
216
258
  try:
217
259
  while True:
@@ -223,7 +265,13 @@ class LoggingService(AbstractService):
223
265
  pass
224
266
 
225
267
 
226
- async def print_task_or_inference_simulator_logs(self, token: str, task_id: Optional[int] = None, inference_simulator_id: Optional[int] = None):
268
+ async def print_realtime_logs(
269
+ self,
270
+ token: str,
271
+ task_id: Optional[int] = None,
272
+ inference_simulator_id: Optional[int] = None,
273
+ docker_container_id: Optional[int] = None,
274
+ ):
227
275
  polling_interval_seconds: float = 4 # also adjust polling api method timeout if changed
228
276
  between_logs_sleeping_coef: float = 1 # we emulate delay between logs, but if for any reason code runs for too long - delays will be controlled with this coef
229
277
  last_iteration_log_timestamp: Optional[str] = None # pointer to next iteration polling start (obtained from each response)
@@ -249,7 +297,7 @@ class LoggingService(AbstractService):
249
297
  token=token,
250
298
  task_id=task_id,
251
299
  inference_simulator_id=inference_simulator_id,
252
- docker_container_id=None,
300
+ docker_container_id=docker_container_id,
253
301
  last_log_timestamp=last_iteration_log_timestamp,
254
302
  last_log_id=last_log_id
255
303
  )
@@ -283,82 +331,15 @@ class LoggingService(AbstractService):
283
331
  errors_started_at = datetime.utcnow()
284
332
 
285
333
  if consecutive_error_count > 7:
334
+ seconds_with_error = (datetime.utcnow() - errors_started_at).total_seconds()
286
335
  if inference_simulator_id:
287
- seconds_with_error = (datetime.utcnow() - errors_started_at).total_seconds()
288
336
  print_nonblocking(f"Log stream: disconnected from server (connectivity issues for {seconds_with_error} seconds). Try 'thestage inference-simulator logs <inference-simulator-UID>' to reconnect.", writer)
289
- break
337
+ elif task_id:
338
+ print_nonblocking(f"Log stream: disconnected from server (connectivity issues for {seconds_with_error} seconds). Try 'thestage project task logs {task_id}' to reconnect.", writer)
339
+ elif docker_container_id:
340
+ print_nonblocking(f"Log stream: disconnected from server (connectivity issues for {seconds_with_error} seconds). Try 'thestage container logs <docker-container-UID>' to reconnect.", writer)
290
341
  else:
291
- seconds_with_error = (datetime.utcnow() - errors_started_at).total_seconds()
292
- print_nonblocking(f"Log stream: disconnected from server (connectivity issues for {seconds_with_error} seconds). Try 'thestage project inference-simulator logs {task_id}' to reconnect.", writer)
293
- break
294
-
295
- # depending on iteration duration - sleep for the remaining time and adjust log sleep coefficient if needed
296
- iteration_duration = (datetime.utcnow() - iteration_started_at).total_seconds()
297
- if iteration_duration > polling_interval_seconds:
298
- between_logs_sleeping_coef *= 0.85
299
- else:
300
- await asyncio.sleep(polling_interval_seconds - iteration_duration)
301
- if between_logs_sleeping_coef < 1:
302
- between_logs_sleeping_coef = min(1.0, between_logs_sleeping_coef * 1.15)
303
-
304
-
305
- async def print_inference_simulator_logs(self, token: str, inference_simulator_id: int):
306
- polling_interval_seconds: float = 4 # also adjust polling api method timeout if changed
307
- between_logs_sleeping_coef: float = 1 # we emulate delay between logs, but if for any reason code runs for too long - delays will be controlled with this coef
308
- last_iteration_log_timestamp: Optional[str] = None # pointer to next iteration polling start (obtained from each response)
309
- last_log_id: Optional[str] = None # pointer to next iteration polling start - to exclude the log id from result (obtained from each response)
310
- consecutive_error_count: int = 0 # connectivity errors count - stream will disconnect if too many errors in a row
311
- iteration_started_at: datetime # used to control iteration duration - polling should be done at around exact rate
312
- errors_started_at: Optional[datetime] = None # time since errors started to stream disconnect
313
-
314
- is_no_more_logs = False
315
- while not is_no_more_logs:
316
- log_wait_remaining_limit: float = polling_interval_seconds # hard limit just in case
317
- iteration_started_at = datetime.utcnow()
318
- last_printed_log_timestamp: Optional[datetime] = None
319
- reader, writer = await aioconsole.get_standard_streams()
320
-
321
- # this shows (somewhat accurate) time difference between logs here and in real time. should not grow.
322
- # if last_iteration_log_timestamp:
323
- # last_log_timestamp_parsed = datetime.strptime(last_iteration_log_timestamp, '%Y-%m-%dT%H:%M:%S.%f')
324
- # stream_to_logs_diff = datetime.utcnow() - last_log_timestamp_parsed
325
- # print_nonblocking(f'TDIFF {stream_to_logs_diff.total_seconds()}', writer)
326
- try:
327
- logs_response = await self.__thestage_api_client.poll_logs_httpx(
328
- token=token,
329
- inference_simulator_id=inference_simulator_id,
330
- docker_container_id=None,
331
- last_log_timestamp=last_iteration_log_timestamp,
332
- last_log_id=last_log_id
333
- )
334
-
335
- if consecutive_error_count > 0:
336
- consecutive_error_count = 0
337
- errors_started_at = None
338
- log_wait_remaining_limit = 0 # no log delays after reconnect
339
-
340
- last_iteration_log_timestamp = logs_response.lastLogTimestamp
341
- last_log_id = logs_response.lastLogId
342
-
343
- for log_item in logs_response.logs:
344
- current_log_timestamp = datetime.strptime(log_item.timestamp[:26], '%Y-%m-%dT%H:%M:%S.%f') # python does not like nanoseconds
345
- if last_printed_log_timestamp is not None and log_wait_remaining_limit > 0:
346
- logs_sleeptime = (current_log_timestamp - last_printed_log_timestamp).total_seconds() * between_logs_sleeping_coef
347
- await asyncio.sleep(logs_sleeptime)
348
- log_wait_remaining_limit -= logs_sleeptime
349
- self.__print_log_line_object_nonblocking(log_item, writer)
350
- last_printed_log_timestamp = current_log_timestamp
351
-
352
- except (ReadTimeout, ConnectError, ConnectTimeout) as e:
353
- consecutive_error_count += 1
354
- if consecutive_error_count == 1:
355
- print_nonblocking("Network issues, attempting to re-establish connection...", writer, BytePrintStyle.ORANGE)
356
- if not errors_started_at:
357
- errors_started_at = datetime.utcnow()
358
-
359
- if consecutive_error_count > 7:
360
- seconds_with_error = (datetime.utcnow() - errors_started_at).total_seconds()
361
- print_nonblocking(f"Log stream: disconnected from server (connectivity issues for {seconds_with_error} seconds).", writer)
342
+ print_nonblocking(f"Log stream: disconnected from server (connectivity issues for {seconds_with_error} seconds)", writer)
362
343
  break
363
344
 
364
345
  # depending on iteration duration - sleep for the remaining time and adjust log sleep coefficient if needed
@@ -5,6 +5,7 @@ class InferenceSimulatorModelDto(BaseModel):
5
5
  model_config = ConfigDict(use_enum_values=True)
6
6
 
7
7
  id: Optional[int] = Field(None, alias='id')
8
+ slug: Optional[str] = Field(None, alias='slug')
8
9
  client_id: Optional[int] = Field(None, alias='clientId')
9
10
  instance_rented_id: Optional[int] = Field(None, alias='instanceRentedId')
10
11
  selfhosted_instance_id: Optional[int] = Field(None, alias='selfhostedInstanceId')
@@ -12,6 +12,7 @@ class ProjectInferenceSimulatorModelMapper(AbstractMapper):
12
12
 
13
13
  return ProjectInferenceSimulatorModelEntity(
14
14
  id=item.id,
15
+ slug=item.slug,
15
16
  status=item.status or '',
16
17
  commit_hash=item.commit_hash or '',
17
18
  environment_metadata=item.environment_metadata or '',
@@ -1,16 +1,19 @@
1
1
  import os
2
+ import time
3
+ from datetime import datetime
2
4
  from pathlib import Path
3
5
  from typing import Optional, List
4
6
 
5
7
  import json
8
+
9
+ import boto3
6
10
  import click
7
11
  import typer
8
12
  from git import Commit
9
13
  from tabulate import tabulate
10
- from thestage_core.entities.config_entity import ConfigEntity
11
- from thestage_core.exceptions.http_error_exception import HttpClientException
12
- from thestage_core.services.filesystem_service import FileSystemServiceCore
13
14
 
15
+ from thestage.services.clients.thestage_api.core.http_client_exception import HttpClientException
16
+ from thestage.services.core_files.config_entity import ConfigEntity
14
17
  from thestage.color_scheme.color_scheme import ColorScheme
15
18
  from thestage.entities.enums.yes_no_response import YesOrNoResponse
16
19
  from thestage.exceptions.git_access_exception import GitAccessException
@@ -18,6 +21,10 @@ from thestage.i18n.translation import __
18
21
  from thestage.services.clients.git.git_client import GitLocalClient
19
22
  from thestage.services.clients.thestage_api.dtos.container_response import DockerContainerDto
20
23
  from thestage.services.clients.thestage_api.dtos.enums.container_status import DockerContainerStatus
24
+ from thestage.services.clients.thestage_api.dtos.inference_controller.deploy_inference_model_to_instance_response import \
25
+ DeployInferenceModelToInstanceResponse
26
+ from thestage.services.clients.thestage_api.dtos.inference_controller.deploy_inference_model_to_sagemaker_response import \
27
+ DeployInferenceModelToSagemakerResponse
21
28
  from thestage.services.clients.thestage_api.dtos.inference_controller.get_inference_simulator_response import \
22
29
  GetInferenceSimulatorResponse
23
30
  from thestage.services.clients.thestage_api.dtos.paginated_entity_list import PaginatedEntityList
@@ -29,6 +36,7 @@ from thestage.services.clients.thestage_api.dtos.project_controller.project_star
29
36
  ProjectStartInferenceSimulatorResponse
30
37
  from thestage.services.clients.thestage_api.dtos.project_response import ProjectDto
31
38
  from thestage.services.clients.thestage_api.dtos.task_controller.task_view_response import TaskViewResponse
39
+ from thestage.services.filesystem_service import FileSystemServiceCore
32
40
  from thestage.services.project.dto.inference_simulator_dto import InferenceSimulatorDto
33
41
  from thestage.services.project.dto.inference_simulator_model_dto import InferenceSimulatorModelDto
34
42
  from thestage.services.task.dto.task_dto import TaskDto
@@ -359,11 +367,13 @@ class ProjectService(AbstractService):
359
367
  commit = self.__git_local_client.get_current_commit(path=config.runtime.working_directory)
360
368
  if commit and isinstance(commit, Commit):
361
369
  commit_hash = commit.hexsha
362
- task_title = commit.message.strip()
370
+ if not task_title:
371
+ task_title = commit.message.strip()
363
372
  else: # if commit_hash is defined
364
- commit = self.__git_local_client.get_commit_by_hash(path=config.runtime.working_directory, commit_hash=commit_hash)
365
- if commit and isinstance(commit, Commit):
366
- task_title = commit.message.strip()
373
+ if not task_title:
374
+ commit = self.__git_local_client.get_commit_by_hash(path=config.runtime.working_directory, commit_hash=commit_hash)
375
+ if commit and isinstance(commit, Commit):
376
+ task_title = commit.message.strip()
367
377
 
368
378
  if not task_title: # should not happen but maybe git allows some kind of empty messages
369
379
  task_title = f'Task_{commit_hash}'
@@ -902,3 +912,170 @@ class ProjectService(AbstractService):
902
912
 
903
913
  return project_config
904
914
 
915
+ @error_handler()
916
+ def project_deploy_inference_simulator_model_to_instance(
917
+ self,
918
+ config: ConfigEntity,
919
+ unique_id: Optional[str] = None,
920
+ unique_id_with_timestamp: Optional[str] = None,
921
+ rented_instance_unique_id: Optional[str] = None,
922
+ self_hosted_instance_unique_id: Optional[str] = None,
923
+ ) -> None:
924
+ project_config: ProjectConfig = self.__get_fixed_project_config(config=config)
925
+ if not project_config:
926
+ typer.echo(
927
+ __("No project found at the path: %path%. Please initialize or clone a project first. Or provide path to project using --working-directory option.",
928
+ {"path": config.runtime.working_directory}))
929
+ raise typer.Exit(1)
930
+
931
+ if rented_instance_unique_id and self_hosted_instance_unique_id:
932
+ typer.echo(__("Error: Cannot provide both rented and self-hosted instance unique IDs."))
933
+ raise typer.Exit(1)
934
+
935
+ if not rented_instance_unique_id and not self_hosted_instance_unique_id:
936
+ typer.echo(__("Error: Either a rented instance ID or a self-hosted instance unique ID must be provided."))
937
+ raise typer.Exit(1)
938
+
939
+ project_config: ProjectConfig = self.__config_provider.read_project_config()
940
+ if not project_config:
941
+ typer.echo(__("No project found at the path: %path%. Please initialize or clone a project first.",
942
+ {"path": config.runtime.working_directory}))
943
+ raise typer.Exit(1)
944
+
945
+ typer.echo(__("Creating inference simulator with unique ID: %unique_id_with_timestamp%", {"unique_id_with_timestamp": unique_id_with_timestamp}))
946
+ deploy_model_to_instance_response: DeployInferenceModelToInstanceResponse = self.__thestage_api_client.deploy_inference_model_to_instance(
947
+ token=config.main.thestage_auth_token,
948
+ unique_id=unique_id,
949
+ unique_id_with_timestamp=unique_id_with_timestamp,
950
+ rented_instance_unique_id=rented_instance_unique_id,
951
+ self_hosted_instance_unique_id=self_hosted_instance_unique_id
952
+ )
953
+ if deploy_model_to_instance_response:
954
+ if deploy_model_to_instance_response.message:
955
+ typer.echo(deploy_model_to_instance_response.message)
956
+ if deploy_model_to_instance_response.is_success:
957
+ typer.echo("Inference simulator has been scheduled to run successfully.")
958
+ else:
959
+ typer.echo(__(
960
+ 'Inference simulator failed to run with an error: %server_massage%',
961
+ {'server_massage': deploy_model_to_instance_response.message or ""}
962
+ ))
963
+ raise typer.Exit(1)
964
+ else:
965
+ typer.echo(__("Inference simulator failed to run with an error"))
966
+ raise typer.Exit(1)
967
+
968
+
969
+ @error_handler()
970
+ def project_deploy_inference_simulator_model_to_sagemaker(
971
+ self,
972
+ config: ConfigEntity,
973
+ unique_id: Optional[str] = None,
974
+ arn: Optional[str] = None,
975
+ instance_type: Optional[str] = None,
976
+ initial_variant_weight: Optional[float] = 1.0,
977
+ initial_instance_count: Optional[int] = None,
978
+ ) -> None:
979
+ project_config: ProjectConfig = self.__get_fixed_project_config(config=config)
980
+ if not project_config:
981
+ typer.echo(
982
+ __("No project found at the path: %path%. Please initialize or clone a project first. Or provide path to project using --working-directory option.",
983
+ {"path": config.runtime.working_directory}))
984
+ raise typer.Exit(1)
985
+
986
+ if not instance_type:
987
+ typer.echo(__("Error: Instance type is required."))
988
+ raise typer.Exit(1)
989
+
990
+ if not initial_instance_count:
991
+ typer.echo(__("Error: Initial instance count is required."))
992
+ raise typer.Exit(1)
993
+
994
+ if not arn:
995
+ typer.echo(__("Error: ARN is required."))
996
+ raise typer.Exit(1)
997
+
998
+ project_config: ProjectConfig = self.__config_provider.read_project_config()
999
+ if not project_config:
1000
+ typer.echo(__("No project found at the path: %path%. Please initialize or clone a project first.",
1001
+ {"path": config.runtime.working_directory}))
1002
+ raise typer.Exit(1)
1003
+
1004
+ deploy_model_to_sagemaker_response: DeployInferenceModelToSagemakerResponse = self.__thestage_api_client.deploy_inference_model_to_sagemaker(
1005
+ token=config.main.thestage_auth_token,
1006
+ unique_id=unique_id,
1007
+ arn=arn,
1008
+ )
1009
+
1010
+ if not deploy_model_to_sagemaker_response.is_success:
1011
+ typer.echo(__(
1012
+ 'Failed to prepare model for deployment with an error: %server_massage%',
1013
+ {'server_massage': deploy_model_to_sagemaker_response.message or ""}
1014
+ ))
1015
+ raise typer.Exit(1)
1016
+
1017
+ model_id = deploy_model_to_sagemaker_response.modelId
1018
+ image_uri = deploy_model_to_sagemaker_response.ecrImageUrl
1019
+ model_uri = deploy_model_to_sagemaker_response.s3ArtifactsUrl
1020
+ region = "us-east-1"
1021
+ sm_client = boto3.client('sagemaker', region_name=region)
1022
+
1023
+ try:
1024
+ container = {
1025
+ "Image": image_uri,
1026
+ "ModelDataUrl": model_uri,
1027
+ "Environment": {
1028
+ "SAGEMAKER_TRITON_DEFAULT_MODEL_NAME": model_id,
1029
+ "THESTAGE_API_URL": config.main.thestage_api_url,
1030
+ "THESTAGE_AUTH_TOKEN": config.main.thestage_auth_token
1031
+ },
1032
+ }
1033
+
1034
+ sm_model_name = f"{unique_id}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
1035
+ create_model_response = sm_client.create_model(
1036
+ ModelName=sm_model_name,
1037
+ ExecutionRoleArn=arn,
1038
+ PrimaryContainer=container,
1039
+ )
1040
+ typer.echo(f"Model created successfully. Model ARN: {create_model_response['ModelArn']}")
1041
+
1042
+ endpoint_config_name = f"{unique_id}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
1043
+ create_endpoint_config_response = sm_client.create_endpoint_config(
1044
+ EndpointConfigName=endpoint_config_name,
1045
+ ProductionVariants=[
1046
+ {
1047
+ "InstanceType": instance_type,
1048
+ "InitialVariantWeight": initial_variant_weight,
1049
+ "InitialInstanceCount": initial_instance_count,
1050
+ "ModelName": sm_model_name,
1051
+ "VariantName": "AllTraffic",
1052
+ }
1053
+ ],
1054
+ )
1055
+ typer.echo(
1056
+ f"Endpoint configuration created successfully. Endpoint Config ARN: {create_endpoint_config_response['EndpointConfigArn']}")
1057
+
1058
+ endpoint_name = f"{unique_id}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
1059
+ create_endpoint_response = sm_client.create_endpoint(
1060
+ EndpointName=endpoint_name,
1061
+ EndpointConfigName=endpoint_config_name,
1062
+ )
1063
+ typer.echo(f"Endpoint created successfully. Endpoint ARN: {create_endpoint_response['EndpointArn']}")
1064
+
1065
+ typer.echo("Waiting for the endpoint to become active...")
1066
+ while True:
1067
+ resp = sm_client.describe_endpoint(EndpointName=endpoint_name)
1068
+ status = resp["EndpointStatus"]
1069
+ typer.echo(f"Status: {status}")
1070
+ if status == "InService":
1071
+ break
1072
+ elif status == "Failed":
1073
+ typer.echo(f"Endpoint creation failed. Reason: {resp.get('FailureReason', 'Unknown')}")
1074
+ raise typer.Exit(1)
1075
+ time.sleep(60)
1076
+
1077
+ typer.echo(f"Endpoint is ready. ARN: {resp['EndpointArn']} Status: {status}")
1078
+
1079
+ except Exception as e:
1080
+ typer.echo(__("Failed to deploy the inference simulator model to SageMaker: %error%", {"error": str(e)}))
1081
+ raise typer.Exit(1)
@@ -11,10 +11,9 @@ from click import Abort
11
11
  from paramiko.client import SSHClient
12
12
  from paramiko.pkey import PKey
13
13
  from paramiko.sftp_client import SFTPClient
14
- from thestage_core.entities.config_entity import ConfigEntity
15
- from thestage_core.entities.file_item import FileItemEntity
16
- from thestage_core.services.filesystem_service import FileSystemServiceCore
17
14
 
15
+ from thestage.entities.file_item import FileItemEntity
16
+ from thestage.services.core_files.config_entity import ConfigEntity
18
17
  from thestage.exceptions.remote_server_exception import RemoteServerException
19
18
  from thestage.helpers.logger.app_logger import app_logger
20
19
  from thestage.entities.enums.shell_type import ShellType
@@ -22,6 +21,7 @@ from thestage.helpers.ssh_util import parse_private_key
22
21
  from thestage.i18n.translation import __
23
22
  from thestage.services.clients.thestage_api.dtos.sftp_path_helper import SftpFileItemEntity
24
23
  from thestage.services.config_provider.config_provider import ConfigProvider
24
+ from thestage.services.filesystem_service import FileSystemServiceCore
25
25
 
26
26
  old_value: int = 0
27
27
 
@@ -1,8 +1,7 @@
1
1
  from typing import Optional
2
2
 
3
- from thestage_core.services.filesystem_service import FileSystemServiceCore
4
-
5
3
  from thestage.services.connect.connect_service import ConnectService
4
+ from thestage.services.filesystem_service import FileSystemServiceCore
6
5
  from thestage.services.logging.logging_service import LoggingService
7
6
  from thestage.services.project.project_service import ProjectService
8
7
  from thestage.services.remote_server_service import RemoteServerService
@@ -1,15 +1,12 @@
1
- from typing import Dict, Optional
2
-
3
1
  import typer
4
- from thestage_core.entities.config_entity import ConfigEntity, MainConfigEntity
5
- from thestage_core.services.validation_service import ValidationServiceCore
6
2
 
7
3
  from thestage.i18n.translation import __
8
4
  from thestage.services.config_provider.config_provider import ConfigProvider
9
5
  from thestage.services.clients.thestage_api.api_client import TheStageApiClient
6
+ from thestage.services.core_files.config_entity import ConfigEntity
10
7
 
11
8
 
12
- class ValidationService(ValidationServiceCore):
9
+ class ValidationService:
13
10
  _thestage_api_client: TheStageApiClient = None
14
11
 
15
12
  def __init__(
@@ -17,10 +14,9 @@ class ValidationService(ValidationServiceCore):
17
14
  thestage_api_client: TheStageApiClient,
18
15
  config_provider: ConfigProvider,
19
16
  ):
20
- super(ValidationService, self).__init__(
21
- thestage_api_client=thestage_api_client,
22
- config_provider=config_provider,
23
- )
17
+ self._thestage_api_client = thestage_api_client
18
+ self._config_provider = config_provider
19
+
24
20
 
25
21
  def check_token(
26
22
  self,
@@ -36,7 +32,9 @@ class ValidationService(ValidationServiceCore):
36
32
  )
37
33
 
38
34
  # TODO this fails with 503 error - AttributeError("'bytes' object has no attribute 'text'") from _parse_api_response method in core
39
- is_valid = self.validate_token(token,)
35
+ is_valid: bool = False
36
+ if token:
37
+ is_valid = self._thestage_api_client.validate_token(token=token)
40
38
  if not is_valid:
41
39
  typer.echo(__(
42
40
  'API token is invalid: generate API token using TheStage AI WebApp'
@@ -44,3 +42,21 @@ class ValidationService(ValidationServiceCore):
44
42
  raise typer.Exit(1)
45
43
 
46
44
  config.main.thestage_auth_token = token
45
+
46
+
47
+ @staticmethod
48
+ def is_present_token(config: ConfigEntity) -> bool:
49
+ present_token = True
50
+ if not config:
51
+ present_token = False
52
+ else:
53
+ if not config.main.thestage_auth_token:
54
+ present_token = False
55
+
56
+ if config.start_on_daemon:
57
+ if config.daemon and config.daemon.daemon_token:
58
+ present_token = True
59
+ else:
60
+ present_token = False
61
+
62
+ return present_token
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: thestage
3
- Version: 0.5.37
3
+ Version: 0.5.39
4
4
  Summary:
5
5
  Author: TheStage AI team
6
6
  Author-email: hello@thestage.ai
@@ -11,6 +11,7 @@ Classifier: Programming Language :: Python :: 3.10
11
11
  Classifier: Programming Language :: Python :: 3.11
12
12
  Classifier: Programming Language :: Python :: 3.12
13
13
  Requires-Dist: aioconsole (>=0.8.0,<0.9.0)
14
+ Requires-Dist: boto3 (>=1.35.80,<2.0.0)
14
15
  Requires-Dist: gitpython (>=3.1.40,<4.0.0)
15
16
  Requires-Dist: httpx (>=0.27.2,<0.28.0)
16
17
  Requires-Dist: paramiko (>=3.4.0,<4.0.0)
@@ -19,7 +20,6 @@ Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
19
20
  Requires-Dist: python-gettext-translations (>=1.1.0,<2.0.0)
20
21
  Requires-Dist: requests (>=2.31.0,<3.0.0)
21
22
  Requires-Dist: tabulate (>=0.9.0,<0.10.0)
22
- Requires-Dist: thestage-core (==0.0.16)
23
23
  Requires-Dist: typer[all] (>=0.9.0,<0.10.0)
24
24
  Description-Content-Type: text/markdown
25
25