truefoundry 0.2.10__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of truefoundry might be problematic.

Files changed (112)
  1. truefoundry/__init__.py +1 -0
  2. truefoundry/autodeploy/cli.py +31 -18
  3. truefoundry/deploy/__init__.py +112 -1
  4. truefoundry/deploy/auto_gen/models.py +1714 -0
  5. truefoundry/deploy/builder/__init__.py +134 -0
  6. truefoundry/deploy/builder/builders/__init__.py +22 -0
  7. truefoundry/deploy/builder/builders/dockerfile.py +57 -0
  8. truefoundry/deploy/builder/builders/tfy_notebook_buildpack/__init__.py +46 -0
  9. truefoundry/deploy/builder/builders/tfy_notebook_buildpack/dockerfile_template.py +66 -0
  10. truefoundry/deploy/builder/builders/tfy_python_buildpack/__init__.py +44 -0
  11. truefoundry/deploy/builder/builders/tfy_python_buildpack/dockerfile_template.py +158 -0
  12. truefoundry/deploy/builder/docker_service.py +168 -0
  13. truefoundry/deploy/cli/cli.py +21 -26
  14. truefoundry/deploy/cli/commands/__init__.py +18 -0
  15. truefoundry/deploy/cli/commands/apply_command.py +52 -0
  16. truefoundry/deploy/cli/commands/build_command.py +45 -0
  17. truefoundry/deploy/cli/commands/build_logs_command.py +89 -0
  18. truefoundry/deploy/cli/commands/create_command.py +75 -0
  19. truefoundry/deploy/cli/commands/delete_command.py +77 -0
  20. truefoundry/deploy/cli/commands/deploy_command.py +102 -0
  21. truefoundry/deploy/cli/commands/get_command.py +216 -0
  22. truefoundry/deploy/cli/commands/list_command.py +171 -0
  23. truefoundry/deploy/cli/commands/login_command.py +33 -0
  24. truefoundry/deploy/cli/commands/logout_command.py +20 -0
  25. truefoundry/deploy/cli/commands/logs_command.py +134 -0
  26. truefoundry/deploy/cli/commands/patch_application_command.py +81 -0
  27. truefoundry/deploy/cli/commands/patch_command.py +70 -0
  28. truefoundry/deploy/cli/commands/redeploy_command.py +41 -0
  29. truefoundry/deploy/cli/commands/terminate_comand.py +44 -0
  30. truefoundry/deploy/cli/commands/trigger_command.py +145 -0
  31. truefoundry/deploy/cli/config.py +10 -0
  32. truefoundry/deploy/cli/console.py +5 -0
  33. truefoundry/deploy/cli/const.py +12 -0
  34. truefoundry/deploy/cli/display_util.py +118 -0
  35. truefoundry/deploy/cli/util.py +129 -0
  36. truefoundry/deploy/core/__init__.py +7 -0
  37. truefoundry/deploy/core/login.py +9 -0
  38. truefoundry/deploy/core/logout.py +5 -0
  39. truefoundry/deploy/function_service/__init__.py +3 -0
  40. truefoundry/deploy/function_service/__main__.py +27 -0
  41. truefoundry/deploy/function_service/app.py +92 -0
  42. truefoundry/deploy/function_service/build.py +45 -0
  43. truefoundry/deploy/function_service/remote/__init__.py +6 -0
  44. truefoundry/deploy/function_service/remote/context.py +3 -0
  45. truefoundry/deploy/function_service/remote/method.py +67 -0
  46. truefoundry/deploy/function_service/remote/remote.py +144 -0
  47. truefoundry/deploy/function_service/route.py +137 -0
  48. truefoundry/deploy/function_service/service.py +113 -0
  49. truefoundry/deploy/function_service/utils.py +53 -0
  50. truefoundry/deploy/io/__init__.py +0 -0
  51. truefoundry/deploy/io/output_callback.py +23 -0
  52. truefoundry/deploy/io/rich_output_callback.py +27 -0
  53. truefoundry/deploy/json_util.py +7 -0
  54. truefoundry/deploy/lib/__init__.py +0 -0
  55. truefoundry/deploy/lib/auth/auth_service_client.py +181 -0
  56. truefoundry/deploy/lib/auth/credential_file_manager.py +115 -0
  57. truefoundry/deploy/lib/auth/credential_provider.py +131 -0
  58. truefoundry/deploy/lib/auth/servicefoundry_session.py +59 -0
  59. truefoundry/deploy/lib/clients/__init__.py +0 -0
  60. truefoundry/deploy/lib/clients/servicefoundry_client.py +746 -0
  61. truefoundry/deploy/lib/clients/shell_client.py +13 -0
  62. truefoundry/deploy/lib/clients/utils.py +41 -0
  63. truefoundry/deploy/lib/const.py +43 -0
  64. truefoundry/deploy/lib/dao/__init__.py +0 -0
  65. truefoundry/deploy/lib/dao/application.py +263 -0
  66. truefoundry/deploy/lib/dao/apply.py +80 -0
  67. truefoundry/deploy/lib/dao/version.py +33 -0
  68. truefoundry/deploy/lib/dao/workspace.py +71 -0
  69. truefoundry/deploy/lib/exceptions.py +26 -0
  70. truefoundry/deploy/lib/logs_utils.py +43 -0
  71. truefoundry/deploy/lib/messages.py +12 -0
  72. truefoundry/deploy/lib/model/__init__.py +0 -0
  73. truefoundry/deploy/lib/model/entity.py +400 -0
  74. truefoundry/deploy/lib/session.py +158 -0
  75. truefoundry/deploy/lib/util.py +90 -0
  76. truefoundry/deploy/lib/win32.py +129 -0
  77. truefoundry/deploy/v2/__init__.py +0 -0
  78. truefoundry/deploy/v2/lib/__init__.py +3 -0
  79. truefoundry/deploy/v2/lib/deploy.py +283 -0
  80. truefoundry/deploy/v2/lib/deploy_workflow.py +295 -0
  81. truefoundry/deploy/v2/lib/deployable_patched_models.py +86 -0
  82. truefoundry/deploy/v2/lib/models.py +53 -0
  83. truefoundry/deploy/v2/lib/patched_models.py +479 -0
  84. truefoundry/deploy/v2/lib/source.py +267 -0
  85. truefoundry/langchain/__init__.py +12 -1
  86. truefoundry/langchain/deprecated.py +302 -0
  87. truefoundry/langchain/truefoundry_chat.py +130 -0
  88. truefoundry/langchain/truefoundry_embeddings.py +171 -0
  89. truefoundry/langchain/truefoundry_llm.py +106 -0
  90. truefoundry/langchain/utils.py +85 -0
  91. truefoundry/logger.py +17 -0
  92. truefoundry/pydantic_v1.py +5 -0
  93. truefoundry/python_deploy_codegen.py +132 -0
  94. truefoundry/version.py +6 -0
  95. truefoundry/workflow/__init__.py +19 -0
  96. truefoundry/workflow/container_task.py +12 -0
  97. truefoundry/workflow/example/deploy.sh +1 -0
  98. truefoundry/workflow/example/hello_world_package/workflow.py +20 -0
  99. truefoundry/workflow/example/package/test_workflow.py +152 -0
  100. truefoundry/workflow/example/truefoundry.yaml +9 -0
  101. truefoundry/workflow/example/workflow.yaml +116 -0
  102. truefoundry/workflow/map_task.py +45 -0
  103. truefoundry/workflow/python_task.py +32 -0
  104. truefoundry/workflow/task.py +50 -0
  105. truefoundry/workflow/workflow.py +114 -0
  106. {truefoundry-0.2.10.dist-info → truefoundry-0.3.0.dist-info}/METADATA +27 -7
  107. truefoundry-0.3.0.dist-info/RECORD +136 -0
  108. truefoundry/deploy/cli/deploy.py +0 -165
  109. truefoundry/deploy/cli/version.py +0 -6
  110. truefoundry-0.2.10.dist-info/RECORD +0 -38
  111. {truefoundry-0.2.10.dist-info → truefoundry-0.3.0.dist-info}/WHEEL +0 -0
  112. {truefoundry-0.2.10.dist-info → truefoundry-0.3.0.dist-info}/entry_points.txt +0 -0
truefoundry/langchain/truefoundry_chat.py ADDED
@@ -0,0 +1,130 @@
+ from typing import Any, Dict, List, Optional
+
+ from langchain.chat_models.base import SimpleChatModel
+ from langchain.pydantic_v1 import Extra, Field, root_validator
+ from langchain.schema.messages import (
+     AIMessage,
+     BaseMessage,
+     ChatMessage,
+     HumanMessage,
+     SystemMessage,
+ )
+
+ from truefoundry.langchain.utils import (
+     requests_retry_session,
+     validate_tfy_environment,
+ )
+ from truefoundry.logger import logger
+
+
+ class TrueFoundryChat(SimpleChatModel):
+     """`TrueFoundry LLM Gateway` chat models API.
+
+     To use, you must have the environment variable ``TFY_API_KEY`` set with your API key and ``TFY_HOST`` set with your host or pass it as a named parameter to the constructor.
+     """
+
+     model: str = Field(description="The model to use for chat.")
+     """The model to use for chat."""
+     tfy_llm_gateway_url: Optional[str] = Field(default=None)
+     """TrueFoundry LLM Gateway endpoint URL. Automatically inferred from env var `TFY_LLM_GATEWAY_URL` if not provided."""
+     tfy_api_key: Optional[str] = Field(default=None)
+     """TrueFoundry API Key. Automatically inferred from env var `TFY_API_KEY` if not provided."""
+     model_parameters: Optional[dict] = Field(default_factory=dict)
+     """Model parameters"""
+     request_timeout: int = Field(default=30)
+     """The timeout for the request in seconds."""
+     max_retries: int = Field(default=5)
+     """The number of retries for HTTP requests."""
+     retry_backoff_factor: float = Field(default=0.3)
+     """The backoff factor for exponential backoff during retries."""
+     system_prompt: str = Field(default="You are a AI assistant")
+
+     class Config:
+         """Configuration for this pydantic object."""
+
+         extra = Extra.forbid
+         allow_population_by_field_name = True
+
+     @root_validator()
+     def validate_environment(cls, values: Dict) -> Dict:
+         values = validate_tfy_environment(values)
+         if not values["tfy_api_key"]:
+             raise ValueError(
+                 "Did not find `tfy_api_key`, please add an environment variable"
+                 " `TFY_API_KEY` which contains it, or pass"
+                 " `tfy_api_key` as a named parameter."
+             )
+         if not values["tfy_llm_gateway_url"]:
+             raise ValueError(
+                 "Did not find `tfy_llm_gateway_url`, please add an environment variable"
+                 " `TFY_LLM_GATEWAY_URL` which contains it, or pass"
+                 " `tfy_llm_gateway_url` as a named parameter."
+             )
+         return values
+
+     @property
+     def _llm_type(self) -> str:
+         """Return type of chat model."""
+         return "truefoundry-chat"
+
+     def _call(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> str:
+         if len(messages) == 0:
+             raise ValueError("No messages provided to chat.")
+
+         if not isinstance(messages[0], SystemMessage):
+             messages.insert(0, SystemMessage(content=self.system_prompt))
+
+         message_dicts = [
+             TrueFoundryChat._convert_message_to_dict(message) for message in messages
+         ]
+
+         payload = {**self.model_parameters} if self.model_parameters else {}
+
+         if stop:
+             payload["stop_sequences"] = stop
+
+         payload["messages"] = message_dicts
+         payload["model"] = self.model
+
+         session = requests_retry_session(
+             retries=self.max_retries, backoff_factor=self.retry_backoff_factor
+         )
+
+         url = f"{self.tfy_llm_gateway_url}/openai/chat/completions"
+         logger.debug(f"Chat using - model: {self.model} at endpoint: {url}")
+         response = session.post(
+             url=url,
+             json=payload,
+             headers={
+                 "Authorization": f"Bearer {self.tfy_api_key}",
+             },
+             timeout=self.request_timeout,
+         )
+         response.raise_for_status()
+         output = response.json()
+         return output["choices"][0]["message"]["content"]
+
+     @staticmethod
+     def _convert_message_to_dict(message: BaseMessage) -> dict:
+         if isinstance(message, ChatMessage):
+             message_dict = {"role": message.role, "content": message.content}
+         elif isinstance(message, HumanMessage):
+             message_dict = {"role": "user", "content": message.content}
+         elif isinstance(message, AIMessage):
+             message_dict = {"role": "assistant", "content": message.content}
+         elif isinstance(message, SystemMessage):
+             message_dict = {"role": "system", "content": message.content}
+         else:
+             raise ValueError(f"Got unknown message type: {message}")
+         if message.additional_kwargs:
+             logger.debug(
+                 "Additional message arguments are unsupported by TrueFoundry LLM Gateway "
+                 " and will be ignored: %s",
+                 message.additional_kwargs,
+             )
+         return message_dict
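
For reviewers who want to exercise the new chat model, here is a minimal usage sketch. It assumes `TFY_API_KEY` and `TFY_LLM_GATEWAY_URL` are exported (or passed as constructor arguments), imports the class from the module path added above, and uses a purely illustrative model name; substitute a model that is actually enabled on your LLM Gateway.

from langchain.schema.messages import HumanMessage

from truefoundry.langchain.truefoundry_chat import TrueFoundryChat

# "my-provider/my-chat-model" is a placeholder model name
chat = TrueFoundryChat(
    model="my-provider/my-chat-model",
    model_parameters={"temperature": 0.2},
)
# _call prepends the default system_prompt because the first message is not a SystemMessage
reply = chat([HumanMessage(content="What does an LLM gateway do?")])
print(reply.content)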
truefoundry/langchain/truefoundry_embeddings.py ADDED
@@ -0,0 +1,171 @@
+ import concurrent.futures
+ import math
+ from typing import Dict, List, Optional
+
+ import tqdm
+ from langchain.embeddings.base import Embeddings
+ from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
+
+ from truefoundry.langchain.utils import (
+     requests_retry_session,
+     validate_tfy_environment,
+ )
+ from truefoundry.logger import logger
+
+ EMBEDDER_BATCH_SIZE = 32
+ PARALLEL_WORKERS = 4
+
+
+ class TrueFoundryEmbeddings(BaseModel, Embeddings):
+     """`TrueFoundry LLM Gateway` embedding models API.
+
+     To use, you must have the environment variable ``TFY_API_KEY`` set with your API key and ``TFY_HOST`` set with your host or pass it
+     as a named parameter to the constructor.
+     """
+
+     model: str = Field(description="The model to use for embedding.")
+     """The model to use for embedding."""
+     tfy_llm_gateway_url: Optional[str] = Field(default=None)
+     """TrueFoundry LLM Gateway endpoint URL. Automatically inferred from env var `TFY_LLM_GATEWAY_URL` if not provided."""
+     tfy_api_key: Optional[str] = Field(default=None)
+     """TrueFoundry API Key. Automatically inferred from env var `TFY_API_KEY` if not provided."""
+     model_parameters: Optional[dict] = Field(default_factory=dict)
+     """Model parameters"""
+     request_timeout: int = Field(default=30)
+     """The timeout for the request in seconds."""
+     max_retries: int = Field(default=5)
+     """The number of retries for HTTP requests."""
+     retry_backoff_factor: float = Field(default=0.3)
+     """The backoff factor for exponential backoff during retries."""
+     batch_size: int = Field(default=EMBEDDER_BATCH_SIZE)
+     """The batch size to use for embedding."""
+     parallel_workers: int = Field(default=PARALLEL_WORKERS)
+     """The number of parallel workers to use for embedding."""
+
+     __private_attributes__ = {"_executor"}
+
+     class Config:
+         """Configuration for this pydantic object."""
+
+         extra = Extra.forbid
+         allow_population_by_field_name = True
+
+     @root_validator()
+     def validate_environment(cls, values: Dict) -> Dict:
+         values = validate_tfy_environment(values)
+         if not values["tfy_api_key"]:
+             raise ValueError(
+                 "Did not find `tfy_api_key`, please add an environment variable"
+                 " `TFY_API_KEY` which contains it, or pass"
+                 " `tfy_api_key` as a named parameter."
+             )
+         if not values["tfy_llm_gateway_url"]:
+             raise ValueError(
+                 "Did not find `tfy_llm_gateway_url`, please add an environment variable"
+                 " `TFY_LLM_GATEWAY_URL` which contains it, or pass"
+                 " `tfy_llm_gateway_url` as a named parameter."
+             )
+         return values
+
+     def _init_private_attributes(self):
+         self._executor = concurrent.futures.ThreadPoolExecutor(
+             max_workers=self.parallel_workers
+         )
+
+     @property
+     def _llm_type(self) -> str:
+         """Return type of embedding model."""
+         return "truefoundry-embeddings"
+
+     def __del__(self):
+         """
+         Destructor method to clean up the executor when the object is deleted.
+
+         Returns:
+             None
+         """
+         self._executor.shutdown()
+
+     def _remote_embed(self, texts, query_mode=False):
+         """
+         Perform remote embedding using a HTTP POST request to a designated endpoint.
+
+         Args:
+             texts (List[str]): A list of text strings to be embedded.
+             query_mode (bool): A flag to indicate if running in query mode or in embed mode (indexing).
+         Returns:
+             List[List[float]]: A list of embedded representations of the input texts.
+         """
+         session = requests_retry_session(
+             retries=self.max_retries, backoff_factor=self.retry_backoff_factor
+         )
+
+         payload = {
+             "input": texts,
+             "model": self.model,
+         }
+
+         url = f"{self.tfy_llm_gateway_url}/openai/embeddings"
+         logger.debug(
+             f"Embedding using - model: {self.model} at endpoint: {url}, for {len(texts)} texts"
+         )
+         response = session.post(
+             url=url,
+             json=payload,
+             headers={
+                 "Authorization": f"Bearer {self.tfy_api_key}",
+             },
+             timeout=self.request_timeout,
+         )
+         response.raise_for_status()
+         output = response.json()
+         return [data["embedding"] for data in output["data"]]
+
+     def _embed(self, texts: List[str], query_mode: bool):
+         """
+         Perform embedding on a list of texts using remote embedding in chunks.
+
+         Args:
+             texts (List[str]): A list of text strings to be embedded.
+             query_mode (bool): A flag to indicate if running in query mode or in embed mode (indexing).
+         Returns:
+             List[List[float]]: A list of embedded representations of the input texts.
+         """
+         embeddings = []
+
+         def _feeder():
+             for i in range(0, len(texts), self.batch_size):
+                 chunk = texts[i : i + self.batch_size]
+                 yield chunk
+
+         embeddings = list(
+             tqdm.tqdm(
+                 self._executor.map(self._remote_embed, _feeder()),
+                 total=int(math.ceil(len(texts) / self.batch_size)),
+             )
+         )
+         return [item for batch in embeddings for item in batch]
+
+     def embed_documents(self, texts: List[str]) -> List[List[float]]:
+         """
+         Embed a list of text documents.
+
+         Args:
+             texts (List[str]): A list of text documents to be embedded.
+
+         Returns:
+             List[List[float]]: A list of embedded representations of the input documents.
+         """
+         return self._embed(texts, query_mode=False)
+
+     def embed_query(self, text: str) -> List[float]:
+         """
+         Embed a query text.
+
+         Args:
+             text (str): The query text to be embedded.
+
+         Returns:
+             List[float]: The embedded representation of the input query text.
+         """
+         return self._embed([text], query_mode=True)[0]
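
A similar sketch for the new embeddings class, under the same environment assumptions and with a placeholder model name. Texts are split into `batch_size` chunks and embedded concurrently on `parallel_workers` threads via the gateway's `/openai/embeddings` route.

from truefoundry.langchain.truefoundry_embeddings import TrueFoundryEmbeddings

# "my-provider/my-embedding-model" is a placeholder model name
embedder = TrueFoundryEmbeddings(model="my-provider/my-embedding-model", batch_size=16)
doc_vectors = embedder.embed_documents(["first document", "second document"])
query_vector = embedder.embed_query("a search query")
print(len(doc_vectors), len(query_vector))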
truefoundry/langchain/truefoundry_llm.py ADDED
@@ -0,0 +1,106 @@
+ from typing import Any, Dict, List, Optional
+
+ from langchain.llms.base import LLM
+ from langchain.pydantic_v1 import Extra, Field, root_validator
+
+ from truefoundry.langchain.utils import (
+     requests_retry_session,
+     validate_tfy_environment,
+ )
+ from truefoundry.logger import logger
+
+
+ class TrueFoundryLLM(LLM):
+     """`TrueFoundry LLM Gateway` completion models API.
+
+     To use, you must have the environment variable ``TFY_API_KEY`` set with your API key and ``TFY_HOST`` set with your host or pass it as a named parameter to the constructor.
+     """
+
+     model: str = Field(description="The model to use for completion.")
+     """The model to use for completion."""
+     tfy_llm_gateway_url: Optional[str] = Field(default=None)
+     """TrueFoundry LLM Gateway endpoint URL. Automatically inferred from env var `TFY_LLM_GATEWAY_URL` if not provided."""
+     tfy_api_key: Optional[str] = Field(default=None)
+     """TrueFoundry API Key. Automatically inferred from env var `TFY_API_KEY` if not provided."""
+     model_parameters: Optional[dict] = Field(default_factory=dict)
+     """Model parameters"""
+     request_timeout: int = Field(default=30)
+     """The timeout for the request in seconds."""
+     max_retries: int = Field(default=5)
+     """The number of retries for HTTP requests."""
+     retry_backoff_factor: float = Field(default=0.3)
+     """The backoff factor for exponential backoff during retries."""
+
+     class Config:
+         """Configuration for this pydantic object."""
+
+         extra = Extra.forbid
+         allow_population_by_field_name = True
+
+     @root_validator()
+     def validate_environment(cls, values: Dict) -> Dict:
+         values = validate_tfy_environment(values)
+         if not values["tfy_api_key"]:
+             raise ValueError(
+                 "Did not find `tfy_api_key`, please add an environment variable"
+                 " `TFY_API_KEY` which contains it, or pass"
+                 " `tfy_api_key` as a named parameter."
+             )
+         if not values["tfy_llm_gateway_url"]:
+             raise ValueError(
+                 "Did not find `tfy_llm_gateway_url`, please add an environment variable"
+                 " `TFY_LLM_GATEWAY_URL` which contains it, or pass"
+                 " `tfy_llm_gateway_url` as a named parameter."
+             )
+         return values
+
+     @property
+     def _llm_type(self) -> str:
+         """Return type of llm model."""
+         return "truefoundry-llm"
+
+     def _call(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> str:
+         """Call out to the deployed model
+
+         Args:
+             prompt: The prompt to pass into the model.
+             stop: Optional list of stop words to use when generating.
+
+         Returns:
+             The string generated by the model.
+
+         Example:
+             .. code-block:: python
+
+                 response = model("I have a joke for you...")
+         """
+
+         payload = {**self.model_parameters} if self.model_parameters else {}
+         if stop:
+             payload["stop_sequences"] = stop
+
+         payload["prompt"] = prompt
+         payload["model"] = self.model
+
+         session = requests_retry_session(
+             retries=self.max_retries, backoff_factor=self.retry_backoff_factor
+         )
+
+         url = f"{self.tfy_llm_gateway_url}/openai/completions"
+         logger.debug(f"Completion using - model: {self.model} at endpoint: {url}")
+         response = session.post(
+             url=url,
+             json=payload,
+             headers={
+                 "Authorization": f"Bearer {self.tfy_api_key}",
+             },
+             timeout=self.request_timeout,
+         )
+         response.raise_for_status()
+         output = response.json()
+         return output["choices"][0]["text"]
truefoundry/langchain/utils.py ADDED
@@ -0,0 +1,85 @@
+ import os
+ from typing import Dict, List, Optional
+ from urllib.parse import urljoin
+
+ import requests
+ from langchain.pydantic_v1 import BaseModel
+ from requests.adapters import HTTPAdapter
+ from urllib3.util.retry import Retry
+
+ from truefoundry.deploy.lib.auth.servicefoundry_session import ServiceFoundrySession
+
+
+ class ModelParameters(BaseModel):
+     temperature: Optional[float]
+     maximum_length: Optional[int]
+     top_p: Optional[float]
+     top_k: Optional[int]
+     repetition_penalty: Optional[float]
+     frequency_penalty: Optional[float]
+     presence_penalty: Optional[float]
+     stop_sequences: Optional[List[str]]
+
+
+ def validate_tfy_environment(values: Dict):
+     gateway_url = values["tfy_llm_gateway_url"] or os.getenv("TFY_LLM_GATEWAY_URL")
+     api_key = values["tfy_api_key"] or os.getenv("TFY_API_KEY")
+
+     if gateway_url and api_key:
+         values["tfy_llm_gateway_url"] = gateway_url
+         values["tfy_api_key"] = api_key
+         return values
+
+     sfy_session = ServiceFoundrySession()
+     if not sfy_session:
+         raise Exception(
+             "Unauthenticated: Please login using truefoundry login --host <https://example-domain.com>"
+         )
+
+     if not gateway_url:
+         gateway_url = urljoin(sfy_session.base_url, "/api/llm")
+
+     if not api_key:
+         api_key = sfy_session.access_token
+
+     values["tfy_llm_gateway_url"] = gateway_url
+     values["tfy_api_key"] = api_key
+     return values
+
+
+ def requests_retry_session(
+     retries=5,
+     backoff_factor=0.3,
+     status_forcelist=(500, 502, 503, 504),
+     method_whitelist=frozenset({"GET", "POST"}),
+     session=None,
+ ):
+     """
+     Returns a `requests` session with retry capabilities for certain HTTP status codes.
+
+     Args:
+         retries (int): The number of retries for HTTP requests.
+         backoff_factor (float): The backoff factor for exponential backoff during retries.
+         status_forcelist (tuple): A tuple of HTTP status codes that should trigger a retry.
+         method_whitelist (frozenset): The set of HTTP methods that should be retried.
+         session (requests.Session, optional): An optional existing requests session to use.
+
+     Returns:
+         requests.Session: A session with retry capabilities.
+     """
+     # Implementation taken from https://www.peterbe.com/plog/best-practice-with-retries-with-requests
+     session = session or requests.Session()
+     retry = Retry(
+         total=retries,
+         read=retries,
+         connect=retries,
+         status=retries,
+         backoff_factor=backoff_factor,
+         allowed_methods=method_whitelist,
+         status_forcelist=status_forcelist,
+         respect_retry_after_header=True,
+     )
+     adapter = HTTPAdapter(max_retries=retry)
+     session.mount("http://", adapter)
+     session.mount("https://", adapter)
+     return session
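
`requests_retry_session` is also usable on its own. A small sketch, assuming the installed `urllib3` accepts the `allowed_methods` keyword (1.26 or newer); the URL is illustrative.

from truefoundry.langchain.utils import requests_retry_session

# GET/POST requests failing with 500/502/503/504 are retried with exponential backoff
session = requests_retry_session(retries=3, backoff_factor=0.5)
response = session.get("https://example.com", timeout=10)
print(response.status_code)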
truefoundry/logger.py ADDED
@@ -0,0 +1,17 @@
+ # https://docs.python.org/3/howto/logging.html#library-config
+ import logging
+
+ from rich.logging import RichHandler
+
+ logger = logging.getLogger("truefoundry")
+
+
+ def add_cli_handler(level: int = logging.INFO, show_path=False):
+     # TODO (chiragjn): Probably don't use rich handler, it adds hard breaks
+     # See: https://github.com/Textualize/rich/discussions/344
+     # Maybe try: https://github.com/pycontribs/enrich/blob/main/src/enrich/logging.py
+     # or simpler override of logging.Handler with console.print(rich.Text("..."))
+     handler = RichHandler(level=level, show_path=show_path)
+     handler.setLevel(level)
+     logger.addHandler(handler)
+     logger.setLevel(logging.DEBUG)
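
Per the linked logging HOWTO, the library logger ships without handlers; CLI entry points opt into rich output explicitly. A minimal sketch:

import logging

from truefoundry.logger import add_cli_handler, logger

add_cli_handler(level=logging.DEBUG, show_path=True)
logger.debug("rich-formatted output from the truefoundry logger")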
truefoundry/pydantic_v1.py ADDED
@@ -0,0 +1,5 @@
+ try:
+     from pydantic.v1 import *  # noqa: F403
+     from pydantic.v1 import utils  # noqa: F401
+ except ImportError:
+     from pydantic import *  # noqa: F403
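
This shim resolves to `pydantic.v1` when pydantic 2.x is installed and falls back to plain `pydantic` on 1.x, so internal code can target the v1 API either way. A small sketch (the `Example` model is hypothetical):

from truefoundry.pydantic_v1 import BaseModel, Field


class Example(BaseModel):
    name: str = Field(default="demo")


print(Example().name)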
truefoundry/python_deploy_codegen.py ADDED
@@ -0,0 +1,132 @@
+ import ast
+ import io
+ import re
+ from typing import List
+
+ from rich.console import Console
+ from rich.pretty import pprint
+
+ from truefoundry.deploy import Application
+
+
+ def generate_code(
+     symbols_to_import: List[str],
+     application_type: str,
+     spec_repr: str,
+     workspace_fqn: str,
+ ):
+     symbols = ",".join(symbols_to_import)
+     code = f"""\
+ import logging
+ from truefoundry.deploy import (
+ {symbols},
+ )
+ logging.basicConfig(level=logging.INFO)
+
+ {application_type} = {spec_repr}
+
+ {application_type}.deploy(workspace_fqn="{workspace_fqn}")\
+ """
+     return code
+
+
+ def extract_class_names(code):
+     tree = ast.parse(code)
+
+     # Function to extract keywords from the AST
+     def extract_class_names_from_ast_tree(node):
+         keywords = set()
+         for child_node in ast.iter_child_nodes(node):
+             if isinstance(child_node, ast.Call):
+                 keywords.add(child_node.func.id)
+             keywords.update(extract_class_names_from_ast_tree(child_node))
+         return keywords
+
+     # Get keywords from the main body of the code
+     main_keywords = extract_class_names_from_ast_tree(tree)
+     return list(main_keywords)
+
+
+ def replace_enums_with_values(raw_str):
+     # required to replace enums of format <AppProtocol.HTTP: 'http'> with 'http'
+     pattern = r'<([a-zA-Z0-9_]+).[a-zA-Z0-9_]+: [\'"](.+)[\'"]>'
+     replacement = r"'\2'"
+
+     result = re.sub(pattern, replacement, raw_str)
+     return result
+
+
+ def remove_none_type_fields(code):
+     lines = code.split("\n")
+     new_lines = [
+         line
+         for line in lines
+         if not (line.endswith("=None") or line.endswith("=None,"))
+     ]
+     formatted_code = "\n".join(new_lines)
+     return formatted_code
+
+
+ def remove_type_field(code):
+     lines = code.split("\n")
+     new_lines = [re.sub(r'^[ \t]*type=[\'"][^"]*[\'"],', "", line) for line in lines]
+     return "\n".join(new_lines)
+
+
+ def add_deploy_line(code, workspace_fqn, application_type):
+     deploy_line = f"{application_type}.deploy('workspace_fqn={workspace_fqn}')"
+     return code + "\n" + deploy_line
+
+
+ def get_python_repr(obj):
+     stream = io.StringIO()
+     console = Console(file=stream, no_color=True, highlighter=None)
+     pprint(obj, expand_all=True, console=console, indent_guides=False)
+     return stream.getvalue()
+
+
+ COMMENT_FOR_LOCAL_SOURCE = """# Set build_source=LocalSource(local_build=False), in order to deploy code from your local.
+ # With local_build=False flag, docker image will be built on cloud instead of local
+ # Else it will try to use docker installed on your local machine to build the image"""
+
+
+ def add_local_source_comment(code):
+     lines = code.split("\n")
+     new_lines = []
+     for line in lines:
+         if line.lstrip(" ").startswith("build_source=GitSource"):
+             new_lines.append(COMMENT_FOR_LOCAL_SOURCE)
+         new_lines.append(line)
+     return "\n".join(new_lines)
+
+
+ def convert_deployment_config_to_python(workspace_fqn: str, deployment_config: dict):
+     """
+     Convert a deployment config to a python file that can be used to deploy to a workspace
+     """
+     application = Application.parse_obj(deployment_config)
+     application_type = application.__root__.type
+
+     spec_repr = get_python_repr(application.__root__)
+     spec_repr = replace_enums_with_values(spec_repr)
+     spec_repr = remove_none_type_fields(spec_repr)
+     spec_repr = remove_type_field(spec_repr)
+
+     # extract class names to import
+     symbols_to_import = extract_class_names(spec_repr)
+
+     # check if GitSource exists in array of symbols to import
+     if "GitSource" in symbols_to_import:
+         symbols_to_import.append("LocalSource")
+
+     generated_code = generate_code(
+         symbols_to_import=symbols_to_import,
+         application_type=application_type,
+         spec_repr=spec_repr,
+         workspace_fqn=workspace_fqn,
+     )
+
+     if "GitSource" in symbols_to_import:
+         generated_code = add_local_source_comment(generated_code)
+
+     return generated_code
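
A hedged sketch of driving the new codegen helper: the YAML file name, the workspace FQN, and the PyYAML dependency are illustrative assumptions, and the loaded spec must validate against `truefoundry.deploy.Application`.

import yaml  # assumes PyYAML is available

from truefoundry.python_deploy_codegen import convert_deployment_config_to_python

# truefoundry.yaml is assumed to contain an application spec matching the Application schema
with open("truefoundry.yaml") as f:
    deployment_config = yaml.safe_load(f)

code = convert_deployment_config_to_python(
    workspace_fqn="my-cluster:my-workspace",  # illustrative workspace FQN
    deployment_config=deployment_config,
)
print(code)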
truefoundry/version.py ADDED
@@ -0,0 +1,6 @@
+ try:
+     import importlib.metadata
+
+     __version__ = importlib.metadata.version("truefoundry")
+ except Exception:
+     __version__ = "NA"
truefoundry/workflow/__init__.py ADDED
@@ -0,0 +1,19 @@
+ try:
+     from flytekit import task
+ except ImportError:
+     print("To use workflows, please run 'pip install truefoundry[workflow]'.")
+
+ from flytekit import conditional
+ from flytekit.types.directory import FlyteDirectory
+
+ from truefoundry.deploy.v2.lib.patched_models import (
+     ContainerTaskConfig,
+     PythonTaskConfig,
+     TaskDockerFileBuild,
+     TaskPythonBuild,
+ )
+ from truefoundry.workflow.container_task import ContainerTask
+ from truefoundry.workflow.map_task import map_task
+ from truefoundry.workflow.python_task import PythonFunctionTask
+ from truefoundry.workflow.task import task
+ from truefoundry.workflow.workflow import ExecutionConfig, workflow
truefoundry/workflow/container_task.py ADDED
@@ -0,0 +1,12 @@
+ from flytekit import ContainerTask
+
+ from truefoundry.workflow import ContainerTaskConfig
+
+
+ class ContainerTask(ContainerTask):
+     def __init__(self, name: str, task_config: ContainerTaskConfig):
+         super().__init__(name=name, image="", command=[])
+         self.tfy_task_config = task_config
+
+     def get_custom(self, settings) -> dict:
+         return {"truefoundry": self.tfy_task_config.dict()}