truefoundry 0.1.1__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of truefoundry might be problematic. Click here for more details.

Files changed (39) hide show
  1. {truefoundry-0.1.1 → truefoundry-0.2.0}/PKG-INFO +13 -5
  2. truefoundry-0.2.0/pyproject.toml +50 -0
  3. truefoundry-0.2.0/truefoundry/autodeploy/agents/__init__.py +0 -0
  4. truefoundry-0.2.0/truefoundry/autodeploy/agents/base.py +181 -0
  5. truefoundry-0.2.0/truefoundry/autodeploy/agents/developer.py +113 -0
  6. truefoundry-0.2.0/truefoundry/autodeploy/agents/project_identifier.py +124 -0
  7. truefoundry-0.2.0/truefoundry/autodeploy/agents/tester.py +75 -0
  8. truefoundry-0.2.0/truefoundry/autodeploy/cli.py +348 -0
  9. truefoundry-0.2.0/truefoundry/autodeploy/constants.py +22 -0
  10. truefoundry-0.2.0/truefoundry/autodeploy/exception.py +2 -0
  11. truefoundry-0.2.0/truefoundry/autodeploy/logger.py +13 -0
  12. truefoundry-0.2.0/truefoundry/autodeploy/tools/__init__.py +26 -0
  13. truefoundry-0.2.0/truefoundry/autodeploy/tools/ask.py +33 -0
  14. truefoundry-0.2.0/truefoundry/autodeploy/tools/base.py +31 -0
  15. truefoundry-0.2.0/truefoundry/autodeploy/tools/commit.py +139 -0
  16. truefoundry-0.2.0/truefoundry/autodeploy/tools/docker_build.py +109 -0
  17. truefoundry-0.2.0/truefoundry/autodeploy/tools/docker_run.py +150 -0
  18. truefoundry-0.2.0/truefoundry/autodeploy/tools/file_type_counts.py +79 -0
  19. truefoundry-0.2.0/truefoundry/autodeploy/tools/list_files.py +82 -0
  20. truefoundry-0.2.0/truefoundry/autodeploy/tools/read_file.py +66 -0
  21. truefoundry-0.2.0/truefoundry/autodeploy/tools/send_request.py +54 -0
  22. truefoundry-0.2.0/truefoundry/autodeploy/tools/write_file.py +101 -0
  23. truefoundry-0.2.0/truefoundry/autodeploy/utils/diff.py +157 -0
  24. truefoundry-0.2.0/truefoundry/autodeploy/utils/pydantic_compat.py +19 -0
  25. truefoundry-0.2.0/truefoundry/cli/__init__.py +0 -0
  26. {truefoundry-0.1.1 → truefoundry-0.2.0}/truefoundry/cli/__main__.py +11 -5
  27. truefoundry-0.2.0/truefoundry/deploy/__init__.py +1 -0
  28. truefoundry-0.2.0/truefoundry/deploy/cli/__init__.py +0 -0
  29. truefoundry-0.2.0/truefoundry/deploy/cli/cli.py +99 -0
  30. truefoundry-0.2.0/truefoundry/deploy/cli/deploy.py +184 -0
  31. truefoundry-0.2.0/truefoundry/langchain/__init__.py +1 -0
  32. truefoundry-0.2.0/truefoundry/ml/__init__.py +6 -0
  33. truefoundry-0.1.1/pyproject.toml +0 -25
  34. truefoundry-0.1.1/truefoundry/deploy/__init__.py +0 -1
  35. truefoundry-0.1.1/truefoundry/langchain/__init__.py +0 -1
  36. truefoundry-0.1.1/truefoundry/ml/__init__.py +0 -4
  37. {truefoundry-0.1.1 → truefoundry-0.2.0}/README.md +0 -0
  38. {truefoundry-0.1.1 → truefoundry-0.2.0}/truefoundry/__init__.py +0 -0
  39. {truefoundry-0.1.1/truefoundry/cli → truefoundry-0.2.0/truefoundry/autodeploy}/__init__.py +0 -0
@@ -1,19 +1,27 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: truefoundry
3
- Version: 0.1.1
3
+ Version: 0.2.0
4
4
  Summary: Truefoundry CLI
5
5
  Author: Abhishek Choudhary
6
6
  Author-email: abhichoudhary06@gmail.com
7
- Requires-Python: >=3.8,<3.13
7
+ Requires-Python: >=3.8.1,<3.13
8
8
  Classifier: Programming Language :: Python :: 3
9
- Classifier: Programming Language :: Python :: 3.8
10
9
  Classifier: Programming Language :: Python :: 3.9
11
10
  Classifier: Programming Language :: Python :: 3.10
12
11
  Classifier: Programming Language :: Python :: 3.11
13
12
  Classifier: Programming Language :: Python :: 3.12
14
13
  Provides-Extra: ml
15
- Requires-Dist: mlfoundry (==0.10.8) ; extra == "ml"
16
- Requires-Dist: servicefoundry (==0.10.5)
14
+ Requires-Dist: docker (>=7.0.0,<8.0.0)
15
+ Requires-Dist: gitignorefile (>=1.1.2,<2.0.0)
16
+ Requires-Dist: gitpython (>=3.1.43,<4.0.0)
17
+ Requires-Dist: inquirer (>=3.2.4,<4.0.0)
18
+ Requires-Dist: mlfoundry (==0.10.9) ; extra == "ml"
19
+ Requires-Dist: openai (>=1.16.2,<2.0.0)
20
+ Requires-Dist: pydantic (>=1.10.0,<3)
21
+ Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
22
+ Requires-Dist: requests (>=2.31.0,<3.0.0)
23
+ Requires-Dist: rich (>=13.7.1,<14.0.0)
24
+ Requires-Dist: servicefoundry (==0.10.10)
17
25
  Description-Content-Type: text/markdown
18
26
 
19
27
  # Truefoundry
@@ -0,0 +1,50 @@
1
+ [tool.poetry]
2
+ name = "truefoundry"
3
+ version = "0.2.0"
4
+ description = "Truefoundry CLI"
5
+ authors = ["Abhishek Choudhary <abhichoudhary06@gmail.com>"]
6
+ readme = "README.md"
7
+
8
+ [tool.poetry.dependencies]
9
+ python = "^3.8.1,<3.13"
10
+ servicefoundry = "0.10.10"
11
+ mlfoundry = { version = "0.10.9", optional = true }
12
+ openai = "^1.16.2"
13
+ docker = "^7.0.0"
14
+ pydantic = ">=1.10.0,<3"
15
+ rich = "^13.7.1"
16
+ requests = "^2.31.0"
17
+ python-dotenv = "^1.0.1"
18
+ gitignorefile = "^1.1.2"
19
+ gitpython = "^3.1.43"
20
+ inquirer = "^3.2.4"
21
+
22
+ [tool.poetry.extras]
23
+ ml = ["mlfoundry"]
24
+
25
+ [tool.poetry.group.dev.dependencies]
26
+ ruff = "^0.3.5"
27
+
28
+ [tool.poetry-dynamic-versioning]
29
+ enable = false
30
+
31
+ [build-system]
32
+ requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"]
33
+ build-backend = "poetry_dynamic_versioning.backend"
34
+
35
+ [tool.poetry.plugins."console_scripts"]
36
+ tfy = "truefoundry.cli.__main__:main"
37
+ truefoundry = "truefoundry.cli.__main__:main"
38
+
39
+ [tool.ruff]
40
+ lint.select = [
41
+ "E", # pycodestyle errors
42
+ "W", # pycodestyle warnings
43
+ "F", # pyflakes
44
+ "C", # flake8-comprehensions
45
+ "B", # flake8-bugbear
46
+ "Q", # flake8-quotes
47
+ "I",
48
+ ]
49
+ lint.ignore = ["E501"]
50
+ exclude = ["venv", "repos"]
@@ -0,0 +1,181 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from typing import ClassVar, Iterable, List, Protocol, Type, runtime_checkable
5
+
6
+ from openai import OpenAI
7
+ from openai.types.chat.chat_completion_message import ChatCompletionMessage
8
+ from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
9
+ from openai.types.chat.chat_completion_system_message_param import (
10
+ ChatCompletionSystemMessageParam,
11
+ )
12
+ from openai.types.chat.chat_completion_tool_message_param import (
13
+ ChatCompletionToolMessageParam,
14
+ )
15
+ from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
16
+ from openai.types.chat.chat_completion_user_message_param import (
17
+ ChatCompletionUserMessageParam,
18
+ )
19
+ from openai.types.shared_params.function_definition import FunctionDefinition
20
+ from pydantic import BaseModel, ValidationError
21
+
22
+ from truefoundry.autodeploy.constants import AUTODEPLOY_MODEL_NAME
23
+ from truefoundry.autodeploy.logger import logger
24
+ from truefoundry.autodeploy.tools import Event, RequestEvent, Tool
25
+ from truefoundry.autodeploy.utils.pydantic_compat import model_dump, model_json_schema
26
+
27
+
28
def llm(
    openai_client: OpenAI,
    messages: List[ChatCompletionMessageParam],
    tools: List[ChatCompletionToolParam],
    model: str,
    max_tokens: int = 4096,
) -> ChatCompletionMessage:
    """Run one non-streaming chat completion and return the first choice's message.

    Sampling parameters (temperature=1 with a very low top_p, slight frequency
    penalty) are fixed to keep tool-calling output near-deterministic.
    """
    completion = openai_client.chat.completions.create(
        model=model,
        messages=messages,
        tools=tools,
        max_tokens=max_tokens,
        n=1,
        stream=False,
        temperature=1,
        top_p=0.01,
        frequency_penalty=0.01,
    )
    first_choice = completion.choices[0]
    return first_choice.message
47
+
48
+
49
def format_tool_response(
    response: BaseModel, tool_call_id: str
) -> ChatCompletionToolMessageParam:
    """Serialize a tool's pydantic result as a ``tool``-role chat message.

    The payload is JSON so the model can read structured output; the
    ``tool_call_id`` ties the message back to the originating tool call.
    """
    payload = json.dumps(model_dump(response), indent=1)
    return ChatCompletionToolMessageParam(
        tool_call_id=tool_call_id,
        role="tool",
        content=payload,
    )
57
+
58
+
59
def format_user_response(response: str) -> ChatCompletionUserMessageParam:
    """Wrap plain text as a ``user``-role chat message."""
    return ChatCompletionUserMessageParam(role="user", content=response)
64
+
65
+
66
def get_tool_descriptions(
    tools: List, response: Type[BaseModel]
) -> List[ChatCompletionToolParam]:
    """Build the OpenAI tool-call schemas advertised to the model.

    One function schema is produced per tool (name = tool class name,
    parameters = JSON schema of its ``Request`` model). If a ``response``
    model is given, a synthetic "Response" tool is appended so the model
    can terminate by "calling" it with its final answer.
    """
    descriptions: List[ChatCompletionToolParam] = []
    for item in tools:
        descriptions.append(
            ChatCompletionToolParam(
                type="function",
                function=FunctionDefinition(
                    name=item.__class__.__name__,
                    description=item.description.strip(),
                    parameters=model_json_schema(item.Request),
                ),
            )
        )

    if response:
        descriptions.append(
            ChatCompletionToolParam(
                type="function",
                function=FunctionDefinition(
                    name="Response",
                    parameters=model_json_schema(response),
                ),
            )
        )

    return descriptions
92
+
93
+
94
class ToolParamParseError(BaseModel):
    # Sent back to the LLM (as a tool response) when its tool-call arguments
    # fail JSON decoding or pydantic validation, so it can retry with a fix.
    error: str
96
+
97
+
98
@runtime_checkable
class Agent(Tool, Protocol):
    """An LLM-driven tool.

    Repeatedly asks the model for the next step, executes the tool calls it
    requests, feeds the results back, and stops when the model emits the
    synthetic ``Response`` tool call. Raises if ``max_iter`` rounds pass
    without a final response.
    """

    system_prompt: ClassVar[str]
    tools: List[Tool]
    max_iter: ClassVar[int] = 30
    openai_client: OpenAI
    model: str = AUTODEPLOY_MODEL_NAME

    def run(self, request: RequestEvent) -> Iterable[Event]:  # noqa: C901
        """Drive the agent loop as a generator.

        Yields intermediate events (parsed tool requests, tool outputs, and
        events forwarded from sub-tools); values sent into the generator are
        forwarded to the currently running tool. The generator's return value
        (via StopIteration) is the final ``Response``, which lets a parent
        Agent consume nested agents uniformly.
        """
        messages: List[ChatCompletionMessageParam] = [
            ChatCompletionSystemMessageParam(
                role="system",
                content=self.system_prompt + "\nrequest:\n" + request.json(),
            ),
        ]
        tool_descriptions = get_tool_descriptions(self.tools, self.Response)
        tool_map = {}
        for tool in self.tools:
            tool_map[tool.__class__.__name__] = tool
        for _ in range(self.max_iter):
            r = llm(
                messages=messages,
                tools=tool_descriptions,
                openai_client=self.openai_client,
                model=self.model,
            )
            messages.append(r)
            if r.content:
                logger.debug(r.content)
            if not r.tool_calls:
                # The protocol requires a tool call every turn; nudge the
                # model and retry this iteration.
                messages.append(
                    format_user_response(
                        "You must respond with a tool call. Use the Ask tool to ask user."
                    )
                )
                continue

            for tool_call in r.tool_calls:
                if tool_call.function.name == "Response":
                    # Terminal call: parse the final answer and finish.
                    try:
                        response = self.Response(
                            **json.loads(tool_call.function.arguments)
                        )
                    except (json.decoder.JSONDecodeError, ValidationError) as ex:
                        # Bad arguments: report the parse error back to the
                        # model and keep looping.
                        logger.debug(f"{tool_call.function.arguments}, {ex}")
                        messages.append(
                            format_tool_response(
                                ToolParamParseError(error=str(ex)), tool_call.id
                            )
                        )
                        continue
                    logger.debug(response)
                    yield response
                    return response
                tool = tool_map[tool_call.function.name]
                try:
                    request = tool.Request(**json.loads(tool_call.function.arguments))
                except (json.decoder.JSONDecodeError, ValidationError) as ex:
                    logger.debug(f"{tool_call.function.arguments}, {ex}")
                    messages.append(
                        format_tool_response(
                            ToolParamParseError(error=str(ex)), tool_call.id
                        )
                    )
                    continue
                logger.debug(f"{self.__class__.__name__}, {tool_call.function.name}")
                logger.debug(request)
                yield request
                # Run the tool's generator to completion, forwarding its
                # events outward and relaying any values sent back in.
                tool_run = tool.run(request)
                response = None
                inp = None
                while True:
                    try:
                        event = tool_run.send(inp)
                        inp = yield event
                    except StopIteration as ex:
                        response = ex.value
                        break
                logger.debug(response)
                logger.debug(f"{self.__class__.__name__}, {tool_call.function.name}")
                messages.append(format_tool_response(response, tool_call.id))
                if not isinstance(tool, Agent):
                    # Sub-agents already yielded their own response above;
                    # plain tools have theirs surfaced here.
                    yield response
        # BUG FIX: previously raised a bare ``Exception()`` with no message,
        # leaving no clue why the agent stopped. Same exception type, now
        # with a diagnosable message.
        raise Exception(
            f"{self.__class__.__name__} exhausted max_iter={self.max_iter} "
            "iterations without producing a Response"
        )
@@ -0,0 +1,113 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Dict, Optional
4
+
5
+ import docker
6
+ from openai import OpenAI
7
+ from pydantic import Field
8
+
9
+ from truefoundry.autodeploy.agents.base import Agent
10
+ from truefoundry.autodeploy.agents.project_identifier import ProjectIdentifier
11
+ from truefoundry.autodeploy.agents.tester import Tester
12
+ from truefoundry.autodeploy.tools import (
13
+ Ask,
14
+ Commit,
15
+ DockerBuild,
16
+ ListFiles,
17
+ ReadFile,
18
+ RequestEvent,
19
+ ResponseEvent,
20
+ )
21
+
22
+
23
class Developer(Agent):
    # Top-level agent: dockerizes the project, builds the image, hands the
    # result to the Tester sub-agent, and iterates on failures. The prompt
    # text below is a runtime string sent to the LLM verbatim.
    system_prompt = """
    You are a software engineer.
    You goal is to do whatever you have to do to succesfully run a project.
    If the project already contains a dockerfile, you can use that.
    If the project does not contain a dockerfile, you have to create one.
    You need to fix any issue to ensure that the image builds succesfully.
    Generate syntactically correct Dockerfile.
    You need to always read a file before making any changes.
    Ensure that you always make changes if you are writing a file.
    This is the rough flow you have to follow:
    Identify Project -> Build Docker Image -> Tester
    If Tester is not successful, you need to fix the issue and try Tester again.
    Tester has the final authority over whether you were successful in your task.
    Do not ask the user to manually review anything. You need to review and and take a decision.
    You can ask questions to the user but you need to take actions to run the project.
    Do not ask users how to fix an issue.
    Communicate with the user only via Ask tool.
    You should add Dockerfile path to .dockerignore file, this will speed up iteration.
    You can create a .dockerignore file if it does not exist.
    Try to download dependencies first, before you copy the whole project. This will speed up
    subsequent builds due to build cache.
    Use the Project identifier tool call to identify project.
    Avoid reading *.lock type files as they tend to be large
    If you are using multi-stage build:
    1. For a go project the final stage should not need a go runtime.
    2. If you get a missing file error because the application is trying to load a file, you should
    add the files in the final stage.
    3. While copying the file, be careful about the destination directory.

    For Golang projects:
    1. you need to ensure you know where is the main.go file. Sometimes it can be in cmd dir.

    """
    # Allow more rounds than the Agent default (30): build/test cycles are long.
    max_iter = 50

    class Request(RequestEvent):
        # Inputs supplied by the caller (CLI) when starting the agent.
        name: str = Field(..., description="Name of the project.")
        command: Optional[str] = Field(
            None,
            description="Preferred command to run project. This can be corrected later.",
        )

    class Response(ResponseEvent):
        # Final deployment facts the agent must report back.
        command: str = Field(
            description="""
            Final command to run the project within the container.
            This command should be same as what is expressed uning
            the entrypoint and cmd of the dockerfile.
            """
        )
        dockerfile_path: str = Field(description="Path of dockerfile.")
        port: Optional[int] = Field(description="Port in which container is running.")
        justification: str = Field(
            ...,
            description="""
            Why did you send the response back?
            """,
        )

    def __init__(
        self,
        project_root_path: str,
        docker_client: docker.DockerClient,
        openai_client: OpenAI,
        environment: Dict,
    ):
        # Wire up the tool belt this agent may invoke; `environment` is the
        # env-var mapping passed through to test-time `docker run`.
        self.tools = [
            DockerBuild(
                project_root_path=project_root_path,
                docker_client=docker_client,
            ),
            ReadFile(
                project_root_path=project_root_path,
            ),
            Commit(project_root_path=project_root_path),
            ListFiles(
                project_root_path=project_root_path,
            ),
            ProjectIdentifier(
                project_root_path=project_root_path,
                openai_client=openai_client,
            ),
            Tester(
                openai_client=openai_client,
                docker_client=docker_client,
                environment=environment,
            ),
            Ask(),
        ]
        self.openai_client = openai_client
@@ -0,0 +1,124 @@
1
+ from __future__ import annotations
2
+
3
+ import enum
4
+ from typing import List, Optional
5
+
6
+ from openai import OpenAI
7
+ from pydantic import Field
8
+ from rich.console import Console
9
+
10
+ from truefoundry.autodeploy.agents.base import Agent
11
+ from truefoundry.autodeploy.tools import (
12
+ Ask,
13
+ FileTypeCounts,
14
+ ListFiles,
15
+ ReadFile,
16
+ RequestEvent,
17
+ ResponseEvent,
18
+ )
19
+
20
+
21
class ComponentType(str, enum.Enum):
    # Deployment shape of the identified project: a long-running service
    # vs. a run-to-completion job.
    SERVICE = "SERVICE"
    JOB = "JOB"
24
+
25
+
26
class ProjectIdentifier(Agent):
    # Sub-agent that inspects the repository (file listings, type counts,
    # selected file reads) and reports language/framework/dependency facts
    # later used to write a Dockerfile.
    description = """
    Identify the type of project.
    """
    system_prompt = """
    Your goal is to figure out the type of project.
    Once you figure out, send the Response tool call.
    The information will be later used to build a Dockerfile.
    Try to figure out ALL the response tool field arguments even if something is optional.
    Prefer reading files like requirements.txt, pyproject.toml, go.mod, packages.json to figure out framework.
    You should have strong justification for your response. Do not assume anything.
    Prefer using file type counts over list files.
    use the knowledge of file type counts to create the right glob pattern while listing files.
    Prefer yarn over npm if the project is using yarn.
    If there is a lock file, the project maybe using yarn.
    Look for *.lock if there is a lock type file persent.
    Always response with a function call.
    """
    max_iter = 10

    class Request(RequestEvent): ...

    class Response(ResponseEvent):
        component_type: Optional[ComponentType] = Field(
            None,
            description="""
            A Service is designed to run always and should never terminate.
            A Job is desiged to finish after sometime.
            """,
        )
        primary_programming_language: str = Field(
            ...,
            description="""
            Primary programming language used for the project.
            Ex: Go, Python, Rust, Typescript, etc.",
            """,
        )
        framework: Optional[str] = Field(
            None,
            description="""
            If the project is using any specific framework.
            Ex: FastAPI, Gin, Flask, NestJS, React, etc.
            """,
        )
        version: Optional[str] = Field(
            None,
            description="""
            Identifies and return the exact version of the project's programming language,
            essential for successful Docker image creation and execution.""",
        )
        dependency_files: Optional[List[str]] = Field(
            None,
            # BUG FIX: keyword was misspelled ``descripton=``, so pydantic
            # treated it as an unknown extra and the description never made
            # it into the JSON schema shown to the LLM.
            description="""
            requirements.txt, poetry.lock, yarn.lock, Cargo.lock, go.mod, go.sum, pyproject.toml, etc.
            There can be multiple files like ["pyproject.toml", "poetry.lock"] or ["yarn.lock", "package.json"]
            """,
        )
        dependency_manager: Optional[str] = Field(
            None,
            # BUG FIX: same ``descripton=`` misspelling as above.
            description="""
            pip, poetry, yarn, go.mod, cargo.toml, npm, setup.py.
            """,
        )
        justification: str = Field(
            ...,
            description="Justification behind each response field.",
        )

        def render(self, console: Console):
            # Human-readable summary of the identification result for the CLI.
            if self.primary_programming_language is not None:
                console.print(
                    f"[bold magenta]TrueFoundry:[/] Identified a project using [bold cyan]{self.primary_programming_language}[/]."
                )
                console.print(
                    f"[bold magenta]TrueFoundry:[/] Framework Identified: [bold cyan]{'Not applicable' if self.framework is None else self.framework}[/]"
                )
                console.print(
                    f"[bold magenta]TrueFoundry:[/] Dependency Manager Identified: [bold cyan]{'Not applicable' if self.dependency_manager is None else self.dependency_manager}[/]"
                )
            else:
                console.print(
                    "[bold magenta]TrueFoundry:[/] Unable to identify any programming language in the project."
                )
            console.print(
                f"[bold magenta]TrueFoundry:[/] [italic]{self.justification}[/]"
            )

    def __init__(self, project_root_path: str, openai_client: OpenAI):
        # Read-only inspection tools rooted at the project directory.
        self.tools = [
            ReadFile(
                project_root_path=project_root_path,
            ),
            ListFiles(
                project_root_path=project_root_path,
            ),
            FileTypeCounts(project_root_path=project_root_path),
            Ask(),
        ]
        self.openai_client = openai_client
@@ -0,0 +1,75 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Dict, Optional
4
+
5
+ import docker
6
+ from openai import OpenAI
7
+ from pydantic import Field
8
+ from rich.console import Console
9
+
10
+ from truefoundry.autodeploy.agents.base import Agent
11
+ from truefoundry.autodeploy.agents.project_identifier import ProjectIdentifier
12
+ from truefoundry.autodeploy.tools import (
13
+ Ask,
14
+ DockerRun,
15
+ RequestEvent,
16
+ ResponseEvent,
17
+ SendRequest,
18
+ )
19
+
20
+
21
class Tester(Agent):
    # Sub-agent that runs the freshly built image (via DockerRun) and, for
    # services, probes the endpoint (via SendRequest) to decide pass/fail.
    description = """
    Tester
    """
    system_prompt = """
    Your goal is to test a docker image.
    In case the image is running a service, you can send a request to the endpoint to verify everything is running fine.
    If you find any port number in the logs. Re run the docker image by exposing that port correctly.

    If the image is not running a service, try to identify whether there is any issue.
    Your goal is not to fix the issue. Your goal is to create a very detailed justification and report whether
    the testing was succesful.
    Always response with a function call.
    Return response once you are done testing.
    """
    max_iter = 30

    class Request(RequestEvent):
        # What to test: identification facts, the image tag produced by the
        # build, the command to run it, and (optionally) the port to expose.
        project_identity: ProjectIdentifier.Response
        image_tag: str
        command: str
        port_to_be_exposed: Optional[int] = None

    class Response(ResponseEvent):
        # Verdict returned to the Developer agent.
        successful: bool = Field(..., description="is everything fine?")
        justification: str = Field(
            ...,
            description="""
            Why was the testing a failure or successful?
            """,
        )
        logs: str

        def render(self, console: Console):
            # CLI rendering of the verdict; container logs shown only on failure.
            console.print(
                f"[bold cyan]TrueFoundry:[/] The given project has been {'[bold green]successfully built[/]' if self.successful else '[bold red]failed to build[/]'}"
            )
            console.print(
                f"[bold magenta]TrueFoundry:[/] [italic]{self.justification}[/]"
            )
            if not self.successful:
                console.print(f"[cyan]logs:[/] {self.logs}")

    def __init__(
        self,
        docker_client: docker.DockerClient,
        openai_client: OpenAI,
        environment: Dict,
    ):
        # `environment` is the env-var mapping injected into `docker run`.
        self.tools = [
            SendRequest(),
            Ask(),
            DockerRun(docker_client=docker_client, environment=environment),
        ]
        self.openai_client = openai_client