optexity 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- optexity/examples/__init__.py +0 -0
- optexity/examples/add_example.py +88 -0
- optexity/examples/download_pdf_url.py +29 -0
- optexity/examples/extract_price_stockanalysis.py +44 -0
- optexity/examples/file_upload.py +59 -0
- optexity/examples/i94.py +126 -0
- optexity/examples/i94_travel_history.py +126 -0
- optexity/examples/peachstate_medicaid.py +201 -0
- optexity/examples/supabase_login.py +75 -0
- optexity/inference/__init__.py +0 -0
- optexity/inference/agents/__init__.py +0 -0
- optexity/inference/agents/error_handler/__init__.py +0 -0
- optexity/inference/agents/error_handler/error_handler.py +39 -0
- optexity/inference/agents/error_handler/prompt.py +60 -0
- optexity/inference/agents/index_prediction/__init__.py +0 -0
- optexity/inference/agents/index_prediction/action_prediction_locator_axtree.py +45 -0
- optexity/inference/agents/index_prediction/prompt.py +14 -0
- optexity/inference/agents/select_value_prediction/__init__.py +0 -0
- optexity/inference/agents/select_value_prediction/prompt.py +20 -0
- optexity/inference/agents/select_value_prediction/select_value_prediction.py +39 -0
- optexity/inference/agents/two_fa_extraction/__init__.py +0 -0
- optexity/inference/agents/two_fa_extraction/prompt.py +23 -0
- optexity/inference/agents/two_fa_extraction/two_fa_extraction.py +47 -0
- optexity/inference/child_process.py +251 -0
- optexity/inference/core/__init__.py +0 -0
- optexity/inference/core/interaction/__init__.py +0 -0
- optexity/inference/core/interaction/handle_agentic_task.py +79 -0
- optexity/inference/core/interaction/handle_check.py +57 -0
- optexity/inference/core/interaction/handle_click.py +79 -0
- optexity/inference/core/interaction/handle_command.py +261 -0
- optexity/inference/core/interaction/handle_input.py +76 -0
- optexity/inference/core/interaction/handle_keypress.py +16 -0
- optexity/inference/core/interaction/handle_select.py +109 -0
- optexity/inference/core/interaction/handle_select_utils.py +132 -0
- optexity/inference/core/interaction/handle_upload.py +59 -0
- optexity/inference/core/interaction/utils.py +81 -0
- optexity/inference/core/logging.py +406 -0
- optexity/inference/core/run_assertion.py +55 -0
- optexity/inference/core/run_automation.py +463 -0
- optexity/inference/core/run_extraction.py +240 -0
- optexity/inference/core/run_interaction.py +254 -0
- optexity/inference/core/run_python_script.py +20 -0
- optexity/inference/core/run_two_fa.py +120 -0
- optexity/inference/core/two_factor_auth/__init__.py +0 -0
- optexity/inference/infra/__init__.py +0 -0
- optexity/inference/infra/browser.py +455 -0
- optexity/inference/infra/browser_extension.py +20 -0
- optexity/inference/models/__init__.py +22 -0
- optexity/inference/models/gemini.py +113 -0
- optexity/inference/models/human.py +20 -0
- optexity/inference/models/llm_model.py +210 -0
- optexity/inference/run_local.py +200 -0
- optexity/schema/__init__.py +0 -0
- optexity/schema/actions/__init__.py +0 -0
- optexity/schema/actions/assertion_action.py +66 -0
- optexity/schema/actions/extraction_action.py +143 -0
- optexity/schema/actions/interaction_action.py +330 -0
- optexity/schema/actions/misc_action.py +18 -0
- optexity/schema/actions/prompts.py +27 -0
- optexity/schema/actions/two_fa_action.py +24 -0
- optexity/schema/automation.py +432 -0
- optexity/schema/callback.py +16 -0
- optexity/schema/inference.py +87 -0
- optexity/schema/memory.py +100 -0
- optexity/schema/task.py +212 -0
- optexity/schema/token_usage.py +48 -0
- optexity/utils/__init__.py +0 -0
- optexity/utils/settings.py +54 -0
- optexity/utils/utils.py +76 -0
- {optexity-0.1.2.dist-info → optexity-0.1.3.dist-info}/METADATA +1 -1
- optexity-0.1.3.dist-info/RECORD +80 -0
- optexity-0.1.2.dist-info/RECORD +0 -11
- {optexity-0.1.2.dist-info → optexity-0.1.3.dist-info}/WHEEL +0 -0
- {optexity-0.1.2.dist-info → optexity-0.1.3.dist-info}/entry_points.txt +0 -0
- {optexity-0.1.2.dist-info → optexity-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {optexity-0.1.2.dist-info → optexity-0.1.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
from optexity.schema.automation import Automation

# Human-readable name and the endpoint slug this example automation is served under.
description = "Supabase Login Example"
endpoint_name = "supabase_login"

# Declarative automation: log in to supabase.com with credentials resolved from
# 1Password at runtime, then verify success with an LLM-based assertion.
automation_json = {
    "url": "https://supabase.com",
    "parameters": {
        "input_parameters": {},
        # Secrets are fetched from the 1Password vault at execution time; each
        # entry is a list, referenced later via placeholders like {username[0]}.
        "secure_parameters": {
            "username": [
                {
                    "onepassword": {
                        "vault_name": "optexity_automation",
                        "item_name": "supabase",
                        "field_name": "username",
                    }
                }
            ],
            "password": [
                {
                    "onepassword": {
                        "vault_name": "optexity_automation",
                        "item_name": "supabase",
                        "field_name": "password",
                    }
                }
            ],
        },
        "generated_parameters": {},
    },
    "nodes": [
        # Step 1: open the sign-in page via the "Sign in" link.
        {
            "type": "action_node",
            "interaction_action": {
                "click_element": {
                    "command": 'get_by_role("link", name="Sign in")',
                    "prompt_instructions": "Click the Sign in link",
                }
            },
            "end_sleep_time": 1.0,
        },
        # Step 2: fill the email field from the secure "username" parameter.
        {
            "type": "action_node",
            "interaction_action": {
                "input_text": {
                    "command": 'get_by_role("textbox", name="Email")',
                    "prompt_instructions": "Enter the email",
                    "input_text": "{username[0]}",
                }
            },
            "end_sleep_time": 1.0,
        },
        # Step 3: fill the password and submit by pressing Enter.
        {
            "type": "action_node",
            "interaction_action": {
                "input_text": {
                    "command": 'get_by_role("textbox", name="Password")',
                    "prompt_instructions": "Enter the password",
                    "input_text": "{password[0]}",
                    "press_enter": True,
                }
            },
            "end_sleep_time": 1.0,
        },
        # Step 4: LLM-based assertion that the login actually succeeded.
        {
            "type": "action_node",
            "assertion_action": {
                "llm": {"extraction_instructions": "Check if the login was successful"}
            },
            "end_sleep_time": 0.0,
        },
    ],
}

# Validate the raw dict against the Automation schema at import time.
automation = Automation.model_validate(automation_json)
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Literal
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel
|
|
5
|
+
|
|
6
|
+
from optexity.inference.agents.error_handler.prompt import system_prompt
|
|
7
|
+
from optexity.inference.models import GeminiModels, get_llm_model
|
|
8
|
+
from optexity.schema.token_usage import TokenUsage
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ErrorHandlerOutput(BaseModel):
    """Structured LLM verdict for a failed browser action.

    Mirrors the JSON schema demanded by the error-handler system prompt.
    """

    # Transient page-load issue, blocking overlay, or unrecoverable failure.
    error_type: Literal["website_not_loaded", "overlay_popup_blocking", "fatal_error"]
    # Human-readable explanation of why this classification was chosen.
    detailed_reason: str
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ErrorHandlerAgent:
    """LLM-backed agent that classifies a failed Playwright command.

    Sends the failing command text plus a page screenshot to a Gemini model
    and parses the structured ErrorHandlerOutput verdict.
    """

    def __init__(self):
        # Second argument presumably enables structured-output mode — TODO
        # confirm against get_llm_model's signature.
        self.model = get_llm_model(GeminiModels.GEMINI_2_5_FLASH, True)

    def classify_error(
        self, command: str, screenshot: str
    ) -> tuple[str, ErrorHandlerOutput, TokenUsage]:
        """Classify the failure of `command` given a page `screenshot`.

        Args:
            command: The Playwright command that failed.
            screenshot: Screenshot of the page at failure time (presumably
                base64-encoded — TODO confirm against the model API).

        Returns:
            (final_prompt, parsed ErrorHandlerOutput, token usage of the call).
        """

        final_prompt = f"""
[INPUT]
Command: {command}
[/INPUT]
"""

        response, token_usage = self.model.get_model_response_with_structured_output(
            prompt=final_prompt,
            response_schema=ErrorHandlerOutput,
            screenshot=screenshot,
            system_instruction=system_prompt,
        )

        return final_prompt, response, token_usage
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
system_prompt = """
|
|
2
|
+
You are an expert error classification agent for an unattended (no human-in-the-loop) Playwright browser automation system.
|
|
3
|
+
|
|
4
|
+
Your single task is to analyze the provided **Goal (playwright command), and Screenshot** to classify an error into one of three categories and provide a clear reason.
|
|
5
|
+
|
|
6
|
+
This automation **cannot** ask a human for help; if the script is logically stuck and cannot proceed without new data or a code change, it is a **fatal error**.
|
|
7
|
+
|
|
8
|
+
You MUST provide your output in a JSON format:
|
|
9
|
+
|
|
10
|
+
```json
|
|
11
|
+
{
|
|
12
|
+
"error_type": "website_not_loaded" | "overlay_popup_blocking" | "fatal_error",
|
|
13
|
+
"detailed_reason": "A summary of the error reason"
|
|
14
|
+
}
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
-----
|
|
18
|
+
|
|
19
|
+
### Error Classification Rules
|
|
20
|
+
|
|
21
|
+
Here are the definitions for each `error_type`:
|
|
22
|
+
|
|
23
|
+
**1. `website_not_loaded`**
|
|
24
|
+
|
|
25
|
+
* **Description:** This is a **transient error**. The page or a specific element is not *yet* available, but it is expected to appear.
|
|
26
|
+
* **Cause:** Typically caused by a slow network, a page still loading, or dynamic content (like a chart or data grid) still being rendered.
|
|
27
|
+
* **Common Clues:** `TimeoutError`, `waiting for selector`, "element is not visible yet".
|
|
28
|
+
* **Analysis:** The **screenshot** might show a blank page, a loading spinner, or a partially rendered page. The **goal** (e.g., "click button X") is to interact with an element that is *expected* on this page but hasn't appeared. This is NOT a fatal error, as a retry or longer wait could solve it.
|
|
29
|
+
* **Action:** The automation should typically wait longer, reload the page, or retry the action.
|
|
30
|
+
* **`detailed_reason`:** A brief summary, e.g., "Page is taking too long to load" or "Element `[selector]` not yet visible."
|
|
31
|
+
|
|
32
|
+
**2. `overlay_popup_blocking`**
|
|
33
|
+
|
|
34
|
+
* **Description:** This is an **interruption error**. The target element *is* on the page, but it is obscured or blocked by another element on top of it.
|
|
35
|
+
* **Cause:** Cookie banners, subscription pop-ups, ad modals, chat widgets, or "support" buttons.
|
|
36
|
+
* **Common Clues:** "Element is not clickable at point," "Another element would receive the click," "Element is obscured."
|
|
37
|
+
* **Analysis:** The **screenshot** is key here. It will clearly show a pop-up or modal covering the content. The **goal** will be to interact with an element *behind* this overlay.
|
|
38
|
+
* **Action:** The automation should try to find and close the overlay (e.g., click an "Accept" or "Close" button).
|
|
39
|
+
* **`detailed_reason`:** Identify the blocking element, e.g., "A cookie consent pop-up is blocking the login button."
|
|
40
|
+
|
|
41
|
+
**3. `fatal_error`**
|
|
42
|
+
|
|
43
|
+
* **Description:** This is a **permanent, non-recoverable error**. The automation is stuck, and a simple wait or reload **will not** fix the problem.
|
|
44
|
+
* **Cause:**
|
|
45
|
+
* **Wrong Page:** The script navigated to the wrong URL (e.g., got a 404, 500 server error). The **screenshot** would show this error page.
|
|
46
|
+
* **Permanently Missing Element:** A required element *does not exist* on the page (it's not just loading, it's missing from the DOM).
|
|
47
|
+
* **Analysis:** Use the **goal** (e.g., "Click the 'Next Step' button") and the **screenshot**. If the page in the screenshot appears *fully loaded* (no spinners, all other content is present) but the target element is *nowhere to be found*, it is a `fatal_error`. This indicates a change in the website's structure or a flaw in the automation script's logic.
|
|
48
|
+
* **Logical Failure:** The automation cannot proceed due to invalid data (e.g., "Incorrect username or password") or a business rule violation (e.g., "Item is out of stock"). The **screenshot** would show this error message clearly displayed on the page. Since the automation **cannot ask a human** for new data, this is fatal.
|
|
49
|
+
* **Action:** The automation must stop and report the failure.
|
|
50
|
+
* **`detailed_reason`:** This is **mandatory and must be specific**.
|
|
51
|
+
* *Good:* "Fatal error: The target element `#submit-payment` does not exist on the page, even though the page appears fully loaded."
|
|
52
|
+
* *Good:* "Fatal error: Login failed due to 'Invalid credentials' message shown on page. Automation cannot proceed without new data."
|
|
53
|
+
* *Good:* "Fatal error: Navigation failed with a 404 error page."
|
|
54
|
+
|
|
55
|
+
-----
|
|
56
|
+
|
|
57
|
+
### Your Task
|
|
58
|
+
|
|
59
|
+
Analyze the following **Goal, and Screenshot** and provide your classification in the required JSON format.
|
|
60
|
+
"""
|
|
File without changes
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel, Field
|
|
5
|
+
|
|
6
|
+
from optexity.inference.agents.index_prediction.prompt import system_prompt
|
|
7
|
+
from optexity.inference.models import GeminiModels, get_llm_model
|
|
8
|
+
from optexity.schema.token_usage import TokenUsage
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class IndexPredictionOutput(BaseModel):
    """Structured model output: which axtree element to interact with."""

    # 1-based index of the target interactive element in the axtree.
    index: int = Field(
        description="The index of the interactive element in the axtree that would achieve the desired outcome. It is always greater than 0."
    )
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class ActionPredictionLocatorAxtree:
    """Predicts which axtree element index fulfils a natural-language goal."""

    def __init__(self):
        # Second argument presumably enables structured-output mode — TODO
        # confirm against get_llm_model's signature.
        self.model = get_llm_model(GeminiModels.GEMINI_2_5_FLASH, True)

    def predict_action(
        self, goal: str, axtree: str, screenshot: Optional[str] = None
    ) -> tuple[str, IndexPredictionOutput, TokenUsage]:
        """Ask the model for the axtree index that accomplishes `goal`.

        Args:
            goal: Natural-language description of the desired interaction.
            axtree: Serialized accessibility tree where interactive elements
                carry bracketed numeric indices (see the system prompt).
            screenshot: Optional page screenshot for extra visual grounding.

        Returns:
            (final_prompt, parsed IndexPredictionOutput, token usage).
        """

        final_prompt = f"""
[INPUT]
Goal: {goal}

[AXTREE]
{axtree}
[/AXTREE]

[/INPUT]
"""

        response, token_usage = self.model.get_model_response_with_structured_output(
            prompt=final_prompt,
            response_schema=IndexPredictionOutput,
            screenshot=screenshot,
            system_instruction=system_prompt,
        )

        return final_prompt, response, token_usage
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
system_prompt = """
|
|
2
|
+
You are an AI assistant tasked with identifying the correct interactive element on a webpage based on a user's goal and a provided web page structure (axtree).
|
|
3
|
+
|
|
4
|
+
Your core responsibility is to translate a user's intended action, described through a goal into a specific numerical index from the given axtree. This index represents the interactive element (e.g., a button, a text field) that, if interacted with, would achieve the desired outcome.
|
|
5
|
+
|
|
6
|
+
**Input You Will Receive:**
|
|
7
|
+
|
|
8
|
+
* **Goal:** The description of the task to be accomplished on the webpage.
|
|
9
|
+
* **Axtree:** A simplified representation of the webpage's interactive elements. Each interactive element is marked with a bracketed number, like `[1]`, which is its unique index.
|
|
10
|
+
|
|
11
|
+
**Crucial Task Directives:**
|
|
12
|
+
|
|
13
|
+
Your output must be a single numerical index from the axtree. This is because index-based interaction is more reliable than trying to replicate a playwright command, which can fail if the element isn't precisely found.
|
|
14
|
+
"""
|
|
File without changes
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
system_prompt = """
|
|
2
|
+
You are an AI assistant tasked with helping users select relevant options from a webpage dropdown menu. You will be given a list of dropdown options, each with a "value" and a "label", along with a list of user-provided patterns. Your goal is to identify and return the dropdown values that best correspond to the user's patterns, taking into account both exact and approximate matches.
|
|
3
|
+
|
|
4
|
+
Guidelines:
|
|
5
|
+
- A pattern may closely resemble, partially match, or refer to either the "label" or "value" of an option.
|
|
6
|
+
- Use your reasoning to determine the most appropriate matches, even if they are not exact.
|
|
7
|
+
- Focus on what the user is likely looking for based on the patterns and the option labels/values.
|
|
8
|
+
|
|
9
|
+
Example:
|
|
10
|
+
Dropdown options:
|
|
11
|
+
[{"value": "AAPL", "label": "Apple Inc"}, {"value": "GOOGL", "label": "Google Inc"}, {"value": "MSFT", "label": "Microsoft Inc"}, {"value": "NVDA", "label": "NVIDIA Inc"}]
|
|
12
|
+
User patterns: ["apple", "nvidia"]
|
|
13
|
+
Expected output: ["AAPL", "NVDA"]
|
|
14
|
+
(Rationale: "apple" most closely matches "Apple Inc" → "AAPL"; "nvidia" matches "NVIDIA Inc" → "NVDA".)
|
|
15
|
+
|
|
16
|
+
Instructions:
|
|
17
|
+
- Return only the matched dropdown values, as a Python list of strings (e.g., ["AAPL", "NVDA"]).
|
|
18
|
+
- If there are no valid matches, return an empty Python list (e.g., []).
|
|
19
|
+
- Do not include any explanations or formatting—just the list.
|
|
20
|
+
"""
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel, Field
|
|
5
|
+
|
|
6
|
+
from optexity.inference.agents.select_value_prediction.prompt import system_prompt
|
|
7
|
+
from optexity.inference.models import GeminiModels, get_llm_model
|
|
8
|
+
from optexity.schema.token_usage import TokenUsage
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class SelectValuePredictionOutput(BaseModel):
    """Structured model output: dropdown values matched to the user's patterns."""

    # Empty list when no option matches any pattern.
    matched_values: list[str] = Field(default_factory=list)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class SelectValuePredictionAgent:
    """LLM-backed matcher from fuzzy user patterns to concrete <select> values."""

    def __init__(self):
        # Second argument presumably enables structured-output mode — TODO
        # confirm against get_llm_model's signature.
        self.model = get_llm_model(GeminiModels.GEMINI_2_5_FLASH, True)

    def predict_select_value(
        self, options: list[dict[str, str]], patterns: list[str]
    ) -> tuple[str, SelectValuePredictionOutput, TokenUsage]:
        """Match `patterns` against the dropdown `options`.

        Args:
            options: Dropdown entries, each presumably of the shape
                {"value": ..., "label": ...} — see the system prompt example.
            patterns: User-provided fuzzy patterns to match.

        Returns:
            (final_prompt, parsed SelectValuePredictionOutput, token usage).
        """

        final_prompt = f"""
[Actual Select Options]
{json.dumps(options, indent=4)}

[User Provided Patterns]
[{', '.join(patterns)}]
"""

        response, token_usage = self.model.get_model_response_with_structured_output(
            prompt=final_prompt,
            response_schema=SelectValuePredictionOutput,
            system_instruction=system_prompt,
        )

        return final_prompt, response, token_usage
|
|
File without changes
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
system_prompt = """
|
|
2
|
+
You are an expert AI assistant specializing in extracting Two-Factor Authentication (2FA) codes from digital messages. Your goal is to accurately identify and extract ONLY valid 2FA codes from a provided list of messages.
|
|
3
|
+
|
|
4
|
+
Carefully follow these instructions:
|
|
5
|
+
|
|
6
|
+
1. Read each message in the list, looking for explicit 2FA codes.
|
|
7
|
+
2. Extract only the codes that are clearly intended for authentication—do not extract any other numbers, words, or irrelevant information.
|
|
8
|
+
3. Exclude numbers or text from headers, footers, signatures, or unrelated content, even if they appear similar to codes.
|
|
9
|
+
4. If there are multiple distinct 2FA codes across the messages, return all of them as a list.
|
|
10
|
+
5. If you find no valid 2FA code in any message, return None.
|
|
11
|
+
|
|
12
|
+
Sometimes you may be given additional, specific extraction instructions—always follow those if present and give them highest priority.
|
|
13
|
+
|
|
14
|
+
Context: Messages may come from various platforms (such as email, chat, or Slack).
|
|
15
|
+
|
|
16
|
+
**Input:**
|
|
17
|
+
- A list of messages to analyze.
|
|
18
|
+
|
|
19
|
+
**Output:**
|
|
20
|
+
- The extracted 2FA code (as a string), a list of codes (if multiple are found), or None if no code exists.
|
|
21
|
+
|
|
22
|
+
Carefully consider the content of each message and reason step-by-step before providing your answer. Return only the code(s), with no extra commentary or explanation.
|
|
23
|
+
"""
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel, Field
|
|
5
|
+
|
|
6
|
+
from optexity.inference.agents.two_fa_extraction.prompt import system_prompt
|
|
7
|
+
from optexity.inference.models import GeminiModels, get_llm_model
|
|
8
|
+
from optexity.schema.inference import Message
|
|
9
|
+
from optexity.schema.token_usage import TokenUsage
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class TwoFAExtractionOutput(BaseModel):
    """Structured LLM output for 2FA code extraction."""

    # A single code, several distinct codes, or None when no valid code exists.
    code: str | list[str] | None = Field(
        description="The 2FA code extracted from the messages."
    )
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class TwoFAExtraction:
    """LLM-backed extractor that pulls 2FA codes out of captured messages."""

    def __init__(self):
        # Second argument presumably enables structured-output mode — TODO
        # confirm against get_llm_model's signature.
        self.model = get_llm_model(GeminiModels.GEMINI_2_5_FLASH, True)

    def extract_code(
        self, instructions: str | None, messages: list[Message]
    ) -> tuple[str, TwoFAExtractionOutput, TokenUsage]:
        """Extract 2FA code(s) from `messages`.

        Args:
            instructions: Optional extra extraction instructions; when given
                they are prepended to the prompt (the system prompt gives
                them highest priority).
            messages: Captured messages; only their `message_text` field is
                serialized into the prompt.

        Returns:
            (final_prompt, parsed TwoFAExtractionOutput, token usage).
        """

        final_prompt = ""

        if instructions is not None:
            final_prompt += f"""
[EXTRACTION INSTRUCTIONS]
{instructions}
[/EXTRACTION INSTRUCTIONS]
"""
        final_prompt += f"""
[MESSAGES]
{json.dumps([message.model_dump(include={"message_text"}) for message in messages], indent=2)}
[/MESSAGES]
"""

        response, token_usage = self.model.get_model_response_with_structured_output(
            prompt=final_prompt,
            response_schema=TwoFAExtractionOutput,
            system_instruction=system_prompt,
        )
        return final_prompt, response, token_usage
|
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import asyncio
|
|
3
|
+
import logging
|
|
4
|
+
from contextlib import asynccontextmanager
|
|
5
|
+
from datetime import datetime, timedelta, timezone
|
|
6
|
+
from urllib.parse import urljoin
|
|
7
|
+
|
|
8
|
+
import httpx
|
|
9
|
+
from fastapi import Body, FastAPI
|
|
10
|
+
from fastapi.responses import JSONResponse
|
|
11
|
+
from pydantic import BaseModel
|
|
12
|
+
from uvicorn import run
|
|
13
|
+
|
|
14
|
+
from optexity.inference.core.run_automation import run_automation
|
|
15
|
+
from optexity.schema.inference import InferenceRequest
|
|
16
|
+
from optexity.schema.task import Task
|
|
17
|
+
from optexity.utils.settings import settings
|
|
18
|
+
|
|
19
|
+
logging.basicConfig(level=logging.INFO)
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ChildProcessIdRequest(BaseModel):
    """Request body for the /set_child_process_id endpoint."""

    # Sent as a string over the wire; the endpoint parses it with int().
    new_child_process_id: str
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
child_process_id = None
|
|
28
|
+
task_running = False
|
|
29
|
+
last_task_start_time = None
|
|
30
|
+
task_queue: asyncio.Queue[Task] = asyncio.Queue()
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
async def task_processor():
    """Background worker that processes tasks from the queue one at a time.

    Runs for the lifetime of the app (scheduled from the FastAPI lifespan).
    Updates the module globals `task_running` / `last_task_start_time` so the
    /health and /is_task_running endpoints can report progress. The loop only
    exits on cancellation; any other task failure is logged and the worker
    keeps serving the queue.
    """
    global task_running
    global last_task_start_time
    logger.info("Task processor started")

    while True:
        # Track whether a task was actually dequeued this iteration so the
        # finally block only acknowledges real gets.
        task = None
        try:
            # Get next task from queue (blocks until one is available)
            task = await task_queue.get()
            task_running = True
            last_task_start_time = datetime.now()
            await run_automation(task, child_process_id)

        except asyncio.CancelledError:
            logger.info("Task processor cancelled")
            break
        except Exception:
            # Boundary handler: keep the worker alive across task failures,
            # and log the full traceback rather than just str(e).
            logger.exception("Error in task processor")
        finally:
            task_running = False
            if task is not None:
                # Balance the successful get() — without this, any future
                # task_queue.join() would block forever.
                task_queue.task_done()
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
async def register_with_master():
    """Register with master on startup (handles restarts automatically).

    Queries the ECS task metadata endpoint (v3) for this container's task ARN,
    private IP and host-port binding, then POSTs them to the master's
    /register_child endpoint.

    Raises:
        ValueError: If no host-port binding matches settings.CHILD_PORT_OFFSET.
        httpx.HTTPStatusError: If either HTTP call returns an error status.
    """
    # Get my task metadata from ECS
    # 169.254.170.2 is the fixed ECS task metadata endpoint address.
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.get("http://169.254.170.2/v3/task")
        response.raise_for_status()
        metadata = response.json()

    my_task_arn = metadata["TaskARN"]
    # NOTE(review): assumes the first container / first network entry is this
    # one — verify for multi-container task definitions.
    my_ip = metadata["Containers"][0]["Networks"][0]["IPv4Addresses"][0]

    # Find the host port mapped to our well-known container port.
    my_port = None
    for binding in metadata["Containers"][0].get("NetworkBindings", []):
        if binding["containerPort"] == settings.CHILD_PORT_OFFSET:
            my_port = binding["hostPort"]
            break

    if not my_port:
        logger.error("Could not find host port binding")
        raise ValueError("Host port not found in metadata")

    # Register with master
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.post(
            f"http://{settings.SERVER_URL}/register_child",
            json={"task_arn": my_task_arn, "private_ip": my_ip, "port": my_port},
        )
        response.raise_for_status()

    logger.info(f"Registered with master: {response.json()}")
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def get_app_with_endpoints(is_aws: bool, child_id: int):
    """Build the FastAPI app served by this child worker process.

    Args:
        is_aws: When True (ECS deployment) the worker registers itself with
            the master at startup; when False a local /inference endpoint is
            exposed instead.
        child_id: Identifier for this worker, stored in the module global
            `child_process_id` and passed along to run_automation.

    Returns:
        The configured FastAPI application.
    """
    global child_process_id
    child_process_id = child_id

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        """Lifespan context manager for startup and shutdown."""
        # Startup

        if is_aws:
            # Fire-and-forget: registration runs concurrently with startup.
            asyncio.create_task(register_with_master())

            # NOTE(review): logged before register_with_master() has actually
            # completed — the message is optimistic, not a confirmation.
            logger.info("Registered with master")
        asyncio.create_task(task_processor())
        logger.info("Task processor background task started")
        yield
        # Shutdown (if needed in the future)
        logger.info("Shutting down task processor")

    app = FastAPI(title="Optexity Inference", lifespan=lifespan)

    @app.get("/is_task_running", tags=["info"])
    async def is_task_running():
        """Is task running endpoint."""
        return task_running

    @app.get("/health", tags=["info"])
    async def health():
        """Health check endpoint.

        Reports 503 when a single task has been running for over 15 minutes
        (assumed stuck) so the orchestrator can recycle this worker.
        """
        global last_task_start_time
        if (
            task_running
            and last_task_start_time
            and datetime.now() - last_task_start_time > timedelta(minutes=15)
        ):
            return JSONResponse(
                status_code=503,
                content={
                    "status": "unhealthy",
                    "message": "Task not finished in the last 15 minutes",
                },
            )
        return JSONResponse(
            status_code=200,
            content={
                "status": "healthy",
                "task_running": task_running,
                "queued_tasks": task_queue.qsize(),
            },
        )

    @app.post("/set_child_process_id", tags=["info"])
    async def set_child_process_id(request: ChildProcessIdRequest):
        """Set child process id endpoint."""
        global child_process_id
        child_process_id = int(request.new_child_process_id)
        return JSONResponse(
            content={"success": True, "message": "Child process id has been set"},
            status_code=200,
        )

    @app.post("/allocate_task")
    async def allocate_task(task: Task = Body(...)):
        """Queue a task for execution on this worker (used by the master)."""
        try:

            await task_queue.put(task)
            return JSONResponse(
                content={"success": True, "message": "Task has been allocated"},
                status_code=202,
            )
        except Exception as e:
            logger.error(f"Error allocating task {task.task_id}: {e}")
            return JSONResponse(
                content={"success": False, "message": str(e)}, status_code=500
            )

    if not is_aws:

        @app.post("/inference")
        async def inference(inference_request: InferenceRequest = Body(...)):
            """Forward an inference request to the server, then queue the
            returned task locally (local / non-AWS mode only)."""
            response_data: dict | None = None
            try:

                async with httpx.AsyncClient(timeout=30.0) as client:
                    url = urljoin(settings.SERVER_URL, settings.INFERENCE_ENDPOINT)
                    headers = {"x-api-key": settings.API_KEY}
                    response = await client.post(
                        url, json=inference_request.model_dump(), headers=headers
                    )
                    # Capture the body before raise_for_status so the except
                    # block can surface the server-provided error message.
                    response_data = response.json()
                    response.raise_for_status()

                task_data = response_data["task"]

                task = Task.model_validate_json(task_data)
                if task.use_proxy and settings.PROXY_URL is None:
                    raise ValueError(
                        "PROXY_URL is not set and is required when use_proxy is True"
                    )
                task.allocated_at = datetime.now(timezone.utc)
                await task_queue.put(task)

                return JSONResponse(
                    content={
                        "success": True,
                        "message": "Task has been allocated",
                        "task_id": task.task_id,
                    },
                    status_code=202,
                )

            except Exception as e:
                # Prefer the server-reported error over the local exception.
                error = str(e)
                if response_data is not None:
                    error = response_data.get("error", str(e))

                # NOTE(review): message says "fetching recordings" but this
                # handler allocates an inference task — likely copy-paste.
                logger.error(f"❌ Error fetching recordings: {error}")
                return JSONResponse({"success": False, "error": error}, status_code=500)

    return app
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def main():
    """Parse CLI arguments and run the child-process HTTP server.

    Blocking: uvicorn's `run` manages its own event loop until shutdown.
    """
    parser = argparse.ArgumentParser(
        description="Dynamic API endpoint generator for Optexity recordings"
    )

    parser.add_argument(
        "--host",
        type=str,
        default="0.0.0.0",
        help="Host to bind the server to (default: 0.0.0.0)",
    )
    parser.add_argument(
        "--port",
        type=int,
        # Previously optional with no default, which passed port=None to
        # uvicorn and crashed obscurely at startup; fail fast instead.
        required=True,
        help="Port to run the server on",
    )
    parser.add_argument(
        "--child_process_id",
        type=int,
        # Optional: may also be assigned later via /set_child_process_id.
        help="Child process ID",
    )
    parser.add_argument(
        "--is_aws",
        action="store_true",
        help="Running on AWS ECS (register with master at startup)",
        default=False,
    )

    args = parser.parse_args()

    app = get_app_with_endpoints(is_aws=args.is_aws, child_id=args.child_process_id)

    # Start the server (this is blocking and manages its own event loop)
    logger.info(f"Starting server on {args.host}:{args.port}")
    run(app, host=args.host, port=args.port)


if __name__ == "__main__":
    main()
|
|
File without changes
|
|
File without changes
|