cua-agent 0.1.23__tar.gz → 0.1.25__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cua-agent might be problematic; see the registry's advisory page for more details.

Files changed (75)
  1. {cua_agent-0.1.23 → cua_agent-0.1.25}/PKG-INFO +53 -10
  2. {cua_agent-0.1.23 → cua_agent-0.1.25}/README.md +52 -9
  3. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/types.py +0 -17
  4. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/loop.py +16 -2
  5. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/ui/gradio/app.py +11 -6
  6. {cua_agent-0.1.23 → cua_agent-0.1.25}/pyproject.toml +3 -3
  7. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/__init__.py +0 -0
  8. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/__init__.py +0 -0
  9. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/agent.py +0 -0
  10. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/base.py +0 -0
  11. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/callbacks.py +0 -0
  12. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/experiment.py +0 -0
  13. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/factory.py +0 -0
  14. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/messages.py +0 -0
  15. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/provider_config.py +0 -0
  16. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/telemetry.py +0 -0
  17. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools/__init__.py +0 -0
  18. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools/base.py +0 -0
  19. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools/bash.py +0 -0
  20. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools/collection.py +0 -0
  21. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools/computer.py +0 -0
  22. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools/edit.py +0 -0
  23. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools/manager.py +0 -0
  24. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/tools.py +0 -0
  25. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/core/visualization.py +0 -0
  26. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/__init__.py +0 -0
  27. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/__init__.py +0 -0
  28. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/api/client.py +0 -0
  29. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/api/logging.py +0 -0
  30. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/api_handler.py +0 -0
  31. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/callbacks/__init__.py +0 -0
  32. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/callbacks/manager.py +0 -0
  33. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/loop.py +0 -0
  34. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/prompts.py +0 -0
  35. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/response_handler.py +0 -0
  36. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/__init__.py +0 -0
  37. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/base.py +0 -0
  38. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/bash.py +0 -0
  39. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/collection.py +0 -0
  40. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/computer.py +0 -0
  41. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/edit.py +0 -0
  42. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/manager.py +0 -0
  43. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/tools/run.py +0 -0
  44. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/types.py +0 -0
  45. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/anthropic/utils.py +0 -0
  46. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/__init__.py +0 -0
  47. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/api_handler.py +0 -0
  48. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/clients/anthropic.py +0 -0
  49. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/clients/base.py +0 -0
  50. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/clients/oaicompat.py +0 -0
  51. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/clients/ollama.py +0 -0
  52. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/clients/openai.py +0 -0
  53. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/clients/utils.py +0 -0
  54. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/image_utils.py +0 -0
  55. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/parser.py +0 -0
  56. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/prompts.py +0 -0
  57. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/tools/__init__.py +0 -0
  58. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/tools/base.py +0 -0
  59. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/tools/bash.py +0 -0
  60. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/tools/computer.py +0 -0
  61. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/tools/manager.py +0 -0
  62. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/omni/utils.py +0 -0
  63. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/__init__.py +0 -0
  64. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/api_handler.py +0 -0
  65. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/loop.py +0 -0
  66. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/response_handler.py +0 -0
  67. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/tools/__init__.py +0 -0
  68. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/tools/base.py +0 -0
  69. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/tools/computer.py +0 -0
  70. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/tools/manager.py +0 -0
  71. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/types.py +0 -0
  72. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/providers/openai/utils.py +0 -0
  73. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/telemetry.py +0 -0
  74. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/ui/__init__.py +0 -0
  75. {cua_agent-0.1.23 → cua_agent-0.1.25}/agent/ui/gradio/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: cua-agent
3
- Version: 0.1.23
3
+ Version: 0.1.25
4
4
  Summary: CUA (Computer Use) Agent for AI-driven computer interaction
5
5
  Author-Email: TryCua <gh@trycua.com>
6
6
  Requires-Python: <3.13,>=3.10
@@ -151,12 +151,61 @@ pip install "cua-agent[ui]"
151
151
 
152
152
  # Create a simple launcher script
153
153
  ```python
154
+ # launch_ui.py
154
155
  from agent.ui.gradio.app import create_gradio_ui
155
156
 
156
157
  app = create_gradio_ui()
157
158
  app.launch(share=False)
158
159
  ```
159
160
 
161
+ # Run the launcher
162
+ python launch_ui.py
163
+ ```
164
+
165
+ ### Setting up API Keys
166
+
167
+ For the Gradio UI to show available models, you need to set API keys as environment variables:
168
+
169
+ ```bash
170
+ # For OpenAI models
171
+ export OPENAI_API_KEY=your_openai_key_here
172
+
173
+ # For Anthropic models
174
+ export ANTHROPIC_API_KEY=your_anthropic_key_here
175
+
176
+ # Launch with both keys set
177
+ OPENAI_API_KEY=your_key ANTHROPIC_API_KEY=your_key python launch_ui.py
178
+ ```
179
+
180
+ ### Using Local Models
181
+
182
+ You can use local models with the OMNI loop provider by selecting "Custom model..." from the dropdown. The default provider URL is set to `http://localhost:1234/v1` which works with LM Studio.
183
+
184
+ If you're using a different local model server:
185
+ - vLLM: `http://localhost:8000/v1`
186
+ - LocalAI: `http://localhost:8080/v1`
187
+ - Ollama with OpenAI compat API: `http://localhost:11434/v1`
188
+
189
+ To change the URL, modify the `provider_base_url` in your launcher script:
190
+
191
+ ```python
192
+ # In your launcher script
193
+ from agent.ui.gradio.app import create_gradio_ui
194
+ from agent import LLM, LLMProvider
195
+
196
+ # Create a custom model with a specific URL
197
+ custom_model = LLM(
198
+ provider=LLMProvider.OAICOMPAT,
199
+ name="your-model-name",
200
+ provider_base_url="http://localhost:8000/v1" # Change to your server URL
201
+ )
202
+
203
+ app = create_gradio_ui(custom_model=custom_model)
204
+ app.launch()
205
+ ```
206
+
207
+ Without these environment variables, the UI will show "No models available" for the corresponding providers, but you can still use local models with the OMNI loop provider.
208
+
160
209
  The Gradio UI provides:
161
210
  - Selection of different agent loops (OpenAI, Anthropic, OMNI)
162
211
  - Model selection for each provider
@@ -169,14 +218,8 @@ You can also embed the Gradio UI in your own application:
169
218
  # Import directly in your application
170
219
  from agent.ui.gradio.app import create_gradio_ui
171
220
 
172
- # Create the UI with advanced features
173
- demo = create_gradio_ui()
174
- demo.launch()
175
-
176
- # Or for a simpler interface
177
- from agent.ui.gradio import registry
178
- demo = registry(name='cua:gpt-4o')
179
- demo.launch()
221
+ app = create_gradio_ui()
222
+ app.launch()
180
223
  ```
181
224
 
182
225
  ## Agent Loops
@@ -187,7 +230,7 @@ The `cua-agent` package provides three agent loops variations, based on differen
187
230
  |:-----------|:-----------------|:------------|:-------------|
188
231
  | `AgentLoop.OPENAI` | • `computer_use_preview` | Use OpenAI Operator CUA model | Not Required |
189
232
  | `AgentLoop.ANTHROPIC` | • `claude-3-5-sonnet-20240620`<br>• `claude-3-7-sonnet-20250219` | Use Anthropic Computer-Use | Not Required |
190
- | `AgentLoop.OMNI` | • `claude-3-5-sonnet-20240620`<br>• `claude-3-7-sonnet-20250219`<br>• `gpt-4.5-preview`<br>• `gpt-4o`<br>• `gpt-4`<br>• `phi4`<br>• `phi4-mini`<br>• `gemma3`<br>• `...`<br>• `Any Ollama-compatible model` | Use OmniParser for element pixel-detection (SoM) and any VLMs for UI Grounding and Reasoning | OmniParser |
233
+ | `AgentLoop.OMNI` | • `claude-3-5-sonnet-20240620`<br>• `claude-3-7-sonnet-20250219`<br>• `gpt-4.5-preview`<br>• `gpt-4o`<br>• `gpt-4`<br>• `phi4`<br>• `phi4-mini`<br>• `gemma3`<br>• `...`<br>• `Any Ollama or OpenAI-compatible model` | Use OmniParser for element pixel-detection (SoM) and any VLMs for UI Grounding and Reasoning | OmniParser |
191
234
 
192
235
  ## AgentResponse
193
236
  The `AgentResponse` class represents the structured output returned after each agent turn. It contains the agent's response, reasoning, tool usage, and other metadata. The response format aligns with the new [OpenAI Agent SDK specification](https://platform.openai.com/docs/api-reference/responses) for better consistency across different agent loops.
@@ -83,12 +83,61 @@ pip install "cua-agent[ui]"
83
83
 
84
84
  # Create a simple launcher script
85
85
  ```python
86
+ # launch_ui.py
86
87
  from agent.ui.gradio.app import create_gradio_ui
87
88
 
88
89
  app = create_gradio_ui()
89
90
  app.launch(share=False)
90
91
  ```
91
92
 
93
+ # Run the launcher
94
+ python launch_ui.py
95
+ ```
96
+
97
+ ### Setting up API Keys
98
+
99
+ For the Gradio UI to show available models, you need to set API keys as environment variables:
100
+
101
+ ```bash
102
+ # For OpenAI models
103
+ export OPENAI_API_KEY=your_openai_key_here
104
+
105
+ # For Anthropic models
106
+ export ANTHROPIC_API_KEY=your_anthropic_key_here
107
+
108
+ # Launch with both keys set
109
+ OPENAI_API_KEY=your_key ANTHROPIC_API_KEY=your_key python launch_ui.py
110
+ ```
111
+
112
+ ### Using Local Models
113
+
114
+ You can use local models with the OMNI loop provider by selecting "Custom model..." from the dropdown. The default provider URL is set to `http://localhost:1234/v1` which works with LM Studio.
115
+
116
+ If you're using a different local model server:
117
+ - vLLM: `http://localhost:8000/v1`
118
+ - LocalAI: `http://localhost:8080/v1`
119
+ - Ollama with OpenAI compat API: `http://localhost:11434/v1`
120
+
121
+ To change the URL, modify the `provider_base_url` in your launcher script:
122
+
123
+ ```python
124
+ # In your launcher script
125
+ from agent.ui.gradio.app import create_gradio_ui
126
+ from agent import LLM, LLMProvider
127
+
128
+ # Create a custom model with a specific URL
129
+ custom_model = LLM(
130
+ provider=LLMProvider.OAICOMPAT,
131
+ name="your-model-name",
132
+ provider_base_url="http://localhost:8000/v1" # Change to your server URL
133
+ )
134
+
135
+ app = create_gradio_ui(custom_model=custom_model)
136
+ app.launch()
137
+ ```
138
+
139
+ Without these environment variables, the UI will show "No models available" for the corresponding providers, but you can still use local models with the OMNI loop provider.
140
+
92
141
  The Gradio UI provides:
93
142
  - Selection of different agent loops (OpenAI, Anthropic, OMNI)
94
143
  - Model selection for each provider
@@ -101,14 +150,8 @@ You can also embed the Gradio UI in your own application:
101
150
  # Import directly in your application
102
151
  from agent.ui.gradio.app import create_gradio_ui
103
152
 
104
- # Create the UI with advanced features
105
- demo = create_gradio_ui()
106
- demo.launch()
107
-
108
- # Or for a simpler interface
109
- from agent.ui.gradio import registry
110
- demo = registry(name='cua:gpt-4o')
111
- demo.launch()
153
+ app = create_gradio_ui()
154
+ app.launch()
112
155
  ```
113
156
 
114
157
  ## Agent Loops
@@ -119,7 +162,7 @@ The `cua-agent` package provides three agent loops variations, based on differen
119
162
  |:-----------|:-----------------|:------------|:-------------|
120
163
  | `AgentLoop.OPENAI` | • `computer_use_preview` | Use OpenAI Operator CUA model | Not Required |
121
164
  | `AgentLoop.ANTHROPIC` | • `claude-3-5-sonnet-20240620`<br>• `claude-3-7-sonnet-20250219` | Use Anthropic Computer-Use | Not Required |
122
- | `AgentLoop.OMNI` | • `claude-3-5-sonnet-20240620`<br>• `claude-3-7-sonnet-20250219`<br>• `gpt-4.5-preview`<br>• `gpt-4o`<br>• `gpt-4`<br>• `phi4`<br>• `phi4-mini`<br>• `gemma3`<br>• `...`<br>• `Any Ollama-compatible model` | Use OmniParser for element pixel-detection (SoM) and any VLMs for UI Grounding and Reasoning | OmniParser |
165
+ | `AgentLoop.OMNI` | • `claude-3-5-sonnet-20240620`<br>• `claude-3-7-sonnet-20250219`<br>• `gpt-4.5-preview`<br>• `gpt-4o`<br>• `gpt-4`<br>• `phi4`<br>• `phi4-mini`<br>• `gemma3`<br>• `...`<br>• `Any Ollama or OpenAI-compatible model` | Use OmniParser for element pixel-detection (SoM) and any VLMs for UI Grounding and Reasoning | OmniParser |
123
166
 
124
167
  ## AgentResponse
125
168
  The `AgentResponse` class represents the structured output returned after each agent turn. It contains the agent's response, reasoning, tool usage, and other metadata. The response format aligns with the new [OpenAI Agent SDK specification](https://platform.openai.com/docs/api-reference/responses) for better consistency across different agent loops.
@@ -54,23 +54,6 @@ LLMModel = LLM
54
54
  Model = LLM
55
55
 
56
56
 
57
- # Default models for each provider
58
- PROVIDER_TO_DEFAULT_MODEL: Dict[LLMProvider, str] = {
59
- LLMProvider.ANTHROPIC: "claude-3-7-sonnet-20250219",
60
- LLMProvider.OPENAI: "gpt-4o",
61
- LLMProvider.OLLAMA: "gemma3:4b-it-q4_K_M",
62
- LLMProvider.OAICOMPAT: "Qwen2.5-VL-7B-Instruct",
63
- }
64
-
65
- # Environment variable names for each provider
66
- PROVIDER_TO_ENV_VAR: Dict[LLMProvider, str] = {
67
- LLMProvider.ANTHROPIC: "ANTHROPIC_API_KEY",
68
- LLMProvider.OPENAI: "OPENAI_API_KEY",
69
- LLMProvider.OLLAMA: "none",
70
- LLMProvider.OAICOMPAT: "none",
71
- }
72
-
73
-
74
57
  class AgentResponse(TypedDict, total=False):
75
58
  """Agent response format."""
76
59
 
@@ -443,6 +443,8 @@ class OmniLoop(BaseLoop):
443
443
  except (json.JSONDecodeError, IndexError):
444
444
  try:
445
445
  # Look for JSON object pattern
446
+ import re # Local import to ensure availability
447
+
446
448
  json_pattern = r"\{[^}]+\}"
447
449
  json_match = re.search(json_pattern, raw_text)
448
450
  if json_match:
@@ -453,8 +455,20 @@ class OmniLoop(BaseLoop):
453
455
  logger.error(f"No JSON found in content")
454
456
  return True, action_screenshot_saved
455
457
  except json.JSONDecodeError as e:
456
- logger.error(f"Failed to parse JSON from text: {str(e)}")
457
- return True, action_screenshot_saved
458
+ # Try to sanitize the JSON string and retry
459
+ try:
460
+ # Remove or replace invalid control characters
461
+ import re # Local import to ensure availability
462
+
463
+ sanitized_text = re.sub(r"[\x00-\x1F\x7F]", "", raw_text)
464
+ # Try parsing again with sanitized text
465
+ parsed_content = json.loads(sanitized_text)
466
+ logger.info(
467
+ "Successfully parsed JSON after sanitizing control characters"
468
+ )
469
+ except json.JSONDecodeError:
470
+ logger.error(f"Failed to parse JSON from text: {str(e)}")
471
+ return True, action_screenshot_saved
458
472
 
459
473
  # Step 4: Process the parsed content if available
460
474
  if parsed_content:
@@ -271,16 +271,19 @@ def create_agent(
271
271
  api_key = os.environ.get("ANTHROPIC_API_KEY", "")
272
272
 
273
273
  # Create LLM model object with appropriate parameters
274
- provider_base_url = "http://localhost:8000/v1" if use_oaicompat else None
274
+ provider_base_url = "http://localhost:1234/v1" if use_oaicompat else None
275
275
 
276
276
  if use_oaicompat:
277
- # Special handling for OAICOMPAT - use OPENAI provider with custom base URL
278
- print(f"DEBUG - Creating OAICOMPAT agent with model: {model_name}")
277
+ # Special handling for OAICOMPAT - use OAICOMPAT provider with custom base URL
278
+ print(
279
+ f"DEBUG - Creating OAICOMPAT agent with model: {model_name}, URL: {provider_base_url}"
280
+ )
279
281
  llm = LLM(
280
- provider=provider, # Already set to OPENAI
282
+ provider=LLMProvider.OAICOMPAT, # Set to OAICOMPAT instead of using original provider
281
283
  name=model_name,
282
284
  provider_base_url=provider_base_url,
283
285
  )
286
+ print(f"DEBUG - LLM provider is now: {llm.provider}, base URL: {llm.provider_base_url}")
284
287
  # Note: Don't pass use_oaicompat to the agent, as it doesn't accept this parameter
285
288
  elif provider == LLMProvider.OAICOMPAT:
286
289
  # This path is unlikely to be taken with our current approach
@@ -461,8 +464,10 @@ def respond(
461
464
  # Special handling for OAICOMPAT to bypass provider-specific errors
462
465
  # Creates the agent with OPENAI provider but using custom model name and provider base URL
463
466
  is_oaicompat = str(provider) == "oaicompat"
464
- if is_oaicompat:
465
- provider = LLMProvider.OPENAI
467
+
468
+ # Don't override the provider for OAICOMPAT - instead pass it through
469
+ # if is_oaicompat:
470
+ # provider = LLMProvider.OPENAI
466
471
 
467
472
  # Get API key based on provider
468
473
  if provider == LLMProvider.OPENAI:
@@ -6,7 +6,7 @@ build-backend = "pdm.backend"
6
6
 
7
7
  [project]
8
8
  name = "cua-agent"
9
- version = "0.1.23"
9
+ version = "0.1.25"
10
10
  description = "CUA (Computer Use) Agent for AI-driven computer interaction"
11
11
  readme = "README.md"
12
12
  authors = [
@@ -105,7 +105,7 @@ target-version = [
105
105
 
106
106
  [tool.ruff]
107
107
  line-length = 100
108
- target-version = "0.1.23"
108
+ target-version = "0.1.25"
109
109
  select = [
110
110
  "E",
111
111
  "F",
@@ -119,7 +119,7 @@ docstring-code-format = true
119
119
 
120
120
  [tool.mypy]
121
121
  strict = true
122
- python_version = "0.1.23"
122
+ python_version = "0.1.25"
123
123
  ignore_missing_imports = true
124
124
  disallow_untyped_defs = true
125
125
  check_untyped_defs = true
File without changes