python2mobile 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. examples/example_ecommerce_app.py +189 -0
  2. examples/example_todo_app.py +159 -0
  3. p2m/__init__.py +31 -0
  4. p2m/cli.py +470 -0
  5. p2m/config.py +205 -0
  6. p2m/core/__init__.py +18 -0
  7. p2m/core/api.py +191 -0
  8. p2m/core/ast_walker.py +171 -0
  9. p2m/core/database.py +192 -0
  10. p2m/core/events.py +56 -0
  11. p2m/core/render_engine.py +597 -0
  12. p2m/core/runtime.py +128 -0
  13. p2m/core/state.py +51 -0
  14. p2m/core/validator.py +284 -0
  15. p2m/devserver/__init__.py +9 -0
  16. p2m/devserver/server.py +84 -0
  17. p2m/i18n/__init__.py +7 -0
  18. p2m/i18n/translator.py +74 -0
  19. p2m/imagine/__init__.py +35 -0
  20. p2m/imagine/agent.py +463 -0
  21. p2m/imagine/legacy.py +217 -0
  22. p2m/llm/__init__.py +20 -0
  23. p2m/llm/anthropic_provider.py +78 -0
  24. p2m/llm/base.py +42 -0
  25. p2m/llm/compatible_provider.py +120 -0
  26. p2m/llm/factory.py +72 -0
  27. p2m/llm/ollama_provider.py +89 -0
  28. p2m/llm/openai_provider.py +79 -0
  29. p2m/testing/__init__.py +41 -0
  30. p2m/ui/__init__.py +43 -0
  31. p2m/ui/components.py +301 -0
  32. python2mobile-1.0.1.dist-info/METADATA +238 -0
  33. python2mobile-1.0.1.dist-info/RECORD +50 -0
  34. python2mobile-1.0.1.dist-info/WHEEL +5 -0
  35. python2mobile-1.0.1.dist-info/entry_points.txt +2 -0
  36. python2mobile-1.0.1.dist-info/top_level.txt +3 -0
  37. tests/test_basic_engine.py +281 -0
  38. tests/test_build_generation.py +603 -0
  39. tests/test_build_test_gate.py +150 -0
  40. tests/test_carousel_modal.py +84 -0
  41. tests/test_config_system.py +272 -0
  42. tests/test_i18n.py +101 -0
  43. tests/test_ifood_app_integration.py +172 -0
  44. tests/test_imagine_cli.py +133 -0
  45. tests/test_imagine_command.py +341 -0
  46. tests/test_llm_providers.py +321 -0
  47. tests/test_new_apps_integration.py +588 -0
  48. tests/test_ollama_functional.py +329 -0
  49. tests/test_real_world_apps.py +228 -0
  50. tests/test_run_integration.py +776 -0
@@ -0,0 +1,321 @@
1
+ """
2
+ Test suite for P2M LLM providers
3
+ """
4
+
5
+ import sys
6
+ import os
7
+ from pathlib import Path
8
+
9
+ # Add project to path
10
+ sys.path.insert(0, str(Path(__file__).parent.parent))
11
+
12
+ from p2m.llm.factory import LLMFactory
13
+ from p2m.llm.base import LLMProvider, LLMResponse
14
+
15
+
16
def test_factory_creation():
    """Verify that LLMFactory can build every supported provider.

    Returns:
        bool: True when all four providers are created and expose the
        expected ``model`` attribute, False on the first failure.
    """
    print("\n๐Ÿงช Test 1: LLM Factory Creation")

    # (display label, factory kwargs) for each supported backend.
    # Deduplicates the four formerly copy-pasted try/except blocks.
    cases = [
        ("OpenAI", {
            "provider": "openai",
            "api_key": "test-key",
            "model": "gpt-4o",
        }),
        ("Anthropic", {
            "provider": "anthropic",
            "api_key": "test-key",
            "model": "claude-3-opus-20240229",
        }),
        ("Ollama", {
            "provider": "ollama",
            "base_url": "http://localhost:11434",
            "model": "llama2",
        }),
        ("OpenAI-compatible", {
            "provider": "openai-compatible",
            "base_url": "https://api.example.com/v1",
            "api_key": "test-key",
            "model": "custom-model",
            "x_api_key": "optional-header",
        }),
    ]

    for label, kwargs in cases:
        try:
            provider = LLMFactory.create(**kwargs)
            assert provider is not None
            assert provider.model == kwargs["model"]
            print(f"โœ… {label} provider created")
        except Exception as e:
            print(f"โŒ {label} creation failed: {e}")
            return False

    return True
79
+
80
+
81
def test_available_providers():
    """Verify every expected backend is registered with the factory.

    Returns:
        bool: always True when all assertions pass.
    """
    print("\n๐Ÿงช Test 2: Available Providers")

    providers = LLMFactory.get_available_providers()

    # Each supported backend must be discoverable through the factory.
    for expected in ("openai", "anthropic", "ollama", "openai-compatible"):
        assert expected in providers

    print(f"โœ… Available providers: {', '.join(providers)}")
    return True
94
+
95
+
96
def test_openai_provider_config():
    """Check that an OpenAI provider validates its own configuration.

    Returns:
        bool: True on successful validation or on an expected
        ``ValueError``; False on any other exception.
    """
    print("\n๐Ÿงช Test 3: OpenAI Provider Configuration")

    try:
        provider = LLMFactory.create(
            provider="openai",
            api_key="sk-test",
            model="gpt-4o",
        )

        # PEP 8 / E712: assert truthiness directly instead of `== True`.
        assert provider.validate_config()
        print("โœ… OpenAI config validation passed")
        return True
    except ValueError as e:
        # A ValueError still exercises the validation path, so it counts
        # as a pass (e.g. strict key-format checks on the dummy key).
        print(f"โš ๏ธ Expected validation: {e}")
        return True
    except Exception as e:
        print(f"โŒ Unexpected error: {e}")
        return False
117
+
118
+
119
def test_anthropic_provider_config():
    """Check that an Anthropic provider validates its own configuration.

    Returns:
        bool: True on successful validation or on an expected
        ``ValueError``; False on any other exception.
    """
    print("\n๐Ÿงช Test 4: Anthropic Provider Configuration")

    try:
        provider = LLMFactory.create(
            provider="anthropic",
            api_key="sk-ant-test",
            model="claude-3-opus-20240229",
        )

        # PEP 8 / E712: assert truthiness directly instead of `== True`.
        assert provider.validate_config()
        print("โœ… Anthropic config validation passed")
        return True
    except ValueError as e:
        # A ValueError still exercises the validation path, so it counts
        # as a pass (e.g. strict key-format checks on the dummy key).
        print(f"โš ๏ธ Expected validation: {e}")
        return True
    except Exception as e:
        print(f"โŒ Unexpected error: {e}")
        return False
140
+
141
+
142
def test_compatible_provider_config():
    """Check that an OpenAI-compatible provider validates its configuration.

    Returns:
        bool: True on successful validation or on an expected
        ``ValueError``; False on any other exception.
    """
    print("\n๐Ÿงช Test 5: OpenAI-Compatible Provider Configuration")

    try:
        provider = LLMFactory.create(
            provider="openai-compatible",
            base_url="https://api.example.com/v1",
            api_key="test-key",
            model="custom-model",
        )

        # PEP 8 / E712: assert truthiness directly instead of `== True`.
        assert provider.validate_config()
        print("โœ… Compatible provider config validation passed")
        return True
    except ValueError as e:
        # A ValueError still exercises the validation path, so it counts
        # as a pass.
        print(f"โš ๏ธ Expected validation: {e}")
        return True
    except Exception as e:
        print(f"โŒ Unexpected error: {e}")
        return False
164
+
165
+
166
def test_llm_response():
    """Exercise the LLMResponse value object's fields and str() form.

    Returns:
        bool: always True when all assertions pass.
    """
    print("\n๐Ÿงช Test 6: LLMResponse Object")

    usage = {
        "prompt_tokens": 10,
        "completion_tokens": 20,
        "total_tokens": 30,
    }
    response = LLMResponse(
        content="Test response",
        model="gpt-4o",
        usage=usage,
    )

    assert response.content == "Test response"
    assert response.model == "gpt-4o"
    assert response.usage["total_tokens"] == 30
    # str() is expected to surface the raw content.
    assert str(response) == "Test response"

    print("โœ… LLMResponse object test passed")
    return True
187
+
188
+
189
def test_provider_inheritance():
    """Confirm factory products subclass the LLMProvider base.

    Returns:
        bool: always True when the assertion passes.
    """
    print("\n๐Ÿงช Test 7: Provider Inheritance")

    instance = LLMFactory.create(
        provider="openai",
        api_key="test-key",
        model="gpt-4o",
    )

    # Every concrete provider must share the common base interface.
    assert isinstance(instance, LLMProvider)
    print("โœ… Provider inheritance test passed")
    return True
203
+
204
+
205
def test_factory_invalid_provider():
    """An unknown provider name must make the factory raise ValueError.

    Returns:
        bool: True when ValueError is raised; False otherwise.
    """
    print("\n๐Ÿงช Test 8: Invalid Provider Handling")

    try:
        # Result intentionally discarded -- creation itself should fail
        # (the original bound it to an unused local).
        LLMFactory.create(
            provider="invalid-provider",
            api_key="test-key",
            model="test-model",
        )
        print("โŒ Should have raised ValueError")
        return False
    except ValueError as e:
        print(f"โœ… Correctly raised ValueError: {e}")
        return True
    except Exception as e:
        print(f"โŒ Unexpected error: {e}")
        return False
223
+
224
+
225
def test_compatible_provider_missing_params():
    """Each required compatible-provider parameter must be enforced.

    Drops one required keyword at a time and expects ``ValueError``
    from the factory for every omission. Replaces the three formerly
    copy-pasted try/except blocks with one data-driven loop.

    Returns:
        bool: True when every omission raises ValueError; False if any
        creation unexpectedly succeeds.
    """
    print("\n๐Ÿงช Test 9: Compatible Provider Missing Parameters")

    full_kwargs = {
        "base_url": "https://api.example.com/v1",
        "api_key": "test-key",
        "model": "test-model",
    }

    for missing in ("base_url", "api_key", "model"):
        # Build the kwargs with exactly one required parameter omitted.
        kwargs = {k: v for k, v in full_kwargs.items() if k != missing}
        try:
            LLMFactory.create(provider="openai-compatible", **kwargs)
            print(f"โŒ Should have raised ValueError for missing {missing}")
            return False
        except ValueError as e:
            print(f"โœ… Correctly raised ValueError: {e}")

    return True
265
+
266
+
267
def run_all_tests():
    """Run every test function and print a per-test pass/fail summary.

    Returns:
        bool: True only when every test passed.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("๐Ÿงช P2M LLM Providers Test Suite")
    print(banner)

    tests = (
        test_factory_creation,
        test_available_providers,
        test_openai_provider_config,
        test_anthropic_provider_config,
        test_compatible_provider_config,
        test_llm_response,
        test_provider_inheritance,
        test_factory_invalid_provider,
        test_compatible_provider_missing_params,
    )

    results = []
    for test_func in tests:
        try:
            results.append((test_func.__name__, test_func()))
        except Exception as e:
            # A crashing test counts as a failure but must not abort
            # the rest of the suite.
            results.append((test_func.__name__, False))
            print(f"โŒ {test_func.__name__} failed: {e}")
            import traceback
            traceback.print_exc()

    # Summary
    print("\n" + banner)
    print("๐Ÿ“Š Test Summary")
    print(banner)

    passed = sum(1 for _, ok in results if ok)
    total = len(results)

    for name, ok in results:
        print(f"{'โœ… PASS' if ok else 'โŒ FAIL'} - {name}")

    print(f"\n๐Ÿ“ˆ Results: {passed}/{total} tests passed")

    if passed == total:
        print("\n๐ŸŽ‰ All tests passed!")
    else:
        print(f"\nโš ๏ธ {total - passed} test(s) failed")

    return passed == total
317
+
318
+
319
if __name__ == "__main__":
    # Exit with status 0 only when the whole suite passes.
    sys.exit(0 if run_all_tests() else 1)