fixtureforge-2.0.0.tar.gz → fixtureforge-2.0.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. fixtureforge-2.0.2/PKG-INFO +427 -0
  2. fixtureforge-2.0.2/README.md +392 -0
  3. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/pyproject.toml +78 -78
  4. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/__init__.py +15 -4
  5. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/batch_engine.py +10 -1
  6. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/generator.py +22 -4
  7. fixtureforge-2.0.0/PKG-INFO +0 -50
  8. fixtureforge-2.0.0/README.md +0 -16
  9. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/LICENSE +0 -0
  10. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/ai/__init__.py +0 -0
  11. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/ai/cache.py +0 -0
  12. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/ai/engine.py +0 -0
  13. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/ai/prompts.py +0 -0
  14. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/cli/__init__.py +0 -0
  15. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/cli/commands.py +0 -0
  16. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/config/__init__.py +0 -0
  17. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/config/flags.py +0 -0
  18. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/__init__.py +0 -0
  19. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/analyzer.py +0 -0
  20. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/compression.py +0 -0
  21. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/dataset.py +0 -0
  22. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/exporter.py +0 -0
  23. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/parser.py +0 -0
  24. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/recipe.py +0 -0
  25. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/router.py +0 -0
  26. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/streamer.py +0 -0
  27. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/core/swarm.py +0 -0
  28. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/integrations/__init__.py +0 -0
  29. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/integrations/github.py +0 -0
  30. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/integrations/jira.py +0 -0
  31. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/library/__init__.py +0 -0
  32. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/library/sharing.py +0 -0
  33. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/library/storage.py +0 -0
  34. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/memory/__init__.py +0 -0
  35. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/memory/dream.py +0 -0
  36. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/memory/store.py +0 -0
  37. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/__init__.py +0 -0
  38. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/anthropic.py +0 -0
  39. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/base.py +0 -0
  40. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/factory.py +0 -0
  41. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/gemini.py +0 -0
  42. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/groq.py +0 -0
  43. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/ollama.py +0 -0
  44. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/providers/openai.py +0 -0
  45. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/pyproject.toml +0 -0
  46. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/security/__init__.py +0 -0
  47. {fixtureforge-2.0.0 → fixtureforge-2.0.2}/src/fixtureforge/security/permissions.py +0 -0
@@ -0,0 +1,427 @@
1
+ Metadata-Version: 2.4
2
+ Name: fixtureforge
3
+ Version: 2.0.2
4
+ Summary: Agentic Test Data Harness: memory, multi-agent swarms, permission gates, coverage analysis. Provider-agnostic (Gemini, OpenAI, Anthropic, Ollama).
5
+ License: MIT
6
+ License-File: LICENSE
7
+ Keywords: testing,fixtures,test-data,qa,automation,synthetic-data,llm
8
+ Author: Yaniv Metuku
9
+ Requires-Python: >=3.11,<4.0
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: Programming Language :: Python :: 3.12
14
+ Classifier: Programming Language :: Python :: 3.13
15
+ Classifier: Programming Language :: Python :: 3.14
16
+ Provides-Extra: all
17
+ Provides-Extra: anthropic
18
+ Provides-Extra: gemini
19
+ Provides-Extra: openai
20
+ Provides-Extra: sql
21
+ Requires-Dist: anthropic (>=0.18.0,<0.19.0) ; extra == "anthropic" or extra == "all"
22
+ Requires-Dist: click (>=8.1.0,<9.0.0)
23
+ Requires-Dist: faker (>=22.0.0,<23.0.0)
24
+ Requires-Dist: google-genai (>=1.0.0,<2.0.0) ; extra == "gemini" or extra == "all"
25
+ Requires-Dist: openai (>=1.0.0,<2.0.0) ; extra == "openai" or extra == "all"
26
+ Requires-Dist: pydantic (>=2.5.0,<3.0.0)
27
+ Requires-Dist: pyyaml (>=6.0,<7.0)
28
+ Requires-Dist: requests (>=2.31.0,<3.0.0)
29
+ Requires-Dist: rich (>=13.7.0,<14.0.0)
30
+ Requires-Dist: sqlalchemy (>=2.0.0,<3.0.0) ; extra == "sql" or extra == "all"
31
+ Project-URL: Homepage, https://fixtureforge.dev
32
+ Project-URL: Repository, https://github.com/Yaniv2809/fixtureforge
33
+ Description-Content-Type: text/markdown
34
+
35
+ # FixtureForge
36
+
37
+ **Agentic Test Data Harness for Python.**
38
+ Generate realistic, context-aware fixtures — deterministic in CI, AI-powered in development.
39
+
40
+ [![PyPI version](https://img.shields.io/pypi/v/fixtureforge.svg)](https://pypi.org/project/fixtureforge/)
41
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
42
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
43
+
44
+ ---
45
+
46
+ ## The Problem
47
+
48
+ ```python
49
+ # This is what most test data looks like:
50
+ user = User(name="Test User", email="test@test.com", bio="Lorem ipsum...")
51
+
52
+ # It doesn't catch real-world edge cases.
53
+ # It doesn't feel like production data.
54
+ # And writing 500 of them by hand? Not happening.
55
+ ```
56
+
57
+ FixtureForge solves this in two modes:
58
+
59
+ ```python
60
+ # CI mode — deterministic, zero AI, seed-controlled. Same seed = same data. Always.
61
+ forge = Forge(use_ai=False, seed=42)
62
+ users = forge.create_batch(User, count=500)
63
+
64
+ # Dev mode — AI-generated, context-aware, realistic
65
+ forge = Forge()
66
+ reviews = forge.create_batch(Review, count=50, context="angry holiday customers")
67
+ ```
68
+
69
+ ---
70
+
71
+ ## Installation
72
+
73
+ ```bash
74
+ pip install fixtureforge
75
+ ```
76
+
77
+ With your preferred AI provider:
78
+
79
+ ```bash
80
+ pip install "fixtureforge[anthropic]" # Claude
81
+ pip install "fixtureforge[openai]" # GPT
82
+ pip install "fixtureforge[gemini]" # Google Gemini
83
+ pip install "fixtureforge[all]" # All providers
84
+ ```
85
+
86
+ ---
87
+
88
+ ## Quick Start
89
+
90
+ ```python
91
+ from fixtureforge import Forge
92
+ from pydantic import BaseModel
93
+
94
+ class User(BaseModel):
95
+ id: int
96
+ name: str
97
+ email: str
98
+ bio: str
99
+
100
+ forge = Forge() # auto-detects provider from env vars
101
+ users = forge.create_batch(User, count=50, context="SaaS platform users")
102
+ ```
103
+
104
+ That's it. FixtureForge:
105
+ - Assigns sequential IDs automatically
106
+ - Generates `name` and `email` with Faker (zero API cost)
107
+ - Sends only `bio` to the AI — in a single batch call for all 50 records
108
+
109
+ ---
110
+
111
+ ## Core Concepts
112
+
113
+ ### Intelligent Field Routing
114
+
115
+ Every field is classified into a tier. Only semantic fields hit the AI:
116
+
117
+ | Tier | Fields | Generator | Cost |
118
+ |------|--------|-----------|------|
119
+ | **Structural** | `id`, `user_id`, `order_id` | Internal counters / FK registry | Free |
120
+ | **Standard** | `name`, `email`, `phone`, `address`, `date` | Faker | Free |
121
+ | **Computed** | `@computed_field` properties | Pydantic | Free |
122
+ | **Semantic** | `bio`, `description`, `review`, `message` | LLM (batched) | API tokens |
123
+
124
+ 100 users with 2 semantic fields = **2 API calls**, not 200.
125
+
126
+ ### CI Mode vs Dev Mode
127
+
128
+ ```python
129
+ # CI — fully deterministic, no network, reproducible
130
+ forge = Forge(use_ai=False, seed=42)
131
+
132
+ # Dev — AI-powered, realistic context
133
+ forge = Forge(provider_name="anthropic", model="claude-haiku-4-5-20251001")
134
+
135
+ # Large datasets — seed+interpolation, constant cost regardless of count
136
+ forge.create_large(Order, count=100_000, seed_ratio=0.01) # pays for ~1k, delivers 100k
137
+ ```
138
+
139
+ ### Verbose Mode
140
+
141
+ See exactly where each value comes from:
142
+
143
+ ```python
144
+ forge = Forge(use_ai=False, seed=42, verbose=True)
145
+ user = forge.create(User)
146
+
147
+ # [structural] id = 1
148
+ # [faker] name = 'Allison Hill'
149
+ # [faker] email = 'donaldgarcia@example.net'
150
+ # [ai] bio = 'Passionate developer with 8 years...'
151
+ ```
152
+
153
+ ---
154
+
155
+ ## Providers
156
+
157
+ FixtureForge auto-detects your provider from environment variables:
158
+
159
+ ```bash
160
+ export ANTHROPIC_API_KEY=... # → Claude (default: claude-haiku-4-5-20251001)
161
+ export OPENAI_API_KEY=... # → GPT (default: gpt-4o-mini)
162
+ export GOOGLE_API_KEY=... # → Gemini (default: gemini-2.0-flash)
163
+ export GROQ_API_KEY=... # → Groq (default: llama-3.3-70b-versatile)
164
+ # No key? → Ollama (localhost:11434) → Deterministic-only
165
+ ```
166
+
167
+ Or be explicit:
168
+
169
+ ```python
170
+ forge = Forge(provider_name="anthropic", model="claude-sonnet-4-6")
171
+ forge = Forge(provider_name="ollama", model="llama3.2")
172
+ forge = Forge(use_ai=False) # zero cost, zero network
173
+ ```
174
+
175
+ ---
176
+
177
+ ## Foreign Key Relationships
178
+
179
+ Register parent records first — child FKs resolve automatically:
180
+
181
+ ```python
182
+ # Step 1: generate customers
183
+ customers = forge.create_batch(Customer, count=10)
184
+
185
+ # Step 2: orders automatically reference real customer IDs
186
+ orders = forge.create_batch(Order, count=100)
187
+ # order.customer_id → always a valid customer.id
188
+ ```
189
+
190
+ ---
191
+
192
+ ## DataSwarms — Parallel Multi-Model Generation
193
+
194
+ Generate multiple models in parallel with shared AI cache.
195
+ The first model warms the cache; every subsequent model inherits it (~90% cheaper per model).
196
+
197
+ ```python
198
+ results = forge.swarm(
199
+ models=[User, Order, Product, Payment],
200
+ counts=[10, 50, 100, 30],
201
+ contexts=["SaaS users", "E-commerce orders", None, None],
202
+ )
203
+
204
+ # returns:
205
+ # {
206
+ # "User": [...10 users...],
207
+ # "Order": [...50 orders...],
208
+ # "Product": [...100 products...],
209
+ # "Payment": [...30 payments...],
210
+ # }
211
+ ```
212
+
213
+ 5 models ≈ cost of 1.5 models.
214
+
215
+ ---
216
+
217
+ ## Permission Gates
218
+
219
+ FixtureForge classifies models by data sensitivity and gates dangerous operations:
220
+
221
+ ```python
222
+ class SafeUser(BaseModel):
223
+ id: int
224
+ name: str # SAFE — auto-approved
225
+
226
+ class CustomerProfile(BaseModel):
227
+ id: int
228
+ ssn: str # SENSITIVE — requires FORGE_ALLOW_PII=1
229
+ salary: float # SENSITIVE
230
+
231
+ class SecurityTest(BaseModel):
232
+ id: int
233
+ sql_injection: str # DANGEROUS — requires interactive confirmation
234
+ ```
235
+
236
+ ```python
237
+ # PII auto-approved
238
+ forge = Forge(allow_pii=True)
239
+
240
+ # CI/headless — dangerous ops silently rejected
241
+ forge = Forge(interactive=False)
242
+ ```
243
+
244
+ Three levels: `safe` (auto) → `sensitive` (env gate) → `dangerous` (human prompt).
245
+
246
+ ---
247
+
248
+ ## Domain Rules — ForgeMemory
249
+
250
+ Persist business rules that survive across sessions.
251
+ Rules are re-read on every generation call — update a rule, next call respects it immediately.
252
+
253
+ ```python
254
+ forge.memory.add_rule("financial", "Users under 18 get restricted account type")
255
+ forge.memory.add_rule("user", "Israeli phone numbers use format 05x-xxx-xxxx")
256
+ forge.memory.add_rule("orders", "Max 3 active loans per customer at any time")
257
+
258
+ # Rules inject into AI prompts automatically
259
+ users = forge.create_batch(User, count=50, context="Israeli SaaS platform")
260
+ ```
261
+
262
+ **Skeptical Memory** — rules are hints, not truth. FixtureForge validates stored rules against the live schema before every generation call.
263
+
264
+ **Progressive Forgetting** — field names and types are never stored (re-derivable from the model). Only business rules that exist nowhere else in the code are kept.
265
+
266
+ ---
267
+
268
+ ## ForgeDream — Coverage Analysis
269
+
270
+ Find gaps in your test-data coverage automatically:
271
+
272
+ ```python
273
+ import os
274
+ os.environ["FORGE_FLAG_DREAM"] = "1"
275
+
276
+ report = forge.dream(models=[User, Order], force=True)
277
+ print(report.summary())
278
+
279
+ # ForgeDream Report - 2026-04-08
280
+ # Coverage gaps found : 3
281
+ # Rule conflicts found : 0
282
+ # Top gaps:
283
+ # [User.age] no_boundary : No boundary-value rules for numeric field 'age'
284
+ # [User.email] no_invalid : No invalid-data rules for well-known field 'email'
285
+ # [Order.total] no_boundary: No boundary-value rules for numeric field 'total'
286
+ ```
287
+
288
+ Four phases: **Orient** (read index) → **Gather** (find gaps) → **Consolidate** (merge rules) → **Prune** (trim to ≤200 lines).
289
+
290
+ Report saved as `.forge/coverage_gaps.json`.
291
+
292
+ ---
293
+
294
+ ## Streaming — Memory-Safe Large Datasets
295
+
296
+ ```python
297
+ # Lazy evaluation — writes to disk one record at a time
298
+ for user in forge.create_stream(User, count=1_000_000, filename="users.json"):
299
+ pass # process one record, never loads all into memory
300
+ ```
301
+
302
+ Supports `.json`, `.csv`, `.sql` output formats.
303
+
304
+ ---
305
+
306
+ ## Export
307
+
308
+ ```python
309
+ from fixtureforge.core.exporter import DataExporter
310
+
311
+ users = forge.create_batch(User, count=100)
312
+ DataExporter.to_json(users, "users.json")
313
+ DataExporter.to_csv(users, "users.csv")
314
+ DataExporter.to_sql(users, "users.sql", table_name="users")
315
+ ```
316
+
317
+ ---
318
+
319
+ ## Response Cache
320
+
321
+ AI responses are cached locally for 7 days. Identical requests cost nothing after the first call.
322
+
323
+ ```python
324
+ forge = Forge(use_cache=True) # default — saves to ~/.fixtureforge/cache/
325
+ forge = Forge(use_cache=False) # disable caching
326
+ ```
327
+
328
+ ---
329
+
330
+ ## Feature Flags
331
+
332
+ ```python
333
+ from fixtureforge.config import is_enabled, flag_summary
334
+
335
+ flag_summary()
336
+ # {
337
+ # 'FORGE_SWARMS': True, # shipped
338
+ # 'FORGE_PERMISSIONS': True, # shipped
339
+ # 'FORGE_COMPRESSION': True, # shipped
340
+ # 'FORGE_MCP': True, # shipped
341
+ # 'FORGE_DREAM': False, # enable with FORGE_FLAG_DREAM=1
342
+ # 'FORGE_KAIROS': False, # coming in v2.x
343
+ # 'FORGE_ULTRAPLAN': False, # coming in v2.x
344
+ # }
345
+ ```
346
+
347
+ Enable any staged feature with an env var:
348
+
349
+ ```bash
350
+ FORGE_FLAG_DREAM=1 python run_tests.py
351
+ ```
352
+
353
+ ---
354
+
355
+ ## Stats & Diagnostics
356
+
357
+ ```python
358
+ forge.stats()
359
+ # {
360
+ # "registry": {"user": 50, "order": 200},
361
+ # "session_tokens": 1240,
362
+ # "memory": {"topics": 3, "total_kb": 2.4},
363
+ # "flags": {"FORGE_SWARMS": True, "FORGE_PERMISSIONS": True}
364
+ # }
365
+
366
+ forge.clear_registry() # reset FK registry between independent test scenarios
367
+ ```
368
+
369
+ ---
370
+
371
+ ## Architecture
372
+
373
+ ```
374
+ FixtureForge v2.0
375
+ ├── Config Layer feature flags, env-var overrides
376
+ ├── Security Layer safe / sensitive / dangerous gates, mailbox pattern
377
+ ├── Memory Layer FORGE.md pointer index, on-demand topic files
378
+ ├── Generation Layer IntelligentRouter, SmartBatchEngine, DataSwarms
379
+ ├── Compression Layer Micro → Auto → Full (three-layer pipeline)
380
+ ├── Export Layer JSON / CSV / SQL / streaming
381
+ └── Background Layer ForgeDream coverage analysis (feature-flagged)
382
+ ```
383
+
384
+ **Provider-agnostic**: Claude, GPT, Gemini, Groq, Ollama, or no AI at all.
385
+ **Pydantic v2 native**: full support for `@computed_field`, validators, and constrained types.
386
+ **CI-safe**: `seed=` parameter guarantees identical output across runs.
387
+
388
+ ---
389
+
390
+ ## Comparison
391
+
392
+ | | FixtureForge | factory_boy | faker | hypothesis |
393
+ |---|---|---|---|---|
394
+ | AI-generated context | Yes | No | No | No |
395
+ | Deterministic (seed=) | Yes | Yes | Yes | Yes |
396
+ | FK relationships | Auto | Manual | No | No |
397
+ | Coverage analysis | Yes | No | No | Partial |
398
+ | CI-safe mode | Yes | Yes | Yes | Yes |
399
+ | Large datasets | Yes (100k+) | Manual | Manual | No |
400
+ | Permission gates | Yes | No | No | No |
401
+
402
+ FixtureForge is not a replacement for `faker` — it uses `faker` internally. It's not a replacement for `hypothesis` — it solves a different problem. It adds the layer between "I need realistic data" and "I need it to feel like production".
403
+
404
+ ---
405
+
406
+ ## Requirements
407
+
408
+ - Python 3.11+
409
+ - pydantic >= 2.5
410
+ - faker >= 22.0
411
+
412
+ AI providers are optional extras — the core runs without any AI SDK installed, needing only lightweight base dependencies (pydantic, faker, pyyaml, click, rich, requests).
413
+
414
+ ---
415
+
416
+ ## License
417
+
418
+ MIT — see [LICENSE](LICENSE).
419
+
420
+ ---
421
+
422
+ ## Links
423
+
424
+ - **PyPI**: https://pypi.org/project/fixtureforge/
425
+ - **Repository**: https://github.com/Yaniv2809/fixtureforge
426
+ - **Issues**: https://github.com/Yaniv2809/fixtureforge/issues
427
+
@@ -0,0 +1,392 @@
1
+ # FixtureForge
2
+
3
+ **Agentic Test Data Harness for Python.**
4
+ Generate realistic, context-aware fixtures — deterministic in CI, AI-powered in development.
5
+
6
+ [![PyPI version](https://img.shields.io/pypi/v/fixtureforge.svg)](https://pypi.org/project/fixtureforge/)
7
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
8
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
9
+
10
+ ---
11
+
12
+ ## The Problem
13
+
14
+ ```python
15
+ # This is what most test data looks like:
16
+ user = User(name="Test User", email="test@test.com", bio="Lorem ipsum...")
17
+
18
+ # It doesn't catch real-world edge cases.
19
+ # It doesn't feel like production data.
20
+ # And writing 500 of them by hand? Not happening.
21
+ ```
22
+
23
+ FixtureForge solves this in two modes:
24
+
25
+ ```python
26
+ # CI mode — deterministic, zero AI, seed-controlled. Same seed = same data. Always.
27
+ forge = Forge(use_ai=False, seed=42)
28
+ users = forge.create_batch(User, count=500)
29
+
30
+ # Dev mode — AI-generated, context-aware, realistic
31
+ forge = Forge()
32
+ reviews = forge.create_batch(Review, count=50, context="angry holiday customers")
33
+ ```
34
+
35
+ ---
36
+
37
+ ## Installation
38
+
39
+ ```bash
40
+ pip install fixtureforge
41
+ ```
42
+
43
+ With your preferred AI provider:
44
+
45
+ ```bash
46
+ pip install "fixtureforge[anthropic]" # Claude
47
+ pip install "fixtureforge[openai]" # GPT
48
+ pip install "fixtureforge[gemini]" # Google Gemini
49
+ pip install "fixtureforge[all]" # All providers
50
+ ```
51
+
52
+ ---
53
+
54
+ ## Quick Start
55
+
56
+ ```python
57
+ from fixtureforge import Forge
58
+ from pydantic import BaseModel
59
+
60
+ class User(BaseModel):
61
+ id: int
62
+ name: str
63
+ email: str
64
+ bio: str
65
+
66
+ forge = Forge() # auto-detects provider from env vars
67
+ users = forge.create_batch(User, count=50, context="SaaS platform users")
68
+ ```
69
+
70
+ That's it. FixtureForge:
71
+ - Assigns sequential IDs automatically
72
+ - Generates `name` and `email` with Faker (zero API cost)
73
+ - Sends only `bio` to the AI — in a single batch call for all 50 records
74
+
75
+ ---
76
+
77
+ ## Core Concepts
78
+
79
+ ### Intelligent Field Routing
80
+
81
+ Every field is classified into a tier. Only semantic fields hit the AI:
82
+
83
+ | Tier | Fields | Generator | Cost |
84
+ |------|--------|-----------|------|
85
+ | **Structural** | `id`, `user_id`, `order_id` | Internal counters / FK registry | Free |
86
+ | **Standard** | `name`, `email`, `phone`, `address`, `date` | Faker | Free |
87
+ | **Computed** | `@computed_field` properties | Pydantic | Free |
88
+ | **Semantic** | `bio`, `description`, `review`, `message` | LLM (batched) | API tokens |
89
+
90
+ 100 users with 2 semantic fields = **2 API calls**, not 200.
91
+
92
+ ### CI Mode vs Dev Mode
93
+
94
+ ```python
95
+ # CI — fully deterministic, no network, reproducible
96
+ forge = Forge(use_ai=False, seed=42)
97
+
98
+ # Dev — AI-powered, realistic context
99
+ forge = Forge(provider_name="anthropic", model="claude-haiku-4-5-20251001")
100
+
101
+ # Large datasets — seed+interpolation, constant cost regardless of count
102
+ forge.create_large(Order, count=100_000, seed_ratio=0.01) # pays for ~1k, delivers 100k
103
+ ```
104
+
105
+ ### Verbose Mode
106
+
107
+ See exactly where each value comes from:
108
+
109
+ ```python
110
+ forge = Forge(use_ai=False, seed=42, verbose=True)
111
+ user = forge.create(User)
112
+
113
+ # [structural] id = 1
114
+ # [faker] name = 'Allison Hill'
115
+ # [faker] email = 'donaldgarcia@example.net'
116
+ # [ai] bio = 'Passionate developer with 8 years...'
117
+ ```
118
+
119
+ ---
120
+
121
+ ## Providers
122
+
123
+ FixtureForge auto-detects your provider from environment variables:
124
+
125
+ ```bash
126
+ export ANTHROPIC_API_KEY=... # → Claude (default: claude-haiku-4-5-20251001)
127
+ export OPENAI_API_KEY=... # → GPT (default: gpt-4o-mini)
128
+ export GOOGLE_API_KEY=... # → Gemini (default: gemini-2.0-flash)
129
+ export GROQ_API_KEY=... # → Groq (default: llama-3.3-70b-versatile)
130
+ # No key? → Ollama (localhost:11434) → Deterministic-only
131
+ ```
132
+
133
+ Or be explicit:
134
+
135
+ ```python
136
+ forge = Forge(provider_name="anthropic", model="claude-sonnet-4-6")
137
+ forge = Forge(provider_name="ollama", model="llama3.2")
138
+ forge = Forge(use_ai=False) # zero cost, zero network
139
+ ```
140
+
141
+ ---
142
+
143
+ ## Foreign Key Relationships
144
+
145
+ Register parent records first — child FKs resolve automatically:
146
+
147
+ ```python
148
+ # Step 1: generate customers
149
+ customers = forge.create_batch(Customer, count=10)
150
+
151
+ # Step 2: orders automatically reference real customer IDs
152
+ orders = forge.create_batch(Order, count=100)
153
+ # order.customer_id → always a valid customer.id
154
+ ```
155
+
156
+ ---
157
+
158
+ ## DataSwarms — Parallel Multi-Model Generation
159
+
160
+ Generate multiple models in parallel with shared AI cache.
161
+ The first model warms the cache; every subsequent model inherits it (~90% cheaper per model).
162
+
163
+ ```python
164
+ results = forge.swarm(
165
+ models=[User, Order, Product, Payment],
166
+ counts=[10, 50, 100, 30],
167
+ contexts=["SaaS users", "E-commerce orders", None, None],
168
+ )
169
+
170
+ # returns:
171
+ # {
172
+ # "User": [...10 users...],
173
+ # "Order": [...50 orders...],
174
+ # "Product": [...100 products...],
175
+ # "Payment": [...30 payments...],
176
+ # }
177
+ ```
178
+
179
+ 5 models ≈ cost of 1.5 models.
180
+
181
+ ---
182
+
183
+ ## Permission Gates
184
+
185
+ FixtureForge classifies models by data sensitivity and gates dangerous operations:
186
+
187
+ ```python
188
+ class SafeUser(BaseModel):
189
+ id: int
190
+ name: str # SAFE — auto-approved
191
+
192
+ class CustomerProfile(BaseModel):
193
+ id: int
194
+ ssn: str # SENSITIVE — requires FORGE_ALLOW_PII=1
195
+ salary: float # SENSITIVE
196
+
197
+ class SecurityTest(BaseModel):
198
+ id: int
199
+ sql_injection: str # DANGEROUS — requires interactive confirmation
200
+ ```
201
+
202
+ ```python
203
+ # PII auto-approved
204
+ forge = Forge(allow_pii=True)
205
+
206
+ # CI/headless — dangerous ops silently rejected
207
+ forge = Forge(interactive=False)
208
+ ```
209
+
210
+ Three levels: `safe` (auto) → `sensitive` (env gate) → `dangerous` (human prompt).
211
+
212
+ ---
213
+
214
+ ## Domain Rules — ForgeMemory
215
+
216
+ Persist business rules that survive across sessions.
217
+ Rules are re-read on every generation call — update a rule, next call respects it immediately.
218
+
219
+ ```python
220
+ forge.memory.add_rule("financial", "Users under 18 get restricted account type")
221
+ forge.memory.add_rule("user", "Israeli phone numbers use format 05x-xxx-xxxx")
222
+ forge.memory.add_rule("orders", "Max 3 active loans per customer at any time")
223
+
224
+ # Rules inject into AI prompts automatically
225
+ users = forge.create_batch(User, count=50, context="Israeli SaaS platform")
226
+ ```
227
+
228
+ **Skeptical Memory** — rules are hints, not truth. FixtureForge validates stored rules against the live schema before every generation call.
229
+
230
+ **Progressive Forgetting** — field names and types are never stored (re-derivable from the model). Only business rules that exist nowhere else in the code are kept.
231
+
232
+ ---
233
+
234
+ ## ForgeDream — Coverage Analysis
235
+
236
+ Find gaps in your test-data coverage automatically:
237
+
238
+ ```python
239
+ import os
240
+ os.environ["FORGE_FLAG_DREAM"] = "1"
241
+
242
+ report = forge.dream(models=[User, Order], force=True)
243
+ print(report.summary())
244
+
245
+ # ForgeDream Report - 2026-04-08
246
+ # Coverage gaps found : 3
247
+ # Rule conflicts found : 0
248
+ # Top gaps:
249
+ # [User.age] no_boundary : No boundary-value rules for numeric field 'age'
250
+ # [User.email] no_invalid : No invalid-data rules for well-known field 'email'
251
+ # [Order.total] no_boundary: No boundary-value rules for numeric field 'total'
252
+ ```
253
+
254
+ Four phases: **Orient** (read index) → **Gather** (find gaps) → **Consolidate** (merge rules) → **Prune** (trim to ≤200 lines).
255
+
256
+ Report saved as `.forge/coverage_gaps.json`.
257
+
258
+ ---
259
+
260
+ ## Streaming — Memory-Safe Large Datasets
261
+
262
+ ```python
263
+ # Lazy evaluation — writes to disk one record at a time
264
+ for user in forge.create_stream(User, count=1_000_000, filename="users.json"):
265
+ pass # process one record, never loads all into memory
266
+ ```
267
+
268
+ Supports `.json`, `.csv`, `.sql` output formats.
269
+
270
+ ---
271
+
272
+ ## Export
273
+
274
+ ```python
275
+ from fixtureforge.core.exporter import DataExporter
276
+
277
+ users = forge.create_batch(User, count=100)
278
+ DataExporter.to_json(users, "users.json")
279
+ DataExporter.to_csv(users, "users.csv")
280
+ DataExporter.to_sql(users, "users.sql", table_name="users")
281
+ ```
282
+
283
+ ---
284
+
285
+ ## Response Cache
286
+
287
+ AI responses are cached locally for 7 days. Identical requests cost nothing after the first call.
288
+
289
+ ```python
290
+ forge = Forge(use_cache=True) # default — saves to ~/.fixtureforge/cache/
291
+ forge = Forge(use_cache=False) # disable caching
292
+ ```
293
+
294
+ ---
295
+
296
+ ## Feature Flags
297
+
298
+ ```python
299
+ from fixtureforge.config import is_enabled, flag_summary
300
+
301
+ flag_summary()
302
+ # {
303
+ # 'FORGE_SWARMS': True, # shipped
304
+ # 'FORGE_PERMISSIONS': True, # shipped
305
+ # 'FORGE_COMPRESSION': True, # shipped
306
+ # 'FORGE_MCP': True, # shipped
307
+ # 'FORGE_DREAM': False, # enable with FORGE_FLAG_DREAM=1
308
+ # 'FORGE_KAIROS': False, # coming in v2.x
309
+ # 'FORGE_ULTRAPLAN': False, # coming in v2.x
310
+ # }
311
+ ```
312
+
313
+ Enable any staged feature with an env var:
314
+
315
+ ```bash
316
+ FORGE_FLAG_DREAM=1 python run_tests.py
317
+ ```
318
+
319
+ ---
320
+
321
+ ## Stats & Diagnostics
322
+
323
+ ```python
324
+ forge.stats()
325
+ # {
326
+ # "registry": {"user": 50, "order": 200},
327
+ # "session_tokens": 1240,
328
+ # "memory": {"topics": 3, "total_kb": 2.4},
329
+ # "flags": {"FORGE_SWARMS": True, "FORGE_PERMISSIONS": True}
330
+ # }
331
+
332
+ forge.clear_registry() # reset FK registry between independent test scenarios
333
+ ```
334
+
335
+ ---
336
+
337
+ ## Architecture
338
+
339
+ ```
340
+ FixtureForge v2.0
341
+ ├── Config Layer feature flags, env-var overrides
342
+ ├── Security Layer safe / sensitive / dangerous gates, mailbox pattern
343
+ ├── Memory Layer FORGE.md pointer index, on-demand topic files
344
+ ├── Generation Layer IntelligentRouter, SmartBatchEngine, DataSwarms
345
+ ├── Compression Layer Micro → Auto → Full (three-layer pipeline)
346
+ ├── Export Layer JSON / CSV / SQL / streaming
347
+ └── Background Layer ForgeDream coverage analysis (feature-flagged)
348
+ ```
349
+
350
+ **Provider-agnostic**: Claude, GPT, Gemini, Groq, Ollama, or no AI at all.
351
+ **Pydantic v2 native**: full support for `@computed_field`, validators, and constrained types.
352
+ **CI-safe**: `seed=` parameter guarantees identical output across runs.
353
+
354
+ ---
355
+
356
+ ## Comparison
357
+
358
+ | | FixtureForge | factory_boy | faker | hypothesis |
359
+ |---|---|---|---|---|
360
+ | AI-generated context | Yes | No | No | No |
361
+ | Deterministic (seed=) | Yes | Yes | Yes | Yes |
362
+ | FK relationships | Auto | Manual | No | No |
363
+ | Coverage analysis | Yes | No | No | Partial |
364
+ | CI-safe mode | Yes | Yes | Yes | Yes |
365
+ | Large datasets | Yes (100k+) | Manual | Manual | No |
366
+ | Permission gates | Yes | No | No | No |
367
+
368
+ FixtureForge is not a replacement for `faker` — it uses `faker` internally. It's not a replacement for `hypothesis` — it solves a different problem. It adds the layer between "I need realistic data" and "I need it to feel like production".
369
+
370
+ ---
371
+
372
+ ## Requirements
373
+
374
+ - Python 3.11+
375
+ - pydantic >= 2.5
376
+ - faker >= 22.0
377
+
378
+ AI providers are optional extras — the core runs without any AI SDK installed, needing only lightweight base dependencies (pydantic, faker, pyyaml, click, rich, requests).
379
+
380
+ ---
381
+
382
+ ## License
383
+
384
+ MIT — see [LICENSE](LICENSE).
385
+
386
+ ---
387
+
388
+ ## Links
389
+
390
+ - **PyPI**: https://pypi.org/project/fixtureforge/
391
+ - **Repository**: https://github.com/Yaniv2809/fixtureforge
392
+ - **Issues**: https://github.com/Yaniv2809/fixtureforge/issues
@@ -1,78 +1,78 @@
1
- [tool.poetry]
2
- name = "fixtureforge"
3
- version = "2.0.0"
4
- description = "Agentic Test Data Harness: memory, multi-agent swarms, permission gates, coverage analysis. Provider-agnostic (Gemini, OpenAI, Anthropic, Ollama)."
5
- authors = ["Yaniv Metuku"]
6
- license = "MIT"
7
- readme = "README.md"
8
- homepage = "https://fixtureforge.dev"
9
- repository = "https://github.com/Yaniv2809/fixtureforge"
10
- keywords = ["testing", "fixtures", "test-data", "qa", "automation", "synthetic-data", "llm"]
11
-
12
- # ---------------------------------------------------------------------------
13
- # Core dependencies — always installed, no AI required
14
- # ---------------------------------------------------------------------------
15
- [tool.poetry.dependencies]
16
- python = "^3.11"
17
- pydantic = "^2.5.0"
18
- faker = "^22.0.0"
19
- pyyaml = "^6.0"
20
- click = "^8.1.0"
21
- rich = "^13.7.0"
22
- requests = "^2.31.0" # used by OllamaProvider + general HTTP
23
-
24
- # SQLAlchemy is optional but common enough to keep as a soft dependency
25
- sqlalchemy = { version = "^2.0.0", optional = true }
26
-
27
- # ---------------------------------------------------------------------------
28
- # AI provider extras — install only what you need
29
- #
30
- # pip install fixtureforge[gemini] → Google Gemini
31
- # pip install fixtureforge[openai] → OpenAI / Azure OpenAI
32
- # pip install fixtureforge[anthropic] → Anthropic Claude
33
- # pip install fixtureforge[all] → all cloud providers
34
- #
35
- # Ollama (local) needs no extra pip package — just run Ollama locally.
36
- # ---------------------------------------------------------------------------
37
- google-genai = { version = "^1.0.0", optional = true }
38
- openai = { version = "^1.0.0", optional = true }
39
- anthropic = { version = "^0.18.0", optional = true }
40
-
41
- [tool.poetry.extras]
42
- gemini = ["google-genai"]
43
- openai = ["openai"]
44
- anthropic = ["anthropic"]
45
- sql = ["sqlalchemy"]
46
- all = ["google-genai", "openai", "anthropic", "sqlalchemy"]
47
-
48
- # ---------------------------------------------------------------------------
49
- # Development dependencies
50
- # ---------------------------------------------------------------------------
51
- [tool.poetry.group.dev.dependencies]
52
- pytest = "^7.4.0"
53
- pytest-asyncio = "^0.23.0"
54
- pytest-cov = "^4.1.0"
55
- black = "^23.12.0"
56
- ruff = "^0.1.9"
57
- mypy = "^1.8.0"
58
-
59
- # ---------------------------------------------------------------------------
60
- # CLI entry point
61
- # ---------------------------------------------------------------------------
62
- [tool.poetry.scripts]
63
- forge = "fixtureforge.cli.commands:cli"
64
-
65
- [build-system]
66
- requires = ["poetry-core"]
67
- build-backend = "poetry.core.masonry.api"
68
-
69
- # ---------------------------------------------------------------------------
70
- # Ruff (linting) config
71
- # ---------------------------------------------------------------------------
72
- [tool.ruff]
73
- line-length = 100
74
- target-version = "py311"
75
-
76
- [tool.ruff.lint]
77
- select = ["E", "F", "I", "UP"]
78
- ignore = ["E501"]
1
+ [tool.poetry]
2
+ name = "fixtureforge"
3
+ version = "2.0.2"
4
+ description = "Agentic Test Data Harness: memory, multi-agent swarms, permission gates, coverage analysis. Provider-agnostic (Gemini, OpenAI, Anthropic, Ollama)."
5
+ authors = ["Yaniv Metuku"]
6
+ license = "MIT"
7
+ readme = "README.md"
8
+ homepage = "https://fixtureforge.dev"
9
+ repository = "https://github.com/Yaniv2809/fixtureforge"
10
+ keywords = ["testing", "fixtures", "test-data", "qa", "automation", "synthetic-data", "llm"]
11
+
12
+ # ---------------------------------------------------------------------------
13
+ # Core dependencies — always installed, no AI required
14
+ # ---------------------------------------------------------------------------
15
+ [tool.poetry.dependencies]
16
+ python = "^3.11"
17
+ pydantic = "^2.5.0"
18
+ faker = "^22.0.0"
19
+ pyyaml = "^6.0"
20
+ click = "^8.1.0"
21
+ rich = "^13.7.0"
22
+ requests = "^2.31.0" # used by OllamaProvider + general HTTP
23
+
24
+ # SQLAlchemy is optional but common enough to keep as a soft dependency
25
+ sqlalchemy = { version = "^2.0.0", optional = true }
26
+
27
+ # ---------------------------------------------------------------------------
28
+ # AI provider extras — install only what you need
29
+ #
30
+ # pip install fixtureforge[gemini] → Google Gemini
31
+ # pip install fixtureforge[openai] → OpenAI / Azure OpenAI
32
+ # pip install fixtureforge[anthropic] → Anthropic Claude
33
+ # pip install fixtureforge[all] → all cloud providers
34
+ #
35
+ # Ollama (local) needs no extra pip package — just run Ollama locally.
36
+ # ---------------------------------------------------------------------------
37
+ google-genai = { version = "^1.0.0", optional = true }
38
+ openai = { version = "^1.0.0", optional = true }
39
+ anthropic = { version = "^0.18.0", optional = true }
40
+
41
+ [tool.poetry.extras]
42
+ gemini = ["google-genai"]
43
+ openai = ["openai"]
44
+ anthropic = ["anthropic"]
45
+ sql = ["sqlalchemy"]
46
+ all = ["google-genai", "openai", "anthropic", "sqlalchemy"]
47
+
48
+ # ---------------------------------------------------------------------------
49
+ # Development dependencies
50
+ # ---------------------------------------------------------------------------
51
+ [tool.poetry.group.dev.dependencies]
52
+ pytest = "^7.4.0"
53
+ pytest-asyncio = "^0.23.0"
54
+ pytest-cov = "^4.1.0"
55
+ black = "^23.12.0"
56
+ ruff = "^0.1.9"
57
+ mypy = "^1.8.0"
58
+
59
+ # ---------------------------------------------------------------------------
60
+ # CLI entry point
61
+ # ---------------------------------------------------------------------------
62
+ [tool.poetry.scripts]
63
+ forge = "fixtureforge.cli.commands:cli"
64
+
65
+ [build-system]
66
+ requires = ["poetry-core"]
67
+ build-backend = "poetry.core.masonry.api"
68
+
69
+ # ---------------------------------------------------------------------------
70
+ # Ruff (linting) config
71
+ # ---------------------------------------------------------------------------
72
+ [tool.ruff]
73
+ line-length = 100
74
+ target-version = "py311"
75
+
76
+ [tool.ruff.lint]
77
+ select = ["E", "F", "I", "UP"]
78
+ ignore = ["E501"]
@@ -59,7 +59,7 @@ from .security.permissions import (
59
59
  ForgeCoordinator,
60
60
  )
61
61
 
62
- __version__ = "2.0.0"
62
+ __version__ = "2.0.2"
63
63
 
64
64
  T = TypeVar("T", bound=BaseModel)
65
65
 
@@ -103,6 +103,8 @@ class Forge:
103
103
  use_ai: bool = True,
104
104
  use_cache: bool = True,
105
105
  locale: str = "en_US",
106
+ seed: Optional[int] = None,
107
+ verbose: bool = False,
106
108
  allow_pii: Optional[bool] = None,
107
109
  interactive: bool = True,
108
110
  memory_dir: Optional[Path] = None,
@@ -124,15 +126,22 @@ class Forge:
124
126
  **provider_kwargs,
125
127
  )
126
128
  except Exception as exc:
127
- print(f"⚠️ Could not initialise AI provider: {exc}")
129
+ print(f"Warning: Could not initialise AI provider: {exc}")
128
130
  print(" Running in deterministic-only mode.")
129
131
  resolved_provider = None
130
132
 
131
133
  self._provider = resolved_provider
134
+ self._seed = seed
135
+ self._verbose = verbose
132
136
 
133
137
  # ── Core generation stack ────────────────────────────────────────
134
138
  self.ai_engine = AIEngine(provider=resolved_provider, use_cache=use_cache)
135
- self.generator = BasicGenerator(locale=locale, ai_engine=self.ai_engine)
139
+ self.generator = BasicGenerator(
140
+ locale=locale,
141
+ ai_engine=self.ai_engine,
142
+ seed=seed,
143
+ verbose=verbose,
144
+ )
136
145
  self.batch_engine = SmartBatchEngine(
137
146
  generator=self.generator, ai_engine=self.ai_engine
138
147
  )
@@ -197,8 +206,10 @@ class Forge:
197
206
  domain_rules = self.memory.get_rules_for_prompt(model_name=model.__name__)
198
207
 
199
208
  for i in range(count):
200
- if count > 1:
209
+ if count > 1 and not self._verbose:
201
210
  print(f" ...generating {i + 1}/{count}...")
211
+ if self._verbose and count > 1:
212
+ print(f"\n --- record {i + 1}/{count} ---")
202
213
  item = self.generator.generate(model, context=context, **overrides)
203
214
  self._register(model, item)
204
215
  results.append(item)
@@ -29,6 +29,10 @@ class SmartBatchEngine:
29
29
  self.ai_engine = ai_engine
30
30
  self.router = IntelligentRouter()
31
31
 
32
+ @property
33
+ def verbose(self) -> bool:
34
+ return self.generator.verbose
35
+
32
36
  def generate_many(
33
37
  self,
34
38
  model: Type,
@@ -51,13 +55,18 @@ class SmartBatchEngine:
51
55
  # Each call returns a list[str] of length `count`.
52
56
  semantic_pools: Dict[str, List[str]] = {}
53
57
  for field in semantic:
54
- print(f" 🧠 Generating {count} values for '{field.name}' via AI...")
58
+ print(f" [ai] Generating {count} values for '{field.name}'...")
55
59
  semantic_pools[field.name] = self.ai_engine.generate_semantic_batch(
56
60
  field_name=field.name,
57
61
  context=context or "",
58
62
  count=count,
59
63
  )
60
64
 
65
+ if self.verbose and structural:
66
+ print(f" [structural] fields: {[f.name for f in structural]}")
67
+ if self.verbose and standard:
68
+ print(f" [faker] fields: {[f.name for f in standard]}")
69
+
61
70
  # --- Step 2: assemble N instances ---
62
71
  results: List[Any] = []
63
72
  for i in range(count):
@@ -26,6 +26,8 @@ class BasicGenerator:
26
26
  self,
27
27
  locale: str = "en_US",
28
28
  ai_engine: Optional[AIEngine] = None,
29
+ seed: Optional[int] = None,
30
+ verbose: bool = False,
29
31
  # Legacy: accept api_key for backwards compatibility
30
32
  api_key: Optional[str] = None,
31
33
  ):
@@ -33,6 +35,11 @@ class BasicGenerator:
33
35
  self.router = IntelligentRouter()
34
36
  self._id_counters: Dict[str, int] = {}
35
37
  self.registry: Dict[str, List[Any]] = {}
38
+ self.verbose = verbose
39
+
40
+ if seed is not None:
41
+ Faker.seed(seed)
42
+ random.seed(seed)
36
43
 
37
44
  if ai_engine is not None:
38
45
  self.ai_engine = ai_engine
@@ -72,15 +79,26 @@ class BasicGenerator:
72
79
  tier = self.router.classify(field)
73
80
 
74
81
  if tier == FieldTier.STRUCTURAL:
75
- return self._generate_structural(field)
82
+ val = self._generate_structural(field)
83
+ if self.verbose:
84
+ print(f" [structural] {field.name} = {val!r}")
85
+ return val
76
86
 
77
87
  if tier == FieldTier.COMPUTED:
78
- return _SKIP # Pydantic computes this automatically
88
+ if self.verbose:
89
+ print(f" [computed] {field.name} = <pydantic>")
90
+ return _SKIP
79
91
 
80
92
  if tier == FieldTier.SEMANTIC:
81
- return self._generate_semantic_content(field, context)
93
+ val = self._generate_semantic_content(field, context)
94
+ if self.verbose:
95
+ print(f" [ai] {field.name} = {str(val)[:80]!r}")
96
+ return val
82
97
 
83
- return self._generate_standard(field)
98
+ val = self._generate_standard(field)
99
+ if self.verbose:
100
+ print(f" [faker] {field.name} = {val!r}")
101
+ return val
84
102
 
85
103
  # ------------------------------------------------------------------
86
104
  # Structural (IDs / FKs)
@@ -1,50 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: fixtureforge
3
- Version: 2.0.0
4
- Summary: Agentic Test Data Harness: memory, multi-agent swarms, permission gates, coverage analysis. Provider-agnostic (Gemini, OpenAI, Anthropic, Ollama).
5
- License: MIT
6
- License-File: LICENSE
7
- Keywords: testing,fixtures,test-data,qa,automation,synthetic-data,llm
8
- Author: Yaniv Metuku
9
- Requires-Python: >=3.11,<4.0
10
- Classifier: License :: OSI Approved :: MIT License
11
- Classifier: Programming Language :: Python :: 3
12
- Classifier: Programming Language :: Python :: 3.11
13
- Classifier: Programming Language :: Python :: 3.12
14
- Classifier: Programming Language :: Python :: 3.13
15
- Classifier: Programming Language :: Python :: 3.14
16
- Provides-Extra: all
17
- Provides-Extra: anthropic
18
- Provides-Extra: gemini
19
- Provides-Extra: openai
20
- Provides-Extra: sql
21
- Requires-Dist: anthropic (>=0.18.0,<0.19.0) ; extra == "anthropic" or extra == "all"
22
- Requires-Dist: click (>=8.1.0,<9.0.0)
23
- Requires-Dist: faker (>=22.0.0,<23.0.0)
24
- Requires-Dist: google-genai (>=1.0.0,<2.0.0) ; extra == "gemini" or extra == "all"
25
- Requires-Dist: openai (>=1.0.0,<2.0.0) ; extra == "openai" or extra == "all"
26
- Requires-Dist: pydantic (>=2.5.0,<3.0.0)
27
- Requires-Dist: pyyaml (>=6.0,<7.0)
28
- Requires-Dist: requests (>=2.31.0,<3.0.0)
29
- Requires-Dist: rich (>=13.7.0,<14.0.0)
30
- Requires-Dist: sqlalchemy (>=2.0.0,<3.0.0) ; extra == "sql" or extra == "all"
31
- Project-URL: Homepage, https://fixtureforge.dev
32
- Project-URL: Repository, https://github.com/Yaniv2809/fixtureforge
33
- Description-Content-Type: text/markdown
34
-
35
- # FixtureForge 🛠️
36
-
37
- **Generate realistic, AI-powered test data using Google Gemini.**
38
- Stop using "Lorem Ipsum" or "Test User 1". Generate context-aware data for your QA & Development environments.
39
-
40
- ## 🚀 Features
41
-
42
- * **Context-Aware:** Generate "Angry Customers" or "High-Value Orders" using AI.
43
- * **Structured Output:** Exports directly to SQL, JSON, or CSV.
44
- * **Lazy Streaming:** Can generate massive datasets (1GB+) without crashing memory.
45
- * **Smart Relationships:** Automatically links Orders to Customers.
46
-
47
- ## 📦 Installation
48
-
49
- ```bash
50
- pip install fixtureforge
@@ -1,16 +0,0 @@
1
- # FixtureForge 🛠️
2
-
3
- **Generate realistic, AI-powered test data using Google Gemini.**
4
- Stop using "Lorem Ipsum" or "Test User 1". Generate context-aware data for your QA & Development environments.
5
-
6
- ## 🚀 Features
7
-
8
- * **Context-Aware:** Generate "Angry Customers" or "High-Value Orders" using AI.
9
- * **Structured Output:** Exports directly to SQL, JSON, or CSV.
10
- * **Lazy Streaming:** Can generate massive datasets (1GB+) without crashing memory.
11
- * **Smart Relationships:** Automatically links Orders to Customers.
12
-
13
- ## 📦 Installation
14
-
15
- ```bash
16
- pip install fixtureforge
File without changes