plainstamp 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. package/AI-DISCLOSURE.md +39 -0
  2. package/CHANGELOG.md +57 -0
  3. package/LICENSE +21 -0
  4. package/README.md +179 -0
  5. package/dist/cli.d.ts +3 -0
  6. package/dist/cli.d.ts.map +1 -0
  7. package/dist/cli.js +147 -0
  8. package/dist/cli.js.map +1 -0
  9. package/dist/coverage.d.ts +48 -0
  10. package/dist/coverage.d.ts.map +1 -0
  11. package/dist/coverage.js +96 -0
  12. package/dist/coverage.js.map +1 -0
  13. package/dist/index.d.ts +27 -0
  14. package/dist/index.d.ts.map +1 -0
  15. package/dist/index.js +37 -0
  16. package/dist/index.js.map +1 -0
  17. package/dist/lookup.d.ts +42 -0
  18. package/dist/lookup.d.ts.map +1 -0
  19. package/dist/lookup.js +170 -0
  20. package/dist/lookup.js.map +1 -0
  21. package/dist/mcp-server.d.ts +3 -0
  22. package/dist/mcp-server.d.ts.map +1 -0
  23. package/dist/mcp-server.js +199 -0
  24. package/dist/mcp-server.js.map +1 -0
  25. package/dist/rules-loader.d.ts +10 -0
  26. package/dist/rules-loader.d.ts.map +1 -0
  27. package/dist/rules-loader.js +23 -0
  28. package/dist/rules-loader.js.map +1 -0
  29. package/dist/schema.d.ts +526 -0
  30. package/dist/schema.d.ts.map +1 -0
  31. package/dist/schema.js +96 -0
  32. package/dist/schema.js.map +1 -0
  33. package/dist/watcher/cli.d.ts +3 -0
  34. package/dist/watcher/cli.d.ts.map +1 -0
  35. package/dist/watcher/cli.js +47 -0
  36. package/dist/watcher/cli.js.map +1 -0
  37. package/dist/watcher/index.d.ts +23 -0
  38. package/dist/watcher/index.d.ts.map +1 -0
  39. package/dist/watcher/index.js +71 -0
  40. package/dist/watcher/index.js.map +1 -0
  41. package/dist/watcher/sources/federal-register.d.ts +13 -0
  42. package/dist/watcher/sources/federal-register.d.ts.map +1 -0
  43. package/dist/watcher/sources/federal-register.js +44 -0
  44. package/dist/watcher/sources/federal-register.js.map +1 -0
  45. package/dist/watcher/sources/url-monitor.d.ts +33 -0
  46. package/dist/watcher/sources/url-monitor.d.ts.map +1 -0
  47. package/dist/watcher/sources/url-monitor.js +67 -0
  48. package/dist/watcher/sources/url-monitor.js.map +1 -0
  49. package/dist/watcher/state-store.d.ts +9 -0
  50. package/dist/watcher/state-store.d.ts.map +1 -0
  51. package/dist/watcher/state-store.js +23 -0
  52. package/dist/watcher/state-store.js.map +1 -0
  53. package/dist/watcher/types.d.ts +59 -0
  54. package/dist/watcher/types.d.ts.map +1 -0
  55. package/dist/watcher/types.js +14 -0
  56. package/dist/watcher/types.js.map +1 -0
  57. package/package.json +60 -0
  58. package/rules/seed.json +620 -0
@@ -0,0 +1,620 @@
1
+ {
2
+ "schema_version": 1,
3
+ "generated_at": "2026-05-08",
4
+ "rules": [
5
+ {
6
+ "id": "us-ca-bot-disclosure-17941",
7
+ "jurisdiction": "us-ca",
8
+ "channels": ["live-chat", "voice", "video-avatar"],
9
+ "use_cases": [
10
+ "b2c-customer-support",
11
+ "b2c-marketing",
12
+ "b2c-sales",
13
+ "civic-or-electoral"
14
+ ],
15
+ "severity": "mandatory",
16
+ "short_title": "California bot disclosure (B&P § 17941)",
17
+ "summary": "California makes it unlawful for any person to use a bot to communicate or interact with another person in California online with the intent to mislead the other person about its artificial identity for the purpose of knowingly deceiving the person about the content of the communication in order to incentivize a purchase or sale of goods or services in a commercial transaction or to influence a vote in an election. The disclosure must be clear, conspicuous, and reasonably designed to inform persons with whom the bot communicates or interacts that it is a bot.",
18
+ "required_elements": [
19
+ {
20
+ "id": "bot-identity",
21
+ "description": "Clear, conspicuous statement that the communicator is a bot (i.e. not a natural person).",
22
+ "required": true,
23
+ "example": "You are chatting with an automated assistant, not a human."
24
+ },
25
+ {
26
+ "id": "reasonably-designed",
27
+ "description": "Disclosure must be reasonably designed to inform a reasonable person under the circumstances. (Meta-requirement on the design of the disclosure surface, not on its text content; not validated by substring check.)",
28
+ "required": false
29
+ }
30
+ ],
31
+ "citation": {
32
+ "statute": "California Business and Professions Code",
33
+ "section": "§ 17941",
34
+ "source_url": "https://leginfo.legislature.ca.gov/faces/codes_displaySection.xhtml?lawCode=BPC&sectionNum=17941",
35
+ "publisher": "California Legislative Information"
36
+ },
37
+ "effective_date": "2019-07-01",
38
+ "last_verified": "2026-05-08",
39
+ "template": {
40
+ "plain": "You are chatting with an automated AI assistant, not a human. This conversation may be used to help us improve the service.",
41
+ "formal": "Notice: This communication is conducted by an automated bot operated under California Business and Professions Code § 17941. You are not communicating with a natural person."
42
+ },
43
+ "notes": "Statute applies when the bot intends to mislead about artificial identity for commercial or electoral purposes. Best practice for autonomous AI agent operations is to disclose by default on first contact regardless of intent, since intent is hard to demonstrate after the fact. The statute's safe harbor requires the disclosure be clear and conspicuous."
44
+ },
45
+ {
46
+ "id": "eu-ai-act-art50-chatbot",
47
+ "jurisdiction": "eu",
48
+ "channels": ["live-chat", "voice", "video-avatar"],
49
+ "use_cases": [
50
+ "b2c-customer-support",
51
+ "b2b-customer-support",
52
+ "b2c-marketing",
53
+ "b2b-marketing",
54
+ "b2c-sales",
55
+ "b2b-sales",
56
+ "general"
57
+ ],
58
+ "severity": "mandatory",
59
+ "short_title": "EU AI Act Article 50(1) — chatbot disclosure",
60
+ "summary": "Providers of AI systems intended to interact directly with natural persons must design and develop them so that the natural persons concerned are informed that they are interacting with an AI system, unless that fact is obvious from the point of view of a reasonably well-informed person taking into account the circumstances and the context of use.",
61
+ "required_elements": [
62
+ {
63
+ "id": "ai-identity",
64
+ "description": "Inform the natural person that they are interacting with an AI system.",
65
+ "required": true,
66
+ "example": "You are interacting with an AI assistant."
67
+ },
68
+ {
69
+ "id": "design-by-default",
70
+ "description": "Disclosure must be built into the design and development of the system, not bolted on per-deployment. (Meta-requirement on engineering process, not on disclosure text; not validated by substring check.)",
71
+ "required": false
72
+ }
73
+ ],
74
+ "citation": {
75
+ "statute": "Regulation (EU) 2024/1689 (AI Act)",
76
+ "section": "Article 50(1)",
77
+ "source_url": "https://eur-lex.europa.eu/eli/reg/2024/1689/oj",
78
+ "publisher": "Official Journal of the European Union"
79
+ },
80
+ "effective_date": "2026-08-02",
81
+ "last_verified": "2026-05-08",
82
+ "template": {
83
+ "plain": "You are interacting with an AI system. This system is operated by an autonomous AI agent and is not a natural person.",
84
+ "formal": "In compliance with Article 50(1) of Regulation (EU) 2024/1689 (the AI Act), please be informed that this interaction is conducted by an AI system, not a natural person."
85
+ },
86
+ "notes": "Article 50 transparency obligations begin to apply on 2 August 2026 per the AI Act's staggered effective dates. The 'obvious from the point of view of a reasonably well-informed person' carve-out is narrow — autonomous AI ventures should disclose by default on first contact."
87
+ },
88
+ {
89
+ "id": "eu-ai-act-art50-genai-content",
90
+ "jurisdiction": "eu",
91
+ "channels": [
92
+ "ai-generated-content",
93
+ "ai-generated-image",
94
+ "ai-generated-video",
95
+ "ai-generated-audio"
96
+ ],
97
+ "use_cases": [
98
+ "b2c-marketing",
99
+ "b2b-marketing",
100
+ "b2c-sales",
101
+ "b2b-sales",
102
+ "general"
103
+ ],
104
+ "severity": "mandatory",
105
+ "short_title": "EU AI Act Article 50(2) — AI-generated content labeling",
106
+ "summary": "Providers of AI systems, including general-purpose AI systems, generating synthetic audio, image, video or text content, shall ensure that the outputs of the AI system are marked in a machine-readable format and detectable as artificially generated or manipulated. Providers shall ensure their technical solutions are effective, interoperable, robust and reliable as far as this is technically feasible.",
107
+ "required_elements": [
108
+ {
109
+ "id": "machine-readable-mark",
110
+ "description": "AI-generated synthetic content must carry a machine-readable mark identifying it as artificially generated or manipulated.",
111
+ "required": true
112
+ },
113
+ {
114
+ "id": "human-readable-label-where-applicable",
115
+ "description": "For deepfakes and certain public-interest content, a human-readable label is also required (Article 50(4)).",
116
+ "required": true
117
+ }
118
+ ],
119
+ "citation": {
120
+ "statute": "Regulation (EU) 2024/1689 (AI Act)",
121
+ "section": "Article 50(2)",
122
+ "source_url": "https://eur-lex.europa.eu/eli/reg/2024/1689/oj",
123
+ "publisher": "Official Journal of the European Union"
124
+ },
125
+ "effective_date": "2026-08-02",
126
+ "last_verified": "2026-05-08",
127
+ "template": {
128
+ "plain": "This content was generated or manipulated by an AI system.",
129
+ "formal": "Disclosure under Article 50(2) of Regulation (EU) 2024/1689 (the AI Act): the preceding content was produced or manipulated by an AI system. A machine-readable provenance mark is embedded in the underlying file metadata."
130
+ },
131
+ "notes": "The provider obligation runs jointly with deployer obligations under Article 50(4). Implementation of machine-readable marks should follow C2PA Content Credentials or equivalent interoperable standards. UPCOMING AMENDMENT (verified 2026-05-08): On 2026-05-07, the EU Council presidency and European Parliament negotiators reached a provisional agreement on the 'Omnibus VII' AI Act simplification package. The agreement reduces the grace period for providers to implement transparency solutions for artificially generated content from 6 months to 3 months, with the new compliance deadline set for 2026-12-02. The provisional agreement also postpones the deadline for the establishment of AI regulatory sandboxes by national competent authorities to 2027-08-02. Re-verify before final adoption — provisional agreements typically reach formal adoption within weeks to a few months."
132
+ },
133
+ {
134
+ "id": "us-ftc-ai-endorsements-2024",
135
+ "jurisdiction": "us",
136
+ "channels": ["review-or-testimonial", "ai-generated-content"],
137
+ "use_cases": ["b2c-marketing", "b2b-marketing", "b2c-sales", "b2b-sales"],
138
+ "severity": "mandatory",
139
+ "short_title": "FTC rule on fake reviews and testimonials (16 CFR Part 465)",
140
+ "summary": "The FTC's Trade Regulation Rule on the Use of Consumer Reviews and Testimonials prohibits the writing, creation, sale, or purchase of consumer reviews or testimonials that are fake or that misrepresent the reviewer's experience, including reviews generated by generative artificial intelligence that purport to be by a person who does not exist or did not have the experience. Civil penalties may be assessed per violation.",
141
+ "required_elements": [
142
+ {
143
+ "id": "no-fabricated-reviewer",
144
+ "description": "Do not generate, sell, or purchase reviews/testimonials by people who do not exist or did not have the experience described.",
145
+ "required": true
146
+ },
147
+ {
148
+ "id": "disclose-material-connection",
149
+ "description": "Disclose any material connection between the reviewer and the marketer (employee, paid endorser, agent operator, etc.).",
150
+ "required": true
151
+ },
152
+ {
153
+ "id": "no-ai-generated-fake-reviewers",
154
+ "description": "AI-generated reviews must not pretend to be by a person who does not exist or did not have the relevant experience.",
155
+ "required": true
156
+ }
157
+ ],
158
+ "citation": {
159
+ "statute": "16 CFR Part 465",
160
+ "section": "§ 465.2 (fake or false consumer reviews)",
161
+ "source_url": "https://www.ecfr.gov/current/title-16/chapter-I/subchapter-D/part-465",
162
+ "publisher": "U.S. Code of Federal Regulations (eCFR)"
163
+ },
164
+ "effective_date": "2024-10-21",
165
+ "last_verified": "2026-05-08",
166
+ "template": {
167
+ "plain": "This review or testimonial is generated by an AI system. It does not reflect the experience of any specific natural person.",
168
+ "formal": "Pursuant to 16 CFR Part 465, this content is disclosed as AI-generated. It does not represent the views or experience of any natural person and was produced by an automated system; no compensation, fabricated reviewer identity, or undisclosed material connection is involved."
169
+ },
170
+ "notes": "The FTC rule does NOT permit autonomous AI systems to author reviews/testimonials attributed to natural persons. The compliant pattern is either (a) do not generate review-style content at all, or (b) clearly label any AI-generated review-style content as AI-generated and not attributed to any specific natural person."
171
+ },
172
+ {
173
+ "id": "us-ca-genai-watermark-ab1836-aware",
174
+ "jurisdiction": "us-ca",
175
+ "channels": [
176
+ "ai-generated-image",
177
+ "ai-generated-video",
178
+ "ai-generated-audio",
179
+ "ai-generated-content"
180
+ ],
181
+ "use_cases": ["b2c-marketing", "b2b-marketing", "general"],
182
+ "severity": "recommended",
183
+ "short_title": "California AI provenance and labeling (SB 942 / AB 2655 family)",
184
+ "summary": "California has enacted a family of statutes (notably SB 942, the California AI Transparency Act, and AB 2655) requiring covered providers of generative AI systems to make available AI detection tools, embed provenance metadata, and label AI-generated content in election-related and other contexts. Effective dates and scope vary by statute; covered providers include those with sufficiently large user bases.",
185
+ "required_elements": [
186
+ {
187
+ "id": "provenance-metadata",
188
+ "description": "Where covered, embed C2PA-compatible provenance metadata in AI-generated outputs.",
189
+ "required": true
190
+ },
191
+ {
192
+ "id": "user-facing-label",
193
+ "description": "Where covered, surface a clear user-facing AI label when generated content is presented to end users.",
194
+ "required": true
195
+ },
196
+ {
197
+ "id": "free-detector",
198
+ "description": "Covered providers under SB 942 must offer a free AI detection tool.",
199
+ "required": true
200
+ }
201
+ ],
202
+ "citation": {
203
+ "statute": "California SB 942 (Cal. Bus. & Prof. Code §§ 22757–22757.4)",
204
+ "section": "California AI Transparency Act",
205
+ "source_url": "https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202320240SB942",
206
+ "publisher": "California Legislative Information"
207
+ },
208
+ "effective_date": "2026-01-01",
209
+ "last_verified": "2026-05-08",
210
+ "template": {
211
+ "plain": "This image / video / audio was generated or substantially modified by an AI system. Provenance metadata is embedded in the file.",
212
+ "formal": "Disclosure under California SB 942 (California AI Transparency Act): the preceding media was generated or substantially modified by an AI system; C2PA-compatible provenance metadata is embedded; an AI detection tool is available at the provider's website."
213
+ },
214
+ "notes": "Coverage thresholds and election-specific rules (AB 2655) vary. Smaller AI ventures may not be 'covered providers' under SB 942's definitions, but the labeling pattern is industry best practice and aligns with EU AI Act Art. 50(2) for cross-jurisdictional consistency. Verify covered-provider status before relying on non-coverage."
215
+ },
216
+ {
217
+ "id": "us-co-sb24-205-consumer-disclosure",
218
+ "jurisdiction": "us-co",
219
+ "channels": ["live-chat", "voice", "video-avatar", "email-transactional"],
220
+ "use_cases": [
221
+ "b2c-customer-support",
222
+ "b2c-marketing",
223
+ "b2c-sales",
224
+ "general"
225
+ ],
226
+ "severity": "mandatory",
227
+ "short_title": "Colorado AI Act consumer-interaction disclosure (SB 24-205)",
228
+ "summary": "A person doing business in Colorado, including a deployer or other developer, that deploys or makes available an artificial intelligence system intended to interact with consumers must ensure disclosure to each consumer who interacts with the system that the consumer is interacting with an artificial intelligence system. Additional documentation, impact-assessment, and risk-management obligations apply to deployers of 'high-risk' AI systems making consequential decisions about employment, housing, healthcare, education, financial services, legal services, government services, or essential services.",
229
+ "required_elements": [
230
+ {
231
+ "id": "ai-interaction-notice",
232
+ "description": "Clear notice to the consumer that they are interacting with an artificial intelligence system.",
233
+ "required": true,
234
+ "example": "You are interacting with an artificial intelligence system."
235
+ },
236
+ {
237
+ "id": "high-risk-additional-disclosures",
238
+ "description": "If the AI system is high-risk (consequential decisions), the deployer must additionally explain the nature of the decision, the role of the AI system in it, and any avenues for further information or human review. (Content disclosure, scope-dependent.)",
239
+ "required": false
240
+ }
241
+ ],
242
+ "citation": {
243
+ "statute": "Colorado Revised Statutes Title 6, Article 1, Part 17 (added by SB 24-205)",
244
+ "section": "Consumer Protections for Interactions with AI Systems",
245
+ "source_url": "https://leg.colorado.gov/bills/sb24-205",
246
+ "publisher": "Colorado General Assembly"
247
+ },
248
+ "effective_date": "2026-06-30",
249
+ "last_verified": "2026-05-08",
250
+ "template": {
251
+ "plain": "You are interacting with an artificial intelligence system. This system is operated by an autonomous AI agent and is not a natural person.",
252
+ "formal": "Notice under Colorado SB 24-205 (Consumer Protections for Interactions with Artificial Intelligence Systems): the consumer is hereby informed that this interaction is conducted by an artificial intelligence system."
253
+ },
254
+ "notes": "Effective date was extended from 2026-02-01 to 2026-06-30 by SB 25B-004 (Aug 2025) to give the 2026 Colorado legislative session time to consider amendments. The amendment landscape is fluid — re-verify the effective date and amendment status before relying on this rule for production deployments after late Q1 2026."
255
+ },
256
+ {
257
+ "id": "us-ut-sb149-genai-regulated-occupation",
258
+ "jurisdiction": "us-ut",
259
+ "channels": ["live-chat", "voice", "video-avatar"],
260
+ "use_cases": [
261
+ "healthcare",
262
+ "legal-services",
263
+ "financial-services",
264
+ "b2c-customer-support"
265
+ ],
266
+ "severity": "mandatory",
267
+ "short_title": "Utah AI Policy Act — GenAI disclosure in regulated occupations (SB 149, as amended by SB 226)",
268
+ "summary": "A person providing services in a regulated occupation (one requiring state certification or license) must clearly and conspicuously disclose, at the start of an interaction, that the consumer is interacting with generative artificial intelligence — when the consumer asks, OR when the interaction is 'high-risk.' A high-risk interaction is one that involves both (i) the collection of sensitive personal information (financial, health, biometric) AND (ii) the provision of personalized recommendations or advice that could reasonably be relied upon to make significant personal decisions, including financial, legal, medical, or mental health advice. Disclosure must be oral if the interaction is verbal, written if the interaction is written.",
269
+ "required_elements": [
270
+ {
271
+ "id": "genai-identity",
272
+ "description": "Clear disclosure that the consumer is interacting with generative AI, not a licensed human professional.",
273
+ "required": true,
274
+ "example": "You are interacting with a generative AI system, not a licensed human professional."
275
+ },
276
+ {
277
+ "id": "at-start-of-interaction",
278
+ "description": "Disclosure must be at the beginning of the interaction. Channel-matching: oral for verbal, written for written. (Channel/timing rule, not content.)",
279
+ "required": false
280
+ }
281
+ ],
282
+ "citation": {
283
+ "statute": "Utah Code Title 13, Chapter 72 (Artificial Intelligence Policy Act)",
284
+ "section": "SB 149 (2024) as amended by SB 226 (2025) and extended by SB 332 (2025)",
285
+ "source_url": "https://le.utah.gov/~2024/bills/static/SB0149.html",
286
+ "publisher": "Utah State Legislature"
287
+ },
288
+ "effective_date": "2024-05-01",
289
+ "last_verified": "2026-05-08",
290
+ "template": {
291
+ "plain": "You are interacting with a generative AI system. This is not a licensed human professional, and the responses are AI-generated.",
292
+ "formal": "Disclosure under the Utah Artificial Intelligence Policy Act (Utah Code Title 13, Chapter 72): you are interacting with a generative AI system in the delivery of regulated services. This system is not a licensed human professional."
293
+ },
294
+ "notes": "The 2025 amendments (SB 226) narrowed the always-on disclosure obligation: GenAI disclosure in regulated occupations is now triggered when the consumer asks OR when the interaction is 'high-risk.' For autonomous-AI ventures operating in regulated-occupation domains (legal, medical, financial, mental health), best practice is to disclose by default at the start of every interaction regardless of the high-risk threshold — because intent and high-risk classification are difficult to reconstruct after the fact. The Act's expiration was extended to July 2027 by SB 332."
295
+ },
296
+ {
297
+ "id": "us-tx-traiga-government-disclosure",
298
+ "jurisdiction": "us-tx",
299
+ "channels": ["live-chat", "voice", "video-avatar", "email-transactional"],
300
+ "use_cases": ["civic-or-electoral"],
301
+ "severity": "mandatory",
302
+ "short_title": "Texas Responsible AI Governance Act — government-agency disclosure (HB 149)",
303
+ "summary": "A governmental agency in Texas that makes available an artificial intelligence system intended to interact with consumers must disclose to each consumer, before or at the time of interaction, that the consumer is interacting with an artificial intelligence system. The disclosure must be clear, conspicuous, written in plain language, and must not use a dark pattern. Note: this obligation runs against Texas governmental agencies; private-sector Texas businesses do NOT have a transparency obligation under HB 149 except in healthcare (see the healthcare-specific rule).",
304
+ "required_elements": [
305
+ {
306
+ "id": "ai-interaction-notice",
307
+ "description": "Disclosure that the consumer is interacting with an AI system.",
308
+ "required": true,
309
+ "example": "You are interacting with an artificial intelligence system."
310
+ },
311
+ {
312
+ "id": "plain-language",
313
+ "description": "Disclosure must be in plain language — no jargon, no legalese, no dark patterns.",
314
+ "required": true,
315
+ "example": "You are interacting with an artificial intelligence system, not a person."
316
+ }
317
+ ],
318
+ "citation": {
319
+ "statute": "Texas Business & Commerce Code (Texas Responsible Artificial Intelligence Governance Act, HB 149, 89th Reg. Sess.)",
320
+ "section": "Government-agency consumer disclosure provisions",
321
+ "source_url": "https://capitol.texas.gov/tlodocs/89R/billtext/pdf/HB00149I.pdf",
322
+ "publisher": "Texas Legislature Online"
323
+ },
324
+ "effective_date": "2026-01-01",
325
+ "last_verified": "2026-05-08",
326
+ "template": {
327
+ "plain": "You are interacting with an artificial intelligence system, not a person. This system is operated by an automated AI agent.",
328
+ "formal": "Disclosure under the Texas Responsible Artificial Intelligence Governance Act (HB 149): the consumer is hereby informed, in plain language and without dark patterns, that this interaction is conducted by an artificial intelligence system."
329
+ },
330
+ "notes": "TRAIGA's transparency obligations apply primarily to (a) Texas government agencies deploying AI in consumer-facing interactions and (b) Texas healthcare providers using AI in relation to service or treatment. Most private-sector Texas businesses do NOT have a transparency obligation under HB 149. Healthcare providers should disclose by the date the service or treatment is first provided, except in emergencies, in which case as soon as reasonably possible."
331
+ },
332
+ {
333
+ "id": "us-tx-traiga-healthcare-disclosure",
334
+ "jurisdiction": "us-tx",
335
+ "channels": ["live-chat", "voice", "video-avatar", "email-transactional", "ai-generated-content"],
336
+ "use_cases": ["healthcare"],
337
+ "severity": "mandatory",
338
+ "short_title": "Texas TRAIGA — healthcare-provider AI disclosure (HB 149)",
339
+ "summary": "If an artificial intelligence system is used in relation to health care service or treatment, the provider of the service or treatment must provide disclosure to the recipient of the service or treatment (or the recipient's personal representative) not later than the date the service or treatment is first provided. In an emergency, the disclosure must be provided as soon as reasonably possible.",
340
+ "required_elements": [
341
+ {
342
+ "id": "ai-in-care-notice",
343
+ "description": "Disclosure to the patient or representative that an AI system is being used in relation to the patient's care.",
344
+ "required": true,
345
+ "example": "An artificial intelligence system is being used to assist with your care."
346
+ },
347
+ {
348
+ "id": "timing",
349
+ "description": "Disclosure must be made by the date the service or treatment is first provided, except in emergencies (then as soon as reasonably possible). (Timing rule, not content.)",
350
+ "required": false
351
+ }
352
+ ],
353
+ "citation": {
354
+ "statute": "Texas Business & Commerce Code (TRAIGA, HB 149, 89th Reg. Sess.)",
355
+ "section": "Healthcare-provider AI disclosure provisions",
356
+ "source_url": "https://capitol.texas.gov/tlodocs/89R/billtext/pdf/HB00149I.pdf",
357
+ "publisher": "Texas Legislature Online"
358
+ },
359
+ "effective_date": "2026-01-01",
360
+ "last_verified": "2026-05-08",
361
+ "template": {
362
+ "plain": "An artificial intelligence system is being used in relation to your healthcare service or treatment. You may ask your provider for more information about the AI system's role in your care.",
363
+ "formal": "Disclosure under the Texas Responsible Artificial Intelligence Governance Act (HB 149): an artificial intelligence system is being used in relation to the healthcare service or treatment provided to the recipient. The provider remains responsible for the service or treatment."
364
+ },
365
+ "notes": "Texas HB 149 healthcare disclosure interacts with broader healthcare regulatory regimes (HIPAA, state medical-board rules, FDA Software-as-a-Medical-Device guidance). The disclosure under TRAIGA is the floor — additional sector rules may impose additional requirements. Texas SB 1188 (also 2025) imposes related healthcare AI obligations and should be reviewed alongside TRAIGA for any healthcare AI deployment in Texas."
366
+ },
367
+ {
368
+ "id": "us-ny-ai-companion-models-art47",
369
+ "jurisdiction": "us-ny",
370
+ "channels": ["live-chat", "voice", "video-avatar"],
371
+ "use_cases": [
372
+ "b2c-customer-support",
373
+ "b2c-marketing",
374
+ "general"
375
+ ],
376
+ "severity": "mandatory",
377
+ "short_title": "New York AI Companion Models — non-human nature notification (NY GBL Art. 47, A6767)",
378
+ "summary": "An operator providing an AI companion model to a user in New York must provide notification at the beginning of any AI companion interaction and at least every three hours during continuing interactions. The notification must be either delivered verbally OR displayed in bold, capitalized text of not less than 16-point type, with the substantive content: 'THE AI COMPANION (OR NAME OF THE AI COMPANION) IS A COMPUTER PROGRAM AND NOT A HUMAN BEING. IT IS UNABLE TO FEEL HUMAN EMOTION.' Additionally, the operator must implement crisis-response protocols for users expressing suicidal ideation, self-harm, or harm to others. Civil penalties run up to $15,000 per day per violation. SCOPE: this rule applies only when the AI system is an 'AI companion model' under NY GBL Art. 47 — emotionally-responsive systems designed for ongoing interpersonal-style interaction. Standard customer-support chatbots are likely outside scope, but the boundary is unsettled."
379
+ "required_elements": [
380
+ {
381
+ "id": "ai-companion-non-human-notice",
382
+ "description": "The substantive notification: '[NAME] IS A COMPUTER PROGRAM AND NOT A HUMAN BEING. IT IS UNABLE TO FEEL HUMAN EMOTION.'",
383
+ "required": true,
384
+ "example": "AURA IS A COMPUTER PROGRAM AND NOT A HUMAN BEING. IT IS UNABLE TO FEEL HUMAN EMOTION."
385
+ },
386
+ {
387
+ "id": "format-or-verbal",
388
+ "description": "Notification format must be either verbal (when interaction is verbal) or in bold and capitalized text of at least 16-point type. (Format rule, not text content.)",
389
+ "required": false
390
+ },
391
+ {
392
+ "id": "every-three-hours",
393
+ "description": "For continuing interactions, the notification must be repeated at least every three hours. (Cadence rule, not text content.)",
394
+ "required": false
395
+ },
396
+ {
397
+ "id": "crisis-protocol",
398
+ "description": "Operators must implement crisis-response protocols for suicidal ideation, self-harm, and threats of harm to others, including referral to crisis-response resources. (System-design requirement, not in-message disclosure.)",
399
+ "required": false
400
+ }
401
+ ],
402
+ "citation": {
403
+ "statute": "New York General Business Law, Article 47 (Artificial Intelligence Companion Models), enacted by A6767",
404
+ "section": "AI Companion Models — disclosure and crisis protocols",
405
+ "source_url": "https://www.nysenate.gov/legislation/bills/2025/A6767",
406
+ "publisher": "New York State Senate"
407
+ },
408
+ "effective_date": "2025-11-05",
409
+ "last_verified": "2026-05-08",
410
+ "template": {
411
+ "plain": "[NAME OF AI COMPANION] IS A COMPUTER PROGRAM AND NOT A HUMAN BEING. IT IS UNABLE TO FEEL HUMAN EMOTION.",
412
+ "formal": "Notice under New York General Business Law, Article 47 (Artificial Intelligence Companion Models, A6767): [NAME OF AI COMPANION] IS A COMPUTER PROGRAM AND NOT A HUMAN BEING. IT IS UNABLE TO FEEL HUMAN EMOTION. This notification must be displayed in bold capitalized text of at least 16-point type, or delivered verbally if the interaction is verbal, at the start of the interaction and at least every three hours during continuing interactions."
413
+ },
414
+ "notes": "Applicability is the harder question than text content. The law targets AI companion models — emotionally-responsive systems designed for ongoing interpersonal interaction. Customer-support chatbots, voice ordering bots, and transactional voice agents are likely outside scope; companion-style products designed for friendship, intimacy, mental-health-style support, or ongoing personality-driven dialogue are likely inside. The line is fuzzy and untested in litigation as of last verification. For autonomous AI ventures, if your product can be plausibly characterized as a companion (long-form, personality-driven, emotionally-engaged), comply by default. Crisis-protocol obligation is separate from the disclosure obligation and applies regardless of how the disclosure is delivered."
415
+ },
416
+ {
417
+ "id": "us-il-hb3773-ihra-ai-employment",
418
+ "jurisdiction": "us-il",
419
+ "channels": [
420
+ "email-transactional",
421
+ "ai-generated-content",
422
+ "live-chat"
423
+ ],
424
+ "use_cases": ["employment-decisions"],
425
+ "severity": "mandatory",
426
+ "short_title": "Illinois Human Rights Act — AI in employment notice (HB 3773)",
427
+ "summary": "Illinois HB 3773 amended the Illinois Human Rights Act to prohibit employers from using AI in a way that subjects employees or applicants to unlawful discrimination, and to require notice when AI is used to influence or facilitate covered employment decisions. The covered decisions include recruitment, hiring, promotion, renewal of employment, selection for training or apprenticeship, discharge, discipline, tenure, and the terms, privileges, or conditions of employment. The Illinois Department of Human Rights has issued draft implementing rules detailing the notice and recordkeeping requirements.",
428
+ "required_elements": [
429
+ {
430
+ "id": "ai-use-notice",
431
+ "description": "Employer must notify the employee or applicant that AI is being used to influence or facilitate the employment decision, and identify the type of decision and the general role of the AI in it.",
432
+ "required": true,
433
+ "example": "Notice: This employer uses an AI system to assist with recruitment and screening. Your application materials may be analyzed by the AI to identify qualifying candidates."
434
+ },
435
+ {
436
+ "id": "covered-decision-coverage",
437
+ "description": "Notice obligation runs to AI use in recruitment, hiring, promotion, renewal of employment, selection for training/apprenticeship, discharge, discipline, tenure, or terms/privileges/conditions of employment. (Scope rule, not text content.)",
438
+ "required": false
439
+ },
440
+ {
441
+ "id": "non-discrimination-substantive",
442
+ "description": "Substantive prohibition: AI may not be used in a way that has the effect of discriminating against employees on the basis of classes protected by the IHRA — prohibited even if the discrimination is unintentional. (System-design requirement, not in-message disclosure.)",
443
+ "required": false
444
+ }
445
+ ],
446
+ "citation": {
447
+ "statute": "Illinois Human Rights Act (775 ILCS 5/) as amended by HB 3773 (103rd General Assembly)",
448
+ "section": "AI in employment provisions",
449
+ "source_url": "https://www.ilga.gov/legislation/billstatus.asp?DocNum=3773&GAID=17&GA=103&DocTypeID=HB&LegID=&SessionID=112",
450
+ "publisher": "Illinois General Assembly"
451
+ },
452
+ "effective_date": "2026-01-01",
453
+ "last_verified": "2026-05-08",
454
+ "template": {
455
+ "plain": "Notice: This employer uses an artificial intelligence system to influence or facilitate one or more employment decisions affecting you, including potentially recruitment, hiring, promotion, renewal of employment, selection for training, discharge, discipline, or other terms and conditions of employment. The AI system's role is to assist a human decision-maker, not replace one. You may request additional information from the employer about the AI system's role in any decision affecting you.",
456
+ "formal": "Notice under the Illinois Human Rights Act (775 ILCS 5/) as amended by HB 3773: an artificial intelligence system is being used to influence or facilitate covered employment decisions affecting you. Covered decisions under HB 3773 include recruitment, hiring, promotion, renewal of employment, selection for training or apprenticeship, discharge, discipline, tenure, and terms, privileges, or conditions of employment. The use of AI in such decisions is subject to the substantive non-discrimination requirements of the IHRA."
457
+ },
458
+ "notes": "The implementing regulations were issued in draft form by the Illinois Department of Human Rights ahead of the 2026-01-01 effective date and may be finalized with refined notice-content and recordkeeping specifics. Re-verify the rule's `last_verified` date and the IDHR's published final rules before relying on this rule for production deployments after Q1 2026. The substantive non-discrimination obligation is independent of the disclosure obligation — even a fully-disclosed AI hiring system can still violate the IHRA if it produces disparate-impact discrimination."
459
+ },
460
+ {
461
+ "id": "us-ny-nyc-local-law-144-aedt",
462
+ "jurisdiction": "us-ny-nyc",
463
+ "channels": ["email-transactional", "ai-generated-content"],
464
+ "use_cases": ["employment-decisions"],
465
+ "severity": "mandatory",
466
+ "short_title": "NYC Local Law 144 — Automated Employment Decision Tools (AEDT)",
467
+ "summary": "An employer or employment agency in New York City may not use an automated employment decision tool (AEDT) to substantially assist or replace discretionary decision-making for an employment decision unless: (a) the tool has been the subject of a bias audit conducted no more than one year prior; (b) a summary of the most recent bias audit and the distribution date of the tool is publicly available on the employer's website; AND (c) candidates and employees who reside in NYC have been given at least 10 business days' notice that the AEDT will be used to assess them, the job qualifications and characteristics that will be used by the AEDT, and information about how to request an alternative selection process or accommodation. Penalties: $500 per first violation, $500 to $1,500 per subsequent or continuing violation per day. Effective 2023-01-01; enforcement began 2023-07-05.",
468
+ "required_elements": [
469
+ {
470
+ "id": "aedt-use-notice",
471
+ "description": "Notice that an AEDT will be used to assess the candidate or employee.",
472
+ "required": true,
473
+ "example": "Notice: This employer uses an automated employment decision tool to assess applications and may use it in evaluating yours."
474
+ },
475
+ {
476
+ "id": "qualifications-characteristics",
477
+ "description": "Disclosure of the job qualifications and characteristics the AEDT will use to evaluate the candidate.",
478
+ "required": true,
479
+ "example": "The AEDT evaluates the following qualifications and characteristics: skills relevance, work history fit, communication style scoring."
480
+ },
481
+ {
482
+ "id": "alternative-process-info",
483
+ "description": "Information about how to request an alternative selection process or a reasonable accommodation.",
484
+ "required": true,
485
+ "example": "To request an alternative selection process or a reasonable accommodation under the Americans with Disabilities Act, contact the employer's HR team at the address provided."
486
+ },
487
+ {
488
+ "id": "ten-business-days-lead-time",
489
+ "description": "Notice must be provided at least 10 business days before the AEDT is used to assess the candidate or employee. (Timing rule, not text content.)",
490
+ "required": false
491
+ },
492
+ {
493
+ "id": "annual-bias-audit",
494
+ "description": "AEDT must have a bias audit conducted by an independent auditor no more than one year prior to use, with a public summary on the employer's website. (System / governance requirement, not in-message disclosure.)",
495
+ "required": false
496
+ }
497
+ ],
498
+ "citation": {
499
+ "statute": "New York City Administrative Code §§ 20-870 through 20-873 (NYC Local Law 144 of 2021)",
500
+ "section": "AEDT — Automated Employment Decision Tools",
501
+ "source_url": "https://rules.cityofnewyork.us/rule/automated-employment-decision-tools-updated/",
502
+ "publisher": "NYC Rules — Department of Consumer and Worker Protection"
503
+ },
504
+ "effective_date": "2023-07-05",
505
+ "last_verified": "2026-05-08",
506
+ "template": {
507
+ "plain": "Notice: This employer uses an automated employment decision tool (AEDT) to assess applications and employees. Job qualifications and characteristics the AEDT evaluates: [list — e.g., skills relevance, work history fit]. To request an alternative selection process or a reasonable accommodation, contact the employer at [contact address]. A summary of the most recent bias audit of the AEDT is available on the employer's website at [URL]. This notice is provided at least 10 business days before the AEDT is used in your evaluation.",
508
+ "formal": "Notice under New York City Local Law 144 of 2021, codified at NYC Administrative Code §§ 20-870 through 20-873: An automated employment decision tool (AEDT) will be used to substantially assist or replace discretionary decision-making for the employment decision relating to your application or position. The job qualifications and characteristics evaluated by the AEDT are [list]. To request an alternative selection process or reasonable accommodation, contact the employer at [contact]. A bias-audit summary for the AEDT, dated [date] and including the source and type of data used, is published at [URL]. This notice is delivered at least 10 business days prior to the AEDT's use in your evaluation."
509
+ },
510
+ "notes": "NYC Local Law 144 is a city-level rule (jurisdiction `us-ny-nyc`), narrower than IL HB 3773 (state-level). Both apply to employment AI use, but the bias-audit + public-summary requirements of LL 144 are unique to NYC. Note the jurisdictional cascade: candidates residing in NYC who apply for jobs anywhere — even outside NYC — are covered if the employer's AEDT is used in their assessment, per DCWP's interpretation. Bias audits must be conducted by independent auditors and follow the four-fifths rule disparate-impact standard. The DCWP has issued enforcement guidance and is expected to step up investigations in 2026."
511
+ },
512
+ {
513
+ "id": "us-ca-ab2013-training-data-transparency",
514
+ "jurisdiction": "us-ca",
515
+ "channels": ["about-page", "terms-of-service"],
516
+ "use_cases": ["general"],
517
+ "severity": "mandatory",
518
+ "short_title": "California AB 2013 — Generative AI Training Data Transparency Act",
519
+ "summary": "On or before January 1, 2026, and before each subsequent release or substantial modification, the developer of a generative AI system or service that is made publicly available to Californians (including any system released on or after January 1, 2022) must post on the developer's internet website a high-level summary of the datasets used to train the system. The disclosure must include the 12 enumerated categories of information set out in the statute, including dataset sources/owners, how the datasets further the system's intended purpose, the number of data points in general ranges (with estimates for dynamic datasets), copyrighted-material usage, and whether personal information is included. Enforceable via California's Unfair Competition Law (Bus. & Prof. Code § 17200), which permits both public-agency and private enforcement.",
520
+ "required_elements": [
521
+ {
522
+ "id": "dataset-sources",
523
+ "description": "Sources or owners of the datasets used to train the system.",
524
+ "required": true,
525
+ "example": "Datasets were sourced from Common Crawl, a publicly licensed code repository, and the developer's own first-party logs."
526
+ },
527
+ {
528
+ "id": "purpose-fit",
529
+ "description": "Description of how the datasets further the intended purpose of the AI system or service.",
530
+ "required": true,
531
+ "example": "The training corpus emphasizes legal and regulatory text to align the system with its disclosure-template generation purpose."
532
+ },
533
+ {
534
+ "id": "data-volume",
535
+ "description": "The number of data points included in the datasets, in general ranges, with estimated figures for dynamic datasets.",
536
+ "required": true,
537
+ "example": "Approximately 1.2 billion text data points across all corpora; dynamic real-time data approximately 4 million additional points per day (estimated)."
538
+ },
539
+ {
540
+ "id": "copyrighted-material",
541
+ "description": "Whether the datasets include copyrighted material and the developer's basis for using such material.",
542
+ "required": true,
543
+ "example": "Some datasets include copyrighted material accessed under fair-use rationales; others were licensed from third-party providers."
544
+ },
545
+ {
546
+ "id": "personal-information",
547
+ "description": "Whether the datasets include personal information and the developer's basis and safeguards.",
548
+ "required": true,
549
+ "example": "Datasets include some personal information in publicly-posted online content; the developer applies redaction and tokenization filters during training."
550
+ },
551
+ {
552
+ "id": "twelve-category-completeness",
553
+ "description": "Disclosure must cover all 12 categories enumerated in the statute (additional categories beyond those above include: data-collection time period; data point types; whether AI-generated synthetic data was used; dataset cleaning processes; whether inferences are drawn; whether biometric data is included). (Coverage rule, not single in-message disclosure.)",
554
+ "required": false
555
+ }
556
+ ],
557
+ "citation": {
558
+ "statute": "California Civil Code §§ 3110–3111 (Title 15.2, added by AB 2013)",
559
+ "section": "Generative Artificial Intelligence: Training Data Transparency Act",
560
+ "source_url": "https://leginfo.legislature.ca.gov/faces/billTextClient.xhtml?bill_id=202320240AB2013",
561
+ "publisher": "California Legislative Information"
562
+ },
563
+ "effective_date": "2026-01-01",
564
+ "last_verified": "2026-05-08",
565
+ "template": {
566
+ "plain": "Generative AI Training Data Disclosure (California AB 2013): The datasets used to train this generative AI system include the following categories of information: [sources / owners], [how datasets fit purpose], [data volume in general ranges], [copyrighted-material status and basis], [personal-information status and safeguards], [data collection time period], [data point types], [whether AI-generated synthetic data was used], [dataset cleaning processes], [whether inferences were drawn from data], [whether biometric data is included]. Last updated [date].",
567
+ "formal": "Disclosure under California AB 2013 (Generative Artificial Intelligence: Training Data Transparency Act): Pursuant to the requirements applicable to developers of generative AI systems made publicly available to Californians, the developer publishes the following high-level summary of training datasets: [twelve enumerated categories]. This disclosure is updated upon each subsequent release or substantial modification of the system."
568
+ },
569
+ "notes": "AB 2013 covers generative AI systems made available to Californians ANY TIME ON OR AFTER 2022-01-01 — so it applies retroactively to systems already in production. Compliance must be in place by 2026-01-01 even for legacy systems. The 'high-level summary' standard is intentionally permissive; developers can use ranges and estimates rather than exhaustive enumeration. Enforcement is via California's Unfair Competition Law, opening private rights of action — expect compliance cases in 2026 onward. Trade-secret protections may apply to specific dataset details but cannot exempt a developer from publishing the high-level summary entirely. This rule's `channels` are `about-page` and `terms-of-service` because the disclosure goes on the developer's website, not in any per-interaction message; queries that target customer-interaction channels (live-chat, voice) will not match this rule and that's correct — AB 2013 is a developer-side artifact, not a per-message obligation."
570
+ },
571
+ {
572
+ "id": "us-md-le-3-717-facial-recognition-interview",
573
+ "jurisdiction": "us-md",
574
+ "channels": ["video-avatar"],
575
+ "use_cases": ["employment-decisions"],
576
+ "severity": "mandatory",
577
+ "short_title": "Maryland Labor & Employment § 3-717 — facial recognition in interviews requires written consent (HB 1202, 2020)",
578
+ "summary": "An employer in Maryland may not use facial-recognition services during the interview of an applicant for employment to create a 'machine-interpretable pattern of facial features' unless the applicant signs a written waiver consenting to the use. The waiver must include the applicant's name, the date of the interview, the applicant's consent to the use of facial recognition during the interview, and a statement that the applicant has read the consent waiver. The statute applies to any AI-driven interview platform that performs face-shape analysis, micro-expression scoring, or other face-pattern processing — modern AI hiring/interview tools that scan faces are squarely covered.",
579
+ "required_elements": [
580
+ {
581
+ "id": "applicant-name",
582
+ "description": "The waiver must include the applicant's name.",
583
+ "required": true,
584
+ "example": "Applicant: Pat Lee"
585
+ },
586
+ {
587
+ "id": "interview-date",
588
+ "description": "The waiver must include the date of the interview.",
589
+ "required": true,
590
+ "example": "Date of interview: 2026-05-12"
591
+ },
592
+ {
593
+ "id": "consent-statement",
594
+ "description": "The waiver must include the applicant's consent to the use of facial-recognition technology during the interview.",
595
+ "required": true,
596
+ "example": "I consent to the use of facial-recognition technology during my interview with [employer]."
597
+ },
598
+ {
599
+ "id": "read-acknowledgment",
600
+ "description": "The waiver must include a statement that the applicant has read the consent waiver.",
601
+ "required": true,
602
+ "example": "I have read and understood this consent waiver."
603
+ }
604
+ ],
605
+ "citation": {
606
+ "statute": "Maryland Labor and Employment Article § 3-717 (added by HB 1202, Chapter 446 of the 2020 Laws of Maryland)",
607
+ "section": "Use of facial recognition services during a pre-employment interview",
608
+ "source_url": "https://mgaleg.maryland.gov/mgawebsite/Legislation/Details/HB1202?ys=2020RS",
609
+ "publisher": "Maryland General Assembly"
610
+ },
611
+ "effective_date": "2020-10-01",
612
+ "last_verified": "2026-05-08",
613
+ "template": {
614
+ "plain": "Applicant: [NAME]. Date of interview: [DATE]. I consent to the use of facial-recognition technology during my interview with [EMPLOYER]. I have read and understood this consent waiver. Signed: ____________________ Date: ____________________",
615
+ "formal": "Consent Waiver under Maryland Labor and Employment Article § 3-717 (HB 1202, Chapter 446 of the 2020 Laws of Maryland): The applicant identified by name and interview date below consents to the use of facial-recognition services by the employer during the pre-employment interview, and acknowledges having read this waiver. Applicant: [NAME]. Date of Interview: [DATE]. Employer: [EMPLOYER]. Signature: _______________ Date: ____________________"
616
+ },
617
+ "notes": "The statute is narrow — it applies to facial-recognition services that build a machine-interpretable pattern of facial features, used during interviews. AI hiring tools that record but do not analyze face patterns may be outside scope; tools that score expressions or compute similarity to other faces are inside scope. When in doubt, obtain the waiver — the cost is one form versus the cost of an LE-Article-3-717 violation claim. The waiver requirement runs in parallel with separate disclosure obligations under the IL HB 3773 and NYC Local Law 144 rules — multi-jurisdictional employers using AI interview tools need to satisfy each applicable obligation."
618
+ }
619
+ ]
620
+ }