julee 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,15 +18,14 @@ Legacy aliases (deprecated, emit warnings):
  """

  import re
- import warnings
- from pathlib import Path
  from collections import defaultdict
+
  from docutils import nodes
- from sphinx.util.docutils import SphinxDirective
  from sphinx.util import logging
+ from sphinx.util.docutils import SphinxDirective

  from .config import get_config
- from .utils import normalize_name, slugify, path_to_root
+ from .utils import normalize_name, path_to_root, slugify

  logger = logging.getLogger(__name__)

@@ -60,12 +59,13 @@ def get_apps_with_stories() -> set[str]:
  def get_epics_for_story(story_title: str, env) -> list[str]:
  """Find epics that reference this story."""
  from . import epics
+
  epic_registry = epics.get_epic_registry(env)
  story_normalized = normalize_name(story_title)

  matching_epics = []
  for slug, epic in epic_registry.items():
- for epic_story in epic.get('stories', []):
+ for epic_story in epic.get("stories", []):
  if normalize_name(epic_story) == story_normalized:
  matching_epics.append(slug)
  break
@@ -76,14 +76,15 @@ def get_epics_for_story(story_title: str, env) -> list[str]:
  def get_journeys_for_story(story_title: str, env) -> list[str]:
  """Find journeys that reference this story (directly or via epic)."""
  from . import journeys
+
  journey_registry = journeys.get_journey_registry(env)
  story_normalized = normalize_name(story_title)

  matching_journeys = []
  for slug, journey in journey_registry.items():
- for step in journey.get('steps', []):
- if step.get('type') == 'story':
- if normalize_name(step['ref']) == story_normalized:
+ for step in journey.get("steps", []):
+ if step.get("type") == "story":
+ if normalize_name(step["ref"]) == story_normalized:
  matching_journeys.append(slug)
  break

@@ -98,38 +99,38 @@ def build_story_seealso(story: dict, env, docname: str):
  links = []

  # Persona link
- persona = story.get('persona')
- if persona and persona != 'unknown':
+ persona = story.get("persona")
+ if persona and persona != "unknown":
  persona_slug = slugify(persona)
  persona_path = f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
- links.append(('Persona', persona, persona_path))
+ links.append(("Persona", persona, persona_path))

  # App link
- app = story.get('app')
+ app = story.get("app")
  if app:
  app_path = f"{prefix}{config.get_doc_path('applications')}/{app}.html"
- links.append(('App', app.replace("-", " ").title(), app_path))
+ links.append(("App", app.replace("-", " ").title(), app_path))

  # Epic links
- epics_list = get_epics_for_story(story['feature'], env)
+ epics_list = get_epics_for_story(story["feature"], env)
  for epic_slug in epics_list:
  epic_title = epic_slug.replace("-", " ").title()
  epic_path = f"{prefix}{config.get_doc_path('epics')}/{epic_slug}.html"
- links.append(('Epic', epic_title, epic_path))
+ links.append(("Epic", epic_title, epic_path))

  # Journey links
- journeys_list = get_journeys_for_story(story['feature'], env)
+ journeys_list = get_journeys_for_story(story["feature"], env)
  for journey_slug in journeys_list:
  journey_title = journey_slug.replace("-", " ").title()
  journey_path = f"{prefix}{config.get_doc_path('journeys')}/{journey_slug}.html"
- links.append(('Journey', journey_title, journey_path))
+ links.append(("Journey", journey_title, journey_path))

  if not links:
  return None

  # Build seealso block with line_block for tight spacing
- seealso = nodes.admonition(classes=['seealso'])
- seealso += nodes.title(text='See also')
+ seealso = nodes.admonition(classes=["seealso"])
+ seealso += nodes.title(text="See also")

  line_block = nodes.line_block()
  for link_type, link_text, link_path in links:
@@ -146,6 +147,7 @@ def build_story_seealso(story: dict, env, docname: str):

  class StorySeeAlsoPlaceholder(nodes.General, nodes.Element):
  """Placeholder for story seealso block, replaced at doctree-read."""
+
  pass

@@ -157,10 +159,12 @@ def scan_feature_files(app):

  config = get_config()
  project_root = config.project_root
- tests_dir = config.get_path('feature_files')
+ tests_dir = config.get_path("feature_files")

  if not tests_dir.exists():
- logger.info(f"Feature files directory not found at {tests_dir} - no stories to index")
+ logger.info(
+ f"Feature files directory not found at {tests_dir} - no stories to index"
+ )
  return

  # Scan for feature files
@@ -178,7 +182,7 @@ def scan_feature_files(app):
  try:
  with open(feature_file) as f:
  content = f.read()
- lines = content.split('\n')
+ lines = content.split("\n")
  except Exception as e:
  logger.warning(f"Could not read {feature_file}: {e}")
  continue
@@ -193,11 +197,13 @@ def scan_feature_files(app):
  snippet_lines = []
  for line in lines:
  stripped = line.strip()
- if stripped.startswith(('Scenario', 'Background', '@', 'Given', 'When', 'Then', 'And', 'But')):
+ if stripped.startswith(
+ ("Scenario", "Background", "@", "Given", "When", "Then", "And", "But")
+ ):
  break
  if stripped:
  snippet_lines.append(line)
- gherkin_snippet = '\n'.join(snippet_lines)
+ gherkin_snippet = "\n".join(snippet_lines)

  feature_title = feature_match.group(1) if feature_match else "Unknown"
  story = {
@@ -206,7 +212,9 @@
  "feature": feature_title,
  "slug": slugify(feature_title),
  "persona": as_a_match.group(1) if as_a_match else "unknown",
- "persona_normalized": normalize_name(as_a_match.group(1)) if as_a_match else "unknown",
+ "persona_normalized": (
+ normalize_name(as_a_match.group(1)) if as_a_match else "unknown"
+ ),
  "i_want": i_want_match.group(1) if i_want_match else "do something",
  "so_that": so_that_match.group(1) if so_that_match else "achieve a goal",
  "path": str(rel_path),
@@ -229,7 +237,7 @@ def scan_known_entities(app):
  docs_dir = config.docs_dir

  # Scan applications
- apps_dir = docs_dir / config.get_doc_path('applications')
+ apps_dir = docs_dir / config.get_doc_path("applications")
  if apps_dir.exists():
  for rst_file in apps_dir.glob("*.rst"):
  if rst_file.name != "index.rst":
@@ -237,7 +245,7 @@
  _known_apps.add(normalize_name(app_name))

  # Scan personas
- personas_dir = docs_dir / config.get_doc_path('personas')
+ personas_dir = docs_dir / config.get_doc_path("personas")
  if personas_dir.exists():
  for rst_file in personas_dir.glob("*.rst"):
  if rst_file.name != "index.rst":
@@ -270,7 +278,9 @@ def builder_inited(app):

  # Warn about stories referencing undocumented entities
  for app_name in sorted(unknown_apps):
- logger.warning(f"Gherkin story references undocumented application: '{app_name}'")
+ logger.warning(
+ f"Gherkin story references undocumented application: '{app_name}'"
+ )
  for persona in sorted(unknown_personas):
  logger.warning(f"Gherkin story references undocumented persona: '{persona}'")

@@ -295,13 +305,15 @@ def get_story_ref_target(story: dict, from_docname: str) -> tuple[str, str]:
  return f"{config.get_doc_path('stories')}/{app_slug}", story_slug


- def make_story_reference(story: dict, from_docname: str, link_text: str | None = None) -> nodes.reference:
+ def make_story_reference(
+ story: dict, from_docname: str, link_text: str | None = None
+ ) -> nodes.reference:
  """Create a reference node linking to a story's anchor on its app page."""
  target_doc, anchor = get_story_ref_target(story, from_docname)

  # Calculate relative path from current doc to target
- from_parts = from_docname.split('/')
- target_parts = target_doc.split('/')
+ from_parts = from_docname.split("/")
+ target_parts = target_doc.split("/")

  # Find common prefix
  common = 0
@@ -313,12 +325,12 @@ def make_story_reference(story: dict, from_docname: str, link_text: str | None =

  # Build relative path
  up_levels = len(from_parts) - common - 1
- down_path = '/'.join(target_parts[common:])
+ down_path = "/".join(target_parts[common:])

  if up_levels > 0:
- rel_path = '../' * up_levels + down_path + '.html'
+ rel_path = "../" * up_levels + down_path + ".html"
  else:
- rel_path = down_path + '.html'
+ rel_path = down_path + ".html"

  ref_uri = f"{rel_path}#{anchor}"
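
Note: the relative-path arithmetic in the two hunks above (shared prefix, then up_levels of "../" plus the remaining path and ".html") can be read in isolation. A minimal standalone sketch of the same idea follows; the helper name relative_html_path and the exact common-prefix loop (which falls outside the hunks shown) are assumptions, not code from the package:

def relative_html_path(from_docname: str, target_doc: str) -> str:
    # Illustrative only: mirrors the path logic in make_story_reference above.
    from_parts = from_docname.split("/")
    target_parts = target_doc.split("/")

    # Shared leading directory components (assumed; this loop is not shown in the diff)
    common = 0
    for a, b in zip(from_parts[:-1], target_parts[:-1]):
        if a != b:
            break
        common += 1

    up_levels = len(from_parts) - common - 1
    down_path = "/".join(target_parts[common:])
    return "../" * up_levels + down_path + ".html"


print(relative_html_path("stories/app-a", "personas/developer"))  # ../personas/developer.html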
@@ -348,8 +360,7 @@ class StoryAppDirective(SphinxDirective):
  app_normalized = normalize_name(app_arg)

  # Filter stories for this app
- stories = [s for s in _story_registry
- if s["app_normalized"] == app_normalized]
+ stories = [s for s in _story_registry if s["app_normalized"] == app_normalized]

  if not stories:
  para = nodes.paragraph()
@@ -394,7 +405,9 @@
  persona = list(by_persona.keys())[0]
  persona_valid = normalize_name(persona) in _known_personas
  persona_slug = persona.lower().replace(" ", "-")
- persona_path = f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ persona_path = (
+ f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ )

  if total_stories != 1:
  intro_para += nodes.Text("for ")
@@ -416,7 +429,9 @@
  count = len(by_persona[persona])
  persona_valid = normalize_name(persona) in _known_personas
  persona_slug = persona.lower().replace(" ", "-")
- persona_path = f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ persona_path = (
+ f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ )

  if persona_valid:
  persona_ref = nodes.reference("", "", refuri=persona_path)
@@ -456,7 +471,7 @@

  # Gherkin snippet as literal block
  snippet = nodes.literal_block(text=story["gherkin_snippet"])
- snippet['language'] = 'gherkin'
+ snippet["language"] = "gherkin"
  story_section += snippet

  # Feature file path (for reference, not as broken link)
@@ -467,9 +482,9 @@

  # Placeholder for seealso (filled in doctree-read when registries are complete)
  seealso_placeholder = StorySeeAlsoPlaceholder()
- seealso_placeholder['story_feature'] = story["feature"]
- seealso_placeholder['story_persona'] = story["persona"]
- seealso_placeholder['story_app'] = story["app"]
+ seealso_placeholder["story_feature"] = story["feature"]
+ seealso_placeholder["story_persona"] = story["persona"]
+ seealso_placeholder["story_app"] = story["app"]
  story_section += seealso_placeholder

  persona_section += story_section
@@ -496,8 +511,9 @@ class StoryListForPersonaDirective(SphinxDirective):
  persona_normalized = normalize_name(persona_arg)

  # Filter stories for this persona
- stories = [s for s in _story_registry
- if s["persona_normalized"] == persona_normalized]
+ stories = [
+ s for s in _story_registry if s["persona_normalized"] == persona_normalized
+ ]

  if not stories:
  para = nodes.paragraph()
@@ -513,7 +529,7 @@
  # Simple bullet list: "story name (App Name)"
  story_list = nodes.bullet_list()

- for story in sorted(stories, key=lambda s: s['feature'].lower()):
+ for story in sorted(stories, key=lambda s: s["feature"].lower()):
  story_item = nodes.list_item()
  story_para = nodes.paragraph()

@@ -522,15 +538,17 @@

  # App in parentheses
  story_para += nodes.Text(" (")
- app_path = f"{prefix}{config.get_doc_path('applications')}/{story['app']}.html"
- app_valid = normalize_name(story['app']) in _known_apps
+ app_path = (
+ f"{prefix}{config.get_doc_path('applications')}/{story['app']}.html"
+ )
+ app_valid = normalize_name(story["app"]) in _known_apps

  if app_valid:
  app_ref = nodes.reference("", "", refuri=app_path)
- app_ref += nodes.Text(story['app'].replace("-", " ").title())
+ app_ref += nodes.Text(story["app"].replace("-", " ").title())
  story_para += app_ref
  else:
- story_para += nodes.Text(story['app'].replace("-", " ").title())
+ story_para += nodes.Text(story["app"].replace("-", " ").title())

  story_para += nodes.Text(")")

@@ -558,8 +576,7 @@ class StoryListForAppDirective(SphinxDirective):
  app_normalized = normalize_name(app_arg)

  # Filter stories for this app
- stories = [s for s in _story_registry
- if s["app_normalized"] == app_normalized]
+ stories = [s for s in _story_registry if s["app_normalized"] == app_normalized]

  if not stories:
  para = nodes.paragraph()
@@ -584,7 +601,9 @@
  # Persona heading (strong with link)
  persona_heading = nodes.paragraph()
  persona_slug = persona.lower().replace(" ", "-")
- persona_path = f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ persona_path = (
+ f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ )

  if persona_valid:
  persona_ref = nodes.reference("", "", refuri=persona_path)
@@ -666,7 +685,9 @@ class StoryIndexDirective(SphinxDirective):

  # Link to app's story page
  app_ref = nodes.reference("", "", refuri=f"{app}.html")
- app_ref += nodes.strong(text=app.replace("-", " ").replace("_", " ").title())
+ app_ref += nodes.strong(
+ text=app.replace("-", " ").replace("_", " ").title()
+ )
  app_para += app_ref
  app_para += nodes.Text(f" ({count} stories)")

@@ -734,7 +755,9 @@ class StoriesDirective(SphinxDirective):
  # Persona heading (strong)
  persona_heading = nodes.paragraph()
  persona_slug = persona.lower().replace(" ", "-")
- persona_path = f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ persona_path = (
+ f"{prefix}{config.get_doc_path('personas')}/{persona_slug}.html"
+ )
  persona_valid = normalize_name(persona) in _known_personas

  if persona_valid:
@@ -783,7 +806,9 @@
  app_ref += nodes.Text(story["app"].replace("-", " ").title())
  feature_para += app_ref
  else:
- feature_para += nodes.Text(story["app"].replace("-", " ").title())
+ feature_para += nodes.Text(
+ story["app"].replace("-", " ").title()
+ )
  feature_para += nodes.emphasis(text=" (?)")

  feature_para += nodes.Text(")")
@@ -836,6 +861,7 @@ class StoryRefDirective(SphinxDirective):

  # Deprecated alias directives - emit warnings and delegate to new names

+
  def _make_deprecated_directive(new_directive_class, old_name: str, new_name: str):
  """Create a deprecated alias directive that warns and delegates."""

@@ -859,15 +885,15 @@ def process_story_seealso_placeholders(app, doctree):
  docname = env.docname

  for node in doctree.traverse(StorySeeAlsoPlaceholder):
- story_feature = node['story_feature']
- story_persona = node['story_persona']
- story_app = node.get('story_app')
+ story_feature = node["story_feature"]
+ story_persona = node["story_persona"]
+ story_app = node.get("story_app")

  # Build a minimal story dict for the helper function
  story = {
- 'feature': story_feature,
- 'persona': story_persona,
- 'app': story_app,
+ "feature": story_feature,
+ "persona": story_persona,
+ "app": story_app,
  }

  seealso = build_story_seealso(story, env, docname)
@@ -892,35 +918,37 @@ def setup(app):
  # Deprecated aliases (gherkin-* -> story-*)
  app.add_directive(
  "gherkin-story",
- _make_deprecated_directive(StoryRefDirective, "gherkin-story", "story")
+ _make_deprecated_directive(StoryRefDirective, "gherkin-story", "story"),
  )
  app.add_directive(
  "gherkin-stories",
- _make_deprecated_directive(StoriesDirective, "gherkin-stories", "stories")
+ _make_deprecated_directive(StoriesDirective, "gherkin-stories", "stories"),
  )
  app.add_directive(
  "gherkin-stories-for-persona",
  _make_deprecated_directive(
  StoryListForPersonaDirective,
  "gherkin-stories-for-persona",
- "story-list-for-persona"
- )
+ "story-list-for-persona",
+ ),
  )
  app.add_directive(
  "gherkin-stories-for-app",
  _make_deprecated_directive(
- StoryListForAppDirective,
- "gherkin-stories-for-app",
- "story-list-for-app"
- )
+ StoryListForAppDirective, "gherkin-stories-for-app", "story-list-for-app"
+ ),
  )
  app.add_directive(
  "gherkin-stories-index",
- _make_deprecated_directive(StoryIndexDirective, "gherkin-stories-index", "story-index")
+ _make_deprecated_directive(
+ StoryIndexDirective, "gherkin-stories-index", "story-index"
+ ),
  )
  app.add_directive(
  "gherkin-app-stories",
- _make_deprecated_directive(StoryAppDirective, "gherkin-app-stories", "story-app")
+ _make_deprecated_directive(
+ StoryAppDirective, "gherkin-app-stories", "story-app"
+ ),
  )

  app.add_node(StorySeeAlsoPlaceholder)
@@ -4,6 +4,7 @@ Common functions used across multiple extension modules.
  """

  import re
+
  from docutils import nodes


@@ -29,10 +30,10 @@ def slugify(text: str) -> str:
  URL-safe slug string
  """
  slug = text.lower()
- slug = re.sub(r'[^a-z0-9\s-]', '', slug)
- slug = re.sub(r'[\s_]+', '-', slug)
- slug = re.sub(r'-+', '-', slug)
- return slug.strip('-')
+ slug = re.sub(r"[^a-z0-9\s-]", "", slug)
+ slug = re.sub(r"[\s_]+", "-", slug)
+ slug = re.sub(r"-+", "-", slug)
+ return slug.strip("-")


  def kebab_to_snake(name: str) -> str:
@@ -65,8 +66,8 @@ def parse_list_option(value: str) -> list[str]:
  if not value:
  return []
  items = []
- for line in value.strip().split('\n'):
- item = line.strip().lstrip('- ')
+ for line in value.strip().split("\n"):
+ item = line.strip().lstrip("- ")
  if item:
  items.append(item)
  return items
@@ -83,7 +84,7 @@ def parse_csv_option(value: str) -> list[str]:
  """
  if not value:
  return []
- return [item.strip() for item in value.split(',') if item.strip()]
+ return [item.strip() for item in value.split(",") if item.strip()]


  def parse_integration_options(value: str) -> list[dict]:
@@ -102,24 +103,28 @@ def parse_integration_options(value: str) -> list[dict]:
  return []

  items = []
- for line in value.strip().split('\n'):
- line = line.strip().lstrip('- ')
+ for line in value.strip().split("\n"):
+ line = line.strip().lstrip("- ")
  if not line:
  continue

  # Parse: slug (description) or just slug
- match = re.match(r'^([a-z0-9-]+)\s*(?:\(([^)]+)\))?$', line.strip())
+ match = re.match(r"^([a-z0-9-]+)\s*(?:\(([^)]+)\))?$", line.strip())
  if match:
- items.append({
- 'slug': match.group(1),
- 'description': match.group(2).strip() if match.group(2) else None,
- })
+ items.append(
+ {
+ "slug": match.group(1),
+ "description": match.group(2).strip() if match.group(2) else None,
+ }
+ )
  else:
  # Fallback: treat whole line as slug
- items.append({
- 'slug': line.strip(),
- 'description': None,
- })
+ items.append(
+ {
+ "slug": line.strip(),
+ "description": None,
+ }
+ )

  return items
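
Note: for the option parsing reformatted above, a line such as "- github (code hosting)" yields a slug plus an optional description. A small self-contained sketch that replays the same regex on invented example input; this is not code from the package and the slugs are arbitrary:

import re

# Illustrative only: mirrors the "slug (description)" pattern from the hunk above.
pattern = re.compile(r"^([a-z0-9-]+)\s*(?:\(([^)]+)\))?$")

for raw in ["- github (code hosting)", "- slack"]:
    line = raw.strip().lstrip("- ")
    match = pattern.match(line)
    print({
        "slug": match.group(1),
        "description": match.group(2).strip() if match.group(2) else None,
    })
# {'slug': 'github', 'description': 'code hosting'}
# {'slug': 'slack', 'description': None}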
@@ -91,13 +91,11 @@ class Document(BaseModel):
  # Additional data and content stream
  additional_metadata: dict[str, Any] = Field(default_factory=dict)
  content: ContentStream | None = Field(default=None, exclude=True)
- content_string: str | None = Field(
+
+ content_bytes: bytes | None = Field(
  default=None,
- description="Small content as string (few KB max). Use for "
- "workflow-generated content to avoid ContentStream serialization "
- "issues. For larger content, ensure calling from concrete "
- "implementations (ie. outside workflows and use-cases) and use "
- "content field instead.",
+ description="Raw content as bytes for cases where direct in-memory "
+ "binary payloads are preferred over ContentStream.",
  )

  @field_validator("document_id")
@@ -124,29 +122,22 @@ class Document(BaseModel):
  @field_validator("content_multihash")
  @classmethod
  def content_multihash_must_not_be_empty(cls, v: str) -> str:
- # TODO: actually validate the multihash against the content?
  if not v or not v.strip():
  raise ValueError("Content multihash cannot be empty")
  return v.strip()

  @model_validator(mode="after")
  def validate_content_fields(self, info: ValidationInfo) -> "Document":
- """Ensure document has either content or content_string, not both."""
- # Check if we're in a Temporal deserialization context
+ """Ensure document has at least content, or content_bytes."""
+
+ # Skip validation in Temporal deserialization context
  if info.context and info.context.get("temporal_validation"):
  return self

- # Normal validation for direct instantiation
  has_content = self.content is not None
- has_content_string = self.content_string is not None
-
- if has_content and has_content_string:
- raise ValueError(
- "Document cannot have both content and content_string. "
- "Provide only one."
- )
- elif not has_content and not has_content_string:
- raise ValueError(
- "Document must have either content or content_string. " "Provide one."
- )
+ has_content_bytes = self.content_bytes is not None
+
+ if not (has_content or has_content_bytes):
+ raise ValueError("Document must have one of: content, or content_bytes.")
+
  return self
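
Note: the net effect of the Document change above is that content_string is removed, content_bytes takes its place, and the validator now only requires at least one of content or content_bytes (the old both-fields-provided error is gone, so supplying both is no longer rejected). A minimal standalone pydantic sketch of that at-least-one-of pattern; this toy model is not the real Document and omits its other fields and validators:

from pydantic import BaseModel, model_validator


class PayloadSketch(BaseModel):
    # Toy stand-ins; the real Document uses ContentStream and more fields.
    content: str | None = None
    content_bytes: bytes | None = None

    @model_validator(mode="after")
    def require_some_content(self) -> "PayloadSketch":
        if self.content is None and self.content_bytes is None:
            raise ValueError("must have one of: content, or content_bytes")
        return self


PayloadSketch(content_bytes=b"raw payload")  # accepted
PayloadSketch(content="inline text")         # accepted
# PayloadSketch()                            # raises a ValidationError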