langfun 0.1.2.dev202509020804__py3-none-any.whl → 0.1.2.dev202511110805__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langfun might be problematic. See the package registry's advisory page for more details.

Files changed (133)
  1. langfun/__init__.py +1 -1
  2. langfun/core/__init__.py +6 -1
  3. langfun/core/agentic/__init__.py +4 -0
  4. langfun/core/agentic/action.py +412 -103
  5. langfun/core/agentic/action_eval.py +9 -2
  6. langfun/core/agentic/action_test.py +68 -6
  7. langfun/core/async_support.py +104 -5
  8. langfun/core/async_support_test.py +23 -0
  9. langfun/core/coding/python/correction.py +19 -9
  10. langfun/core/coding/python/execution.py +14 -12
  11. langfun/core/coding/python/generation.py +21 -16
  12. langfun/core/coding/python/sandboxing.py +23 -3
  13. langfun/core/component.py +42 -3
  14. langfun/core/concurrent.py +70 -6
  15. langfun/core/concurrent_test.py +9 -2
  16. langfun/core/console.py +1 -1
  17. langfun/core/data/conversion/anthropic.py +12 -3
  18. langfun/core/data/conversion/anthropic_test.py +8 -6
  19. langfun/core/data/conversion/gemini.py +9 -2
  20. langfun/core/data/conversion/gemini_test.py +12 -9
  21. langfun/core/data/conversion/openai.py +145 -31
  22. langfun/core/data/conversion/openai_test.py +161 -17
  23. langfun/core/eval/base.py +47 -43
  24. langfun/core/eval/base_test.py +4 -4
  25. langfun/core/eval/matching.py +5 -2
  26. langfun/core/eval/patching.py +3 -3
  27. langfun/core/eval/scoring.py +4 -3
  28. langfun/core/eval/v2/__init__.py +1 -0
  29. langfun/core/eval/v2/checkpointing.py +30 -4
  30. langfun/core/eval/v2/eval_test_helper.py +1 -1
  31. langfun/core/eval/v2/evaluation.py +60 -14
  32. langfun/core/eval/v2/example.py +22 -11
  33. langfun/core/eval/v2/experiment.py +51 -8
  34. langfun/core/eval/v2/metric_values.py +31 -3
  35. langfun/core/eval/v2/metric_values_test.py +32 -0
  36. langfun/core/eval/v2/metrics.py +39 -4
  37. langfun/core/eval/v2/metrics_test.py +14 -0
  38. langfun/core/eval/v2/progress.py +30 -1
  39. langfun/core/eval/v2/progress_test.py +27 -0
  40. langfun/core/eval/v2/progress_tracking_test.py +6 -0
  41. langfun/core/eval/v2/reporting.py +90 -71
  42. langfun/core/eval/v2/reporting_test.py +20 -6
  43. langfun/core/eval/v2/runners.py +27 -7
  44. langfun/core/eval/v2/runners_test.py +3 -0
  45. langfun/core/langfunc.py +45 -130
  46. langfun/core/langfunc_test.py +6 -4
  47. langfun/core/language_model.py +151 -31
  48. langfun/core/language_model_test.py +9 -3
  49. langfun/core/llms/__init__.py +12 -1
  50. langfun/core/llms/anthropic.py +157 -2
  51. langfun/core/llms/azure_openai.py +29 -17
  52. langfun/core/llms/cache/base.py +25 -3
  53. langfun/core/llms/cache/in_memory.py +48 -7
  54. langfun/core/llms/cache/in_memory_test.py +14 -4
  55. langfun/core/llms/compositional.py +25 -1
  56. langfun/core/llms/deepseek.py +30 -2
  57. langfun/core/llms/fake.py +39 -1
  58. langfun/core/llms/fake_test.py +9 -0
  59. langfun/core/llms/gemini.py +43 -7
  60. langfun/core/llms/google_genai.py +34 -1
  61. langfun/core/llms/groq.py +28 -3
  62. langfun/core/llms/llama_cpp.py +23 -4
  63. langfun/core/llms/openai.py +93 -3
  64. langfun/core/llms/openai_compatible.py +148 -27
  65. langfun/core/llms/openai_compatible_test.py +207 -20
  66. langfun/core/llms/openai_test.py +0 -2
  67. langfun/core/llms/rest.py +16 -1
  68. langfun/core/llms/vertexai.py +59 -8
  69. langfun/core/logging.py +1 -1
  70. langfun/core/mcp/__init__.py +10 -0
  71. langfun/core/mcp/client.py +177 -0
  72. langfun/core/mcp/client_test.py +71 -0
  73. langfun/core/mcp/session.py +241 -0
  74. langfun/core/mcp/session_test.py +54 -0
  75. langfun/core/mcp/testing/simple_mcp_client.py +33 -0
  76. langfun/core/mcp/testing/simple_mcp_server.py +33 -0
  77. langfun/core/mcp/tool.py +256 -0
  78. langfun/core/mcp/tool_test.py +197 -0
  79. langfun/core/memory.py +1 -0
  80. langfun/core/message.py +160 -55
  81. langfun/core/message_test.py +65 -81
  82. langfun/core/modalities/__init__.py +8 -0
  83. langfun/core/modalities/audio.py +21 -1
  84. langfun/core/modalities/image.py +19 -1
  85. langfun/core/modalities/mime.py +62 -3
  86. langfun/core/modalities/pdf.py +19 -1
  87. langfun/core/modalities/video.py +21 -1
  88. langfun/core/modality.py +167 -29
  89. langfun/core/modality_test.py +42 -12
  90. langfun/core/natural_language.py +1 -1
  91. langfun/core/sampling.py +4 -4
  92. langfun/core/sampling_test.py +20 -4
  93. langfun/core/structured/completion.py +34 -44
  94. langfun/core/structured/completion_test.py +23 -43
  95. langfun/core/structured/description.py +54 -50
  96. langfun/core/structured/function_generation.py +29 -12
  97. langfun/core/structured/mapping.py +74 -28
  98. langfun/core/structured/parsing.py +90 -74
  99. langfun/core/structured/parsing_test.py +0 -3
  100. langfun/core/structured/querying.py +242 -156
  101. langfun/core/structured/querying_test.py +95 -64
  102. langfun/core/structured/schema.py +70 -10
  103. langfun/core/structured/schema_generation.py +33 -14
  104. langfun/core/structured/scoring.py +45 -34
  105. langfun/core/structured/tokenization.py +24 -9
  106. langfun/core/subscription.py +2 -2
  107. langfun/core/template.py +175 -50
  108. langfun/core/template_test.py +123 -17
  109. langfun/env/__init__.py +43 -0
  110. langfun/env/base_environment.py +827 -0
  111. langfun/env/base_environment_test.py +473 -0
  112. langfun/env/base_feature.py +304 -0
  113. langfun/env/base_feature_test.py +228 -0
  114. langfun/env/base_sandbox.py +842 -0
  115. langfun/env/base_sandbox_test.py +1235 -0
  116. langfun/env/event_handlers/__init__.py +14 -0
  117. langfun/env/event_handlers/chain.py +233 -0
  118. langfun/env/event_handlers/chain_test.py +253 -0
  119. langfun/env/event_handlers/event_logger.py +472 -0
  120. langfun/env/event_handlers/event_logger_test.py +304 -0
  121. langfun/env/event_handlers/metric_writer.py +726 -0
  122. langfun/env/event_handlers/metric_writer_test.py +214 -0
  123. langfun/env/interface.py +1640 -0
  124. langfun/env/interface_test.py +151 -0
  125. langfun/env/load_balancers.py +59 -0
  126. langfun/env/load_balancers_test.py +139 -0
  127. langfun/env/test_utils.py +497 -0
  128. {langfun-0.1.2.dev202509020804.dist-info → langfun-0.1.2.dev202511110805.dist-info}/METADATA +7 -3
  129. langfun-0.1.2.dev202511110805.dist-info/RECORD +200 -0
  130. langfun-0.1.2.dev202509020804.dist-info/RECORD +0 -172
  131. {langfun-0.1.2.dev202509020804.dist-info → langfun-0.1.2.dev202511110805.dist-info}/WHEEL +0 -0
  132. {langfun-0.1.2.dev202509020804.dist-info → langfun-0.1.2.dev202511110805.dist-info}/licenses/LICENSE +0 -0
  133. {langfun-0.1.2.dev202509020804.dist-info → langfun-0.1.2.dev202511110805.dist-info}/top_level.txt +0 -0
@@ -407,22 +407,17 @@ class CompleteStructureTest(unittest.TestCase):
407
407
  image: modalities.Image
408
408
  name: str
409
409
 
410
+ image_elephant = modalities.Image.from_bytes(b'image_of_elephant')
411
+ image_rabbit = modalities.Image.from_bytes(b'image_of_rabbit')
410
412
  input_value = schema_lib.mark_missing(
411
- Animal.partial(
412
- modalities.Image.from_bytes(b'image_of_elephant'),
413
- )
413
+ Animal.partial(image_elephant)
414
414
  )
415
415
  l = completion._CompleteStructure(
416
416
  input=input_value,
417
417
  examples=[
418
418
  mapping.MappingExample(
419
- input=Animal.partial(
420
- modalities.Image.from_bytes(b'image_of_rabbit')
421
- ),
422
- output=Animal(
423
- modalities.Image.from_bytes(b'image_of_rabbit'),
424
- 'rabbit',
425
- ),
419
+ input=Animal.partial(image_rabbit),
420
+ output=Animal(image_rabbit, 'rabbit'),
426
421
  )
427
422
  ],
428
423
  )
@@ -430,7 +425,7 @@ class CompleteStructureTest(unittest.TestCase):
430
425
  self.maxDiff = None
431
426
  self.assertEqual(
432
427
  lm_input.text,
433
- inspect.cleandoc("""
428
+ inspect.cleandoc(f"""
434
429
  Please generate the OUTPUT_OBJECT by completing the MISSING fields from the last INPUT_OBJECT.
435
430
 
436
431
  INSTRUCTIONS:
@@ -457,22 +452,22 @@ class CompleteStructureTest(unittest.TestCase):
457
452
  ```python
458
453
  Animal(
459
454
  image=ModalityRef(
460
- name='examples[0].input.image'
455
+ id='{image_rabbit.id}'
461
456
  ),
462
457
  name=MISSING(str)
463
458
  )
464
459
  ```
465
460
 
466
461
  MODALITY_REFERENCES:
467
- {
468
- 'examples[0].input.image': <<[[examples[0].input.image]]>>
469
- }
462
+ {{
463
+ '{image_rabbit.id}': <<[[{image_rabbit.id}]]>>
464
+ }}
470
465
 
471
466
  OUTPUT_OBJECT:
472
467
  ```python
473
468
  Animal(
474
469
  image=ModalityRef(
475
- name='examples[0].output.image'
470
+ id='{image_rabbit.id}'
476
471
  ),
477
472
  name='rabbit'
478
473
  )
@@ -483,16 +478,16 @@ class CompleteStructureTest(unittest.TestCase):
483
478
  ```python
484
479
  Animal(
485
480
  image=ModalityRef(
486
- name='input.image'
481
+ id='{image_elephant.id}'
487
482
  ),
488
483
  name=MISSING(str)
489
484
  )
490
485
  ```
491
486
 
492
487
  MODALITY_REFERENCES:
493
- {
494
- 'input.image': <<[[input.image]]>>
495
- }
488
+ {{
489
+ '{image_elephant.id}': <<[[{image_elephant.id}]]>>
490
+ }}
496
491
 
497
492
  OUTPUT_OBJECT:
498
493
  """),
@@ -500,39 +495,27 @@ class CompleteStructureTest(unittest.TestCase):
500
495
  self.assertTrue(
501
496
  pg.eq(
502
497
  {
503
- 'examples': lm_input.get('examples'),
504
- 'input': lm_input.get('input'),
498
+ 'examples': lm_input.__template_input__.examples,
499
+ 'input': lm_input.__template_input__.mapping_request.input,
505
500
  },
506
501
  {
507
502
  'examples': [
508
503
  mapping.MappingExample(
509
- input=Animal.partial(
510
- image=modalities.Image.from_bytes(
511
- b'image_of_rabbit'
512
- )
513
- ),
514
- output=Animal.partial(
515
- image=modalities.Image.from_bytes(
516
- b'image_of_rabbit'
517
- ),
518
- name='rabbit',
519
- ),
504
+ input=Animal.partial(image_rabbit),
505
+ output=Animal.partial(image_rabbit, 'rabbit'),
520
506
  )
521
507
  ],
522
- 'input': Animal(
523
- image=modalities.Image.from_bytes(b'image_of_elephant'),
524
- name=schema_lib.MISSING,
525
- ),
508
+ 'input': Animal(image_elephant, name=schema_lib.MISSING),
526
509
  },
527
510
  )
528
511
  )
529
512
  lm_output = l(
530
513
  input=input_value,
531
- lm=fake.StaticResponse(inspect.cleandoc("""
514
+ lm=fake.StaticResponse(inspect.cleandoc(f"""
532
515
  ```python
533
516
  Animal(
534
517
  image=ModalityRef(
535
- name='input.image'
518
+ id='{image_elephant.id}'
536
519
  ),
537
520
  name='elephant'
538
521
  )
@@ -542,10 +525,7 @@ class CompleteStructureTest(unittest.TestCase):
542
525
  self.assertTrue(
543
526
  pg.eq(
544
527
  lm_output.result,
545
- Animal(
546
- image=modalities.Image.from_bytes(b'image_of_elephant'),
547
- name='elephant',
548
- ),
528
+ Animal(image=image_elephant, name='elephant'),
549
529
  )
550
530
  )
551
531
 
@@ -23,7 +23,7 @@ import pyglove as pg
23
23
 
24
24
  @pg.use_init_args(['examples'])
25
25
  class _DescribeStructure(mapping.Mapping):
26
- """Describe a structured value in natural language."""
26
+ """Describes a structured value in natural language."""
27
27
 
28
28
  input_title = 'PYTHON_OBJECT'
29
29
  context_title = 'CONTEXT_FOR_DESCRIPTION'
@@ -47,64 +47,68 @@ def describe(
47
47
  cache_seed: int | None = 0,
48
48
  **kwargs,
49
49
  ) -> str:
50
- """Describes a structured value using natural language.
51
-
52
- Examples:
53
-
54
- ```
55
- class FlightDuration(pg.Object):
56
- hours: int
57
- minutes: int
58
-
59
- class Flight(pg.Object):
60
- airline: str
61
- flight_number: str
62
- departure_airport: str
63
- arrival_airport: str
64
- departure_time: str
65
- arrival_time: str
66
- duration: FlightDuration
67
- stops: int
68
- price: float
69
-
70
- text = lf.describe(
71
- Flight(
72
- airline='United Airlines',
73
- flight_number='UA2631',
74
- depature_airport: 'SFO',
75
- arrival_airport: 'JFK',
76
- depature_time: '2023-09-07T05:15:00',
77
- arrival_time: '2023-09-07T12:12:00',
78
- duration: FlightDuration(
79
- hours=7,
80
- minutes=57
81
- ),
82
- stops=1,
83
- price=227,
84
- ))
85
- print(text)
86
-
87
- >> The flight is operated by United Airlines, has the flight number UA2631,
88
- >> departs from San Francisco International Airport (SFO), arrives at John
89
- >> F. Kennedy International Airport (JFK), It departs at
90
- >> 2023-09-07T05:15:00, arrives at 2023-09-07T12:12:00, has a duration of 7
91
- >> hours and 57 minutes, makes 1 stop, and costs $227.
92
- ```
50
+ """Describes a structured value in natural language using an LLM.
51
+
52
+ `lf.describe` takes a Python object, often a `pg.Object` instance,
53
+ and uses a language model to generate a human-readable, natural language
54
+ description of its content. It is the inverse of `lf.parse`.
55
+
56
+ **Example:**
57
+
58
+ ```python
59
+ import langfun as lf
60
+ import pyglove as pg
61
+
62
+ class FlightDuration(pg.Object):
63
+ hours: int
64
+ minutes: int
65
+
66
+ class Flight(pg.Object):
67
+ airline: str
68
+ flight_number: str
69
+ departure_airport: str
70
+ arrival_airport: str
71
+ departure_time: str
72
+ arrival_time: str
73
+ duration: FlightDuration
74
+ stops: int
75
+ price: float
76
+
77
+ flight_info = Flight(
78
+ airline='United Airlines',
79
+ flight_number='UA2631',
80
+ departure_airport='SFO',
81
+ arrival_airport='JFK',
82
+ departure_time='2023-09-07T05:15:00',
83
+ arrival_time='2023-09-07T12:12:00',
84
+ duration=FlightDuration(hours=7, minutes=57),
85
+ stops=1,
86
+ price=227,
87
+ )
88
+
89
+ description = lf.describe(flight_info, lm=lf.llms.Gemini25Flash())
90
+ print(description)
91
+ # Possible output:
92
+ # The flight is operated by United Airlines, with the flight number UA2631,
93
+ # departing from SFO at 2023-09-07T05:15:00 and arriving at JFK at
94
+ # 2023-09-07T12:12:00. The flight duration is 7 hours and 57 minutes,
95
+ # with 1 stop, and costs $227.
96
+ ```
93
97
 
94
98
  Args:
95
99
  value: A structured value to be mapped.
96
100
  context: The context information for describing the structured value.
97
101
  lm: The language model to use. If not specified, the language model from
98
102
  `lf.context` context manager will be used.
99
- examples: An optional list of fewshot examples for helping parsing. If None,
100
- the default one-shot example will be added.
103
+ examples: An optional list of fewshot examples for guiding description.
104
+ If None, default examples will be used.
101
105
  cache_seed: Seed for computing cache key. The cache key is determined by a
102
106
  tuple of (lm, prompt, cache seed). If None, cache will be disabled for
103
107
  the query even cache is configured by the LM.
104
- **kwargs: Keyword arguments passed to the `lf.structured.DescribeStructure`.
108
+ **kwargs: Keyword arguments passed to the `_DescribeStructure`.
105
109
 
106
110
  Returns:
107
- The parsed result based on the schema.
111
+ A natural language description of the input value.
108
112
  """
109
113
  return _DescribeStructure(
110
114
  input=value,
@@ -115,10 +119,10 @@ def describe(
115
119
 
116
120
 
117
121
  def default_describe_examples() -> list[mapping.MappingExample]:
118
- """Default describe examples."""
122
+ """Returns default examples for `lf.describe`."""
119
123
 
120
124
  class Country(pg.Object):
121
- """A example dataclass for structured mapping."""
125
+ """An example dataclass for structured mapping."""
122
126
 
123
127
  name: str
124
128
  continents: list[
@@ -26,10 +26,10 @@ import pyglove as pg
26
26
 
27
27
 
28
28
  def unittest_gen(signature, lm, num_retries=1):
29
- """Generates unit tests for a python function signature."""
29
+ """Generates unit tests for a Python function signature."""
30
30
 
31
31
  class UnitTest(pg.Object):
32
- """A valid unit test for a python function."""
32
+ """A valid unit test for a Python function."""
33
33
 
34
34
  input: dict[str, Any]
35
35
  expected_output: Any
@@ -55,7 +55,7 @@ def unittest_gen(signature, lm, num_retries=1):
55
55
 
56
56
 
57
57
  def unittest_with_test_cases(f, unittests):
58
- """Applies unit tests to a python function to be tested."""
58
+ """Applies unit tests to a Python function to be tested."""
59
59
  if not unittests:
60
60
  raise ValueError(f"No unit tests provided: {unittests}")
61
61
 
@@ -87,10 +87,10 @@ def _function_gen(
87
87
  ] = None,
88
88
  unittest_num_retries: int = 1,
89
89
  ):
90
- """Generates a python function with LLM and verify its quality with unit testing."""
90
+ """Generates a Python function with LLM and verifies it with unit testing."""
91
91
 
92
92
  class PythonFunctionPrompt(template.Template):
93
- r"""A template for a python function generation.
93
+ r"""A template for a Python function generation.
94
94
 
95
95
  Please reply to the last PYTHON_FUNCTION_SIGNATURE with a self-sufficient,
96
96
  error-free, and efficiently coded PYTHON_FUNCTION, crafted to the standards
@@ -195,11 +195,28 @@ def function_gen(
195
195
  ] = None,
196
196
  unittest_num_retries: int = 1,
197
197
  ):
198
- """A decorator for automating function generation using a language model.
198
+ r"""Decorator for generating function implementations using an LLM.
199
199
 
200
- This decorator should be applied to functions that are not yet implemented. It
201
- facilitates the implementation via the specified LLM, ensuring
202
- quality through unit tests.
200
+ `lf.function_gen` is a decorator that automatically generates the
201
+ implementation of a Python function based on its signature and docstring,
202
+ using the specified language model. This is useful for quickly prototyping
203
+ functions or generating boilerplate code.
204
+
205
+ The decorator can also automatically generate and run unit tests to verify
206
+ the correctness of the generated implementation.
207
+
208
+ **Example:**
209
+
210
+ ```python
211
+ import langfun as lf
212
+
213
+ @lf.function_gen(lm=lf.llms.Gemini25Flash())
214
+ def product(a: int, b: int) -> int:
215
+ \"\"\"Returns product of a and b.\"\"\"
216
+
217
+ print(product(2, 3))
218
+ # Output: 6
219
+ ```
203
220
 
204
221
  Args:
205
222
  lm (lf.LanguageModel): The language model used for generating function
@@ -212,10 +229,10 @@ def function_gen(
212
229
  tests. You can either provide a list of test cases as tuples of inputs
213
230
  and outputs, or a function that throws an error if a test fails, or let
214
231
  LLM automatically create the unit test cases. If a generated function is
215
- and returned, it should pass all the unittests.
232
+ returned, it should pass all the unit tests.
216
233
  unittest_num_retries: If unittest is set to "auto", this parameter
217
- specifies the number of times the LLM's attempts to generate unit test
218
- cases.
234
+ specifies the number of times the LLM should attempt to generate unit
235
+ test cases.
219
236
 
220
237
  Returns:
221
238
  The implemented function object.
@@ -22,7 +22,16 @@ import pyglove as pg
22
22
 
23
23
 
24
24
  class MappingError(Exception): # pylint: disable=g-bad-exception-name
25
- """Mapping error."""
25
+ """Error raised during a structured mapping task.
26
+
27
+ `MappingError` is raised when a language model's response cannot be
28
+ successfully parsed or transformed into the target structure defined by
29
+ the schema in structured mapping operations like `lf.query` and `lf.parse`.
30
+
31
+ This error encapsulates both the original exception that occurred during
32
+ parsing (`cause`) and the language model response (`lm_response`) that led
33
+ to the failure, allowing for easier debugging of mapping issues.
34
+ """
26
35
 
27
36
  def __init__(self, lm_response: lf.Message, cause: Exception):
28
37
  self._lm_response = lm_response
@@ -62,7 +71,53 @@ class MappingError(Exception): # pylint: disable=g-bad-exception-name
62
71
  class MappingExample(lf.NaturalLanguageFormattable,
63
72
  lf.Component,
64
73
  pg.views.HtmlTreeView.Extension):
65
- """Mapping example between text, schema and structured value."""
74
+ """Represents an example for a structured mapping task.
75
+
76
+ A `MappingExample` defines a single instance of a mapping between an input
77
+ value and an output value, optionally guided by a schema and/or a natural
78
+ language context. It is primarily used to provide few-shot examples to
79
+ structured mapping operations (e.g., `lf.query`, `lf.complete`,
80
+ and `lf.describe`), helping to guide the LLM in performing the desired mapping
81
+ task. If `output` is not provided, the example represents a request to perform
82
+ mapping on the `input`.
83
+
84
+ **Key Attributes:**
85
+
86
+ * `input`: The source value for the mapping (e.g., text, an object).
87
+ * `output`: The target value for the mapping (e.g., a structured object,
88
+ text). If not provided, this example represents a request to perform
89
+ the mapping.
90
+ * `schema`: An optional `lf.structured.Schema` that defines or constrains
91
+ the structure of the `output`. If provided, the LLM will be instructed
92
+ to produce an output conforming to this schema.
93
+ * `context`: Optional natural language context that provides additional
94
+ information relevant to the mapping task.
95
+ * `metadata`: Optional dictionary for additional metadata.
96
+
97
+ **Example:**
98
+
99
+ ```python
100
+ import langfun as lf
101
+ import pyglove as pg
102
+
103
+ # Example for translating English to French
104
+ lf.MappingExample(
105
+ input="Hello",
106
+ output="Bonjour"
107
+ )
108
+
109
+ # Example for extracting structured data
110
+ class Flight(pg.Object):
111
+ airline: str
112
+ flight_number: str
113
+
114
+ lf.MappingExample(
115
+ input="I want to book flight AA123.",
116
+ output=Flight(airline="AA", flight_number="123"),
117
+ schema=Flight
118
+ )
119
+ ```
120
+ """
66
121
 
67
122
  input: pg.typing.Annotated[
68
123
  pg.typing.Any(transform=schema_lib.mark_missing),
@@ -84,7 +139,7 @@ class MappingExample(lf.NaturalLanguageFormattable,
84
139
  # Automatic conversion from annotation to schema.
85
140
  schema_lib.schema_spec(noneable=True),
86
141
  (
87
- 'A `lf.structured.Schema` object that constrains target value '
142
+ 'A `lf.structured.Schema` object that constrains target value. '
88
143
  'If None, the target is expected to be a natural language-based '
89
144
  'response returned from LMs.'
90
145
  ),
@@ -99,7 +154,7 @@ class MappingExample(lf.NaturalLanguageFormattable,
99
154
  dict[str, Any],
100
155
  (
101
156
  'The metadata associated with the mapping example, '
102
- 'which chould carry structured data, such as tool function input. '
157
+ 'which could carry structured data, such as tool function input. '
103
158
  'It is a `pg.Dict` object whose keys can be accessed by attributes.'
104
159
  ),
105
160
  ] = pg.Dict()
@@ -127,6 +182,8 @@ class MappingExample(lf.NaturalLanguageFormattable,
127
182
  ) -> str:
128
183
  if isinstance(value, str):
129
184
  return value
185
+ if isinstance(value, lf.Message):
186
+ return str(value)
130
187
  if isinstance(value, lf.Modality):
131
188
  with lf.modality.format_modality_as_ref():
132
189
  return str(value)
@@ -192,9 +249,7 @@ class MappingExample(lf.NaturalLanguageFormattable,
192
249
 
193
250
  def render_value(view, *, value, **kwargs):
194
251
  if isinstance(value, lf.Template):
195
- # Make a shallow copy to make sure modalities are rooted by
196
- # the input.
197
- value = value.clone().render()
252
+ value = value.render()
198
253
  if value is None:
199
254
  return None
200
255
  return view.render(value, **kwargs)
@@ -242,7 +297,7 @@ class MappingExample(lf.NaturalLanguageFormattable,
242
297
 
243
298
 
244
299
  class Mapping(lf.LangFunc):
245
- """Base class for mapping.
300
+ """Base class for LLM-based mapping operations.
246
301
 
247
302
  {{ preamble }}
248
303
 
@@ -263,19 +318,19 @@ class Mapping(lf.LangFunc):
263
318
  pg.Symbolic,
264
319
  (
265
320
  'The mapping input. It could be `lf.Message` (a pg.Symbolic '
266
- 'subclass) as natural language input, or other symbolic object '
321
+ 'subclass) as natural language input, or other symbolic objects '
267
322
  'as structured input.'
268
323
  ),
269
324
  ]
270
325
 
271
326
  context: Annotated[
272
- str | None, 'The mapping context. A string as natural language '
327
+ str | None, 'The mapping context as a natural language string.'
273
328
  ] = None
274
329
 
275
330
  schema: pg.typing.Annotated[
276
331
  # Automatic conversion from annotation to schema.
277
332
  schema_lib.schema_spec(noneable=True),
278
- 'A `lf.structured.Schema` object that constrains mapping output ',
333
+ 'A `lf.structured.Schema` object that constrains mapping output.',
279
334
  ] = None
280
335
 
281
336
  permission: Annotated[
@@ -286,12 +341,8 @@ class Mapping(lf.LangFunc):
286
341
  @property
287
342
  def mapping_request(self) -> MappingExample:
288
343
  """Returns a MappingExample as the mapping request."""
289
- if isinstance(self.input, lf.Message):
290
- input_value = self.input.text
291
- else:
292
- input_value = pg.Ref(self.input)
293
344
  return MappingExample(
294
- input=input_value,
345
+ input=pg.Ref(self.input),
295
346
  schema=pg.Ref(self.schema),
296
347
  context=self.context,
297
348
  )
@@ -382,16 +433,16 @@ class Mapping(lf.LangFunc):
382
433
  default: Annotated[
383
434
  Any,
384
435
  (
385
- 'The default value to use if the LM response is not a valid code '
386
- 'based on the schema (after autofix). '
387
- 'If unspecified, error will be raisen.'
436
+ 'The default value to use if parsing fails (after autofix). '
437
+ 'If `lf.RAISE_IF_HAS_ERROR` is used (default), an error will be '
438
+ 'raised instead.'
388
439
  ),
389
440
  ] = lf.RAISE_IF_HAS_ERROR
390
441
 
391
442
  response_postprocess: Annotated[
392
443
  Callable[[str], str] | None,
393
444
  (
394
- 'A callable object that post process the raw LLM response before '
445
+ 'A callable object that post-processes the raw LLM response before '
395
446
  'parsing it into the output Python object.'
396
447
  )
397
448
  ] = None
@@ -402,11 +453,6 @@ class Mapping(lf.LangFunc):
402
453
 
403
454
  def transform_input(self, lm_input: lf.Message) -> lf.Message:
404
455
  # Find modalities to fill the input message.
405
- lm_input.metadata.update(
406
- examples=pg.Ref(self.examples),
407
- input=pg.Ref(self.input),
408
- schema=pg.Ref(self.schema) if self.schema is not None else None,
409
- )
410
456
  if isinstance(self.input, lf.Message):
411
457
  lm_input.source = self.input
412
458
  return lm_input
@@ -429,7 +475,7 @@ class Mapping(lf.LangFunc):
429
475
  return lm_output
430
476
 
431
477
  def parse_result(self, lm_output: lf.Message) -> Any:
432
- """Parse result from LLM response."""
478
+ """Parses result from LLM response."""
433
479
  schema = self.mapping_request.schema
434
480
  if schema is None:
435
481
  return None
@@ -453,7 +499,7 @@ class Mapping(lf.LangFunc):
453
499
  )
454
500
 
455
501
  def postprocess_response(self, response: lf.Message) -> lf.Message:
456
- """Post process LLM response."""
502
+ """Post-processes LLM response."""
457
503
  if self.response_postprocess is not None:
458
504
  postprocessed_text = self.response_postprocess(response.text)
459
505
  if postprocessed_text != response.text:
@@ -461,7 +507,7 @@ class Mapping(lf.LangFunc):
461
507
  return response
462
508
 
463
509
  def postprocess_result(self, result: Any) -> Any:
464
- """Post process structured output."""
510
+ """Post-processes structured output."""
465
511
  return result
466
512
 
467
513
  def globals(self) -> dict[str, Any]: