arize-phoenix 3.24.0__py3-none-any.whl → 4.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic. Click here for more details.

Files changed (113)
  1. {arize_phoenix-3.24.0.dist-info → arize_phoenix-4.0.0.dist-info}/METADATA +26 -4
  2. {arize_phoenix-3.24.0.dist-info → arize_phoenix-4.0.0.dist-info}/RECORD +80 -75
  3. phoenix/__init__.py +9 -5
  4. phoenix/config.py +109 -53
  5. phoenix/datetime_utils.py +18 -1
  6. phoenix/db/README.md +25 -0
  7. phoenix/db/__init__.py +4 -0
  8. phoenix/db/alembic.ini +119 -0
  9. phoenix/db/bulk_inserter.py +206 -0
  10. phoenix/db/engines.py +152 -0
  11. phoenix/db/helpers.py +47 -0
  12. phoenix/db/insertion/evaluation.py +209 -0
  13. phoenix/db/insertion/helpers.py +54 -0
  14. phoenix/db/insertion/span.py +142 -0
  15. phoenix/db/migrate.py +71 -0
  16. phoenix/db/migrations/env.py +121 -0
  17. phoenix/db/migrations/script.py.mako +26 -0
  18. phoenix/db/migrations/versions/cf03bd6bae1d_init.py +280 -0
  19. phoenix/db/models.py +371 -0
  20. phoenix/exceptions.py +5 -1
  21. phoenix/server/api/context.py +40 -3
  22. phoenix/server/api/dataloaders/__init__.py +97 -0
  23. phoenix/server/api/dataloaders/cache/__init__.py +3 -0
  24. phoenix/server/api/dataloaders/cache/two_tier_cache.py +67 -0
  25. phoenix/server/api/dataloaders/document_evaluation_summaries.py +152 -0
  26. phoenix/server/api/dataloaders/document_evaluations.py +37 -0
  27. phoenix/server/api/dataloaders/document_retrieval_metrics.py +98 -0
  28. phoenix/server/api/dataloaders/evaluation_summaries.py +151 -0
  29. phoenix/server/api/dataloaders/latency_ms_quantile.py +198 -0
  30. phoenix/server/api/dataloaders/min_start_or_max_end_times.py +93 -0
  31. phoenix/server/api/dataloaders/record_counts.py +125 -0
  32. phoenix/server/api/dataloaders/span_descendants.py +64 -0
  33. phoenix/server/api/dataloaders/span_evaluations.py +37 -0
  34. phoenix/server/api/dataloaders/token_counts.py +138 -0
  35. phoenix/server/api/dataloaders/trace_evaluations.py +37 -0
  36. phoenix/server/api/input_types/SpanSort.py +138 -68
  37. phoenix/server/api/routers/v1/__init__.py +11 -0
  38. phoenix/server/api/routers/v1/evaluations.py +275 -0
  39. phoenix/server/api/routers/v1/spans.py +126 -0
  40. phoenix/server/api/routers/v1/traces.py +82 -0
  41. phoenix/server/api/schema.py +112 -48
  42. phoenix/server/api/types/DocumentEvaluationSummary.py +1 -1
  43. phoenix/server/api/types/Evaluation.py +29 -12
  44. phoenix/server/api/types/EvaluationSummary.py +29 -44
  45. phoenix/server/api/types/MimeType.py +2 -2
  46. phoenix/server/api/types/Model.py +9 -9
  47. phoenix/server/api/types/Project.py +240 -171
  48. phoenix/server/api/types/Span.py +87 -131
  49. phoenix/server/api/types/Trace.py +29 -20
  50. phoenix/server/api/types/pagination.py +151 -10
  51. phoenix/server/app.py +263 -35
  52. phoenix/server/grpc_server.py +93 -0
  53. phoenix/server/main.py +75 -60
  54. phoenix/server/openapi/docs.py +218 -0
  55. phoenix/server/prometheus.py +23 -7
  56. phoenix/server/static/index.js +662 -643
  57. phoenix/server/telemetry.py +68 -0
  58. phoenix/services.py +4 -0
  59. phoenix/session/client.py +34 -30
  60. phoenix/session/data_extractor.py +8 -3
  61. phoenix/session/session.py +176 -155
  62. phoenix/settings.py +13 -0
  63. phoenix/trace/attributes.py +349 -0
  64. phoenix/trace/dsl/README.md +116 -0
  65. phoenix/trace/dsl/filter.py +660 -192
  66. phoenix/trace/dsl/helpers.py +24 -5
  67. phoenix/trace/dsl/query.py +562 -185
  68. phoenix/trace/fixtures.py +69 -7
  69. phoenix/trace/otel.py +33 -199
  70. phoenix/trace/schemas.py +14 -8
  71. phoenix/trace/span_evaluations.py +5 -2
  72. phoenix/utilities/__init__.py +0 -26
  73. phoenix/utilities/span_store.py +0 -23
  74. phoenix/version.py +1 -1
  75. phoenix/core/project.py +0 -773
  76. phoenix/core/traces.py +0 -96
  77. phoenix/datasets/dataset.py +0 -214
  78. phoenix/datasets/fixtures.py +0 -24
  79. phoenix/datasets/schema.py +0 -31
  80. phoenix/experimental/evals/__init__.py +0 -73
  81. phoenix/experimental/evals/evaluators.py +0 -413
  82. phoenix/experimental/evals/functions/__init__.py +0 -4
  83. phoenix/experimental/evals/functions/classify.py +0 -453
  84. phoenix/experimental/evals/functions/executor.py +0 -353
  85. phoenix/experimental/evals/functions/generate.py +0 -138
  86. phoenix/experimental/evals/functions/processing.py +0 -76
  87. phoenix/experimental/evals/models/__init__.py +0 -14
  88. phoenix/experimental/evals/models/anthropic.py +0 -175
  89. phoenix/experimental/evals/models/base.py +0 -170
  90. phoenix/experimental/evals/models/bedrock.py +0 -221
  91. phoenix/experimental/evals/models/litellm.py +0 -134
  92. phoenix/experimental/evals/models/openai.py +0 -453
  93. phoenix/experimental/evals/models/rate_limiters.py +0 -246
  94. phoenix/experimental/evals/models/vertex.py +0 -173
  95. phoenix/experimental/evals/models/vertexai.py +0 -186
  96. phoenix/experimental/evals/retrievals.py +0 -96
  97. phoenix/experimental/evals/templates/__init__.py +0 -50
  98. phoenix/experimental/evals/templates/default_templates.py +0 -472
  99. phoenix/experimental/evals/templates/template.py +0 -195
  100. phoenix/experimental/evals/utils/__init__.py +0 -172
  101. phoenix/experimental/evals/utils/threads.py +0 -27
  102. phoenix/server/api/routers/evaluation_handler.py +0 -110
  103. phoenix/server/api/routers/span_handler.py +0 -70
  104. phoenix/server/api/routers/trace_handler.py +0 -60
  105. phoenix/storage/span_store/__init__.py +0 -23
  106. phoenix/storage/span_store/text_file.py +0 -85
  107. phoenix/trace/dsl/missing.py +0 -60
  108. {arize_phoenix-3.24.0.dist-info → arize_phoenix-4.0.0.dist-info}/WHEEL +0 -0
  109. {arize_phoenix-3.24.0.dist-info → arize_phoenix-4.0.0.dist-info}/licenses/IP_NOTICE +0 -0
  110. {arize_phoenix-3.24.0.dist-info → arize_phoenix-4.0.0.dist-info}/licenses/LICENSE +0 -0
  111. /phoenix/{datasets → db/insertion}/__init__.py +0 -0
  112. /phoenix/{experimental → db/migrations}/__init__.py +0 -0
  113. /phoenix/{storage → server/openapi}/__init__.py +0 -0
@@ -0,0 +1,349 @@
1
+ """
2
+ Span attribute keys have a special relationship with the `.` separator. When
3
+ a span attribute is ingested from protobuf, it's in the form of a key value
4
+ pair such as `("llm.token_count.completion", 123)`. What we need to do is to split
5
+ the key by the `.` separator and turn it into part of a nested dictionary such
6
+ as {"llm": {"token_count": {"completion": 123}}}. We also need to reverse this
7
+ process, which is to flatten the nested dictionary into a list of key value
8
+ pairs. This module provides functions to do both of these operations.
9
+
10
+ Note that digit keys are treated as indices of a nested array. For example,
11
+ the digits inside `("retrieval.documents.0.document.content", 'A')` and
12
+ `("retrieval.documents.1.document.content": 'B')` turn the sub-keys following
13
+ them into a nested list of dictionaries i.e.
14
`{"retrieval": {"documents": [{"document": {"content": "A"}}, {"document":
{"content": "B"}}]}}`.
16
+ """
17
+
18
+ import inspect
19
+ import json
20
+ from typing import (
21
+ Any,
22
+ DefaultDict,
23
+ Dict,
24
+ Iterable,
25
+ Iterator,
26
+ List,
27
+ Mapping,
28
+ Optional,
29
+ Sequence,
30
+ Set,
31
+ Tuple,
32
+ Union,
33
+ cast,
34
+ )
35
+
36
+ from openinference.semconv import trace
37
+ from openinference.semconv.trace import DocumentAttributes, SpanAttributes
38
+ from typing_extensions import assert_never
39
+
40
+ DOCUMENT_METADATA = DocumentAttributes.DOCUMENT_METADATA
41
+ LLM_PROMPT_TEMPLATE_VARIABLES = SpanAttributes.LLM_PROMPT_TEMPLATE_VARIABLES
42
+ METADATA = SpanAttributes.METADATA
43
+ TOOL_PARAMETERS = SpanAttributes.TOOL_PARAMETERS
44
+
45
+ # attributes interpreted as JSON strings during ingestion
46
+ JSON_STRING_ATTRIBUTES = (
47
+ DOCUMENT_METADATA,
48
+ LLM_PROMPT_TEMPLATE_VARIABLES,
49
+ METADATA,
50
+ TOOL_PARAMETERS,
51
+ )
52
+
53
+ SEMANTIC_CONVENTIONS: List[str] = sorted(
54
+ # e.g. "input.value", "llm.token_count.total", etc.
55
+ (
56
+ cast(str, getattr(klass, attr))
57
+ for name in dir(trace)
58
+ if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
59
+ for attr in dir(klass)
60
+ if attr.isupper()
61
+ ),
62
+ key=len,
63
+ reverse=True,
64
+ ) # sorted so the longer strings go first
65
+
66
+
67
+ def unflatten(
68
+ key_value_pairs: Iterable[Tuple[str, Any]],
69
+ *,
70
+ prefix_exclusions: Sequence[str] = (),
71
+ separator: str = ".",
72
+ ) -> Dict[str, Any]:
73
+ # `prefix_exclusions` is intended to contain the semantic conventions
74
+ trie = _build_trie(key_value_pairs, separator=separator, prefix_exclusions=prefix_exclusions)
75
+ return dict(_walk(trie, separator=separator))
76
+
77
+
78
+ def flatten(
79
+ obj: Union[Mapping[str, Any], Iterable[Any]],
80
+ *,
81
+ prefix: str = "",
82
+ separator: str = ".",
83
+ recurse_on_sequence: bool = False,
84
+ json_string_attributes: Optional[Sequence[str]] = None,
85
+ ) -> Iterator[Tuple[str, Any]]:
86
+ """
87
+ Flatten a nested dictionary or a sequence of dictionaries into a list of
88
+ key value pairs. If `recurse_on_sequence` is True, then the function will
89
+ also recursively flatten nested sequences of dictionaries. If
90
+ `json_string_attributes` is provided, then the function will interpret the
91
+ attributes in the list as JSON strings and convert them into dictionaries.
92
+ The `prefix` argument is used to prefix the keys in the output list, but
93
+ it's mostly used internally to facilitate recursion.
94
+ """
95
+ if isinstance(obj, Mapping):
96
+ yield from _flatten_mapping(
97
+ obj,
98
+ prefix=prefix,
99
+ recurse_on_sequence=recurse_on_sequence,
100
+ json_string_attributes=json_string_attributes,
101
+ separator=separator,
102
+ )
103
+ elif isinstance(obj, Iterable):
104
+ yield from _flatten_sequence(
105
+ obj,
106
+ prefix=prefix,
107
+ recurse_on_sequence=recurse_on_sequence,
108
+ json_string_attributes=json_string_attributes,
109
+ separator=separator,
110
+ )
111
+ else:
112
+ assert_never(obj)
113
+
114
+
115
+ def has_mapping(sequence: Iterable[Any]) -> bool:
116
+ """
117
+ Check if a sequence contains a dictionary. We don't flatten sequences that
118
+ only contain primitive types, such as strings, integers, etc. Conversely,
119
+ we'll only un-flatten digit sub-keys if it can be interpreted the index of
120
+ an array of dictionaries.
121
+ """
122
+ for item in sequence:
123
+ if isinstance(item, Mapping):
124
+ return True
125
+ return False
126
+
127
+
128
+ def get_attribute_value(
129
+ attributes: Optional[Mapping[str, Any]],
130
+ key: str,
131
+ separator: str = ".",
132
+ ) -> Optional[Any]:
133
+ """
134
+ Get the value of a nested attribute from a dictionary. The `key` is a
135
+ string that represents the path to the attribute, where each level is
136
+ separated by the `separator`. For example, if the dictionary is
137
+ `{"a": {"b": {"c": 1}}}` and the key is `"a.b.c"`, then the function
138
+ will return `1`. If the key is `"a.b"`, then the function will return
139
+ `{"c": 1}`.
140
+ """
141
+ if not attributes:
142
+ return None
143
+ sub_keys = key.split(separator)
144
+ for sub_key in sub_keys[:-1]:
145
+ attributes = attributes.get(sub_key)
146
+ if not attributes:
147
+ return None
148
+ return attributes.get(sub_keys[-1])
149
+
150
+
151
+ def load_json_strings(key_values: Iterable[Tuple[str, Any]]) -> Iterator[Tuple[str, Any]]:
152
+ for key, value in key_values:
153
+ if key.endswith(JSON_STRING_ATTRIBUTES):
154
+ try:
155
+ dict_value = json.loads(value)
156
+ except Exception:
157
+ yield key, value
158
+ else:
159
+ if dict_value:
160
+ yield key, dict_value
161
+ else:
162
+ yield key, value
163
+
164
+
165
+ def _partition_with_prefix_exclusion(
166
+ key: str,
167
+ separator: str = ".",
168
+ prefix_exclusions: Sequence[str] = (),
169
+ ) -> Tuple[str, str, str]:
170
+ """
171
+ Partition `key` by `separator`, but exclude prefixes in `prefix_exclusions`,
172
+ which is usually the list of semantic conventions. `prefix_exclusions` should
173
+ be sorted by length from the longest to the shortest
174
+ """
175
+ for prefix in prefix_exclusions:
176
+ if key.startswith(prefix) and (
177
+ len(key) == len(prefix) or key[len(prefix) :].startswith(separator)
178
+ ):
179
+ return prefix, separator, key[len(prefix) + len(separator) :]
180
+ return key.partition(separator)
181
+
182
+
183
+ class _Trie(DefaultDict[Union[str, int], "_Trie"]):
184
+ """
185
+ Prefix Tree with special handling for indices (i.e. all-digit keys). Indices
186
+ represent the position of an element in a nested list, while branches represent
187
+ the keys of a nested dictionary.
188
+ """
189
+
190
+ def __init__(self) -> None:
191
+ super().__init__(_Trie)
192
+ self.value: Any = None
193
+ self.indices: Set[int] = set()
194
+ self.branches: Set[Union[str, int]] = set()
195
+
196
+ def set_value(self, value: Any) -> None:
197
+ self.value = value
198
+ # value and indices must not coexist
199
+ self.branches.update(self.indices)
200
+ self.indices.clear()
201
+
202
+ def add_index(self, index: int) -> "_Trie":
203
+ if self.value is not None:
204
+ self.branches.add(index)
205
+ elif index not in self.branches:
206
+ self.indices.add(index)
207
+ return self[index]
208
+
209
+ def add_branch(self, branch: Union[str, int]) -> "_Trie":
210
+ if branch in self.indices:
211
+ self.indices.discard(cast(int, branch))
212
+ self.branches.add(branch)
213
+ return self[branch]
214
+
215
+
216
+ def _build_trie(
217
+ key_value_pairs: Iterable[Tuple[str, Any]],
218
+ *,
219
+ prefix_exclusions: Sequence[str] = (),
220
+ separator: str = ".",
221
+ ) -> _Trie:
222
+ """
223
+ Build a Trie (a.k.a. prefix tree) from `key_value_pairs`, by partitioning the keys by
224
+ separator. Each partition is a branch in the Trie. Special handling is done for partitions
225
+ that are all digits, e.g. "0", "12", etc., which are converted to integers and collected
226
+ as indices.
227
+ """
228
+ trie = _Trie()
229
+ for key, value in key_value_pairs:
230
+ if value is None:
231
+ continue
232
+ t = trie
233
+ while True:
234
+ prefix, _, suffix = _partition_with_prefix_exclusion(
235
+ key,
236
+ separator,
237
+ prefix_exclusions,
238
+ )
239
+ if prefix.isdigit():
240
+ index = int(prefix)
241
+ t = t.add_index(index) if suffix else t.add_branch(index)
242
+ else:
243
+ t = t.add_branch(prefix)
244
+ if not suffix:
245
+ break
246
+ key = suffix
247
+ t.set_value(value)
248
+ return trie
249
+
250
+
251
+ def _walk(
252
+ trie: _Trie,
253
+ *,
254
+ prefix: str = "",
255
+ separator: str = ".",
256
+ ) -> Iterator[Tuple[str, Any]]:
257
+ """
258
+ Walk the Trie and yield key value pairs. If the Trie node has a value, then
259
+ yield the prefix and the value. If the Trie node has indices, then yield the
260
+ prefix and a list of dictionaries. If the Trie node has branches, then yield
261
+ the prefix and a dictionary.
262
+ """
263
+ if trie.value is not None:
264
+ yield prefix, trie.value
265
+ elif prefix and trie.indices:
266
+ yield (
267
+ prefix,
268
+ [dict(_walk(trie[index], separator=separator)) for index in sorted(trie.indices)],
269
+ )
270
+ elif trie.indices:
271
+ for index in trie.indices:
272
+ yield from _walk(trie[index], prefix=f"{index}", separator=separator)
273
+ elif prefix:
274
+ yield prefix, dict(_walk(trie, separator=separator))
275
+ return
276
+ for branch in trie.branches:
277
+ new_prefix = f"{prefix}{separator}{branch}" if prefix else f"{branch}"
278
+ yield from _walk(trie[branch], prefix=new_prefix, separator=separator)
279
+
280
+
281
+ def _flatten_mapping(
282
+ mapping: Mapping[str, Any],
283
+ *,
284
+ prefix: str = "",
285
+ recurse_on_sequence: bool = False,
286
+ json_string_attributes: Optional[Sequence[str]] = None,
287
+ separator: str = ".",
288
+ ) -> Iterator[Tuple[str, Any]]:
289
+ """
290
+ Flatten a nested dictionary into a list of key value pairs. If `recurse_on_sequence`
291
+ is True, then the function will also recursively flatten nested sequences of dictionaries.
292
+ If `json_string_attributes` is provided, then the function will interpret the attributes
293
+ in the list as JSON strings and convert them into dictionaries. The `prefix` argument is
294
+ used to prefix the keys in the output list, but it's mostly used internally to facilitate
295
+ recursion.
296
+ """
297
+ for key, value in mapping.items():
298
+ prefixed_key = f"{prefix}{separator}{key}" if prefix else key
299
+ if isinstance(value, Mapping):
300
+ if json_string_attributes and prefixed_key.endswith(JSON_STRING_ATTRIBUTES):
301
+ yield prefixed_key, json.dumps(value)
302
+ else:
303
+ yield from _flatten_mapping(
304
+ value,
305
+ prefix=prefixed_key,
306
+ recurse_on_sequence=recurse_on_sequence,
307
+ json_string_attributes=json_string_attributes,
308
+ separator=separator,
309
+ )
310
+ elif isinstance(value, Sequence) and recurse_on_sequence:
311
+ yield from _flatten_sequence(
312
+ value,
313
+ prefix=prefixed_key,
314
+ recurse_on_sequence=recurse_on_sequence,
315
+ json_string_attributes=json_string_attributes,
316
+ separator=separator,
317
+ )
318
+ elif value is not None:
319
+ yield prefixed_key, value
320
+
321
+
322
+ def _flatten_sequence(
323
+ sequence: Iterable[Any],
324
+ *,
325
+ prefix: str = "",
326
+ recurse_on_sequence: bool = False,
327
+ json_string_attributes: Optional[Sequence[str]] = None,
328
+ separator: str = ".",
329
+ ) -> Iterator[Tuple[str, Any]]:
330
+ """
331
+ Flatten a sequence of dictionaries into a list of key value pairs. If `recurse_on_sequence`
332
+ is True, then the function will also recursively flatten nested sequences of dictionaries.
333
+ If `json_string_attributes` is provided, then the function will interpret the attributes
334
+ in the list as JSON strings and convert them into dictionaries. The `prefix` argument is
335
+ used to prefix the keys in the output list, but it's mostly used internally to facilitate
336
+ recursion.
337
+ """
338
+ if isinstance(sequence, str) or not has_mapping(sequence):
339
+ yield prefix, sequence
340
+ for idx, obj in enumerate(sequence):
341
+ if not isinstance(obj, Mapping):
342
+ continue
343
+ yield from _flatten_mapping(
344
+ obj,
345
+ prefix=f"{prefix}{separator}{idx}" if prefix else f"{idx}",
346
+ recurse_on_sequence=recurse_on_sequence,
347
+ json_string_attributes=json_string_attributes,
348
+ separator=separator,
349
+ )
@@ -0,0 +1,116 @@
1
This Phoenix module uses Python's `ast` module. The code snippets below provide a basic introduction to the `ast` module.
2
+
3
+ # Abstract Syntax Tree (AST)
4
+
5
+ The idea is that any Python expression can be parsed into an AST, and then transformed into a different one. The new AST can then be compiled back into a Python expression and evaluated at runtime.
6
+
7
+ ```python
8
+ import ast
9
+ ```
10
+
11
+ # Constant
12
+ https://docs.python.org/3/library/ast.html#ast.Constant
13
+
14
+ ```python
15
+ print(ast.dump(ast.parse("None", mode="eval").body, indent=4))
16
+ print(ast.dump(ast.parse("1", mode="eval").body, indent=4))
17
+ print(ast.dump(ast.parse("'xyz'", mode="eval").body, indent=4))
18
+ ```
19
+ ### Output
20
+ ```python
21
+ Constant(value=None)
22
+ Constant(value=1)
23
+ Constant(value='xyz')
24
+ ```
25
+
26
+ # Name
27
+ https://docs.python.org/3/library/ast.html#ast.Name
28
+
29
+ ```python
30
+ print(ast.dump(ast.parse("xyz", mode="eval").body, indent=4))
31
+ ```
32
+ ### Output
33
+ ```python
34
+ Name(id='xyz', ctx=Load())
35
+ ```
36
+
37
+ # Compilation and Evaluation
38
+ https://docs.python.org/3/library/functions.html#compile
39
+
40
+ ```python
41
+ parsed = ast.parse("xyz", mode="eval")
42
+ compiled = compile(parsed, filename="", mode="eval")
43
+
44
+ eval(compiled, {"xyz": 42})
45
+ ```
46
+ ### Output
47
+ ```python
48
+ 42
49
+ ```
50
+
51
+ # Attribute
52
+ https://docs.python.org/3/library/ast.html#ast.Attribute
53
+
54
+ ```python
55
+ print(ast.dump(ast.parse("llm.token_count.completion", mode="eval").body, indent=4))
56
+ ```
57
+ ### Output
58
+ ```python
59
+ Attribute(
60
+ value=Attribute(
61
+ value=Name(id='llm', ctx=Load()),
62
+ attr='token_count',
63
+ ctx=Load()),
64
+ attr='completion',
65
+ ctx=Load())
66
+ ```
67
+
68
+ # Subscript
69
+ https://docs.python.org/3/library/ast.html#ast.Subscript
70
+
71
+ ```python
72
+ print(ast.dump(ast.parse("attributes[['llm', 'token_count', 'completion']]", mode="eval").body, indent=4))
73
+ ```
74
+ ### Output
75
+ ```python
76
+ Subscript(
77
+ value=Name(id='attributes', ctx=Load()),
78
+ slice=List(
79
+ elts=[
80
+ Constant(value='llm'),
81
+ Constant(value='token_count'),
82
+ Constant(value='completion')],
83
+ ctx=Load()),
84
+ ctx=Load())
85
+ ```
86
+
87
+ # Translation of Attribute to Subscript
88
+ https://docs.python.org/3/library/ast.html#ast.NodeTransformer
89
+
90
+ ```python
91
+ class Translator(ast.NodeTransformer):
92
+ def visit_Attribute(self, node):
93
+ path = []
94
+ while isinstance(node, ast.Attribute):
95
+ path.append(node.attr)
96
+ node = node.value
97
+ if isinstance(node, ast.Name):
98
+ path.append(node.id)
99
+ break
100
+ return ast.Subscript(
101
+ value=ast.Name(id='attributes', ctx=ast.Load()),
102
+ slice=ast.List(
103
+ elts=[ast.Constant(value=p) for p in reversed(path)],
104
+ ctx=ast.Load(),
105
+ ),
106
+ ctx=ast.Load(),
107
+ )
108
+
109
+ parsed = ast.parse("llm.token_count.completion", mode="eval")
110
+ translated = Translator().visit(parsed)
111
+ print(ast.unparse(translated))
112
+ ```
113
+ ### Output
114
+ ```python
115
+ attributes[['llm', 'token_count', 'completion']]
116
+ ```