elasticsearch 8.19.0__py3-none-any.whl → 8.19.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- elasticsearch/_async/client/__init__.py +12 -6
- elasticsearch/_async/client/cat.py +124 -10
- elasticsearch/_async/client/cluster.py +7 -2
- elasticsearch/_async/client/esql.py +16 -6
- elasticsearch/_async/client/indices.py +1 -1
- elasticsearch/_async/client/inference.py +112 -4
- elasticsearch/_async/client/snapshot.py +262 -112
- elasticsearch/_async/client/sql.py +1 -1
- elasticsearch/_async/client/transform.py +60 -0
- elasticsearch/_sync/client/__init__.py +12 -6
- elasticsearch/_sync/client/cat.py +124 -10
- elasticsearch/_sync/client/cluster.py +7 -2
- elasticsearch/_sync/client/esql.py +16 -6
- elasticsearch/_sync/client/indices.py +1 -1
- elasticsearch/_sync/client/inference.py +112 -4
- elasticsearch/_sync/client/snapshot.py +262 -112
- elasticsearch/_sync/client/sql.py +1 -1
- elasticsearch/_sync/client/transform.py +60 -0
- elasticsearch/_version.py +1 -1
- elasticsearch/dsl/_async/document.py +84 -0
- elasticsearch/dsl/_sync/document.py +84 -0
- elasticsearch/dsl/aggs.py +20 -0
- elasticsearch/dsl/document_base.py +43 -0
- elasticsearch/dsl/field.py +49 -10
- elasticsearch/dsl/response/aggs.py +1 -1
- elasticsearch/dsl/types.py +140 -11
- elasticsearch/dsl/utils.py +1 -1
- elasticsearch/esql/__init__.py +2 -1
- elasticsearch/esql/esql.py +85 -34
- elasticsearch/esql/functions.py +37 -25
- {elasticsearch-8.19.0.dist-info → elasticsearch-8.19.1.dist-info}/METADATA +1 -3
- {elasticsearch-8.19.0.dist-info → elasticsearch-8.19.1.dist-info}/RECORD +35 -35
- {elasticsearch-8.19.0.dist-info → elasticsearch-8.19.1.dist-info}/WHEEL +0 -0
- {elasticsearch-8.19.0.dist-info → elasticsearch-8.19.1.dist-info}/licenses/LICENSE +0 -0
- {elasticsearch-8.19.0.dist-info → elasticsearch-8.19.1.dist-info}/licenses/NOTICE +0 -0
elasticsearch/dsl/types.py
CHANGED
@@ -170,6 +170,48 @@ class ChiSquareHeuristic(AttrDict[Any]):
         super().__init__(kwargs)


+class ChunkingSettings(AttrDict[Any]):
+    """
+    :arg strategy: (required) The chunking strategy: `sentence` or `word`.
+        Defaults to `sentence` if omitted.
+    :arg max_chunk_size: (required) The maximum size of a chunk in words.
+        This value cannot be higher than `300` or lower than `20` (for
+        `sentence` strategy) or `10` (for `word` strategy). Defaults to
+        `250` if omitted.
+    :arg overlap: The number of overlapping words for chunks. It is
+        applicable only to a `word` chunking strategy. This value cannot
+        be higher than half the `max_chunk_size` value. Defaults to `100`
+        if omitted.
+    :arg sentence_overlap: The number of overlapping sentences for chunks.
+        It is applicable only for a `sentence` chunking strategy. It can
+        be either `1` or `0`. Defaults to `1` if omitted.
+    """
+
+    strategy: Union[str, DefaultType]
+    max_chunk_size: Union[int, DefaultType]
+    overlap: Union[int, DefaultType]
+    sentence_overlap: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        strategy: Union[str, DefaultType] = DEFAULT,
+        max_chunk_size: Union[int, DefaultType] = DEFAULT,
+        overlap: Union[int, DefaultType] = DEFAULT,
+        sentence_overlap: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if strategy is not DEFAULT:
+            kwargs["strategy"] = strategy
+        if max_chunk_size is not DEFAULT:
+            kwargs["max_chunk_size"] = max_chunk_size
+        if overlap is not DEFAULT:
+            kwargs["overlap"] = overlap
+        if sentence_overlap is not DEFAULT:
+            kwargs["sentence_overlap"] = sentence_overlap
+        super().__init__(kwargs)
+
+
 class ClassificationInferenceOptions(AttrDict[Any]):
     """
     :arg num_top_classes: Specifies the number of top class predictions to
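Like the other generated option classes in this file, the new ChunkingSettings type simply copies its keyword arguments into the backing AttrDict, so it can be constructed and inspected on its own. A minimal sketch (where the object is ultimately accepted, for example in semantic_text or inference configuration, is not shown in this hunk and is not asserted here):

from elasticsearch.dsl.types import ChunkingSettings

# Keyword arguments that are not left at DEFAULT end up in the underlying dict.
chunking = ChunkingSettings(strategy="sentence", max_chunk_size=250, sentence_overlap=1)
print(chunking.strategy)   # attribute access via AttrDict -> "sentence"
print(chunking.to_dict())  # {'strategy': 'sentence', 'max_chunk_size': 250, 'sentence_overlap': 1}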
@@ -3119,6 +3161,26 @@ class ScriptedHeuristic(AttrDict[Any]):
         super().__init__(kwargs)


+class SemanticTextIndexOptions(AttrDict[Any]):
+    """
+    :arg dense_vector:
+    """
+
+    dense_vector: Union["DenseVectorIndexOptions", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        dense_vector: Union[
+            "DenseVectorIndexOptions", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if dense_vector is not DEFAULT:
+            kwargs["dense_vector"] = dense_vector
+        super().__init__(kwargs)
+
+
 class ShapeFieldQuery(AttrDict[Any]):
     """
     :arg indexed_shape: Queries using a pre-indexed shape.
@@ -3196,10 +3258,15 @@ class SortOptions(AttrDict[Any]):

 class SourceFilter(AttrDict[Any]):
     """
-    :arg
-
+    :arg exclude_vectors: If `true`, vector fields are excluded from the
+        returned source. This option takes precedence over `includes`:
+        any vector field will remain excluded even if it matches an
+        `includes` rule.
+    :arg excludes: A list of fields to exclude from the returned source.
+    :arg includes: A list of fields to include in the returned source.
     """

+    exclude_vectors: Union[bool, DefaultType]
     excludes: Union[
         Union[str, InstrumentedField],
         Sequence[Union[str, InstrumentedField]],
@@ -3214,6 +3281,7 @@ class SourceFilter(AttrDict[Any]):
     def __init__(
         self,
         *,
+        exclude_vectors: Union[bool, DefaultType] = DEFAULT,
         excludes: Union[
             Union[str, InstrumentedField],
             Sequence[Union[str, InstrumentedField]],
@@ -3226,6 +3294,8 @@ class SourceFilter(AttrDict[Any]):
         ] = DEFAULT,
         **kwargs: Any,
     ):
+        if exclude_vectors is not DEFAULT:
+            kwargs["exclude_vectors"] = exclude_vectors
         if excludes is not DEFAULT:
             kwargs["excludes"] = str(excludes)
         if includes is not DEFAULT:
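A small sketch of the new option in use; routing it through Search.source() as a plain dict is an assumption here, since the hunk itself only adds the keyword to the generated type:

from elasticsearch.dsl import Search
from elasticsearch.dsl.types import SourceFilter

# Ask for the stored source without vector fields.
src = SourceFilter(exclude_vectors=True)
s = Search(index="articles").source(src.to_dict())
print(s.to_dict())  # {'_source': {'exclude_vectors': True}}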
@@ -3675,6 +3745,38 @@ class SpanWithinQuery(AttrDict[Any]):
         super().__init__(kwargs)


+class SparseVectorIndexOptions(AttrDict[Any]):
+    """
+    :arg prune: Whether to perform pruning, omitting the non-significant
+        tokens from the query to improve query performance. If prune is
+        true but the pruning_config is not specified, pruning will occur
+        but default values will be used. Default: false
+    :arg pruning_config: Optional pruning configuration. If enabled, this
+        will omit non-significant tokens from the query in order to
+        improve query performance. This is only used if prune is set to
+        true. If prune is set to true but pruning_config is not specified,
+        default values will be used.
+    """
+
+    prune: Union[bool, DefaultType]
+    pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        prune: Union[bool, DefaultType] = DEFAULT,
+        pruning_config: Union[
+            "TokenPruningConfig", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if prune is not DEFAULT:
+            kwargs["prune"] = prune
+        if pruning_config is not DEFAULT:
+            kwargs["pruning_config"] = pruning_config
+        super().__init__(kwargs)
+
+
 class SuggestContext(AttrDict[Any]):
     """
     :arg name: (required)
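The constructor follows the same keyword-collection pattern. In the sketch below, pruning_config is passed as a plain dict; the keys used (tokens_freq_ratio_threshold, tokens_weight_threshold) are the usual sparse_vector pruning settings and are an assumption here, since the hunk only types the field as TokenPruningConfig or Dict[str, Any]:

from elasticsearch.dsl.types import SparseVectorIndexOptions

opts = SparseVectorIndexOptions(
    prune=True,
    pruning_config={"tokens_freq_ratio_threshold": 5, "tokens_weight_threshold": 0.4},
)
print(opts.to_dict())  # {'prune': True, 'pruning_config': {...}}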
@@ -3713,15 +3815,30 @@ class TDigest(AttrDict[Any]):
     :arg compression: Limits the maximum number of nodes used by the
         underlying TDigest algorithm to `20 * compression`, enabling
         control of memory usage and approximation error.
+    :arg execution_hint: The default implementation of TDigest is
+        optimized for performance, scaling to millions or even billions of
+        sample values while maintaining acceptable accuracy levels (close
+        to 1% relative error for millions of samples in some cases). To
+        use an implementation optimized for accuracy, set this parameter
+        to high_accuracy instead. Defaults to `default` if omitted.
     """

     compression: Union[int, DefaultType]
+    execution_hint: Union[Literal["default", "high_accuracy"], DefaultType]

     def __init__(
-        self,
+        self,
+        *,
+        compression: Union[int, DefaultType] = DEFAULT,
+        execution_hint: Union[
+            Literal["default", "high_accuracy"], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
     ):
         if compression is not DEFAULT:
             kwargs["compression"] = compression
+        if execution_hint is not DEFAULT:
+            kwargs["execution_hint"] = execution_hint
         super().__init__(kwargs)

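execution_hint corresponds to the tdigest option of the percentiles aggregations. A rough sketch of sending it along with a DSL search; attaching it via the aggregation's tdigest parameter as a plain dict is an assumption, not something this hunk shows:

from elasticsearch.dsl import Search
from elasticsearch.dsl.types import TDigest

# Request the accuracy-optimized TDigest implementation.
hint = TDigest(compression=200, execution_hint="high_accuracy")

s = Search(index="logs")
s.aggs.metric(
    "load_time_pct",
    "percentiles",
    field="load_time",
    percents=[95, 99],
    tdigest=hint.to_dict(),
)
print(s.to_dict())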
@@ -4444,7 +4561,7 @@ class ArrayPercentilesItem(AttrDict[Any]):
     :arg value_as_string:
     """

-    key:
+    key: float
     value: Union[float, None]
     value_as_string: str

@@ -5290,7 +5407,9 @@ class HdrPercentileRanksAggregate(AttrDict[Any]):
     :arg meta:
     """

-    values: Union[
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
     meta: Mapping[str, Any]


@@ -5300,7 +5419,9 @@ class HdrPercentilesAggregate(AttrDict[Any]):
     :arg meta:
     """

-    values: Union[
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
     meta: Mapping[str, Any]


@@ -5809,7 +5930,9 @@ class PercentilesBucketAggregate(AttrDict[Any]):
     :arg meta:
     """

-    values: Union[
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
     meta: Mapping[str, Any]


@@ -6010,17 +6133,19 @@ class SearchProfile(AttrDict[Any]):
 class ShardFailure(AttrDict[Any]):
     """
     :arg reason: (required)
-    :arg shard: (required)
     :arg index:
     :arg node:
+    :arg shard:
     :arg status:
+    :arg primary:
     """

     reason: "ErrorCause"
-    shard: int
     index: str
     node: str
+    shard: int
     status: str
+    primary: bool


 class ShardProfile(AttrDict[Any]):
@@ -6344,7 +6469,9 @@ class TDigestPercentileRanksAggregate(AttrDict[Any]):
     :arg meta:
     """

-    values: Union[
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
     meta: Mapping[str, Any]


@@ -6354,7 +6481,9 @@ class TDigestPercentilesAggregate(AttrDict[Any]):
     :arg meta:
     """

-    values: Union[
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
     meta: Mapping[str, Any]

elasticsearch/dsl/utils.py
CHANGED
@@ -603,7 +603,7 @@ class ObjectBase(AttrDict[Any]):
             # if this is a mapped field,
             f = self.__get_field(k)
             if f and f._coerce:
-                v = f.serialize(v)
+                v = f.serialize(v, skip_empty=skip_empty)

             # if someone assigned AttrList, unwrap it
             if isinstance(v, AttrList):
elasticsearch/esql/__init__.py
CHANGED
elasticsearch/esql/esql.py
CHANGED
@@ -16,6 +16,7 @@
 # under the License.

 import json
+import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, Optional, Tuple, Type, Union

@@ -111,6 +112,29 @@ class ESQLBase(ABC):
     def _render_internal(self) -> str:
         pass

+    @staticmethod
+    def _format_index(index: IndexType) -> str:
+        return index._index._name if hasattr(index, "_index") else str(index)
+
+    @staticmethod
+    def _format_id(id: FieldType, allow_patterns: bool = False) -> str:
+        s = str(id)  # in case it is an InstrumentedField
+        if allow_patterns and "*" in s:
+            return s  # patterns cannot be escaped
+        if re.fullmatch(r"[a-zA-Z_@][a-zA-Z0-9_\.]*", s):
+            return s
+        # this identifier needs to be escaped
+        s.replace("`", "``")
+        return f"`{s}`"
+
+    @staticmethod
+    def _format_expr(expr: ExpressionType) -> str:
+        return (
+            json.dumps(expr)
+            if not isinstance(expr, (str, InstrumentedExpression))
+            else str(expr)
+        )
+
     def _is_forked(self) -> bool:
         if self.__class__.__name__ == "Fork":
             return True
@@ -427,7 +451,7 @@ class ESQLBase(ABC):
         """
         return Sample(self, probability)

-    def sort(self, *columns:
+    def sort(self, *columns: ExpressionType) -> "Sort":
         """The ``SORT`` processing command sorts a table on one or more columns.

         :param columns: The columns to sort on.
@@ -570,15 +594,12 @@ class From(ESQLBase):
         return self

     def _render_internal(self) -> str:
-        indices = [
-            index if isinstance(index, str) else index._index._name
-            for index in self._indices
-        ]
+        indices = [self._format_index(index) for index in self._indices]
         s = f'{self.__class__.__name__.upper()} {", ".join(indices)}'
         if self._metadata_fields:
             s = (
                 s
-                + f' METADATA {", ".join([
+                + f' METADATA {", ".join([self._format_id(field) for field in self._metadata_fields])}'
             )
         return s

@@ -594,7 +615,11 @@ class Row(ESQLBase):
     def __init__(self, **params: ExpressionType):
         super().__init__()
         self._params = {
-
+            self._format_id(k): (
+                json.dumps(v)
+                if not isinstance(v, InstrumentedExpression)
+                else self._format_expr(v)
+            )
             for k, v in params.items()
         }

@@ -615,7 +640,7 @@ class Show(ESQLBase):
         self._item = item

     def _render_internal(self) -> str:
-        return f"SHOW {self._item}"
+        return f"SHOW {self._format_id(self._item)}"


 class Branch(ESQLBase):
@@ -667,11 +692,11 @@ class ChangePoint(ESQLBase):
         return self

     def _render_internal(self) -> str:
-        key = "" if not self._key else f" ON {self._key}"
+        key = "" if not self._key else f" ON {self._format_id(self._key)}"
         names = (
             ""
             if not self._type_name and not self._pvalue_name
-            else f' AS {self._type_name or "type"}, {self._pvalue_name or "pvalue"}'
+            else f' AS {self._format_id(self._type_name or "type")}, {self._format_id(self._pvalue_name or "pvalue")}'
         )
         return f"CHANGE_POINT {self._value}{key}{names}"

@@ -709,12 +734,13 @@ class Completion(ESQLBase):
     def _render_internal(self) -> str:
         if self._inference_id is None:
             raise ValueError("The completion command requires an inference ID")
+        with_ = {"inference_id": self._inference_id}
         if self._named_prompt:
             column = list(self._named_prompt.keys())[0]
             prompt = list(self._named_prompt.values())[0]
-            return f"COMPLETION {column} = {prompt} WITH {
+            return f"COMPLETION {self._format_id(column)} = {self._format_id(prompt)} WITH {json.dumps(with_)}"
         else:
-            return f"COMPLETION {self._prompt[0]} WITH {
+            return f"COMPLETION {self._format_id(self._prompt[0])} WITH {json.dumps(with_)}"


 class Dissect(ESQLBase):
@@ -742,9 +768,13 @@ class Dissect(ESQLBase):

     def _render_internal(self) -> str:
         sep = (
-            ""
+            ""
+            if self._separator is None
+            else f" APPEND_SEPARATOR={json.dumps(self._separator)}"
+        )
+        return (
+            f"DISSECT {self._format_id(self._input)} {json.dumps(self._pattern)}{sep}"
         )
-        return f"DISSECT {self._input} {json.dumps(self._pattern)}{sep}"


 class Drop(ESQLBase):
@@ -760,7 +790,7 @@ class Drop(ESQLBase):
         self._columns = columns

     def _render_internal(self) -> str:
-        return f'DROP {", ".join([
+        return f'DROP {", ".join([self._format_id(col, allow_patterns=True) for col in self._columns])}'


 class Enrich(ESQLBase):
@@ -814,12 +844,18 @@ class Enrich(ESQLBase):
         return self

     def _render_internal(self) -> str:
-        on =
+        on = (
+            ""
+            if self._match_field is None
+            else f" ON {self._format_id(self._match_field)}"
+        )
         with_ = ""
         if self._named_fields:
-            with_ = f' WITH {", ".join([f"{name} = {field}" for name, field in self._named_fields.items()])}'
+            with_ = f' WITH {", ".join([f"{self._format_id(name)} = {self._format_id(field)}" for name, field in self._named_fields.items()])}'
         elif self._fields is not None:
-            with_ =
+            with_ = (
+                f' WITH {", ".join([self._format_id(field) for field in self._fields])}'
+            )
         return f"ENRICH {self._policy}{on}{with_}"


@@ -832,7 +868,10 @@ class Eval(ESQLBase):
     """

     def __init__(
-        self,
+        self,
+        parent: ESQLBase,
+        *columns: ExpressionType,
+        **named_columns: ExpressionType,
     ):
         if columns and named_columns:
             raise ValueError(
@@ -844,10 +883,13 @@ class Eval(ESQLBase):
     def _render_internal(self) -> str:
         if isinstance(self._columns, dict):
             cols = ", ".join(
-                [
+                [
+                    f"{self._format_id(name)} = {self._format_expr(value)}"
+                    for name, value in self._columns.items()
+                ]
             )
         else:
-            cols = ", ".join([f"{col}" for col in self._columns])
+            cols = ", ".join([f"{self._format_expr(col)}" for col in self._columns])
         return f"EVAL {cols}"


@@ -900,7 +942,7 @@ class Grok(ESQLBase):
         self._pattern = pattern

     def _render_internal(self) -> str:
-        return f"GROK {self._input} {json.dumps(self._pattern)}"
+        return f"GROK {self._format_id(self._input)} {json.dumps(self._pattern)}"


 class Keep(ESQLBase):
@@ -916,7 +958,7 @@ class Keep(ESQLBase):
         self._columns = columns

     def _render_internal(self) -> str:
-        return f'KEEP {", ".join([f"{col}" for col in self._columns])}'
+        return f'KEEP {", ".join([f"{self._format_id(col, allow_patterns=True)}" for col in self._columns])}'


 class Limit(ESQLBase):
@@ -932,7 +974,7 @@ class Limit(ESQLBase):
         self._max_number_of_rows = max_number_of_rows

     def _render_internal(self) -> str:
-        return f"LIMIT {self._max_number_of_rows}"
+        return f"LIMIT {json.dumps(self._max_number_of_rows)}"


 class LookupJoin(ESQLBase):
@@ -967,7 +1009,9 @@ class LookupJoin(ESQLBase):
             if isinstance(self._lookup_index, str)
             else self._lookup_index._index._name
         )
-        return
+        return (
+            f"LOOKUP JOIN {self._format_index(index)} ON {self._format_id(self._field)}"
+        )


 class MvExpand(ESQLBase):
@@ -983,7 +1027,7 @@ class MvExpand(ESQLBase):
         self._column = column

     def _render_internal(self) -> str:
-        return f"MV_EXPAND {self._column}"
+        return f"MV_EXPAND {self._format_id(self._column)}"


 class Rename(ESQLBase):
@@ -999,7 +1043,7 @@ class Rename(ESQLBase):
         self._columns = columns

     def _render_internal(self) -> str:
-        return f'RENAME {", ".join([f"{old_name} AS {new_name}" for old_name, new_name in self._columns.items()])}'
+        return f'RENAME {", ".join([f"{self._format_id(old_name)} AS {self._format_id(new_name)}" for old_name, new_name in self._columns.items()])}'


 class Sample(ESQLBase):
@@ -1015,7 +1059,7 @@ class Sample(ESQLBase):
         self._probability = probability

     def _render_internal(self) -> str:
-        return f"SAMPLE {self._probability}"
+        return f"SAMPLE {json.dumps(self._probability)}"


 class Sort(ESQLBase):
@@ -1026,12 +1070,16 @@ class Sort(ESQLBase):
     in a single expression.
     """

-    def __init__(self, parent: ESQLBase, *columns:
+    def __init__(self, parent: ESQLBase, *columns: ExpressionType):
         super().__init__(parent)
         self._columns = columns

     def _render_internal(self) -> str:
-
+        sorts = [
+            " ".join([self._format_id(term) for term in str(col).split(" ")])
+            for col in self._columns
+        ]
+        return f'SORT {", ".join([f"{sort}" for sort in sorts])}'


 class Stats(ESQLBase):
@@ -1062,14 +1110,17 @@ class Stats(ESQLBase):

     def _render_internal(self) -> str:
         if isinstance(self._expressions, dict):
-            exprs = [
+            exprs = [
+                f"{self._format_id(key)} = {self._format_expr(value)}"
+                for key, value in self._expressions.items()
+            ]
         else:
-            exprs = [f"{expr}" for expr in self._expressions]
+            exprs = [f"{self._format_expr(expr)}" for expr in self._expressions]
         expression_separator = ",\n "
         by = (
             ""
             if self._grouping_expressions is None
-            else f'\n BY {", ".join([f"{expr}" for expr in self._grouping_expressions])}'
+            else f'\n BY {", ".join([f"{self._format_expr(expr)}" for expr in self._grouping_expressions])}'
         )
         return f'STATS {expression_separator.join([f"{expr}" for expr in exprs])}{by}'

@@ -1087,7 +1138,7 @@ class Where(ESQLBase):
         self._expressions = expressions

     def _render_internal(self) -> str:
-        return f'WHERE {" AND ".join([f"{expr}" for expr in self._expressions])}'
+        return f'WHERE {" AND ".join([f"{self._format_expr(expr)}" for expr in self._expressions])}'


 def and_(*expressions: InstrumentedExpression) -> "InstrumentedExpression":