sqlglot 28.4.0__py3-none-any.whl → 28.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. sqlglot/_version.py +2 -2
  2. sqlglot/dialects/bigquery.py +20 -23
  3. sqlglot/dialects/clickhouse.py +2 -0
  4. sqlglot/dialects/dialect.py +355 -18
  5. sqlglot/dialects/doris.py +38 -90
  6. sqlglot/dialects/druid.py +1 -0
  7. sqlglot/dialects/duckdb.py +1739 -163
  8. sqlglot/dialects/exasol.py +17 -1
  9. sqlglot/dialects/hive.py +27 -2
  10. sqlglot/dialects/mysql.py +103 -11
  11. sqlglot/dialects/oracle.py +38 -1
  12. sqlglot/dialects/postgres.py +142 -33
  13. sqlglot/dialects/presto.py +6 -2
  14. sqlglot/dialects/redshift.py +7 -1
  15. sqlglot/dialects/singlestore.py +13 -3
  16. sqlglot/dialects/snowflake.py +271 -21
  17. sqlglot/dialects/spark.py +25 -0
  18. sqlglot/dialects/spark2.py +4 -3
  19. sqlglot/dialects/starrocks.py +152 -17
  20. sqlglot/dialects/trino.py +1 -0
  21. sqlglot/dialects/tsql.py +5 -0
  22. sqlglot/diff.py +1 -1
  23. sqlglot/expressions.py +239 -47
  24. sqlglot/generator.py +173 -44
  25. sqlglot/optimizer/annotate_types.py +129 -60
  26. sqlglot/optimizer/merge_subqueries.py +13 -2
  27. sqlglot/optimizer/qualify_columns.py +7 -0
  28. sqlglot/optimizer/resolver.py +19 -0
  29. sqlglot/optimizer/scope.py +12 -0
  30. sqlglot/optimizer/unnest_subqueries.py +7 -0
  31. sqlglot/parser.py +251 -58
  32. sqlglot/schema.py +186 -14
  33. sqlglot/tokens.py +36 -6
  34. sqlglot/transforms.py +6 -5
  35. sqlglot/typing/__init__.py +29 -10
  36. sqlglot/typing/bigquery.py +5 -10
  37. sqlglot/typing/duckdb.py +39 -0
  38. sqlglot/typing/hive.py +50 -1
  39. sqlglot/typing/mysql.py +32 -0
  40. sqlglot/typing/presto.py +0 -1
  41. sqlglot/typing/snowflake.py +80 -17
  42. sqlglot/typing/spark.py +29 -0
  43. sqlglot/typing/spark2.py +9 -1
  44. sqlglot/typing/tsql.py +21 -0
  45. {sqlglot-28.4.0.dist-info → sqlglot-28.8.0.dist-info}/METADATA +47 -2
  46. sqlglot-28.8.0.dist-info/RECORD +95 -0
  47. {sqlglot-28.4.0.dist-info → sqlglot-28.8.0.dist-info}/WHEEL +1 -1
  48. sqlglot-28.4.0.dist-info/RECORD +0 -92
  49. {sqlglot-28.4.0.dist-info → sqlglot-28.8.0.dist-info}/licenses/LICENSE +0 -0
  50. {sqlglot-28.4.0.dist-info → sqlglot-28.8.0.dist-info}/top_level.txt +0 -0
sqlglot/parser.py CHANGED
@@ -155,12 +155,13 @@ def build_convert_timezone(
155
155
  return exp.ConvertTimezone.from_arg_list(args)
156
156
 
157
157
 
158
- def build_trim(args: t.List, is_left: bool = True):
159
- return exp.Trim(
160
- this=seq_get(args, 0),
161
- expression=seq_get(args, 1),
162
- position="LEADING" if is_left else "TRAILING",
163
- )
158
+ def build_trim(args: t.List, is_left: bool = True, reverse_args: bool = False):
159
+ this, expression = seq_get(args, 0), seq_get(args, 1)
160
+
161
+ if expression and reverse_args:
162
+ this, expression = expression, this
163
+
164
+ return exp.Trim(this=this, expression=expression, position="LEADING" if is_left else "TRAILING")
164
165
 
165
166
 
166
167
  def build_coalesce(
@@ -177,6 +178,90 @@ def build_locate_strposition(args: t.List):
177
178
  )
178
179
 
179
180
 
181
+ def build_array_append(args: t.List, dialect: Dialect) -> exp.ArrayAppend:
182
+ """
183
+ Builds ArrayAppend with NULL propagation semantics based on the dialect configuration.
184
+
185
+ Some dialects (Databricks, Spark, Snowflake) return NULL when the input array is NULL.
186
+ Others (DuckDB, PostgreSQL) create a new single-element array instead.
187
+
188
+ Args:
189
+ args: Function arguments [array, element]
190
+ dialect: The dialect to read ARRAY_FUNCS_PROPAGATES_NULLS from
191
+
192
+ Returns:
193
+ ArrayAppend expression with appropriate null_propagation flag
194
+ """
195
+ return exp.ArrayAppend(
196
+ this=seq_get(args, 0),
197
+ expression=seq_get(args, 1),
198
+ null_propagation=dialect.ARRAY_FUNCS_PROPAGATES_NULLS,
199
+ )
200
+
201
+
202
+ def build_array_prepend(args: t.List, dialect: Dialect) -> exp.ArrayPrepend:
203
+ """
204
+ Builds ArrayPrepend with NULL propagation semantics based on the dialect configuration.
205
+
206
+ Some dialects (Databricks, Spark, Snowflake) return NULL when the input array is NULL.
207
+ Others (DuckDB, PostgreSQL) create a new single-element array instead.
208
+
209
+ Args:
210
+ args: Function arguments [array, element]
211
+ dialect: The dialect to read ARRAY_FUNCS_PROPAGATES_NULLS from
212
+
213
+ Returns:
214
+ ArrayPrepend expression with appropriate null_propagation flag
215
+ """
216
+ return exp.ArrayPrepend(
217
+ this=seq_get(args, 0),
218
+ expression=seq_get(args, 1),
219
+ null_propagation=dialect.ARRAY_FUNCS_PROPAGATES_NULLS,
220
+ )
221
+
222
+
223
+ def build_array_concat(args: t.List, dialect: Dialect) -> exp.ArrayConcat:
224
+ """
225
+ Builds ArrayConcat with NULL propagation semantics based on the dialect configuration.
226
+
227
+ Some dialects (Redshift, Snowflake) return NULL when any input array is NULL.
228
+ Others (DuckDB, PostgreSQL) skip NULL arrays and continue concatenation.
229
+
230
+ Args:
231
+ args: Function arguments [array1, array2, ...] (variadic)
232
+ dialect: The dialect to read ARRAY_FUNCS_PROPAGATES_NULLS from
233
+
234
+ Returns:
235
+ ArrayConcat expression with appropriate null_propagation flag
236
+ """
237
+ return exp.ArrayConcat(
238
+ this=seq_get(args, 0),
239
+ expressions=args[1:],
240
+ null_propagation=dialect.ARRAY_FUNCS_PROPAGATES_NULLS,
241
+ )
242
+
243
+
244
+ def build_array_remove(args: t.List, dialect: Dialect) -> exp.ArrayRemove:
245
+ """
246
+ Builds ArrayRemove with NULL propagation semantics based on the dialect configuration.
247
+
248
+ Some dialects (Snowflake) return NULL when the removal value is NULL.
249
+ Others (DuckDB) may return empty array due to NULL comparison semantics.
250
+
251
+ Args:
252
+ args: Function arguments [array, value_to_remove]
253
+ dialect: The dialect to read ARRAY_FUNCS_PROPAGATES_NULLS from
254
+
255
+ Returns:
256
+ ArrayRemove expression with appropriate null_propagation flag
257
+ """
258
+ return exp.ArrayRemove(
259
+ this=seq_get(args, 0),
260
+ expression=seq_get(args, 1),
261
+ null_propagation=dialect.ARRAY_FUNCS_PROPAGATES_NULLS,
262
+ )
263
+
264
+
180
265
  class _Parser(type):
181
266
  def __new__(cls, clsname, bases, attrs):
182
267
  klass = super().__new__(cls, clsname, bases, attrs)
@@ -212,8 +297,11 @@ class Parser(metaclass=_Parser):
212
297
  "ARRAY_AGG": lambda args, dialect: exp.ArrayAgg(
213
298
  this=seq_get(args, 0), nulls_excluded=dialect.ARRAY_AGG_INCLUDES_NULLS is None or None
214
299
  ),
215
- "CHAR": lambda args: exp.Chr(expressions=args),
216
- "CHR": lambda args: exp.Chr(expressions=args),
300
+ "ARRAY_APPEND": build_array_append,
301
+ "ARRAY_CAT": build_array_concat,
302
+ "ARRAY_CONCAT": build_array_concat,
303
+ "ARRAY_PREPEND": build_array_prepend,
304
+ "ARRAY_REMOVE": build_array_remove,
217
305
  "COUNT": lambda args: exp.Count(this=seq_get(args, 0), expressions=args[1:], big_int=True),
218
306
  "CONCAT": lambda args, dialect: exp.Concat(
219
307
  expressions=args,
@@ -239,12 +327,23 @@ class Parser(metaclass=_Parser):
239
327
  is_string=dialect.UUID_IS_STRING_TYPE or None
240
328
  ),
241
329
  "GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)),
242
- "GREATEST": lambda args: exp.Greatest(this=seq_get(args, 0), expressions=args[1:]),
243
- "LEAST": lambda args: exp.Least(this=seq_get(args, 0), expressions=args[1:]),
330
+ "GREATEST": lambda args, dialect: exp.Greatest(
331
+ this=seq_get(args, 0),
332
+ expressions=args[1:],
333
+ ignore_nulls=dialect.LEAST_GREATEST_IGNORES_NULLS,
334
+ ),
335
+ "LEAST": lambda args, dialect: exp.Least(
336
+ this=seq_get(args, 0),
337
+ expressions=args[1:],
338
+ ignore_nulls=dialect.LEAST_GREATEST_IGNORES_NULLS,
339
+ ),
244
340
  "HEX": build_hex,
245
341
  "JSON_EXTRACT": build_extract_json_with_path(exp.JSONExtract),
246
342
  "JSON_EXTRACT_SCALAR": build_extract_json_with_path(exp.JSONExtractScalar),
247
343
  "JSON_EXTRACT_PATH_TEXT": build_extract_json_with_path(exp.JSONExtractScalar),
344
+ "JSON_KEYS": lambda args, dialect: exp.JSONKeys(
345
+ this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
346
+ ),
248
347
  "LIKE": build_like,
249
348
  "LOG": build_logarithm,
250
349
  "LOG2": lambda args: exp.Log(this=exp.Literal.number(2), expression=seq_get(args, 0)),
@@ -546,6 +645,7 @@ class Parser(metaclass=_Parser):
546
645
  TokenType.FULL,
547
646
  TokenType.GET,
548
647
  TokenType.IDENTIFIER,
648
+ TokenType.INOUT,
549
649
  TokenType.IS,
550
650
  TokenType.ISNULL,
551
651
  TokenType.INTERVAL,
@@ -904,7 +1004,7 @@ class Parser(metaclass=_Parser):
904
1004
  UNARY_PARSERS = {
905
1005
  TokenType.PLUS: lambda self: self._parse_unary(), # Unary + is handled as a no-op
906
1006
  TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
907
- TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
1007
+ TokenType.TILDE: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
908
1008
  TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()),
909
1009
  TokenType.PIPE_SLASH: lambda self: self.expression(exp.Sqrt, this=self._parse_unary()),
910
1010
  TokenType.DPIPE_SLASH: lambda self: self.expression(exp.Cbrt, this=self._parse_unary()),
@@ -979,6 +1079,10 @@ class Parser(metaclass=_Parser):
979
1079
  TokenType.QMARK_AMP: binary_range_parser(exp.JSONBContainsAllTopKeys),
980
1080
  TokenType.QMARK_PIPE: binary_range_parser(exp.JSONBContainsAnyTopKeys),
981
1081
  TokenType.HASH_DASH: binary_range_parser(exp.JSONBDeleteAtPath),
1082
+ TokenType.ADJACENT: binary_range_parser(exp.Adjacent),
1083
+ TokenType.OPERATOR: lambda self, this: self._parse_operator(this),
1084
+ TokenType.AMP_LT: binary_range_parser(exp.ExtendsLeft),
1085
+ TokenType.AMP_GT: binary_range_parser(exp.ExtendsRight),
982
1086
  }
983
1087
 
984
1088
  PIPE_SYNTAX_TRANSFORM_PARSERS = {
@@ -1118,11 +1222,7 @@ class Parser(metaclass=_Parser):
1118
1222
  "CHARACTER SET": lambda self: self.expression(
1119
1223
  exp.CharacterSetColumnConstraint, this=self._parse_var_or_string()
1120
1224
  ),
1121
- "CHECK": lambda self: self.expression(
1122
- exp.CheckColumnConstraint,
1123
- this=self._parse_wrapped(self._parse_assignment),
1124
- enforced=self._match_text_seq("ENFORCED"),
1125
- ),
1225
+ "CHECK": lambda self: self._parse_check_constraint(),
1126
1226
  "COLLATE": lambda self: self.expression(
1127
1227
  exp.CollateColumnConstraint,
1128
1228
  this=self._parse_identifier() or self._parse_column(),
@@ -1269,6 +1369,8 @@ class Parser(metaclass=_Parser):
1269
1369
  "CAST": lambda self: self._parse_cast(self.STRICT_CAST),
1270
1370
  "CEIL": lambda self: self._parse_ceil_floor(exp.Ceil),
1271
1371
  "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST),
1372
+ "CHAR": lambda self: self._parse_char(),
1373
+ "CHR": lambda self: self._parse_char(),
1272
1374
  "DECODE": lambda self: self._parse_decode(),
1273
1375
  "EXTRACT": lambda self: self._parse_extract(),
1274
1376
  "FLOOR": lambda self: self._parse_ceil_floor(exp.Floor),
@@ -1288,11 +1390,7 @@ class Parser(metaclass=_Parser):
1288
1390
  "TRIM": lambda self: self._parse_trim(),
1289
1391
  "TRY_CAST": lambda self: self._parse_cast(False, safe=True),
1290
1392
  "TRY_CONVERT": lambda self: self._parse_convert(False, safe=True),
1291
- "XMLELEMENT": lambda self: self.expression(
1292
- exp.XMLElement,
1293
- this=self._match_text_seq("NAME") and self._parse_id_var(),
1294
- expressions=self._match(TokenType.COMMA) and self._parse_csv(self._parse_expression),
1295
- ),
1393
+ "XMLELEMENT": lambda self: self._parse_xml_element(),
1296
1394
  "XMLTABLE": lambda self: self._parse_xml_table(),
1297
1395
  }
1298
1396
 
@@ -2912,6 +3010,7 @@ class Parser(metaclass=_Parser):
2912
3010
  expressions=expressions,
2913
3011
  partition=partition,
2914
3012
  format=format,
3013
+ as_json=self._match_text_seq("AS", "JSON"),
2915
3014
  )
2916
3015
 
2917
3016
  def _parse_multitable_inserts(self, comments: t.Optional[t.List[str]]) -> exp.MultitableInserts:
@@ -3042,6 +3141,8 @@ class Parser(metaclass=_Parser):
3042
3141
  conflict_keys = self._parse_csv(self._parse_id_var)
3043
3142
  self._match_r_paren()
3044
3143
 
3144
+ index_predicate = self._parse_where()
3145
+
3045
3146
  action = self._parse_var_from_options(self.CONFLICT_ACTIONS)
3046
3147
  if self._prev.token_type == TokenType.UPDATE:
3047
3148
  self._match(TokenType.SET)
@@ -3055,6 +3156,7 @@ class Parser(metaclass=_Parser):
3055
3156
  expressions=expressions,
3056
3157
  action=action,
3057
3158
  conflict_keys=conflict_keys,
3159
+ index_predicate=index_predicate,
3058
3160
  constraint=constraint,
3059
3161
  where=self._parse_where(),
3060
3162
  )
@@ -3172,7 +3274,12 @@ class Parser(metaclass=_Parser):
3172
3274
  elif self._match(TokenType.RETURNING, advance=False):
3173
3275
  kwargs["returning"] = self._parse_returning()
3174
3276
  elif self._match(TokenType.FROM, advance=False):
3175
- kwargs["from_"] = self._parse_from(joins=True)
3277
+ from_ = self._parse_from(joins=True)
3278
+ table = from_.this if from_ else None
3279
+ if isinstance(table, exp.Subquery) and self._match(TokenType.JOIN, advance=False):
3280
+ table.set("joins", list(self._parse_joins()) or None)
3281
+
3282
+ kwargs["from_"] = from_
3176
3283
  elif self._match(TokenType.WHERE, advance=False):
3177
3284
  kwargs["where"] = self._parse_where()
3178
3285
  elif self._match(TokenType.ORDER_BY, advance=False):
@@ -3889,6 +3996,7 @@ class Parser(metaclass=_Parser):
3889
3996
 
3890
3997
  index = self._index
3891
3998
  method, side, kind = self._parse_join_parts()
3999
+ directed = self._match_text_seq("DIRECTED")
3892
4000
  hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None
3893
4001
  join = self._match(TokenType.JOIN) or (kind and kind.token_type == TokenType.STRAIGHT_JOIN)
3894
4002
  join_comments = self._prev_comments
@@ -3960,6 +4068,9 @@ class Parser(metaclass=_Parser):
3960
4068
  ):
3961
4069
  kwargs["on"] = exp.true()
3962
4070
 
4071
+ if directed:
4072
+ kwargs["directed"] = directed
4073
+
3963
4074
  return self.expression(exp.Join, comments=comments, **kwargs)
3964
4075
 
3965
4076
  def _parse_opclass(self) -> t.Optional[exp.Expression]:
@@ -4906,6 +5017,11 @@ class Parser(metaclass=_Parser):
4906
5017
  or self._try_parse(self._parse_offset, retreat=True)
4907
5018
  )
4908
5019
  self._retreat(index)
5020
+
5021
+ # MATCH_CONDITION (...) is a special construct that should not be consumed by limit/offset
5022
+ if self._next and self._next.token_type == TokenType.MATCH_CONDITION:
5023
+ result = False
5024
+
4909
5025
  return result
4910
5026
 
4911
5027
  def _parse_limit_by(self) -> t.Optional[t.List[exp.Expression]]:
@@ -5184,27 +5300,7 @@ class Parser(metaclass=_Parser):
5184
5300
  exp.Escape, this=this, expression=self._parse_string() or self._parse_null()
5185
5301
  )
5186
5302
 
5187
- def _parse_interval(self, match_interval: bool = True) -> t.Optional[exp.Add | exp.Interval]:
5188
- index = self._index
5189
-
5190
- if not self._match(TokenType.INTERVAL) and match_interval:
5191
- return None
5192
-
5193
- if self._match(TokenType.STRING, advance=False):
5194
- this = self._parse_primary()
5195
- else:
5196
- this = self._parse_term()
5197
-
5198
- if not this or (
5199
- isinstance(this, exp.Column)
5200
- and not this.table
5201
- and not this.this.quoted
5202
- and self._curr
5203
- and self._curr.text.upper() not in self.dialect.VALID_INTERVAL_UNITS
5204
- ):
5205
- self._retreat(index)
5206
- return None
5207
-
5303
+ def _parse_interval_span(self, this: exp.Expression) -> exp.Interval:
5208
5304
  # handle day-time format interval span with omitted units:
5209
5305
  # INTERVAL '<number days> hh[:][mm[:ss[.ff]]]' <maybe `unit TO unit`>
5210
5306
  interval_span_units_omitted = None
@@ -5255,10 +5351,35 @@ class Parser(metaclass=_Parser):
5255
5351
 
5256
5352
  if self.INTERVAL_SPANS and self._match_text_seq("TO"):
5257
5353
  unit = self.expression(
5258
- exp.IntervalSpan, this=unit, expression=self._parse_var(any_token=True, upper=True)
5354
+ exp.IntervalSpan,
5355
+ this=unit,
5356
+ expression=self._parse_function() or self._parse_var(any_token=True, upper=True),
5259
5357
  )
5260
5358
 
5261
- interval = self.expression(exp.Interval, this=this, unit=unit)
5359
+ return self.expression(exp.Interval, this=this, unit=unit)
5360
+
5361
+ def _parse_interval(self, match_interval: bool = True) -> t.Optional[exp.Add | exp.Interval]:
5362
+ index = self._index
5363
+
5364
+ if not self._match(TokenType.INTERVAL) and match_interval:
5365
+ return None
5366
+
5367
+ if self._match(TokenType.STRING, advance=False):
5368
+ this = self._parse_primary()
5369
+ else:
5370
+ this = self._parse_term()
5371
+
5372
+ if not this or (
5373
+ isinstance(this, exp.Column)
5374
+ and not this.table
5375
+ and not this.this.quoted
5376
+ and self._curr
5377
+ and self._curr.text.upper() not in self.dialect.VALID_INTERVAL_UNITS
5378
+ ):
5379
+ self._retreat(index)
5380
+ return None
5381
+
5382
+ interval = self._parse_interval_span(this)
5262
5383
 
5263
5384
  index = self._index
5264
5385
  self._match(TokenType.PLUS)
@@ -5590,8 +5711,8 @@ class Parser(metaclass=_Parser):
5590
5711
  elif self._match_text_seq("WITHOUT", "TIME", "ZONE"):
5591
5712
  maybe_func = False
5592
5713
  elif type_token == TokenType.INTERVAL:
5593
- unit = self._parse_var(upper=True)
5594
- if unit:
5714
+ if self._curr and self._curr.text.upper() in self.dialect.VALID_INTERVAL_UNITS:
5715
+ unit = self._parse_var(upper=True)
5595
5716
  if self._match_text_seq("TO"):
5596
5717
  unit = exp.IntervalSpan(this=unit, expression=self._parse_var(upper=True))
5597
5718
 
@@ -6076,7 +6197,8 @@ class Parser(metaclass=_Parser):
6076
6197
  this = exp.Identifier(this=this, quoted=True).update_positions(token)
6077
6198
 
6078
6199
  this = self.expression(exp.Anonymous, this=this, expressions=args)
6079
- this = this.update_positions(token)
6200
+
6201
+ this = this.update_positions(token)
6080
6202
 
6081
6203
  if isinstance(this, exp.Expression):
6082
6204
  this.add_comments(comments)
@@ -6231,6 +6353,14 @@ class Parser(metaclass=_Parser):
6231
6353
  not_null=self._match_pair(TokenType.NOT, TokenType.NULL),
6232
6354
  )
6233
6355
  constraints.append(self.expression(exp.ColumnConstraint, kind=constraint_kind))
6356
+ elif not kind and self._match_set({TokenType.IN, TokenType.OUT}, advance=False):
6357
+ in_out_constraint = self.expression(
6358
+ exp.InOutColumnConstraint,
6359
+ input_=self._match(TokenType.IN),
6360
+ output=self._match(TokenType.OUT),
6361
+ )
6362
+ constraints.append(in_out_constraint)
6363
+ kind = self._parse_types()
6234
6364
  elif (
6235
6365
  kind
6236
6366
  and self._match(TokenType.ALIAS, advance=False)
@@ -6289,6 +6419,16 @@ class Parser(metaclass=_Parser):
6289
6419
 
6290
6420
  return exp.AutoIncrementColumnConstraint()
6291
6421
 
6422
+ def _parse_check_constraint(self) -> t.Optional[exp.CheckColumnConstraint]:
6423
+ if not self._match(TokenType.L_PAREN, advance=False):
6424
+ return None
6425
+
6426
+ return self.expression(
6427
+ exp.CheckColumnConstraint,
6428
+ this=self._parse_wrapped(self._parse_assignment),
6429
+ enforced=self._match_text_seq("ENFORCED"),
6430
+ )
6431
+
6292
6432
  def _parse_auto_property(self) -> t.Optional[exp.AutoRefreshProperty]:
6293
6433
  if not self._match_text_seq("REFRESH"):
6294
6434
  self._retreat(self._index - 1)
@@ -6382,11 +6522,12 @@ class Parser(metaclass=_Parser):
6382
6522
  )
6383
6523
 
6384
6524
  if not procedure_option_follows and self._match_texts(self.CONSTRAINT_PARSERS):
6385
- return self.expression(
6386
- exp.ColumnConstraint,
6387
- this=this,
6388
- kind=self.CONSTRAINT_PARSERS[self._prev.text.upper()](self),
6389
- )
6525
+ constraint = self.CONSTRAINT_PARSERS[self._prev.text.upper()](self)
6526
+ if not constraint:
6527
+ self._retreat(self._index - 1)
6528
+ return None
6529
+
6530
+ return self.expression(exp.ColumnConstraint, this=this, kind=constraint)
6390
6531
 
6391
6532
  return this
6392
6533
 
@@ -6413,6 +6554,8 @@ class Parser(metaclass=_Parser):
6413
6554
  def _parse_unnamed_constraint(
6414
6555
  self, constraints: t.Optional[t.Collection[str]] = None
6415
6556
  ) -> t.Optional[exp.Expression]:
6557
+ index = self._index
6558
+
6416
6559
  if self._match(TokenType.IDENTIFIER, advance=False) or not self._match_texts(
6417
6560
  constraints or self.CONSTRAINT_PARSERS
6418
6561
  ):
@@ -6422,7 +6565,11 @@ class Parser(metaclass=_Parser):
6422
6565
  if constraint not in self.CONSTRAINT_PARSERS:
6423
6566
  self.raise_error(f"No parser found for schema constraint {constraint}.")
6424
6567
 
6425
- return self.CONSTRAINT_PARSERS[constraint](self)
6568
+ constraint = self.CONSTRAINT_PARSERS[constraint](self)
6569
+ if not constraint:
6570
+ self._retreat(index)
6571
+
6572
+ return constraint
6426
6573
 
6427
6574
  def _parse_unique_key(self) -> t.Optional[exp.Expression]:
6428
6575
  return self._parse_id_var(any_token=False)
@@ -6516,7 +6663,7 @@ class Parser(metaclass=_Parser):
6516
6663
  )
6517
6664
 
6518
6665
  def _parse_primary_key_part(self) -> t.Optional[exp.Expression]:
6519
- return self._parse_ordered() or self._parse_field()
6666
+ return self._parse_field()
6520
6667
 
6521
6668
  def _parse_period_for_system_time(self) -> t.Optional[exp.PeriodForSystemTimeConstraint]:
6522
6669
  if not self._match(TokenType.TIMESTAMP_SNAPSHOT):
@@ -6653,7 +6800,7 @@ class Parser(metaclass=_Parser):
6653
6800
  self._advance()
6654
6801
  end: t.Optional[exp.Expression] = -exp.Literal.number("1")
6655
6802
  else:
6656
- end = self._parse_unary()
6803
+ end = self._parse_assignment()
6657
6804
  step = self._parse_unary() if self._match(TokenType.COLON) else None
6658
6805
  return self.expression(exp.Slice, this=this, expression=end, step=step)
6659
6806
 
@@ -6747,6 +6894,13 @@ class Parser(metaclass=_Parser):
6747
6894
  gap_fill = exp.GapFill.from_arg_list(args)
6748
6895
  return self.validate_expression(gap_fill, args)
6749
6896
 
6897
+ def _parse_char(self) -> exp.Chr:
6898
+ return self.expression(
6899
+ exp.Chr,
6900
+ expressions=self._parse_csv(self._parse_assignment),
6901
+ charset=self._match(TokenType.USING) and self._parse_var(),
6902
+ )
6903
+
6750
6904
  def _parse_cast(self, strict: bool, safe: t.Optional[bool] = None) -> exp.Expression:
6751
6905
  this = self._parse_disjunction()
6752
6906
 
@@ -6864,7 +7018,7 @@ class Parser(metaclass=_Parser):
6864
7018
 
6865
7019
  if self._match(TokenType.USING):
6866
7020
  to: t.Optional[exp.Expression] = self.expression(
6867
- exp.CharacterSet, this=self._parse_var()
7021
+ exp.CharacterSet, this=self._parse_var(tokens={TokenType.BINARY})
6868
7022
  )
6869
7023
  elif self._match(TokenType.COMMA):
6870
7024
  to = self._parse_types()
@@ -6873,6 +7027,22 @@ class Parser(metaclass=_Parser):
6873
7027
 
6874
7028
  return self.build_cast(strict=strict, this=this, to=to, safe=safe)
6875
7029
 
7030
+ def _parse_xml_element(self) -> exp.XMLElement:
7031
+ if self._match_text_seq("EVALNAME"):
7032
+ evalname = True
7033
+ this = self._parse_bitwise()
7034
+ else:
7035
+ evalname = None
7036
+ self._match_text_seq("NAME")
7037
+ this = self._parse_id_var()
7038
+
7039
+ return self.expression(
7040
+ exp.XMLElement,
7041
+ this=this,
7042
+ expressions=self._match(TokenType.COMMA) and self._parse_csv(self._parse_bitwise),
7043
+ evalname=evalname,
7044
+ )
7045
+
6876
7046
  def _parse_xml_table(self) -> exp.XMLTable:
6877
7047
  namespaces = None
6878
7048
  passing = None
@@ -8498,7 +8668,7 @@ class Parser(metaclass=_Parser):
8498
8668
  if not self._match(TokenType.WITH):
8499
8669
  return this
8500
8670
 
8501
- op = self._parse_var(any_token=True)
8671
+ op = self._parse_var(any_token=True, tokens=self.RESERVED_TOKENS)
8502
8672
 
8503
8673
  return self.expression(exp.WithOperator, this=this, op=op)
8504
8674
 
@@ -9077,3 +9247,26 @@ class Parser(metaclass=_Parser):
9077
9247
  expr.set("expression", exp.Literal.string(self.dialect.INITCAP_DEFAULT_DELIMITER_CHARS))
9078
9248
 
9079
9249
  return expr
9250
+
9251
+ def _parse_operator(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
9252
+ while True:
9253
+ if not self._match(TokenType.L_PAREN):
9254
+ break
9255
+
9256
+ op = ""
9257
+ while self._curr and not self._match(TokenType.R_PAREN):
9258
+ op += self._curr.text
9259
+ self._advance()
9260
+
9261
+ this = self.expression(
9262
+ exp.Operator,
9263
+ comments=self._prev_comments,
9264
+ this=this,
9265
+ operator=op,
9266
+ expression=self._parse_bitwise(),
9267
+ )
9268
+
9269
+ if not self._match(TokenType.OPERATOR):
9270
+ break
9271
+
9272
+ return this