pbi-parsers 0.7.8 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. pbi_parsers-0.7.8/.github/workflows/publish-to-pypi.yml +56 -0
  2. pbi_parsers-0.7.8/.gitignore +5 -0
  3. pbi_parsers-0.7.8/PKG-INFO +66 -0
  4. pbi_parsers-0.7.8/README.md +50 -0
  5. pbi_parsers-0.7.8/docs/docs/api/dax/formatter.md +1 -0
  6. pbi_parsers-0.7.8/docs/docs/api/dax/lexer.md +1 -0
  7. pbi_parsers-0.7.8/docs/docs/api/dax/parser.md +1 -0
  8. pbi_parsers-0.7.8/docs/docs/api/pq/formatter.md +1 -0
  9. pbi_parsers-0.7.8/docs/docs/api/pq/lexer.md +1 -0
  10. pbi_parsers-0.7.8/docs/docs/api/pq/parser.md +1 -0
  11. pbi_parsers-0.7.8/docs/docs/api/shared/lexer.md +1 -0
  12. pbi_parsers-0.7.8/docs/docs/api/shared/text_slice.md +1 -0
  13. pbi_parsers-0.7.8/docs/docs/api/shared/token.md +1 -0
  14. pbi_parsers-0.7.8/docs/docs/index.md +151 -0
  15. pbi_parsers-0.7.8/docs/mkdocs.yml +53 -0
  16. pbi_parsers-0.7.8/pbi_parsers/__init__.py +9 -0
  17. pbi_parsers-0.7.8/pbi_parsers/base/__init__.py +7 -0
  18. pbi_parsers-0.7.8/pbi_parsers/base/lexer.py +127 -0
  19. pbi_parsers-0.7.8/pbi_parsers/base/tokens.py +61 -0
  20. pbi_parsers-0.7.8/pbi_parsers/dax/__init__.py +22 -0
  21. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/__init__.py +107 -0
  22. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/_base.py +46 -0
  23. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/_utils.py +45 -0
  24. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/add_sub.py +73 -0
  25. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/add_sub_unary.py +72 -0
  26. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/array.py +75 -0
  27. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/column.py +56 -0
  28. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/comparison.py +76 -0
  29. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/concatenation.py +73 -0
  30. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/div_mul.py +75 -0
  31. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/exponent.py +67 -0
  32. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/function.py +102 -0
  33. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/hierarchy.py +68 -0
  34. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/identifier.py +46 -0
  35. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/ins.py +67 -0
  36. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/keyword.py +60 -0
  37. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/literal_number.py +46 -0
  38. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/literal_string.py +45 -0
  39. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/logical.py +76 -0
  40. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/measure.py +44 -0
  41. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/none.py +30 -0
  42. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/parens.py +61 -0
  43. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/returns.py +76 -0
  44. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/table.py +51 -0
  45. pbi_parsers-0.7.8/pbi_parsers/dax/exprs/variable.py +68 -0
  46. pbi_parsers-0.7.8/pbi_parsers/dax/formatter.py +215 -0
  47. pbi_parsers-0.7.8/pbi_parsers/dax/lexer.py +222 -0
  48. pbi_parsers-0.7.8/pbi_parsers/dax/main.py +63 -0
  49. pbi_parsers-0.7.8/pbi_parsers/dax/parser.py +66 -0
  50. pbi_parsers-0.7.8/pbi_parsers/dax/tokens.py +54 -0
  51. pbi_parsers-0.7.8/pbi_parsers/dax/utils.py +120 -0
  52. pbi_parsers-0.7.8/pbi_parsers/pq/__init__.py +17 -0
  53. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/__init__.py +98 -0
  54. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/_base.py +33 -0
  55. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/_utils.py +31 -0
  56. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/add_sub.py +59 -0
  57. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/add_sub_unary.py +57 -0
  58. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/and_or_expr.py +60 -0
  59. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/array.py +53 -0
  60. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/arrow.py +50 -0
  61. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/column.py +42 -0
  62. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/comparison.py +62 -0
  63. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/concatenation.py +61 -0
  64. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/div_mul.py +59 -0
  65. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/each.py +41 -0
  66. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/ellipsis_expr.py +28 -0
  67. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/function.py +63 -0
  68. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/identifier.py +77 -0
  69. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/if_expr.py +70 -0
  70. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/is_expr.py +54 -0
  71. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/keyword.py +40 -0
  72. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/literal_number.py +31 -0
  73. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/literal_string.py +31 -0
  74. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/meta.py +54 -0
  75. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/negation.py +52 -0
  76. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/none.py +22 -0
  77. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/not_expr.py +39 -0
  78. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/parens.py +43 -0
  79. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/record.py +58 -0
  80. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/row.py +54 -0
  81. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/row_index.py +57 -0
  82. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/statement.py +67 -0
  83. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/try_expr.py +55 -0
  84. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/type_expr.py +78 -0
  85. pbi_parsers-0.7.8/pbi_parsers/pq/exprs/variable.py +52 -0
  86. pbi_parsers-0.7.8/pbi_parsers/pq/formatter.py +13 -0
  87. pbi_parsers-0.7.8/pbi_parsers/pq/lexer.py +219 -0
  88. pbi_parsers-0.7.8/pbi_parsers/pq/main.py +63 -0
  89. pbi_parsers-0.7.8/pbi_parsers/pq/parser.py +65 -0
  90. pbi_parsers-0.7.8/pbi_parsers/pq/tokens.py +81 -0
  91. pbi_parsers-0.7.8/pyproject.toml +64 -0
  92. pbi_parsers-0.7.8/test.py +17 -0
  93. pbi_parsers-0.7.8/tests/__init__.py +0 -0
  94. pbi_parsers-0.7.8/tests/test_dax/__init__.py +0 -0
  95. pbi_parsers-0.7.8/tests/test_dax/test_exprs/__init__.py +0 -0
  96. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_add_sub.py +39 -0
  97. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_add_sub_unary.py +39 -0
  98. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_array.py +29 -0
  99. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_column.py +28 -0
  100. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_comparison.py +30 -0
  101. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_concatenation.py +29 -0
  102. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_div_mul.py +29 -0
  103. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_exponent.py +29 -0
  104. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_function.py +30 -0
  105. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_hierarchy.py +31 -0
  106. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_identifier.py +19 -0
  107. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_ins.py +33 -0
  108. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_keyword.py +31 -0
  109. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_literal_number.py +26 -0
  110. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_literal_string.py +19 -0
  111. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_logical.py +30 -0
  112. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_measure.py +19 -0
  113. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_parens.py +28 -0
  114. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_returns.py +40 -0
  115. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_table.py +24 -0
  116. pbi_parsers-0.7.8/tests/test_dax/test_exprs/test_variable.py +39 -0
  117. pbi_parsers-0.7.8/tests/test_dax/test_formatter/__init__.py +0 -0
  118. pbi_parsers-0.7.8/tests/test_dax/test_formatter/test_basic.py +31 -0
  119. pbi_parsers-0.7.8/tests/test_dax/test_lexer.py +43 -0
@@ -0,0 +1,56 @@
+name: Publish Python Package to PyPI
+
+on:
+  push
+
+jobs:
+  build:
+    name: Build Package
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+
+      - name: Install pypa/build
+        run: >-
+          python3 -m
+          pip install
+          build
+          --user
+
+      - name: Build a binary wheel and a source tarball
+        run: python3 -m build
+
+      - name: Store the distribution packages
+        uses: actions/upload-artifact@v4
+        with:
+          name: python-package-distributions
+          path: dist/
+
+  publish-to-pypi:
+    name: Publish Package to PyPI
+    needs:
+      - build
+    runs-on: ubuntu-latest
+    environment:
+      name: pypi
+      url: https://pypi.org/p/pbi_parsers  # Replace <package-name> with your PyPI project name
+    permissions:
+      id-token: write  # IMPORTANT: mandatory for trusted publishing
+
+    steps:
+      - name: Download all the dists
+        uses: actions/download-artifact@v4
+        with:
+          name: python-package-distributions
+          path: dist/
+
+      - name: Publish Package to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,5 @@
+__pycache__
+venv
+dist
+docs/site
+.env
@@ -0,0 +1,66 @@
+Metadata-Version: 2.4
+Name: pbi_parsers
+Version: 0.7.8
+Summary: Power BI lexer, parsers, and formatters for DAX and M (Power Query) languages
+Requires-Python: >=3.11.0
+Requires-Dist: jinja2>=3.1.6
+Provides-Extra: dev
+Requires-Dist: build>=1.2.2; extra == 'dev'
+Requires-Dist: pre-commit>=3.8.0; extra == 'dev'
+Requires-Dist: ruff>=0.12.7; extra == 'dev'
+Provides-Extra: docs
+Requires-Dist: mkdocs-material>=9.6.16; extra == 'docs'
+Requires-Dist: mkdocs>=1.6.1; extra == 'docs'
+Requires-Dist: mkdocstrings-python>=0.30.0; extra == 'docs'
+Description-Content-Type: text/markdown
+
+# Overview
+
+Based on [Crafting Interpreters](https://timothya.com/pdfs/crafting-interpreters.pdf), this library provides lexers, parsers, and formatters for the DAX and Power Query (M) languages. It is designed for code introspection and analysis, not execution. This enables development of [ruff](https://github.com/astral-sh/ruff)-equivalent tools for DAX and Power Query, as well as extraction of metadata from DAX and Power Query code, such as PQ source types (Excel, SQL, etc.) and DAX lineage dependencies.
+
+For more information, see the [docs](https://douglassimonsen.github.io/pbi_parsers/).
+
+# Installation
+
+```shell
+python -m pip install pbi_parsers
+```
+
+# Dev Instructions
+
+## Set Up
+
+```shell
+python -m venv venv
+venv\Scripts\activate
+python -m pip install .
+pre-commit install
+```
+
+## Running the Documentation Server
+
+```shell
+python -m pip install .[docs]
+mkdocs serve -f docs/mkdocs.yml
+```
+
+## Deploy Docs to GitHub Pages
+
+```shell
+mkdocs gh-deploy --clean -f docs/mkdocs.yml
+```
+
+## Testing
+
+```shell
+pip install -e .
+```
+
+## Build Wheel
+
+```shell
+python -m build .
+```
@@ -0,0 +1,50 @@
+# Overview
+
+Based on [Crafting Interpreters](https://timothya.com/pdfs/crafting-interpreters.pdf), this library provides lexers, parsers, and formatters for the DAX and Power Query (M) languages. It is designed for code introspection and analysis, not execution. This enables development of [ruff](https://github.com/astral-sh/ruff)-equivalent tools for DAX and Power Query, as well as extraction of metadata from DAX and Power Query code, such as PQ source types (Excel, SQL, etc.) and DAX lineage dependencies.
+
+For more information, see the [docs](https://douglassimonsen.github.io/pbi_parsers/).
+
+# Installation
+
+```shell
+python -m pip install pbi_parsers
+```
+
+# Dev Instructions
+
+## Set Up
+
+```shell
+python -m venv venv
+venv\Scripts\activate
+python -m pip install .
+pre-commit install
+```
+
+## Running the Documentation Server
+
+```shell
+python -m pip install .[docs]
+mkdocs serve -f docs/mkdocs.yml
+```
+
+## Deploy Docs to GitHub Pages
+
+```shell
+mkdocs gh-deploy --clean -f docs/mkdocs.yml
+```
+
+## Testing
+
+```shell
+pip install -e .
+```
+
+## Build Wheel
+
+```shell
+python -m build .
+```
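
For readers skimming the diff, the README's claims boil down to a one-call API. The snippet below simply mirrors the `format_expression` example shipped in `docs/docs/index.md` later in this package, so nothing here is new API.

```python
# Mirrors the format_expression example in docs/docs/index.md of this package.
from pbi_parsers.dax import format_expression

input_dax = "func.name(arg1 + 1 + 2 + 3, func(), func(10000000000000), arg2)"
print(format_expression(input_dax))
# func.name(
#     arg1 + 1 + 2 + 3,
#     func(),
#     func(10000000000000),
#     arg2
# )
```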
@@ -0,0 +1 @@
+::: pbi_parsers.dax.Formatter
@@ -0,0 +1 @@
+::: pbi_parsers.dax.Lexer
@@ -0,0 +1 @@
+::: pbi_parsers.dax.Parser
@@ -0,0 +1 @@
+::: pbi_parsers.pq.Formatter
@@ -0,0 +1 @@
+::: pbi_parsers.pq.Lexer
@@ -0,0 +1 @@
+::: pbi_parsers.pq.Parser
@@ -0,0 +1 @@
+::: pbi_parsers.base.BaseLexer
@@ -0,0 +1 @@
+::: pbi_parsers.base.tokens.TextSlice
@@ -0,0 +1 @@
+::: pbi_parsers.base.BaseToken
@@ -0,0 +1,151 @@
+# Overview
+
+Based on [Crafting Interpreters](https://timothya.com/pdfs/crafting-interpreters.pdf), this library provides lexers, parsers, and formatters for the DAX and Power Query (M) languages. It is designed for code introspection and analysis, not execution. This enables development of [ruff](https://github.com/astral-sh/ruff)-equivalent tools for DAX and Power Query, as well as extraction of metadata from DAX and Power Query code, such as PQ source types (Excel, SQL, etc.) and DAX lineage dependencies.
+
+# Installation
+
+```shell
+python -m pip install pbi_parsers
+```
+
+## Functionality
+
+!!! info "Rust Implementation"
+    Although the library is primarily implemented in Python, there are plans to implement a Rust version for performance and efficiency.
+
+- DAX
+    * [x] Lexer
+    * [x] Parser
+    * [x] Formatter
+    * [x] Testing
+    * [ ] Rust Implementation
+- Power Query (M)
+    * [x] Lexer
+    * [x] Parser
+    * [ ] Formatter
+    * [ ] Testing
+    * [ ] Rust Implementation
+
+## Examples
+
+!!! info "Formatting DAX Expressions"
+    Like `ruff` for Python, this library can format DAX expressions to improve readability and maintainability.
+
+```python
+from pbi_parsers.dax import format_expression
+
+input_dax = """
+func.name(arg1 + 1 + 2 + 3, func(), func(10000000000000), arg2)
+"""
+formatted_dax = format_expression(input_dax)
+print(formatted_dax)
+# Output:
+# func.name(
+#     arg1 + 1 + 2 + 3,
+#     func(),
+#     func(10000000000000),
+#     arg2
+# )
+```
+
+!!! info "Creating AST Trees from DAX Expressions"
+    The library can parse DAX expressions into Abstract Syntax Trees (ASTs) for further analysis or manipulation.
+
+```python
+from pbi_parsers.dax import to_ast
+
+input_dax = """
+func.name(arg1 + 1 + 2 + 3, func(), func(10000000000000), arg2)
+"""
+ast = to_ast(input_dax)
+print(ast)
+# Output:
+# Function (
+#     name: func.name,
+#     args: Add (
+#         left: Identifier (arg1),
+#         right: Add (
+#             left: Number (1),
+#             right: Add (
+#                 left: Number (2),
+#                 right: Number (3)
+#             )
+#         )
+#     ),
+#     Function (
+#         name: func,
+#         args:
+#     ),
+#     Function (
+#         name: func,
+#         args: Number (10000000000000)
+#     ),
+#     Identifier (arg2)
+# )
+```
+
+!!! info "Highlighting DAX Sections"
+    The library can highlight sections of DAX code, making it easier to identify and analyze specific parts of the code.
+
+    Note: in the console, the caret (`^`) will be yellow and the line number will be cyan.
+
+```python
+from pbi_parsers.dax import highlight_section, to_ast
+
+input_dax = """
+func.name(
+    arg1 +
+    1 +
+    2 + 3,
+    func(),
+    func(10000000000000),
+    arg2
+)
+"""
+ast = to_ast(input_dax)
+assert ast is not None, "AST should not be None"
+section = ast.args[0].right.left  # the "1" in "arg1 + 1 + 2 + 3"
+highlighted = highlight_section(section)
+print(highlighted.to_console())
+
+# Output:
+# 1 | func.name(
+# 2 |     arg1 +
+# 3 |     1 +
+#         ^
+# 4 |     2 + 3,
+# 5 |     func(),
+```
+
+Highlighting a larger section:
+
+```python
+from pbi_parsers.dax import highlight_section, to_ast
+
+input_dax = """
+func.name(
+    arg1 +
+    1 +
+    2 + 3,
+    func(),
+    func(10000000000000),
+    arg2
+)
+"""
+ast = to_ast(input_dax)
+assert ast is not None, "AST should not be None"
+section = ast.args[0].right  # the "1 + 2 + 3" in "arg1 + 1 + 2 + 3"
+highlighted = highlight_section(section)
+print(highlighted.to_console())
+# Output:
+# 1 | func.name(
+# 2 |     arg1 +
+# 3 |     1 +
+#         ^^^
+# 4 |     2 + 3,
+#     ^^^^^^^^^^^^^^^^^
+# 5 |     func(),
+# 6 |     func(10000000000000),
+```
@@ -0,0 +1,53 @@
+site_name: "pbi_parsers Documentation"
+
+nav:
+  - Home: index.md
+  - API:
+      - DAX:
+          - Lexer: api/dax/lexer.md
+          - Parser: api/dax/parser.md
+          - Formatter: api/dax/formatter.md
+      - PQ:
+          - Lexer: api/pq/lexer.md
+          - Parser: api/pq/parser.md
+          - Formatter: api/pq/formatter.md
+      - Shared:
+          - Lexer: api/shared/lexer.md
+          - Token: api/shared/token.md
+          - Text Slice: api/shared/text_slice.md
+
+theme:
+  name: "material"
+  features:
+    - content.code.copy
+    - content.code.select
+    - content.code.annotate
+
+plugins:
+  - search
+  - mkdocstrings:
+      default_handler: python
+      handlers:
+        python:
+          options:
+            backlinks: tree
+            separate_signature: true
+            show_signature_annotations: true
+
+markdown_extensions:
+  - pymdownx.highlight:
+      anchor_linenums: true
+      line_spans: __span
+      pygments_lang_class: true
+  - admonition
+  - pymdownx.details
+  - pymdownx.inlinehilite
+  - pymdownx.snippets
+  - pymdownx.superfences:
+      custom_fences:
+        - name: mermaid
+          class: mermaid
+          format: !!python/name:pymdownx.superfences.fence_code_format
+
+  - pymdownx.tasklist:
+      custom_checkbox: true
@@ -0,0 +1,9 @@
+from . import dax, pq
+
+__version__ = "0.7.8"
+
+
+__all__ = [
+    "dax",
+    "pq",
+]
@@ -0,0 +1,7 @@
+from .lexer import BaseLexer
+from .tokens import BaseToken
+
+__all__ = [
+    "BaseLexer",
+    "BaseToken",
+]
@@ -0,0 +1,127 @@
+from collections.abc import Callable
+
+from .tokens import BaseToken
+
+MAX_POSITION = 1_000_000
+
+
+class BaseLexer:
+    source: str
+    start_position: int
+    current_position: int
+    tokens: list[BaseToken]
+
+    def __init__(self, source: str) -> None:
+        self.source = source
+        self.start_position = 0
+        self.current_position = 0
+        self.tokens = []
+
+    def scan_helper(self) -> BaseToken:
+        """Scans and returns the next token from the current position."""
+        msg = "Subclasses should implement the scan_helper method."
+        raise NotImplementedError(msg)
+
+    def match(
+        self,
+        matcher: Callable[[str], bool] | str,
+        chunk: int = 1,
+        *,
+        case_insensitive: bool = True,
+    ) -> bool:
+        """Match a string or a callable matcher against the current position in the source.
+
+        Args:
+        ----
+            matcher (Callable[[str], bool] | str): A string to match or a callable that
+                takes a string and returns a boolean.
+            chunk (int): The number of characters to check from the current position.
+            case_insensitive (bool): If True, perform a case-insensitive match __only__ for strings.
+
+        """
+        if isinstance(matcher, str):
+            chunk = len(matcher)
+
+        string_chunk = self.peek(chunk)
+        if not string_chunk:
+            return False
+
+        if isinstance(matcher, str):
+            if case_insensitive:
+                string_chunk = string_chunk.lower()
+                matcher = matcher.lower()
+            if string_chunk == matcher:
+                self.advance(chunk)
+                return True
+            return False
+
+        if matcher(string_chunk):
+            self.advance(chunk)
+            return True
+        return False
+
+    def peek(self, chunk: int = 1) -> str:
+        """Returns the next chunk of text from the current position. Defaults to a single character.
+
+        Args:
+            chunk (int): The number of characters to return from the current position.
+
+        Returns:
+            str: The next chunk of text from the current position.
+
+        """
+        return (
+            self.source[self.current_position : self.current_position + chunk]
+            if self.current_position < len(self.source)
+            else ""
+        )
+
+    def remaining(self) -> str:
+        """Returns the remaining text from the current position to the end of the source.
+
+        Only used for testing and debugging purposes.
+
+        Returns:
+            str: The remaining text from the current position to the end of the source.
+
+        """
+        return self.source[self.current_position :]
+
+    def advance(self, chunk: int = 1) -> None:
+        """Advances the current position by the specified chunk size.
+
+        Generally used alongside peek to consume characters.
+
+        Args:
+            chunk (int): The number of characters to advance the current position.
+
+        Raises:
+            ValueError: If the current position exceeds a predefined MAX_POSITION (1,000,000 characters).
+                This is to avoid errors with the lexer causing the process to hang.
+
+        """
+        if self.current_position > MAX_POSITION:
+            msg = f"Current position exceeds {MAX_POSITION:,} characters."
+            raise ValueError(msg)
+        self.current_position += chunk
+
+    def scan(self) -> tuple[BaseToken, ...]:
+        """Repeatedly calls scan_helper until the end of the source is reached.
+
+        Returns:
+            tuple[BaseToken, ...]: A tuple of tokens scanned from the source.
+
+        """
+        while not self.at_end():
+            self.tokens.append(self.scan_helper())
+        return tuple(self.tokens)
+
+    def at_end(self) -> bool:
+        """Checks if the current position is at (or beyond) the end of the source.
+
+        Returns:
+            bool: True if the current position is at or beyond the end of the source, False
+                otherwise.
+
+        """
+        return self.current_position >= len(self.source)
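
`BaseLexer` supplies the cursor primitives (`peek`, `match`, `advance`, `scan`) and leaves `scan_helper` to subclasses. As a rough sketch only (the `TOK` enum and the whitespace/number rules below are invented for illustration and are not the package's DAX or PQ rules), a subclass might look like this:

```python
# Hypothetical BaseLexer subclass, for illustration only. The TOK enum and
# the character rules are invented here; BaseLexer.match/peek/advance/scan and
# BaseToken/TextSlice come from the modules shown in this diff.
from enum import Enum, auto

from pbi_parsers.base import BaseLexer, BaseToken
from pbi_parsers.base.tokens import TextSlice


class TOK(Enum):
    WHITESPACE = auto()
    NUMBER = auto()
    OTHER = auto()


class TinyLexer(BaseLexer):
    def scan_helper(self) -> BaseToken:
        self.start_position = self.current_position
        if self.match(str.isspace):
            while self.match(str.isspace):  # consume a run of whitespace
                pass
            tok_type = TOK.WHITESPACE
        elif self.match(str.isdigit):
            while self.match(str.isdigit):  # consume a run of digits
                pass
            tok_type = TOK.NUMBER
        else:
            self.advance()  # consume a single unrecognized character
            tok_type = TOK.OTHER
        return BaseToken(
            tok_type=tok_type,
            text_slice=TextSlice(self.source, self.start_position, self.current_position),
        )


print(TinyLexer("12 + 34").scan())
# e.g. (Token(type=NUMBER, text='12'), Token(type=WHITESPACE, text=' '), ...)
```

The real lexers in `pbi_parsers/dax/lexer.py` and `pbi_parsers/pq/lexer.py` presumably follow the same pattern with their own token types.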
@@ -0,0 +1,61 @@
+from dataclasses import dataclass, field
+from typing import Any
+
+
+@dataclass
+class TextSlice:
+    full_text: str = ""
+    start: int = -1
+    end: int = -1
+
+    def __eq__(self, other: object) -> bool:
+        """Checks equality based on the text slice."""
+        if not isinstance(other, TextSlice):
+            return NotImplemented
+        return self.full_text == other.full_text and self.start == other.start and self.end == other.end
+
+    def __hash__(self) -> int:
+        """Returns a hash based on the text slice."""
+        return hash((self.full_text, self.start, self.end))
+
+    def __repr__(self) -> str:
+        """Returns a string representation of the TextSlice."""
+        return f"TextSlice(text='{self.get_text()}', start={self.start}, end={self.end})"
+
+    def get_text(self) -> str:
+        """Returns the text slice."""
+        return self.full_text[self.start : self.end]
+
+
+@dataclass
+class BaseToken:
+    tok_type: Any
+    text_slice: TextSlice = field(default_factory=TextSlice)
+
+    def __repr__(self) -> str:
+        pretty_text = self.text_slice.get_text().replace("\n", "\\n").replace("\r", "\\r")
+        return f"Token(type={self.tok_type.name}, text='{pretty_text}')"
+
+    @property
+    def text(self) -> str:
+        """Returns the text underlying the token."""
+        return self.text_slice.get_text()
+
+    def position(self) -> tuple[int, int]:
+        """Returns the start and end positions of the token.
+
+        Returns:
+            tuple[int, int]: A tuple containing the start and end positions of the token within the source text.
+
+        """
+        return self.text_slice.start, self.text_slice.end
+
+    def __eq__(self, other: object) -> bool:
+        """Checks equality based on token type and text slice."""
+        if not isinstance(other, BaseToken):
+            return NotImplemented
+        return self.tok_type == other.tok_type and self.text_slice == other.text_slice
+
+    def __hash__(self) -> int:
+        """Returns a hash based on token type and text slice."""
+        return hash((self.tok_type, self.text_slice))
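
A small illustration of how `TextSlice` and `BaseToken` fit together (the `MyTok` enum is a stand-in; the concrete token types live in the `dax/tokens.py` and `pq/tokens.py` modules listed above):

```python
# Illustration of TextSlice/BaseToken from pbi_parsers.base.tokens.
# MyTok is a stand-in enum invented for this example.
from enum import Enum, auto

from pbi_parsers.base.tokens import BaseToken, TextSlice


class MyTok(Enum):
    IDENTIFIER = auto()


source = "Sales[Amount]"
slice_ = TextSlice(full_text=source, start=0, end=5)

print(slice_.get_text())  # "Sales", the substring covered by the slice
print(slice_)             # TextSlice(text='Sales', start=0, end=5)

token = BaseToken(tok_type=MyTok.IDENTIFIER, text_slice=slice_)
print(token.text)         # "Sales", a property delegating to text_slice.get_text()
print(token.position())   # (0, 5), the (start, end) offsets into the source
```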
@@ -0,0 +1,22 @@
+from . import exprs, utils
+from .exprs import Expression
+from .formatter import Formatter
+from .lexer import Lexer
+from .main import format_expression, to_ast
+from .parser import Parser
+from .tokens import Token, TokenType
+from .utils import highlight_section
+
+__all__ = [
+    "Expression",
+    "Formatter",
+    "Lexer",
+    "Parser",
+    "Token",
+    "TokenType",
+    "exprs",
+    "format_expression",
+    "highlight_section",
+    "to_ast",
+    "utils",
+]
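
A hedged sketch of how this public surface might be exercised: `to_ast` is demonstrated in `docs/docs/index.md`, while `Lexer(source).scan()` and the `tok_type`/`text` attributes are assumptions carried over from the `BaseLexer`/`BaseToken` base classes shown earlier in this diff, not confirmed signatures of the DAX modules.

```python
# Sketch only: Lexer.scan() and Token.tok_type/.text are assumed to follow the
# BaseLexer/BaseToken interfaces shown above; to_ast is confirmed by the docs.
from pbi_parsers.dax import Lexer, to_ast

source = "func.name(arg1 + 1, arg2)"

tokens = Lexer(source).scan()  # assumed: inherited from BaseLexer.scan()
for tok in tokens:
    print(tok.tok_type, repr(tok.text))  # assumed: BaseToken attributes

ast = to_ast(source)  # confirmed by docs/docs/index.md
print(ast)            # prints the nested expression tree
```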