@ota-meshi/ast-token-store 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +138 -0
- package/lib/index.d.mts +245 -0
- package/lib/index.mjs +423 -0
- package/package.json +112 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Yosuke Ota
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
# @ota-meshi/ast-token-store
|
|
2
|
+
|
|
3
|
+
[](https://www.npmjs.com/package/@ota-meshi/ast-token-store)
|
|
4
|
+
[](https://github.com/ota-meshi/ast-token-store/blob/main/LICENSE)
|
|
5
|
+
|
|
6
|
+
A class library that provides an API similar to [ESLint's SourceCode#getFirstToken and related methods](https://eslint.org/docs/latest/extend/custom-rules#accessing-the-source-code) for any AST.
|
|
7
|
+
|
|
8
|
+
## Installation
|
|
9
|
+
|
|
10
|
+
```bash
|
|
11
|
+
npm install @ota-meshi/ast-token-store
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
## Requirements
|
|
15
|
+
|
|
16
|
+
- Node.js `^20.19.0 || ^22.13.0 || >=24`
|
|
17
|
+
|
|
18
|
+
## Usage
|
|
19
|
+
|
|
20
|
+
### Basic Example
|
|
21
|
+
|
|
22
|
+
The `TokenStore` class provides a way to query tokens from any AST that uses `range: [number, number]` to represent source locations.
|
|
23
|
+
|
|
24
|
+
```ts
|
|
25
|
+
import { TokenStore } from "@ota-meshi/ast-token-store";
|
|
26
|
+
|
|
27
|
+
// Define your TokenStore subclass (or use it directly)
|
|
28
|
+
const store = new TokenStore({
|
|
29
|
+
// Provide all tokens and comments, sorted or unsorted
|
|
30
|
+
tokens: [...ast.tokens, ...ast.comments],
|
|
31
|
+
// A type guard to distinguish comments from regular tokens
|
|
32
|
+
isComment: (token): token is Comment => token.type === "Comment",
|
|
33
|
+
});
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
### Constructor
|
|
37
|
+
|
|
38
|
+
```ts
|
|
39
|
+
new TokenStore<Node, Token, Comment>({
|
|
40
|
+
tokens: (Token | Comment)[],
|
|
41
|
+
isComment: (token: Token | Comment) => token is Comment,
|
|
42
|
+
})
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
- **`tokens`** — An array of all tokens and comments.
|
|
46
|
+
- **`isComment`** — A type guard function that returns `true` if a given token is a comment.
|
|
47
|
+
|
|
48
|
+
The class has three generic type parameters:
|
|
49
|
+
|
|
50
|
+
| Parameter | Description |
|
|
51
|
+
| --------- | ------------------------------------------------------- |
|
|
52
|
+
| `Node` | The AST node type (must have `range: [number, number]`) |
|
|
53
|
+
| `Token` | The token type (must have `range: [number, number]`) |
|
|
54
|
+
| `Comment` | The comment type (must have `range: [number, number]`) |
|
|
55
|
+
|
|
56
|
+
### Methods
|
|
57
|
+
|
|
58
|
+
All methods accept options for filtering, skipping, counting, and including comments.
|
|
59
|
+
|
|
60
|
+
#### Single Token Methods
|
|
61
|
+
|
|
62
|
+
| Method | Description |
|
|
63
|
+
| --------------------------------------------- | ------------------------------------------------------ |
|
|
64
|
+
| `getFirstToken(node, options?)` | Gets the first token of the given node |
|
|
65
|
+
| `getLastToken(node, options?)` | Gets the last token of the given node |
|
|
66
|
+
| `getTokenAfter(node, options?)` | Gets the token that follows the given node |
|
|
67
|
+
| `getTokenBefore(node, options?)` | Gets the token that precedes the given node |
|
|
68
|
+
| `getFirstTokenBetween(left, right, options?)` | Gets the first token between two non-overlapping nodes |
|
|
69
|
+
| `getLastTokenBetween(left, right, options?)` | Gets the last token between two non-overlapping nodes |
|
|
70
|
+
|
|
71
|
+
#### Multiple Token Methods
|
|
72
|
+
|
|
73
|
+
| Method | Description |
|
|
74
|
+
| ---------------------------------------------- | --------------------------------------------------- |
|
|
75
|
+
| `getFirstTokens(node, options?)` | Gets the first `count` tokens of the given node |
|
|
76
|
+
| `getLastTokens(node, options?)` | Gets the last `count` tokens of the given node |
|
|
77
|
+
| `getTokensAfter(node, options?)` | Gets the `count` tokens that follow the given node |
|
|
78
|
+
| `getTokensBefore(node, options?)` | Gets the `count` tokens that precede the given node |
|
|
79
|
+
| `getFirstTokensBetween(left, right, options?)` | Gets the first `count` tokens between two nodes |
|
|
80
|
+
| `getLastTokensBetween(left, right, options?)` | Gets the last `count` tokens between two nodes |
|
|
81
|
+
| `getTokens(node, options?)` | Gets all tokens within the given node |
|
|
82
|
+
| `getTokensBetween(left, right, options?)` | Gets all tokens between two nodes |
|
|
83
|
+
| `getAllTokens()` | Gets all tokens including comments |
|
|
84
|
+
|
|
85
|
+
#### Comment Methods
|
|
86
|
+
|
|
87
|
+
| Method | Description |
|
|
88
|
+
| ----------------------------------- | ------------------------------------------------------------ |
|
|
89
|
+
| `getCommentsBefore(nodeOrToken)` | Gets all comment tokens directly before the given node/token |
|
|
90
|
+
| `getCommentsAfter(nodeOrToken)` | Gets all comment tokens directly after the given node/token |
|
|
91
|
+
| `commentsExistBetween(left, right)` | Checks if any comments exist between two nodes |
|
|
92
|
+
|
|
93
|
+
### Options
|
|
94
|
+
|
|
95
|
+
Single token methods (`getFirstToken`, `getLastToken`, etc.) accept skip options:
|
|
96
|
+
|
|
97
|
+
```ts
|
|
98
|
+
// Skip N tokens
|
|
99
|
+
store.getFirstToken(node, { skip: 1 });
|
|
100
|
+
// Shorthand: pass a number directly
|
|
101
|
+
store.getFirstToken(node, 1);
|
|
102
|
+
|
|
103
|
+
// Filter tokens
|
|
104
|
+
store.getFirstToken(node, {
|
|
105
|
+
filter: (token) => token.type === "Punctuator",
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
// Include comments in the search
|
|
109
|
+
store.getFirstToken(node, { includeComments: true });
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
Multiple token methods (`getFirstTokens`, `getTokensAfter`, etc.) accept count options:
|
|
113
|
+
|
|
114
|
+
```ts
|
|
115
|
+
// Get up to N tokens
|
|
116
|
+
store.getFirstTokens(node, { count: 3 });
|
|
117
|
+
// Shorthand: pass a number directly
|
|
118
|
+
store.getFirstTokens(node, 3);
|
|
119
|
+
|
|
120
|
+
// Filter and count
|
|
121
|
+
store.getTokensAfter(node, {
|
|
122
|
+
filter: (token) => token.type === "Punctuator",
|
|
123
|
+
count: 2,
|
|
124
|
+
});
|
|
125
|
+
|
|
126
|
+
// Include comments
|
|
127
|
+
store.getTokens(node, { includeComments: true });
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
## Attribution
|
|
131
|
+
|
|
132
|
+
This library aims for compatibility with ESLint's `SourceCode` token API and its method surface. The method design follows ideas described by ESLint, which is licensed under MIT. The implementation here is original and does not reuse ESLint code.
|
|
133
|
+
|
|
134
|
+
ESLint is distributed under the MIT License. See the ESLint repository for details: <https://github.com/eslint/eslint/blob/main/LICENSE>
|
|
135
|
+
|
|
136
|
+
## License
|
|
137
|
+
|
|
138
|
+
[MIT](./LICENSE)
|
package/lib/index.d.mts
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
//#region src/types.d.ts
/**
 * Minimal structural requirement shared by nodes, tokens, and comments:
 * a [start, end] offset pair into the source text.
 */
type SyntaxElement = {
	range: [number, number];
};
/**
 * A token predicate: either a type guard narrowing to `R` or a plain
 * boolean-returning function.
 */
type TokenFilter<E extends SyntaxElement, R extends E = E> = ((tokenOrComment: E) => tokenOrComment is R) | ((tokenOrComment: E) => boolean);
/** Skip-style options without a filter: a bare skip count or an options object. */
type CursorWithSkipOptionsWithoutFilter = number | {
	includeComments?: false;
	filter?: undefined;
	skip?: number;
};
/** Skip-style options carrying a filter; comments are excluded from matches. */
type CursorWithSkipOptionsWithFilter<Token extends SyntaxElement, R extends Token = Token> = TokenFilter<Token, R> | {
	includeComments?: false;
	filter: TokenFilter<Token, R>;
	skip?: number;
};
/** Skip-style options that include comments in the search. */
type CursorWithSkipOptionsWithComment<Token extends SyntaxElement, Comment extends SyntaxElement, R extends Token | Comment = Token | Comment> = {
	includeComments: true;
	filter?: TokenFilter<Token | Comment, R>;
	skip?: number;
};
/** Count-style options without a filter: a bare count or an options object. */
type CursorWithCountOptionsWithoutFilter = number | {
	includeComments?: false;
	filter?: undefined;
	count?: number;
};
/** Count-style options carrying a filter; comments are excluded from matches. */
type CursorWithCountOptionsWithFilter<Token extends SyntaxElement, R extends Token = Token> = TokenFilter<Token, R> | {
	includeComments?: false;
	filter: TokenFilter<Token, R>;
	count?: number;
};
/** Count-style options that include comments in the search. */
type CursorWithCountOptionsWithComment<Token extends SyntaxElement, Comment extends SyntaxElement, R extends Token | Comment = Token | Comment> = {
	includeComments: true;
	filter?: TokenFilter<Token | Comment, R>;
	count?: number;
};
//#endregion
|
|
37
|
+
//#region src/token-store/token-store.d.ts
|
|
38
|
+
declare const PRIVATE: unique symbol;
|
|
39
|
+
declare class TokenStore<Node extends SyntaxElement, Token extends SyntaxElement, Comment extends SyntaxElement> {
|
|
40
|
+
private readonly [PRIVATE];
|
|
41
|
+
constructor(params: {
|
|
42
|
+
tokens: (Token | Comment)[];
|
|
43
|
+
isComment: (token: Token | Comment) => token is Comment;
|
|
44
|
+
});
|
|
45
|
+
/**
|
|
46
|
+
* Gets all tokens, including comments.
|
|
47
|
+
*/
|
|
48
|
+
getAllTokens(): (Token | Comment)[];
|
|
49
|
+
/**
|
|
50
|
+
* Gets the first token of the given node.
|
|
51
|
+
*/
|
|
52
|
+
getFirstToken(node: Node | Token): Token;
|
|
53
|
+
/**
|
|
54
|
+
* Gets the first token of the given node with simple options.
|
|
55
|
+
*/
|
|
56
|
+
getFirstToken(node: Node | Token | Comment, options: CursorWithSkipOptionsWithoutFilter): Token | null;
|
|
57
|
+
/**
|
|
58
|
+
* Gets the first token of the given node with options.
|
|
59
|
+
*/
|
|
60
|
+
getFirstToken<R extends Token>(node: Node | Token | Comment, options: CursorWithSkipOptionsWithFilter<Token, R>): R | null;
|
|
61
|
+
/**
|
|
62
|
+
* Gets the first token of the given node with options.
|
|
63
|
+
*/
|
|
64
|
+
getFirstToken<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithSkipOptionsWithComment<Token, Comment, R>): R | null;
|
|
65
|
+
/**
|
|
66
|
+
* Gets the first tokens of the given node.
|
|
67
|
+
*/
|
|
68
|
+
getFirstTokens(node: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
69
|
+
/**
|
|
70
|
+
* Gets the first tokens of the given node.
|
|
71
|
+
*/
|
|
72
|
+
getFirstTokens<R extends Token>(node: Node | Token | Comment, options: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
73
|
+
/**
|
|
74
|
+
* Gets the first tokens of the given node with comment options.
|
|
75
|
+
*/
|
|
76
|
+
getFirstTokens<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
77
|
+
/**
|
|
78
|
+
* Gets the last token of the given node.
|
|
79
|
+
*/
|
|
80
|
+
getLastToken(node: Node | Token): Token;
|
|
81
|
+
/**
|
|
82
|
+
* Gets the last token of the given node with options.
|
|
83
|
+
*/
|
|
84
|
+
getLastToken(node: Node | Token | Comment, options: CursorWithSkipOptionsWithoutFilter): Token | null;
|
|
85
|
+
/**
|
|
86
|
+
* Gets the last token of the given node with options.
|
|
87
|
+
*/
|
|
88
|
+
getLastToken<R extends Token>(node: Node | Token | Comment, options: CursorWithSkipOptionsWithFilter<Token, R>): R | null;
|
|
89
|
+
/**
|
|
90
|
+
* Gets the last token of the given node with options.
|
|
91
|
+
*/
|
|
92
|
+
getLastToken<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithSkipOptionsWithComment<Token, Comment, R>): R | null;
|
|
93
|
+
/**
|
|
94
|
+
* Get the last tokens of the given node.
|
|
95
|
+
*/
|
|
96
|
+
getLastTokens(node: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
97
|
+
/**
|
|
98
|
+
* Get the last tokens of the given node.
|
|
99
|
+
*/
|
|
100
|
+
getLastTokens<R extends Token>(node: Node | Token | Comment, options: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
101
|
+
/**
|
|
102
|
+
* Get the last tokens of the given node with comment options.
|
|
103
|
+
*/
|
|
104
|
+
getLastTokens<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
105
|
+
/**
|
|
106
|
+
* Gets the token that follows a given node or token.
|
|
107
|
+
*/
|
|
108
|
+
getTokenAfter(node: Node | Token | Comment, options?: CursorWithSkipOptionsWithoutFilter): Token | null;
|
|
109
|
+
/**
|
|
110
|
+
* Gets the token that follows a given node or token.
|
|
111
|
+
*/
|
|
112
|
+
getTokenAfter<R extends Token>(node: Node | Token | Comment, options?: CursorWithSkipOptionsWithFilter<Token, R>): R | null;
|
|
113
|
+
/**
|
|
114
|
+
* Gets the token that follows a given node or token with comment options.
|
|
115
|
+
*/
|
|
116
|
+
getTokenAfter<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithSkipOptionsWithComment<Token, Comment, R>): R | null;
|
|
117
|
+
/**
|
|
118
|
+
* Gets the `count` tokens that follows a given node or token.
|
|
119
|
+
*/
|
|
120
|
+
getTokensAfter(node: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
121
|
+
/**
|
|
122
|
+
* Gets the `count` tokens that follows a given node or token.
|
|
123
|
+
*/
|
|
124
|
+
getTokensAfter<R extends Token>(node: Node | Token | Comment, options: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
125
|
+
/**
|
|
126
|
+
* Gets the `count` tokens that follows a given node or token with comment options.
|
|
127
|
+
*/
|
|
128
|
+
getTokensAfter<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
129
|
+
/**
|
|
130
|
+
* Gets the token that precedes a given node or token.
|
|
131
|
+
*/
|
|
132
|
+
getTokenBefore(node: Node | Token | Comment, options?: CursorWithSkipOptionsWithoutFilter): Token | null;
|
|
133
|
+
/**
|
|
134
|
+
* Gets the token that precedes a given node or token.
|
|
135
|
+
*/
|
|
136
|
+
getTokenBefore<R extends Token>(node: Node | Token | Comment, options: CursorWithSkipOptionsWithFilter<Token, R>): R | null;
|
|
137
|
+
/**
|
|
138
|
+
* Gets the token that precedes a given node or token with comment options.
|
|
139
|
+
*/
|
|
140
|
+
getTokenBefore<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithSkipOptionsWithComment<Token, Comment, R>): R | null;
|
|
141
|
+
/**
|
|
142
|
+
* Gets the `count` tokens that precedes a given node or token.
|
|
143
|
+
*/
|
|
144
|
+
getTokensBefore(node: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
145
|
+
/**
|
|
146
|
+
* Gets the `count` tokens that precedes a given node or token.
|
|
147
|
+
*/
|
|
148
|
+
getTokensBefore<R extends Token>(node: Node | Token | Comment, options: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
149
|
+
/**
|
|
150
|
+
* Gets the `count` tokens that precedes a given node or token with comment options.
|
|
151
|
+
*/
|
|
152
|
+
getTokensBefore<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
153
|
+
/**
|
|
154
|
+
* Gets the first token between two non-overlapping nodes.
|
|
155
|
+
*/
|
|
156
|
+
getFirstTokenBetween(left: Node | Token | Comment, right: Node | Token | Comment, options?: CursorWithSkipOptionsWithoutFilter): Token | null;
|
|
157
|
+
/**
|
|
158
|
+
* Gets the first token between two non-overlapping nodes.
|
|
159
|
+
*/
|
|
160
|
+
getFirstTokenBetween<R extends Token>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithSkipOptionsWithFilter<Token, R>): R | null;
|
|
161
|
+
/**
|
|
162
|
+
* Gets the first token between two non-overlapping nodes with comment options.
|
|
163
|
+
*/
|
|
164
|
+
getFirstTokenBetween<R extends Token | Comment>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithSkipOptionsWithComment<Token, Comment, R>): R | null;
|
|
165
|
+
/**
|
|
166
|
+
* Gets the first tokens between two non-overlapping nodes.
|
|
167
|
+
*/
|
|
168
|
+
getFirstTokensBetween(left: Node | Token | Comment, right: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
169
|
+
/**
|
|
170
|
+
* Gets the first tokens between two non-overlapping nodes.
|
|
171
|
+
*/
|
|
172
|
+
getFirstTokensBetween<R extends Token>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
173
|
+
/**
|
|
174
|
+
* Gets the first tokens between two non-overlapping nodes with comment options.
|
|
175
|
+
*/
|
|
176
|
+
getFirstTokensBetween<R extends Token | Comment>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
177
|
+
/**
|
|
178
|
+
* Gets the last token between two non-overlapping nodes.
|
|
179
|
+
*/
|
|
180
|
+
getLastTokenBetween(left: Node | Token | Comment, right: Node | Token | Comment, options?: CursorWithSkipOptionsWithoutFilter): Token | null;
|
|
181
|
+
/**
|
|
182
|
+
* Gets the last token between two non-overlapping nodes.
|
|
183
|
+
*/
|
|
184
|
+
getLastTokenBetween<R extends Token>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithSkipOptionsWithFilter<Token, R>): R | null;
|
|
185
|
+
/**
|
|
186
|
+
* Gets the last token between two non-overlapping nodes with comment options.
|
|
187
|
+
*/
|
|
188
|
+
getLastTokenBetween<R extends Token | Comment>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithSkipOptionsWithComment<Token, Comment, R>): R | null;
|
|
189
|
+
/**
|
|
190
|
+
* Gets the last tokens between two non-overlapping nodes.
|
|
191
|
+
*/
|
|
192
|
+
getLastTokensBetween(left: Node | Token | Comment, right: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
193
|
+
/**
|
|
194
|
+
* Gets the last tokens between two non-overlapping nodes.
|
|
195
|
+
*/
|
|
196
|
+
getLastTokensBetween<R extends Token>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
197
|
+
/**
|
|
198
|
+
* Gets the last tokens between two non-overlapping nodes with comment options.
|
|
199
|
+
*/
|
|
200
|
+
getLastTokensBetween<R extends Token | Comment>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
201
|
+
/**
|
|
202
|
+
* Gets all tokens that are related to the given node.
|
|
203
|
+
*/
|
|
204
|
+
getTokens(node: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
205
|
+
/**
|
|
206
|
+
* Gets all tokens that are related to the given node.
|
|
207
|
+
*/
|
|
208
|
+
getTokens<R extends Token>(node: Node | Token | Comment, options?: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
209
|
+
/**
|
|
210
|
+
* Gets all tokens that are related to the given node with comment options.
|
|
211
|
+
*/
|
|
212
|
+
getTokens<R extends Token | Comment>(node: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
213
|
+
/**
|
|
214
|
+
* Gets all of the tokens between two non-overlapping nodes.
|
|
215
|
+
*/
|
|
216
|
+
getTokensBetween(left: Node | Token | Comment, right: Node | Token | Comment, options?: CursorWithCountOptionsWithoutFilter): Token[];
|
|
217
|
+
/**
|
|
218
|
+
* Gets all of the tokens between two non-overlapping nodes.
|
|
219
|
+
*/
|
|
220
|
+
getTokensBetween<R extends Token>(left: Node | Token | Comment, right: Node | Token | Comment, options?: CursorWithCountOptionsWithFilter<Token, R>): R[];
|
|
221
|
+
/**
|
|
222
|
+
* Gets all of the tokens between two non-overlapping nodes with comment options.
|
|
223
|
+
*/
|
|
224
|
+
getTokensBetween<R extends Token | Comment>(left: Node | Token | Comment, right: Node | Token | Comment, options: CursorWithCountOptionsWithComment<Token, Comment, R>): R[];
|
|
225
|
+
/**
|
|
226
|
+
* Gets all comment tokens directly before the given node or token.
|
|
227
|
+
*/
|
|
228
|
+
getCommentsBefore(nodeOrToken: Node | Token | Comment): Comment[];
|
|
229
|
+
/**
|
|
230
|
+
* Gets all comment tokens directly after the given node or token.
|
|
231
|
+
*/
|
|
232
|
+
getCommentsAfter(nodeOrToken: Node | Token | Comment): Comment[];
|
|
233
|
+
/**
|
|
234
|
+
* Checks if there are any comment tokens between two non-overlapping nodes.
|
|
235
|
+
*/
|
|
236
|
+
commentsExistBetween(left: Node | Token | Comment, right: Node | Token | Comment): boolean;
|
|
237
|
+
}
|
|
238
|
+
//#endregion
|
|
239
|
+
//#region src/index.d.ts
|
|
240
|
+
/**
 * Package metadata (package name and version string) exported for tooling.
 */
declare const meta: {
	name: string;
	version: string;
};
|
|
244
|
+
//#endregion
|
|
245
|
+
export { type CursorWithCountOptionsWithComment, type CursorWithCountOptionsWithFilter, type CursorWithCountOptionsWithoutFilter, type CursorWithSkipOptionsWithComment, type CursorWithSkipOptionsWithFilter, type CursorWithSkipOptionsWithoutFilter, type SyntaxElement, type TokenFilter, TokenStore, meta };
|
package/lib/index.mjs
ADDED
|
@@ -0,0 +1,423 @@
|
|
|
1
|
+
//#region rolldown:runtime
|
|
2
|
+
var __defProp = Object.defineProperty;
// Builds a namespace object whose properties are live getters into `all`;
// when `symbols` is truthy the result is additionally tagged as a "Module"
// via Symbol.toStringTag. (for..in is kept deliberately so enumerable
// inherited keys behave exactly as before.)
var __exportAll = (all, symbols) => {
	const target = {};
	for (const key in all) {
		__defProp(target, key, {
			get: all[key],
			enumerable: true
		});
	}
	if (symbols) {
		__defProp(target, Symbol.toStringTag, { value: "Module" });
	}
	return target;
};
|
|
16
|
+
|
|
17
|
+
//#endregion
|
|
18
|
+
//#region src/token-store/token-store.ts
|
|
19
|
+
/**
 * Binary search over `tokens` (sorted by start offset) for the index of the
 * token whose range starts exactly at `location`; when no token starts there,
 * returns the insertion point (index of the first token starting after it).
 */
function search(tokens, location) {
	let lo = 0;
	let hi = tokens.length - 1;
	while (lo <= hi) {
		const mid = (lo + hi) >> 1;
		const start = tokens[mid].range[0];
		if (start === location) return mid;
		if (start < location) lo = mid + 1;
		else hi = mid - 1;
	}
	return lo;
}
|
|
34
|
+
/**
 * Resolves the index of the first token at or after `startLoc`, preferring
 * the precomputed `indexMap` lookup over a binary search, then advances past
 * any zero-width (empty-range) tokens.
 */
function getFirstIndex(tokens, indexMap, startLoc) {
	const mapped = indexMap.get(startLoc);
	let index = mapped == null ? search(tokens, startLoc) : mapped;
	for (; index < tokens.length; index++) {
		const { range } = tokens[index];
		if (range[1] > range[0]) break;
	}
	return index;
}
|
|
43
|
+
/**
 * Resolves the index of the last token that ends at or before `endLoc`,
 * preferring the precomputed `indexMap` lookup over a binary search, then
 * retreats past any zero-width (empty-range) tokens.
 */
function getLastIndex(tokens, indexMap, endLoc) {
	const mapped = indexMap.get(endLoc);
	let index = mapped == null ? search(tokens, endLoc) - 1 : mapped - 1;
	for (; index >= 0; index--) {
		const { range } = tokens[index];
		if (range[1] > range[0]) break;
	}
	return index;
}
|
|
53
|
+
/**
 * Normalizes the skip-style cursor options into a `{ filter, skip }` pair.
 * Accepts a bare number (skip count), a bare function (token filter), an
 * options object, or nothing. Comments are filtered out of matches unless
 * `includeComments` is set on an options object.
 */
function normalizeSkipOptions(options, ctx) {
	if (typeof options === "number") {
		return { filter: ctx.isNotComment, skip: options };
	}
	if (typeof options === "function") {
		return {
			filter: (token) => !ctx.isComment(token) && options(token),
			skip: 0
		};
	}
	const skip = options?.skip ?? 0;
	if (options?.includeComments) {
		// Comments participate; an absent filter accepts everything.
		return { filter: options.filter ?? (() => true), skip };
	}
	const userFilter = options?.filter;
	if (userFilter) {
		return {
			filter: (token) => !ctx.isComment(token) && userFilter(token),
			skip
		};
	}
	return { filter: ctx.isNotComment, skip };
}
|
|
82
|
+
/**
 * Normalizes the count-style cursor options into a `{ filter, count }` pair.
 * Accepts a bare number (count), a bare function (token filter, unlimited
 * count), an options object, or nothing. Comments are filtered out of matches
 * unless `includeComments` is set on an options object.
 */
function normalizeCountOptions(options, ctx) {
	if (typeof options === "number") {
		return { filter: ctx.isNotComment, count: options };
	}
	if (typeof options === "function") {
		return {
			filter: (token) => !ctx.isComment(token) && options(token),
			count: 0
		};
	}
	const count = options?.count ?? 0;
	if (options?.includeComments) {
		// Comments participate; an absent filter accepts everything.
		return { filter: options.filter ?? (() => true), count };
	}
	const userFilter = options?.filter;
	if (userFilter) {
		return {
			filter: (token) => !ctx.isComment(token) && userFilter(token),
			count
		};
	}
	return { filter: ctx.isNotComment, count };
}
|
|
111
|
+
// Unique symbol key hiding TokenStore's internal state (sorted token list,
// start-offset index map, comment predicates) from consumers.
const PRIVATE = Symbol("private");
|
|
112
|
+
var TokenStore = class {
|
|
113
|
+
[PRIVATE];
|
|
114
|
+
/**
 * Builds the internal token index.
 * @param params.tokens - All tokens and comments, in any order; copied and
 *   sorted by start offset here (the caller's array is never mutated).
 * @param params.isComment - Type guard returning true for comment tokens.
 */
constructor(params) {
	const allTokens = [...params.tokens].sort((a, b) => a.range[0] - b.range[0]);
	const tokenStartToIndex = /* @__PURE__ */ new Map();
	for (let i = 0; i < allTokens.length; i++) {
		const token = allTokens[i];
		// Only non-empty tokens are indexed by their start offset; zero-width
		// tokens are skipped by the lookup helpers instead.
		if (token.range[0] < token.range[1]) tokenStartToIndex.set(token.range[0], i);
	}
	this[PRIVATE] = {
		allTokens,
		tokenStartToIndex,
		ctx: {
			isComment: params.isComment,
			isNotComment: (token) => !params.isComment(token)
		}
	};
}
|
|
130
|
+
/**
|
|
131
|
+
* Gets all tokens, including comments.
|
|
132
|
+
*/
|
|
133
|
+
getAllTokens() {
|
|
134
|
+
return this[PRIVATE].allTokens;
|
|
135
|
+
}
|
|
136
|
+
getFirstToken(node, options) {
|
|
137
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
138
|
+
const { filter, skip } = normalizeSkipOptions(options, ctx);
|
|
139
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, node.range[0]);
|
|
140
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, node.range[1]);
|
|
141
|
+
let skipped = 0;
|
|
142
|
+
for (let i = startIndex; i <= endIndex && i < allTokens.length; i++) {
|
|
143
|
+
const token = allTokens[i];
|
|
144
|
+
if (filter && !filter(token)) continue;
|
|
145
|
+
if (skipped < skip) {
|
|
146
|
+
skipped++;
|
|
147
|
+
continue;
|
|
148
|
+
}
|
|
149
|
+
return token;
|
|
150
|
+
}
|
|
151
|
+
return null;
|
|
152
|
+
}
|
|
153
|
+
/**
 * Gets the tokens inside the given node's range that pass the normalized
 * filter, from the start, capped at `count` when `count` is positive
 * (a count of 0 means "no cap").
 */
getFirstTokens(node, options) {
	const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
	const { filter, count } = normalizeCountOptions(options, ctx);
	const startIndex = getFirstIndex(allTokens, tokenStartToIndex, node.range[0]);
	const endIndex = getLastIndex(allTokens, tokenStartToIndex, node.range[1]);
	const result = [];
	for (let i = startIndex; i <= endIndex && i < allTokens.length; i++) {
		const token = allTokens[i];
		if (filter && !filter(token)) continue;
		result.push(token);
		// Stop once the requested number of tokens has been collected.
		if (count > 0 && result.length >= count) break;
	}
	return result;
}
|
|
167
|
+
/**
 * Gets the last token inside the given node's range that passes the
 * normalized filter, after skipping `skip` matches from the end; null when
 * none remain.
 */
getLastToken(node, options) {
	const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
	const { filter, skip } = normalizeSkipOptions(options, ctx);
	const startIndex = getFirstIndex(allTokens, tokenStartToIndex, node.range[0]);
	const endIndex = getLastIndex(allTokens, tokenStartToIndex, node.range[1]);
	let skipped = 0;
	// Walk backwards so "last" and "skip" are counted from the range's end.
	for (let i = endIndex; i >= startIndex && i >= 0; i--) {
		const token = allTokens[i];
		if (filter && !filter(token)) continue;
		if (skipped < skip) {
			skipped++;
			continue;
		}
		return token;
	}
	return null;
}
|
|
184
|
+
getLastTokens(node, options) {
|
|
185
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
186
|
+
const { filter, count } = normalizeCountOptions(options, ctx);
|
|
187
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, node.range[0]);
|
|
188
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, node.range[1]);
|
|
189
|
+
const result = [];
|
|
190
|
+
for (let i = endIndex; i >= startIndex && i >= 0; i--) {
|
|
191
|
+
const token = allTokens[i];
|
|
192
|
+
if (filter && !filter(token)) continue;
|
|
193
|
+
result.unshift(token);
|
|
194
|
+
if (count > 0 && result.length >= count) break;
|
|
195
|
+
}
|
|
196
|
+
return result;
|
|
197
|
+
}
|
|
198
|
+
/**
 * Gets the token that follows a given node or token (searching forward from
 * the end of its range), after applying the normalized filter and skipping
 * `skip` matches; null when none remain.
 */
getTokenAfter(node, options) {
	const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
	const { filter, skip } = normalizeSkipOptions(options, ctx);
	const startIndex = getFirstIndex(allTokens, tokenStartToIndex, node.range[1]);
	let skipped = 0;
	for (let i = startIndex; i < allTokens.length; i++) {
		const token = allTokens[i];
		if (filter && !filter(token)) continue;
		if (skipped < skip) {
			skipped++;
			continue;
		}
		return token;
	}
	return null;
}
|
|
217
|
+
/**
 * Gets the `count` tokens that follow a given node or token (a count of 0 or
 * omitted means all remaining matches), in source order.
 */
getTokensAfter(node, options) {
	const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
	const { filter, count } = normalizeCountOptions(options, ctx);
	const startIndex = getFirstIndex(allTokens, tokenStartToIndex, node.range[1]);
	const result = [];
	for (let i = startIndex; i < allTokens.length; i++) {
		const token = allTokens[i];
		if (filter && !filter(token)) continue;
		result.push(token);
		// Stop once the requested number of tokens has been collected.
		if (count > 0 && result.length >= count) break;
	}
	return result;
}
|
|
230
|
+
/**
 * Gets the token that precedes a given node or token (searching backward from
 * the start of its range), after applying the normalized filter and skipping
 * `skip` matches; null when none remain.
 */
getTokenBefore(node, options) {
	const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
	const { filter, skip } = normalizeSkipOptions(options, ctx);
	const endIndex = getLastIndex(allTokens, tokenStartToIndex, node.range[0]);
	let skipped = 0;
	for (let i = endIndex; i >= 0; i--) {
		const token = allTokens[i];
		if (filter && !filter(token)) continue;
		if (skipped < skip) {
			skipped++;
			continue;
		}
		return token;
	}
	return null;
}
|
|
249
|
+
/**
|
|
250
|
+
* Gets the `count` tokens that precedes a given node or token.
|
|
251
|
+
*/
|
|
252
|
+
getTokensBefore(node, options) {
|
|
253
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
254
|
+
const { filter, count } = normalizeCountOptions(options, ctx);
|
|
255
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, node.range[0]);
|
|
256
|
+
const result = [];
|
|
257
|
+
for (let i = endIndex; i >= 0; i--) {
|
|
258
|
+
const token = allTokens[i];
|
|
259
|
+
if (filter && !filter(token)) continue;
|
|
260
|
+
result.unshift(token);
|
|
261
|
+
if (count > 0 && result.length >= count) break;
|
|
262
|
+
}
|
|
263
|
+
return result;
|
|
264
|
+
}
|
|
265
|
+
getFirstTokenBetween(left, right, options) {
|
|
266
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
267
|
+
const { filter, skip } = normalizeSkipOptions(options, ctx);
|
|
268
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, left.range[1]);
|
|
269
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, right.range[0]);
|
|
270
|
+
let skipped = 0;
|
|
271
|
+
for (let i = startIndex; i <= endIndex && i < allTokens.length; i++) {
|
|
272
|
+
const token = allTokens[i];
|
|
273
|
+
if (filter && !filter(token)) continue;
|
|
274
|
+
if (skipped < skip) {
|
|
275
|
+
skipped++;
|
|
276
|
+
continue;
|
|
277
|
+
}
|
|
278
|
+
return token;
|
|
279
|
+
}
|
|
280
|
+
return null;
|
|
281
|
+
}
|
|
282
|
+
getFirstTokensBetween(left, right, options) {
|
|
283
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
284
|
+
const { filter, count } = normalizeCountOptions(options, ctx);
|
|
285
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, left.range[1]);
|
|
286
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, right.range[0]);
|
|
287
|
+
const result = [];
|
|
288
|
+
for (let i = startIndex; i <= endIndex && i < allTokens.length; i++) {
|
|
289
|
+
const token = allTokens[i];
|
|
290
|
+
if (filter && !filter(token)) continue;
|
|
291
|
+
result.push(token);
|
|
292
|
+
if (count > 0 && result.length >= count) break;
|
|
293
|
+
}
|
|
294
|
+
return result;
|
|
295
|
+
}
|
|
296
|
+
getLastTokenBetween(left, right, options) {
|
|
297
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
298
|
+
const { filter, skip } = normalizeSkipOptions(options, ctx);
|
|
299
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, left.range[1]);
|
|
300
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, right.range[0]);
|
|
301
|
+
let skipped = 0;
|
|
302
|
+
for (let i = endIndex; i >= startIndex; i--) {
|
|
303
|
+
const token = allTokens[i];
|
|
304
|
+
if (filter && !filter(token)) continue;
|
|
305
|
+
if (skipped < skip) {
|
|
306
|
+
skipped++;
|
|
307
|
+
continue;
|
|
308
|
+
}
|
|
309
|
+
return token;
|
|
310
|
+
}
|
|
311
|
+
return null;
|
|
312
|
+
}
|
|
313
|
+
getLastTokensBetween(left, right, options) {
|
|
314
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
315
|
+
const { filter, count } = normalizeCountOptions(options, ctx);
|
|
316
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, left.range[1]);
|
|
317
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, right.range[0]);
|
|
318
|
+
const result = [];
|
|
319
|
+
for (let i = endIndex; i >= startIndex; i--) {
|
|
320
|
+
const token = allTokens[i];
|
|
321
|
+
if (filter && !filter(token)) continue;
|
|
322
|
+
result.unshift(token);
|
|
323
|
+
if (count > 0 && result.length >= count) break;
|
|
324
|
+
}
|
|
325
|
+
return result;
|
|
326
|
+
}
|
|
327
|
+
/**
|
|
328
|
+
* Gets all tokens that are related to the given node.
|
|
329
|
+
*/
|
|
330
|
+
getTokens(node, options) {
|
|
331
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
332
|
+
const { filter, count } = normalizeCountOptions(options, ctx);
|
|
333
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, node.range[0]);
|
|
334
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, node.range[1]);
|
|
335
|
+
const result = [];
|
|
336
|
+
for (let i = startIndex; i <= endIndex && i < allTokens.length; i++) {
|
|
337
|
+
const token = allTokens[i];
|
|
338
|
+
if (filter && !filter(token)) continue;
|
|
339
|
+
result.push(token);
|
|
340
|
+
if (count > 0 && result.length >= count) break;
|
|
341
|
+
}
|
|
342
|
+
return result;
|
|
343
|
+
}
|
|
344
|
+
/**
|
|
345
|
+
* Gets all of the tokens between two non-overlapping nodes.
|
|
346
|
+
*/
|
|
347
|
+
getTokensBetween(left, right, paddingOrOptions) {
|
|
348
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
349
|
+
const { filter, count } = normalizeCountOptions(paddingOrOptions, ctx);
|
|
350
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, left.range[1]);
|
|
351
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, right.range[0]);
|
|
352
|
+
const result = [];
|
|
353
|
+
for (let i = startIndex; i <= endIndex && i < allTokens.length; i++) {
|
|
354
|
+
const token = allTokens[i];
|
|
355
|
+
if (filter && !filter(token)) continue;
|
|
356
|
+
result.push(token);
|
|
357
|
+
if (count > 0 && result.length >= count) break;
|
|
358
|
+
}
|
|
359
|
+
return result;
|
|
360
|
+
}
|
|
361
|
+
/**
|
|
362
|
+
* Gets all comment tokens directly before the given node or token.
|
|
363
|
+
*/
|
|
364
|
+
getCommentsBefore(nodeOrToken) {
|
|
365
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
366
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, nodeOrToken.range[0]);
|
|
367
|
+
const result = [];
|
|
368
|
+
for (let i = endIndex; i >= 0; i--) {
|
|
369
|
+
const token = allTokens[i];
|
|
370
|
+
if (ctx.isComment(token)) result.unshift(token);
|
|
371
|
+
else break;
|
|
372
|
+
}
|
|
373
|
+
return result;
|
|
374
|
+
}
|
|
375
|
+
/**
|
|
376
|
+
* Gets all comment tokens directly after the given node or token.
|
|
377
|
+
*/
|
|
378
|
+
getCommentsAfter(nodeOrToken) {
|
|
379
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
380
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, nodeOrToken.range[1]);
|
|
381
|
+
const result = [];
|
|
382
|
+
for (let i = startIndex; i < allTokens.length; i++) {
|
|
383
|
+
const token = allTokens[i];
|
|
384
|
+
if (ctx.isComment(token)) result.push(token);
|
|
385
|
+
else break;
|
|
386
|
+
}
|
|
387
|
+
return result;
|
|
388
|
+
}
|
|
389
|
+
/**
|
|
390
|
+
* Checks if there are any comment tokens between two non-overlapping nodes.
|
|
391
|
+
*/
|
|
392
|
+
commentsExistBetween(left, right) {
|
|
393
|
+
const { ctx, allTokens, tokenStartToIndex } = this[PRIVATE];
|
|
394
|
+
const startIndex = getFirstIndex(allTokens, tokenStartToIndex, left.range[1]);
|
|
395
|
+
const endIndex = getLastIndex(allTokens, tokenStartToIndex, right.range[0]);
|
|
396
|
+
for (let i = startIndex; i <= endIndex && i < allTokens.length; i++) {
|
|
397
|
+
const token = allTokens[i];
|
|
398
|
+
if (ctx.isComment(token)) return true;
|
|
399
|
+
}
|
|
400
|
+
return false;
|
|
401
|
+
}
|
|
402
|
+
};
|
|
403
|
+
|
|
404
|
+
//#endregion
//#region package.json
// Name/version inlined from package.json by the bundler.
var name$1 = "@ota-meshi/ast-token-store";
var version$1 = "0.0.0";

//#endregion
//#region src/meta.ts
// Bundler-generated export namespace for src/meta.ts. The entries are
// getter thunks; presumably `__exportAll` (defined earlier in this
// bundle) installs them as accessors so reads see the live bindings
// below — TODO confirm against the helper's definition.
var meta_exports = /* @__PURE__ */ __exportAll({
	name: () => name,
	version: () => version
});
const name = name$1;
const version = version$1;

//#endregion
//#region src/index.ts
// Shallow snapshot of the meta namespace, exposed as the public `meta`.
const meta = { ...meta_exports };

//#endregion
export { TokenStore, meta };
|
package/package.json
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@ota-meshi/ast-token-store",
|
|
3
|
+
"version": "0.0.0",
|
|
4
|
+
"description": "A class library that provides an API similar to ESLint's token store",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"import": "./lib/index.mjs",
|
|
9
|
+
"default": "./lib/index.mjs"
|
|
10
|
+
},
|
|
11
|
+
"./package.json": "./package.json"
|
|
12
|
+
},
|
|
13
|
+
"files": [
|
|
14
|
+
"lib"
|
|
15
|
+
],
|
|
16
|
+
"engines": {
|
|
17
|
+
"node": "^20.19.0 || ^22.13.0 || >=24"
|
|
18
|
+
},
|
|
19
|
+
"scripts": {
|
|
20
|
+
"build": "npm run build:tsdown",
|
|
21
|
+
"build:tsdown": "tsdown",
|
|
22
|
+
"lint": "npm run lint:js && npm run lint:ts",
|
|
23
|
+
"lint:js": "eslint .",
|
|
24
|
+
"lint:ts": "npm run tsc",
|
|
25
|
+
"tsc": "tsc --project tsconfig.build.json",
|
|
26
|
+
"eslint-fix": "eslint . --fix",
|
|
27
|
+
"test": "npm run mocha -- \"tests/src/**/*.ts\" --reporter=dot --timeout=60000",
|
|
28
|
+
"test:debug": "node --experimental-strip-types --experimental-transform-types ./node_modules/mocha/bin/mocha.js \"tests/src/**/*.ts\" --reporter=dot --timeout=60000",
|
|
29
|
+
"test:cover": "c8 --reporter=lcov --reporter=text npm run test:debug",
|
|
30
|
+
"ts": "node --import=tsx",
|
|
31
|
+
"mocha": "npm run ts -- ./node_modules/mocha/bin/mocha.js",
|
|
32
|
+
"generate:version": "env-cmd -e version -- npm run update && npm run lint -- --fix",
|
|
33
|
+
"changeset:version": "env-cmd -e version -- changeset version && npm run generate:version && git add --all",
|
|
34
|
+
"changeset:publish": "npm run build && changeset publish"
|
|
35
|
+
},
|
|
36
|
+
"repository": {
|
|
37
|
+
"type": "git",
|
|
38
|
+
"url": "git+https://github.com/ota-meshi/ast-token-store.git"
|
|
39
|
+
},
|
|
40
|
+
"keywords": [
|
|
41
|
+
"eslint",
|
|
42
|
+
"token-store"
|
|
43
|
+
],
|
|
44
|
+
"author": "Yosuke Ota",
|
|
45
|
+
"funding": "https://github.com/sponsors/ota-meshi",
|
|
46
|
+
"license": "MIT",
|
|
47
|
+
"bugs": {
|
|
48
|
+
"url": "https://github.com/ota-meshi/ast-token-store/issues"
|
|
49
|
+
},
|
|
50
|
+
"homepage": "https://github.com/ota-meshi/ast-token-store/",
|
|
51
|
+
"peerDependencies": {
|
|
52
|
+
"@eslint/markdown": "^7.4.0",
|
|
53
|
+
"eslint": ">=9.0.0"
|
|
54
|
+
},
|
|
55
|
+
"dependencies": {
|
|
56
|
+
"eslint-plugin-markdown-preferences": "^0.40.2"
|
|
57
|
+
},
|
|
58
|
+
"devDependencies": {
|
|
59
|
+
"@changesets/changelog-github": "^0.5.1",
|
|
60
|
+
"@changesets/cli": "^2.28.1",
|
|
61
|
+
"@eslint/core": "^1.0.0",
|
|
62
|
+
"@eslint/markdown": "^7.4.0",
|
|
63
|
+
"@ota-meshi/eslint-plugin": "^0.19.0",
|
|
64
|
+
"@shikijs/vitepress-twoslash": "^3.0.0",
|
|
65
|
+
"@types/eslint": "^9.6.1",
|
|
66
|
+
"@types/eslint-scope": "^8.0.0",
|
|
67
|
+
"@types/eslint-utils": "^3.0.5",
|
|
68
|
+
"@types/estree": "^1.0.6",
|
|
69
|
+
"@types/json-schema": "^7.0.15",
|
|
70
|
+
"@types/mdast": "^4.0.4",
|
|
71
|
+
"@types/mocha": "^10.0.10",
|
|
72
|
+
"@types/node": "^24.0.0",
|
|
73
|
+
"@types/semver": "^7.5.8",
|
|
74
|
+
"assert": "^2.1.0",
|
|
75
|
+
"c8": "^10.1.3",
|
|
76
|
+
"env-cmd": "^11.0.0",
|
|
77
|
+
"eslint": "^9.34.0",
|
|
78
|
+
"eslint-compat-utils": "^0.6.4",
|
|
79
|
+
"eslint-config-prettier": "^10.1.1",
|
|
80
|
+
"eslint-plugin-eslint-comments": "^3.2.0",
|
|
81
|
+
"eslint-plugin-eslint-plugin": "^7.0.0",
|
|
82
|
+
"eslint-plugin-jsdoc": "^55.0.0",
|
|
83
|
+
"eslint-plugin-json-schema-validator": "^6.0.0",
|
|
84
|
+
"eslint-plugin-jsonc": "^2.19.1",
|
|
85
|
+
"eslint-plugin-markdown": "^5.1.0",
|
|
86
|
+
"eslint-plugin-markdown-links": "^0.7.0",
|
|
87
|
+
"eslint-plugin-n": "^17.16.2",
|
|
88
|
+
"eslint-plugin-node-dependencies": "^1.0.0",
|
|
89
|
+
"eslint-plugin-prettier": "^5.2.3",
|
|
90
|
+
"eslint-plugin-regexp": "^3.0.0",
|
|
91
|
+
"eslint-plugin-vue": "^10.0.0",
|
|
92
|
+
"eslint-plugin-yml": "^3.0.0",
|
|
93
|
+
"eslint-snapshot-rule-tester": "^0.1.0",
|
|
94
|
+
"eslint-typegen": "^2.0.0",
|
|
95
|
+
"espree": "^11.0.0",
|
|
96
|
+
"events": "^3.3.0",
|
|
97
|
+
"globals": "^17.0.0",
|
|
98
|
+
"mocha": "^11.1.0",
|
|
99
|
+
"prettier": "^3.5.3",
|
|
100
|
+
"semver": "^7.7.1",
|
|
101
|
+
"toml-eslint-parser": "^1.0.3",
|
|
102
|
+
"tsdown": "^0.19.0",
|
|
103
|
+
"tsx": "^4.19.3",
|
|
104
|
+
"typescript": "~5.9.0",
|
|
105
|
+
"typescript-eslint": "^8.26.1",
|
|
106
|
+
"vitepress": "^1.6.3",
|
|
107
|
+
"vue-eslint-parser": "^10.0.0"
|
|
108
|
+
},
|
|
109
|
+
"publishConfig": {
|
|
110
|
+
"access": "public"
|
|
111
|
+
}
|
|
112
|
+
}
|