sqlalchemy_iris-0.5.0b3-py3-none-any.whl → sqlalchemy_iris-0.6.0b1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- intersystems_iris/_BufferReader.py +10 -0
- intersystems_iris/_BufferWriter.py +32 -0
- intersystems_iris/_ConnectionInformation.py +54 -0
- intersystems_iris/_ConnectionParameters.py +18 -0
- intersystems_iris/_Constant.py +38 -0
- intersystems_iris/_DBList.py +499 -0
- intersystems_iris/_Device.py +69 -0
- intersystems_iris/_GatewayContext.py +25 -0
- intersystems_iris/_GatewayException.py +4 -0
- intersystems_iris/_GatewayUtility.py +74 -0
- intersystems_iris/_IRIS.py +1294 -0
- intersystems_iris/_IRISConnection.py +516 -0
- intersystems_iris/_IRISEmbedded.py +85 -0
- intersystems_iris/_IRISGlobalNode.py +273 -0
- intersystems_iris/_IRISGlobalNodeView.py +25 -0
- intersystems_iris/_IRISIterator.py +143 -0
- intersystems_iris/_IRISList.py +360 -0
- intersystems_iris/_IRISNative.py +208 -0
- intersystems_iris/_IRISOREF.py +4 -0
- intersystems_iris/_IRISObject.py +424 -0
- intersystems_iris/_IRISReference.py +133 -0
- intersystems_iris/_InStream.py +149 -0
- intersystems_iris/_LegacyIterator.py +135 -0
- intersystems_iris/_ListItem.py +15 -0
- intersystems_iris/_ListReader.py +84 -0
- intersystems_iris/_ListWriter.py +157 -0
- intersystems_iris/_LogFileStream.py +115 -0
- intersystems_iris/_MessageHeader.py +51 -0
- intersystems_iris/_OutStream.py +25 -0
- intersystems_iris/_PrintStream.py +65 -0
- intersystems_iris/_PythonGateway.py +850 -0
- intersystems_iris/_SharedMemorySocket.py +87 -0
- intersystems_iris/__init__.py +79 -0
- intersystems_iris/__main__.py +7 -0
- intersystems_iris/dbapi/_Column.py +56 -0
- intersystems_iris/dbapi/_DBAPI.py +2295 -0
- intersystems_iris/dbapi/_Descriptor.py +46 -0
- intersystems_iris/dbapi/_IRISStream.py +63 -0
- intersystems_iris/dbapi/_Message.py +158 -0
- intersystems_iris/dbapi/_Parameter.py +138 -0
- intersystems_iris/dbapi/_ParameterCollection.py +133 -0
- intersystems_iris/dbapi/_ResultSetRow.py +314 -0
- intersystems_iris/dbapi/_SQLType.py +32 -0
- intersystems_iris/dbapi/__init__.py +0 -0
- intersystems_iris/dbapi/preparser/_PreParser.py +1658 -0
- intersystems_iris/dbapi/preparser/_Scanner.py +391 -0
- intersystems_iris/dbapi/preparser/_Token.py +81 -0
- intersystems_iris/dbapi/preparser/_TokenList.py +251 -0
- intersystems_iris/dbapi/preparser/__init__.py +0 -0
- intersystems_iris/pex/_BusinessHost.py +101 -0
- intersystems_iris/pex/_BusinessOperation.py +105 -0
- intersystems_iris/pex/_BusinessProcess.py +214 -0
- intersystems_iris/pex/_BusinessService.py +95 -0
- intersystems_iris/pex/_Common.py +228 -0
- intersystems_iris/pex/_Director.py +24 -0
- intersystems_iris/pex/_IRISBusinessOperation.py +5 -0
- intersystems_iris/pex/_IRISBusinessService.py +18 -0
- intersystems_iris/pex/_IRISInboundAdapter.py +5 -0
- intersystems_iris/pex/_IRISOutboundAdapter.py +17 -0
- intersystems_iris/pex/_InboundAdapter.py +57 -0
- intersystems_iris/pex/_Message.py +6 -0
- intersystems_iris/pex/_OutboundAdapter.py +46 -0
- intersystems_iris/pex/__init__.py +25 -0
- iris/__init__.py +25 -0
- iris/iris_site.py +13 -0
- iris/irisbuiltins.py +97 -0
- iris/irisloader.py +199 -0
- irisnative/_IRISNative.py +9 -0
- irisnative/__init__.py +10 -0
- {sqlalchemy_iris-0.5.0b3.dist-info → sqlalchemy_iris-0.6.0b1.dist-info}/METADATA +1 -1
- sqlalchemy_iris-0.6.0b1.dist-info/RECORD +83 -0
- sqlalchemy_iris-0.6.0b1.dist-info/top_level.txt +4 -0
- sqlalchemy_iris-0.5.0b3.dist-info/RECORD +0 -14
- sqlalchemy_iris-0.5.0b3.dist-info/top_level.txt +0 -1
- {sqlalchemy_iris-0.5.0b3.dist-info → sqlalchemy_iris-0.6.0b1.dist-info}/LICENSE +0 -0
- {sqlalchemy_iris-0.5.0b3.dist-info → sqlalchemy_iris-0.6.0b1.dist-info}/WHEEL +0 -0
- {sqlalchemy_iris-0.5.0b3.dist-info → sqlalchemy_iris-0.6.0b1.dist-info}/entry_points.txt +0 -0
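
Most of this release is the newly vendored pure-Python InterSystems IRIS driver: the intersystems_iris, iris, and irisnative packages now ship inside the wheel (hence the four entries in top_level.txt), apparently so the dialect no longer depends on a separately installed driver. A minimal smoke-test sketch, assuming a reachable IRIS instance; the URL, credentials, and namespace below are placeholders, not values taken from this diff:

    from sqlalchemy import create_engine

    import intersystems_iris.dbapi._DBAPI  # vendored DB-API module listed above

    # Placeholder connection URL for an IRIS instance reachable on localhost.
    engine = create_engine("iris://_SYSTEM:SYS@localhost:1972/USER")
    with engine.connect() as conn:
        print(conn.exec_driver_sql("SELECT 1").scalar())
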
intersystems_iris/dbapi/preparser/_Scanner.py
@@ -0,0 +1,391 @@
+import enum
+import intersystems_iris.dbapi.preparser._Token
+
+class ParseToken(enum.Enum):
+    tokUNKN = u'tokUNKN'
+    tokBOFL = u'tokBOFL'
+    tokEOFL = u'tokEOFL'
+    tokEOS = u'tokEOS'
+    tokNEWLN = u'tokNEWLN'
+    tokTAB = u'tokTAB'
+    tokRETURN = u'tokRETURN'
+    tokFORMFD = u'tokFORMFD'
+    tokLETTER = u'tokLETTER'
+    tokDIGIT = u'tokDIGIT'
+    tokDOT = u'tokDOT'
+    tokARROW = u'tokARROW'
+    tokATSIGN = u'tokATSIGN'
+    tokDQUOTE = u'tokDQUOTE'
+    tokLPARN = u'tokLPARN'
+    tokDOLLAR = u'tokDOLLAR'
+    tokPERCENT = u'tokPERCENT'
+    tokSQUOTE = u'tokSQUOTE'
+    tokMINUS = u'tokMINUS'
+    tokPLUS = u'tokPLUS'
+    tokRPARN = u'tokRPARN'
+    tokCOMMA = u'tokCOMMA'
+    tokSPACE = u'tokSPACE'
+    tokSEMI = u'tokSEMI'
+    tokASTER = u'tokASTER'
+    tokSLASH = u'tokSLASH'
+    tokPOUND = u'tokPOUND'
+    tokBSLASH = u'tokBSLASH'
+    tokUSCORE = u'tokUSCORE'
+    tokEQUAL = u'tokEQUAL'
+    tokLESS = u'tokLESS'
+    tokGREAT = u'tokGREAT'
+    tokLBRACK = u'tokLBRACK'
+    tokRBRACK = u'tokRBRACK'
+    tokAMPER = u'tokAMPER'
+    tokEXCLA = u'tokEXCLA'
+    tokQUEST = u'tokQUEST'
+    tokCOLON = u'tokCOLON'
+    tokVBAR = u'tokVBAR'
+    tokLBRACE = u'tokLBRACE'
+    tokRBRACE = u'tokRBRACE'
+    tokBQUOTE = u'tokBQUOTE'
+    tokTILDE = u'tokTILDE'
+    tokCRLF = u'tokCRLF'
+    tokNBSP = u'tokNBSP'
+
+
+class CheckPoint(object):
+    def __init__(self, p_nIndex, p_nLexemeBegin):
+        try:
+            p_nIndex = int(p_nIndex)
+        except (TypeError, ValueError):
+            raise ValueError("p_nIndex must be an integer")
+        try:
+            p_nLexemeBegin = int(p_nLexemeBegin)
+        except (TypeError, ValueError):
+            raise ValueError("p_nLexemeBegin must be an integer")
+
+        self.m_nIndex = p_nIndex
+        self.m_nLexemeBegin = p_nLexemeBegin
+
+class _Scanner(object):
+    # used to convert characters' byte values into ParseTokens
+    s_tokenTab = [ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN,
+                  ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN,
+                  ParseToken.tokUNKN, ParseToken.tokTAB, ParseToken.tokNEWLN, ParseToken.tokUNKN,
+                  ParseToken.tokUNKN, ParseToken.tokRETURN, ParseToken.tokUNKN, ParseToken.tokUNKN,
+                  ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN,
+                  ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN,
+                  ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN,
+                  ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN, ParseToken.tokUNKN,
+                  ParseToken.tokSPACE, ParseToken.tokEXCLA, ParseToken.tokDQUOTE, ParseToken.tokPOUND,
+                  ParseToken.tokDOLLAR, ParseToken.tokPERCENT, ParseToken.tokAMPER, ParseToken.tokSQUOTE,
+                  ParseToken.tokLPARN, ParseToken.tokRPARN, ParseToken.tokASTER, ParseToken.tokPLUS,
+                  ParseToken.tokCOMMA, ParseToken.tokMINUS, ParseToken.tokDOT, ParseToken.tokSLASH,
+                  ParseToken.tokDIGIT, ParseToken.tokDIGIT, ParseToken.tokDIGIT, ParseToken.tokDIGIT,
+                  ParseToken.tokDIGIT, ParseToken.tokDIGIT, ParseToken.tokDIGIT, ParseToken.tokDIGIT,
+                  ParseToken.tokDIGIT, ParseToken.tokDIGIT, ParseToken.tokCOLON, ParseToken.tokSEMI,
+                  ParseToken.tokLESS, ParseToken.tokEQUAL, ParseToken.tokGREAT, ParseToken.tokQUEST,
+                  ParseToken.tokATSIGN, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLBRACK,
+                  ParseToken.tokBSLASH, ParseToken.tokRBRACK, ParseToken.tokARROW, ParseToken.tokUSCORE,
+                  ParseToken.tokBQUOTE, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER,
+                  ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLETTER, ParseToken.tokLBRACE,
+                  ParseToken.tokVBAR, ParseToken.tokRBRACE, ParseToken.tokTILDE, ParseToken.tokUNKN]
+
+
+    def CurrentTokenGet(self):
+        return self.m_CurrentToken
+
+    def CurrentTokenSet(self, token):
+        if not isinstance(token, ParseToken):
+            raise TypeError("token must be a ParseToken")
+        self.m_CurrentToken = token
+
+    def CurrentChar(self):
+        if self.m_nIndex >= self.m_nSourceLen:
+            return '\0'
+        else:
+            return self.m_strSource[self.m_nIndex]
+
+    def __init__(self, p_strSource = ""):
+        p_strSource = str(p_strSource)
+
+        self.m_strSource = p_strSource
+        self.m_strUpperSource = p_strSource.upper()
+        self.m_nSourceLen = len(p_strSource)
+        self.m_nIndex = -1
+        self.m_nLexemeBegin = 0
+        self.NextToken()
+
+    def CreateCheckPoint(self):
+        return CheckPoint(self.m_nIndex, self.m_nLexemeBegin)
+
+    def RestoreCheckPoint(self, p_CP):
+        if not isinstance(p_CP, CheckPoint):
+            raise TypeError("p_CP must be a CheckPoint")
+
+        self.m_nIndex = p_CP.m_nIndex - 1
+        self.m_nLexemeBegin = p_CP.m_nLexemeBegin
+        # Advance to index and initialize character/token
+        self.NextToken()
+
+    def BeginLexeme(self):
+        self.m_nLexemeBegin = self.m_nIndex
+
+    def EndLexeme(self):
+        t_nLexemeLen = self.m_nIndex - self.m_nLexemeBegin
+        if t_nLexemeLen < 0:
+            return ""
+        else:
+            return self.m_strSource[self.m_nLexemeBegin:self.m_nIndex]
+
+    def EndUpperLexeme(self):
+        t_nLexemeLen = self.m_nIndex - self.m_nLexemeBegin
+        if t_nLexemeLen < 0:
+            return ""
+        else:
+            return self.m_strUpperSource[self.m_nLexemeBegin:self.m_nIndex]
+
+    def NextToken(self):
+        if self.m_nIndex != self.m_nSourceLen:
+            self.m_nIndex += 1
+        if self.m_nIndex >= self.m_nSourceLen:
+            self.CurrentTokenSet(ParseToken.tokEOS)
+        else:
+            self.CurrentTokenSet(self.__Classify(self.m_strSource[self.m_nIndex]))
+        return self.CurrentTokenGet()
+
+    def PeekNextToken(self):
+        return self.__PeekAhead(1)
+
+    def PeekNextNextToken(self):
+        return self.__PeekAhead(2)
+
+    # Not used
+    def PeekNextChar(self):
+        return self.__PeekAheadChar(1)
+
+    # Not used
+    def PeekNextNextChar(self):
+        return self.__PeekAheadChar(2)
+
+    # Skip over whitespace, leaving the current token at the next character following
+    # or End-Of-Source if source exhausted
+    def SkipToEndOfLine(self):
+        while not self.IsNewLine(self.CurrentTokenGet()):
+            self.NextToken()
+            if self.CurrentTokenGet() == ParseToken.tokEOS:
+                break
+
+    def IsNewLine(self, p_eToken):
+        if not isinstance(p_eToken, ParseToken):
+            raise TypeError("p_eToken must be a ParseToken")
+
+        if p_eToken in [ParseToken.tokNEWLN, ParseToken.tokCRLF, ParseToken.tokRETURN]:
+            return True
+        else:
+            return False
+
+    # Note: Aviels spec (PreParser.txt) says the non-breaking space should
+    # be treated as whitespace. The c++ parser however does not comply with
+    # that spec so neither will we
+    def IsWhitespace(self, p_eToken):
+        if not isinstance(p_eToken, ParseToken):
+            raise TypeError("p_eToken must be a ParseToken")
+
+        if p_eToken in [ParseToken.tokTAB, ParseToken.tokNEWLN, ParseToken.tokCRLF, ParseToken.tokSPACE, ParseToken.tokRETURN]:
+            return True
+        else:
+            return False
+
+    # Skip over whitespace, leaving the current token at the next character following
+    # or End-Of-Source if source exhausted
+    def SkipWhitespace(self):
+        while self.IsWhitespace(self.CurrentTokenGet()):
+            self.NextToken()
+            if self.CurrentTokenGet() == ParseToken.tokEOS:
+                break
+
+    # Skip 'n' tokens
+    def Skip(self, t_nTokens):
+        try:
+            t_nTokens = int(t_nTokens)
+        except (TypeError, ValueError):
+            raise TypeError("t_nTokens must be an integer")
+
+        for i in range(t_nTokens):
+            self.NextToken()
+
+    # <keyword> = <letter> <letter*>
+    def Keyword(self):
+        self.BeginLexeme()
+        while self.CurrentTokenGet() == ParseToken.tokLETTER:
+            self.NextToken()
+        return self.EndLexeme()
+
+    # <comment> :- '/' '*' <chars> '*' '/'
+    def Comment(self):
+        t_bRet = False
+        while self.CurrentTokenGet() != ParseToken.tokEOS:
+            if ParseToken.tokASTER == self.CurrentTokenGet() and ParseToken.tokSLASH == self.PeekNextToken():
+                self.Skip(2)
+                t_bRet = True
+                break
+            self.NextToken()
+        return t_bRet
+
+    # <number> :- [ {'+' | '-'} ] <digit> <digit>* [ . <digit>* ] [ {'E' | 'e'} [ { '+' | '-' } ] <digit> <digit>* ]
+    def Number(self):
+        # Assume successful parse
+        m_boolReturn = True
+        self.BeginLexeme()
+        if ParseToken.tokMINUS == self.CurrentTokenGet() or ParseToken.tokPLUS == self.CurrentTokenGet():
+            # Skip '+' or '-'
+            self.NextToken()
+        while ParseToken.tokDIGIT == self.CurrentTokenGet():
+            # Skip Digits
+            self.NextToken()
+        if ParseToken.tokDOT == self.CurrentTokenGet():
+            # Skip '.'
+            self.NextToken()
+            # Skip trailing digits
+            while ParseToken.tokDIGIT == self.CurrentTokenGet():
+                self.NextToken()
+        if ParseToken.tokLETTER == self.CurrentTokenGet() and ('E' == self.CurrentChar() or 'e' == self.CurrentChar()):
+            # Skip 'E' or 'e'
+            self.NextToken()
+            if ParseToken.tokPLUS == self.CurrentTokenGet() or ParseToken.tokMINUS == self.CurrentTokenGet():
+                # Skip '+' or '-'
+                self.NextToken()
+            # Must have at least one digit
+            if ParseToken.tokDIGIT != self.CurrentTokenGet():
+                m_boolReturn = False
+            else:
+                while ParseToken.tokDIGIT == self.CurrentTokenGet():
+                    # Skip trailing digits
+                    self.NextToken()
+        return (self.EndLexeme(), m_boolReturn)
+
+    # 0x : <hex digit> <hex digit>*
+    def Hex(self):
+        if self.CurrentChar() == '0':
+            # Skip Digits
+            c = self.PeekNextChar()
+            if (c == 'X') or (c == 'x'):
+                self.BeginLexeme()
+                # Have at least a single letter
+                while True:
+                    self.NextToken()
+                    if self.CurrentTokenGet() not in [ParseToken.tokDIGIT, ParseToken.tokLETTER]:
+                        break
+                return (self.EndLexeme(), True)
+        return (self.EndLexeme(), False)
+
+    # Parse a quoted string. Strings may be delimited by single or double
+    # quotes. The delimited identifiers parameter affects how the output
+    # token is classified
+    def String(self, p_bDelimitedIdentifiers):
+        p_bDelimitedIdentifiers = bool(p_bDelimitedIdentifiers)
+
+        m_tempToken = None
+        self.BeginLexeme()
+        # Remember quote type (single or double)
+        t_eToken = self.CurrentTokenGet()
+        # Advance to first string character
+        self.NextToken()
+        while True:
+            if ParseToken.tokEOS == self.CurrentTokenGet():
+                break
+            # Embedded Quotes
+            if t_eToken == self.CurrentTokenGet():
+                if t_eToken == self.PeekNextToken():
+                    self.Skip(2)
+                    continue
+                # Final delimiter reached, skip
+                self.NextToken()
+                break
+            else:
+                # Any other character
+                self.NextToken()
+        t_strRet = self.EndLexeme()
+        # RULE: Note that zero-length quoted string,
+        # is considered a CONST even when p_bDelimitedIdentifiers == true.
+        if "\"\"" == t_strRet or "''" == t_strRet:
+            m_tempToken = intersystems_iris.dbapi.preparser._Token.TOKEN.CONSTANT
+        else:
+            if (t_strRet.startswith("'")) or (False == p_bDelimitedIdentifiers):
+                m_tempToken = intersystems_iris.dbapi.preparser._Token.TOKEN.CONSTANT
+            else:
+                m_tempToken = intersystems_iris.dbapi.preparser._Token.TOKEN.ID
+        return (t_strRet, m_tempToken)
+
+    # Parse a Bracket delimited Identifier string.
+    def ParseBrackets(self, p_bDelimitedIdentifiers):
+        p_bDelimitedIdentifiers = bool(p_bDelimitedIdentifiers)
+
+        # Advance to first string character
+        self.NextToken()
+        self.BeginLexeme()
+        while True:
+            if self.CurrentTokenGet() in [ParseToken.tokRBRACK, ParseToken.tokEOS]:
+                t_strRet = "\"" + self.EndLexeme() + "\""
+                self.NextToken()
+                break
+            self.NextToken()
+        return (t_strRet, intersystems_iris.dbapi.preparser._Token.TOKEN.ID)
+
+    # <variable> :- Letter [ Letter | Digit | '_' ]*
+    def Variable(self):
+        self.BeginLexeme()
+        if ParseToken.tokLETTER == self.CurrentTokenGet():
+            # Have at least a single letter
+            while True:
+                self.NextToken()
+                if self.CurrentTokenGet() not in [ParseToken.tokDIGIT, ParseToken.tokLETTER, ParseToken.tokUSCORE]:
+                    break
+        return self.EndLexeme()
+
+    # <identifier> :- { Letter | '%' | '$' | '_'} [ { Letter | Digit | '_' | '@' | '#' | '$' } ]*
+    def Identifier(self):
+        self.BeginLexeme()
+        t_eToken = self.CurrentTokenGet()
+        if self.CurrentTokenGet() in [ParseToken.tokLETTER, ParseToken.tokPERCENT, ParseToken.tokDOLLAR, ParseToken.tokPOUND, ParseToken.tokUSCORE]:
+            while self.NextToken() in [ParseToken.tokLETTER, ParseToken.tokDIGIT, ParseToken.tokUSCORE, ParseToken.tokAMPER, ParseToken.tokDOLLAR, ParseToken.tokPOUND, ParseToken.tokATSIGN]:
+                pass
+        return self.EndLexeme()
+
+    def __PeekAhead(self, p_nChars):
+        t_nOffset = self.m_nIndex + p_nChars
+        if t_nOffset >= self.m_nSourceLen:
+            return ParseToken.tokEOS
+        else:
+            return self.__Classify(self.m_strSource[t_nOffset])
+
+    def __PeekAheadChar(self, p_nChars):
+        t_nOffset = self.m_nIndex + p_nChars
+        if t_nOffset >= self.m_nSourceLen:
+            return '\0'
+        else:
+            return self.m_strSource[t_nOffset]
+
+    def __Classify(self, p_ch):
+        p_ch = str(p_ch)
+        if len(p_ch) != 1:
+            raise ValueError("p_ch must be a single character")
+
+        p_ord = ord(p_ch)
+        return self.s_tokenTab[p_ord] if p_ord < 128 else ParseToken.tokLETTER
+
+    def checkForNotPredicates(self):
+        if not self.IsWhitespace(self.CurrentTokenGet()):
+            if self.CurrentTokenGet() in [ParseToken.tokRBRACK, ParseToken.tokLBRACK, ParseToken.tokEQUAL, ParseToken.tokGREAT, ParseToken.tokLESS, ParseToken.tokEXCLA]:
+                self.NextToken()
+        return self.EndLexeme()
+
+
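
The _Scanner above is the character-level tokenizer behind the DB-API's SQL pre-parser: s_tokenTab classifies each ASCII code point as a ParseToken, NextToken/PeekNextToken advance and look ahead, and BeginLexeme/EndLexeme slice the matched text back out of the source. As an illustration only (not code from the package), a short walk over a fragment using the methods shown in this hunk:

    from intersystems_iris.dbapi.preparser._Scanner import ParseToken, _Scanner

    sc = _Scanner("WHERE x = 42")
    while True:
        sc.SkipWhitespace()
        tok = sc.CurrentTokenGet()
        if tok == ParseToken.tokEOS:
            break
        if tok == ParseToken.tokLETTER:
            print("word:", sc.Identifier())    # "WHERE", then "x"
        elif tok == ParseToken.tokDIGIT:
            print("number:", sc.Number()[0])   # "42"; Number() returns (lexeme, ok)
        else:
            print("symbol:", tok.value)        # tokEQUAL
            sc.NextToken()
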
intersystems_iris/dbapi/preparser/_Token.py
@@ -0,0 +1,81 @@
+import enum
+import intersystems_iris.dbapi.preparser._PreParser
+
+class TOKEN(enum.Enum):
+    CONSTANT = u'CONSTANT'
+    ID = u'ID'
+    OP = u'OP'
+    UNKNOWN = u'UNKNOWN'
+    VAR = u'VAR'
+    QUESTION_MARK = u'QUESTION_MARK'
+    DTS = u'DTS'
+    OPEN_PAREN = u'OPEN_PAREN'
+    CLOSE_PAREN = u'CLOSE_PAREN'
+    COMMA = u'COMMA'
+    NULL = u'NULL'
+    NOT = u'NOT'
+    IS = u'IS'
+    THEN = u'THEN'
+    DATATYPE = u'DATATYPE'
+    ATSIGN = u'ATSIGN'
+    HEX = u'HEX'
+    STRFUNCTION = u'STRFUNCTION'
+    ELSE = u'ELSE'
+
+class _Token(object):
+    """
+    This class represents a 'token' parsed from the SQL statement.
+    It records the classification of the token as well as retaining the original string
+    """
+    UNDEFINED = 0
+    CAST_CHAR32 = 1
+    CAST_CHAR128 = 2
+    CAST_CHAR512 = 3
+    CAST_CHAR = 4
+    CAST_INT = 10
+    CAST_NUM = 11
+
+    def TokenTypeGet(self):
+        return self.TokenType
+
+    def TokenTypeSet(self, t):
+        if not isinstance(t, TOKEN):
+            raise TypeError("t must be a TOKEN")
+
+        self.TokenType = t
+
+    # TOKENFMT type
+    def __init__(self, p_eToken = TOKEN.UNKNOWN, p_strLexeme = "", p_strUpperLexeme = None):
+        if not isinstance(p_eToken, TOKEN):
+            raise TypeError("p_eToken must be a TOKEN")
+        p_strLexeme = str(p_strLexeme)
+        if p_strUpperLexeme is not None:
+            p_strUpperLexeme = str(p_strUpperLexeme)
+
+        # The token's classification
+        self.TokenType = p_eToken
+
+        # The original string as appears in the SQL statement
+        if p_strUpperLexeme is None:
+            self.Lexeme = p_strLexeme
+            self.UpperLexeme = self.Lexeme
+        else:
+            if p_strUpperLexeme in intersystems_iris.dbapi.preparser._PreParser._PreParser.s_ReservedKeywords:
+                self.Lexeme = p_strUpperLexeme
+            else:
+                self.Lexeme = p_strLexeme
+            self.UpperLexeme = p_strUpperLexeme
+        self.m_replaced = False
+
+        # A replaced parameter
+        self.m_format = self.UNDEFINED
+
+    def __str__(self):
+        return "Token: " + str(self.TokenType) + " Lexeme: " + str(self.Lexeme)
+
+    def UpperEquals(self, p_str):
+        return self.UpperLexeme == p_str
+
+    def UpperContains(self, p_str):
+        return p_str in self.UpperLexeme
+
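
_Token, for its part, pairs a TOKEN classification with the lexeme as it appeared in the statement, plus an optional upper-cased variant used for case-insensitive keyword comparisons. A small illustrative use of the class as defined above (the sample lexeme is arbitrary):

    from intersystems_iris.dbapi.preparser._Token import TOKEN, _Token

    t = _Token(TOKEN.ID, "TableName")
    print(t)                            # Token: TOKEN.ID Lexeme: TableName
    print(t.UpperEquals("TABLENAME"))   # False: without p_strUpperLexeme, UpperLexeme is the lexeme as given
    print(t.UpperContains("Name"))      # True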