@@ -95,7 +95,7 @@ func (self *Tokenizer) nextToken() Token {
9595 if self .maybeNamedParam (); self .cursor > mid {
9696 return self .choose (start , mid , TokenTypeNamedParam )
9797 }
98- self .char ()
98+ self .skipChar ()
9999 }
100100
101101 if self .cursor > start {
@@ -128,7 +128,7 @@ func (self *Tokenizer) setNext(val Token) {
128128
129129func (self * Tokenizer ) maybeWhitespace () {
130130 for self .more () && charsetWhitespace .has (self .headByte ()) {
131- self .scan (1 )
131+ self .skipBytes (1 )
132132 }
133133}
134134
@@ -145,10 +145,10 @@ func (self *Tokenizer) maybeQuotedGrave() {
145145}
146146
147147func (self * Tokenizer ) maybeCommentLine () {
148- if ! self .scannedString (commentLinePrefix ) {
148+ if ! self .skippedString (commentLinePrefix ) {
149149 return
150150 }
151- for self .more () && ! self .scannedNewline () && self .scannedChar () {
151+ for self .more () && ! self .skippedNewline () && self .skippedChar () {
152152 }
153153}
154154
@@ -163,85 +163,85 @@ func (self *Tokenizer) maybeDoubleColon() {
163163
164164func (self * Tokenizer ) maybeOrdinalParam () {
165165 start := self .cursor
166- if ! self .scannedByte (ordinalParamPrefix ) {
166+ if ! self .skippedByte (ordinalParamPrefix ) {
167167 return
168168 }
169- if ! self .scannedDigits () {
169+ if ! self .skippedDigits () {
170170 self .cursor = start
171171 }
172172}
173173
174174func (self * Tokenizer ) maybeNamedParam () {
175175 start := self .cursor
176- if ! self .scannedByte (namedParamPrefix ) {
176+ if ! self .skippedByte (namedParamPrefix ) {
177177 return
178178 }
179- if ! self .scannedIdent () {
179+ if ! self .skippedIdent () {
180180 self .cursor = start
181181 }
182182}
183183
184184func (self * Tokenizer ) maybeString (val string ) {
185- _ = self .scannedString (val )
185+ _ = self .skippedString (val )
186186}
187187
188- func (self * Tokenizer ) scannedNewline () bool {
188+ func (self * Tokenizer ) skippedNewline () bool {
189189 start := self .cursor
190190 self .maybeNewline ()
191191 return self .cursor > start
192192}
193193
194194func (self * Tokenizer ) maybeNewline () {
195- self .scan (leadingNewlineSize (self .rest ()))
195+ self .skipBytes (leadingNewlineSize (self .rest ()))
196196}
197197
198- func (self * Tokenizer ) scannedChar () bool {
198+ func (self * Tokenizer ) skippedChar () bool {
199199 start := self .cursor
200- self .char ()
200+ self .skipChar ()
201201 return self .cursor > start
202202}
203203
204- func (self * Tokenizer ) char () {
204+ func (self * Tokenizer ) skipChar () {
205205 _ , size := utf8 .DecodeRuneInString (self .rest ())
206- self .scan (size )
206+ self .skipBytes (size )
207207}
208208
209- func (self * Tokenizer ) scannedDigits () bool {
209+ func (self * Tokenizer ) skippedDigits () bool {
210210 start := self .cursor
211- self .maybeDigits ()
211+ self .maybeSkipDigits ()
212212 return self .cursor > start
213213}
214214
215- func (self * Tokenizer ) maybeDigits () {
215+ func (self * Tokenizer ) maybeSkipDigits () {
216216 for self .more () && charsetDigitDec .has (self .headByte ()) {
217- self .scan (1 )
217+ self .skipBytes (1 )
218218 }
219219}
220220
221- func (self * Tokenizer ) scannedIdent () bool {
221+ func (self * Tokenizer ) skippedIdent () bool {
222222 start := self .cursor
223223 self .maybeIdent ()
224224 return self .cursor > start
225225}
226226
227227func (self * Tokenizer ) maybeIdent () {
228- if ! self .scannedByteIn (charsetIdentStart ) {
228+ if ! self .skippedByteFromCharset (charsetIdentStart ) {
229229 return
230230 }
231- for self .more () && self .scannedByteIn (charsetIdent ) {
231+ for self .more () && self .skippedByteFromCharset (charsetIdent ) {
232232 }
233233}
234234
235235func (self * Tokenizer ) maybeStringBetween (prefix , suffix string ) {
236- if ! self .scannedString (prefix ) {
236+ if ! self .skippedString (prefix ) {
237237 return
238238 }
239239
240240 for self .more () {
241- if self .scannedString (suffix ) {
241+ if self .skippedString (suffix ) {
242242 return
243243 }
244- self .char ()
244+ self .skipChar ()
245245 }
246246
247247 panic (ErrUnexpectedEOF {Err {
@@ -251,15 +251,15 @@ func (self *Tokenizer) maybeStringBetween(prefix, suffix string) {
251251}
252252
253253func (self * Tokenizer ) maybeStringBetweenBytes (prefix , suffix byte ) {
254- if ! self .scannedByte (prefix ) {
254+ if ! self .skippedByte (prefix ) {
255255 return
256256 }
257257
258258 for self .more () {
259- if self .scannedByte (suffix ) {
259+ if self .skippedByte (suffix ) {
260260 return
261261 }
262- self .char ()
262+ self .skipChar ()
263263 }
264264
265265 panic (ErrUnexpectedEOF {Err {
@@ -268,7 +268,7 @@ func (self *Tokenizer) maybeStringBetweenBytes(prefix, suffix byte) {
268268 }})
269269}
270270
271- func (self * Tokenizer ) scan (val int ) {
271+ func (self * Tokenizer ) skipBytes (val int ) {
272272 self .cursor += val
273273}
274274
@@ -288,33 +288,30 @@ func (self *Tokenizer) headByte() byte {
288288 return self .Source [self .cursor ]
289289}
290290
291- func (self * Tokenizer ) scannedByte (val byte ) bool {
291+ func (self * Tokenizer ) skippedByte (val byte ) bool {
292292 if self .headByte () == val {
293- self .scan (1 )
293+ self .skipBytes (1 )
294294 return true
295295 }
296296 return false
297297}
298298
299- func (self * Tokenizer ) scannedByteIn (val * charset ) bool {
299+ func (self * Tokenizer ) skippedByteFromCharset (val * charset ) bool {
300300 if val .has (self .headByte ()) {
301- self .scan (1 )
301+ self .skipBytes (1 )
302302 return true
303303 }
304304 return false
305305}
306306
307- func (self * Tokenizer ) scannedString (val string ) bool {
307+ func (self * Tokenizer ) skippedString (val string ) bool {
308308 if strings .HasPrefix (self .rest (), val ) {
309- self .scan (len (val ))
309+ self .skipBytes (len (val ))
310310 return true
311311 }
312312 return false
313313}
314314
315- // Part of `Token`.
316- type TokenType byte
317-
318315const (
319316 TokenTypeInvalid TokenType = iota
320317 TokenTypeText
@@ -329,6 +326,9 @@ const (
329326 TokenTypeNamedParam
330327)
331328
// TokenType is part of `Token`; it identifies which kind of SQL chunk
// a token represents (see the `TokenType...` constants).
type TokenType byte
332332// Represents an arbitrary chunk of SQL text parsed by `Tokenizer`.
333333type Token struct {
334334 Text string
0 commit comments