Skip to content

Commit c4954af

Browse files
committed
Remove token_first; it's redundant to token_next(idx=0)
1 parent 60f621a commit c4954af

File tree

3 files changed

+12
-24
lines changed

3 files changed

+12
-24
lines changed

sqlparse/sql.py

Lines changed: 8 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -224,20 +224,6 @@ def _token_matching(self, funcs, start=0, end=None, reverse=False):
224224
if func(token):
225225
return token
226226

227-
def token_first(self, skip_ws=True, skip_cm=False):
228-
"""Returns the first child token.
229-
230-
If *ignore_whitespace* is ``True`` (the default), whitespace
231-
tokens are ignored.
232-
233-
if *ignore_comments* is ``True`` (default: ``False``), comments are
234-
ignored too.
235-
"""
236-
# this on is inconsistent, using Comment instead of T.Comment...
237-
funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
238-
(skip_cm and imt(tk, i=Comment)))
239-
return self._token_matching(funcs)
240-
241227
def token_next_by(self, i=None, m=None, t=None, idx=0, end=None):
242228
funcs = lambda tk: imt(tk, i, m, t)
243229
return self._token_matching(funcs, idx, end)
@@ -250,24 +236,26 @@ def token_not_matching(self, idx, funcs):
250236
def token_matching(self, idx, funcs):
251237
return self._token_matching(funcs, idx)
252238

253-
def token_prev(self, idx, skip_ws=True, skip_cm=False):
239+
def token_prev(self, idx=0, skip_ws=True, skip_cm=False):
254240
"""Returns the previous token relative to *idx*.
255241
256242
If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
257243
``None`` is returned if there's no previous token.
258244
"""
259245
funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
260-
(skip_cm and imt(tk, t=T.Comment)))
246+
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
261247
return self._token_matching(funcs, idx, reverse=True)
262248

263-
def token_next(self, idx, skip_ws=True, skip_cm=False):
249+
def token_next(self, idx=0, skip_ws=True, skip_cm=False):
264250
"""Returns the next token relative to *idx*.
265251
252+
If called with idx = 0. Returns the first child token.
266253
If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
254+
If *skip_cm* is ``True`` (default: ``False``), comments are ignored.
267255
``None`` is returned if there's no next token.
268256
"""
269257
funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
270-
(skip_cm and imt(tk, t=T.Comment)))
258+
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
271259
return self._token_matching(funcs, idx)
272260

273261
def token_index(self, token, start=0):
@@ -395,7 +383,7 @@ def get_type(self):
395383
Whitespaces and comments at the beginning of the statement
396384
are ignored.
397385
"""
398-
first_token = self.token_first(skip_cm=True)
386+
first_token = self.token_next(skip_cm=True)
399387
if first_token is None:
400388
# An "empty" statement that either has not tokens at all
401389
# or only whitespace tokens.
@@ -433,7 +421,7 @@ def is_wildcard(self):
433421
def get_typecast(self):
434422
"""Returns the typecast or ``None`` of this object as a string."""
435423
marker = self.token_next_by(m=(T.Punctuation, '::'))
436-
next_ = self.token_next(marker, False)
424+
next_ = self.token_next(marker, skip_ws=False)
437425
return next_.value if next_ else None
438426

439427
def get_ordering(self):

tests/test_regressions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def test_issue26(self):
4848
self.assert_(p.tokens[0].ttype is T.Comment.Single)
4949

5050
def test_issue34(self):
51-
t = sqlparse.parse("create")[0].token_first()
51+
t = sqlparse.parse("create")[0].token_next()
5252
self.assertEqual(t.match(T.Keyword.DDL, "create"), True)
5353
self.assertEqual(t.match(T.Keyword.DDL, "CREATE"), True)
5454

tests/test_tokenize.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -104,10 +104,10 @@ def test_repr(self):
104104

105105
def test_token_first(self):
106106
p = sqlparse.parse(' select foo')[0]
107-
first = p.token_first()
107+
first = p.token_next()
108108
self.assertEqual(first.value, 'select')
109-
self.assertEqual(p.token_first(skip_ws=False).value, ' ')
110-
self.assertEqual(sql.TokenList([]).token_first(), None)
109+
self.assertEqual(p.token_next(skip_ws=False).value, ' ')
110+
self.assertEqual(sql.TokenList([]).token_next(), None)
111111

112112
def test_token_matching(self):
113113
t1 = sql.Token(T.Keyword, 'foo')

0 commit comments

Comments
 (0)