Skip to content

Commit b8b65c5

Browse files
Andreas Albrecht
authored and committed
Code cleanup.
1 parent 9433666 commit b8b65c5

File tree

5 files changed

+26
-19
lines changed

5 files changed

+26
-19
lines changed

sqlparse/engine/grouping.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -125,8 +125,10 @@ def valid_final(token):
125125
def post(tlist, pidx, tidx, nidx):
126126
return tidx, nidx
127127

128-
_group(tlist, sql.TypedLiteral, match, valid_prev, valid_next, post, extend=False)
129-
_group(tlist, sql.TypedLiteral, match_to_extend, valid_prev, valid_final, post, extend=True)
128+
_group(tlist, sql.TypedLiteral, match, valid_prev, valid_next,
129+
post, extend=False)
130+
_group(tlist, sql.TypedLiteral, match_to_extend, valid_prev, valid_final,
131+
post, extend=True)
130132

131133

132134
def group_period(tlist):
@@ -252,7 +254,9 @@ def match(token):
252254

253255
def valid(token):
254256
return imt(token, i=sqlcls, t=ttypes) \
255-
or token.match(T.Keyword, ("CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP"))
257+
or token.match(
258+
T.Keyword,
259+
('CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP'))
256260

257261
def post(tlist, pidx, tidx, nidx):
258262
tlist[tidx].ttype = T.Operator

sqlparse/engine/statement_splitter.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def _change_splitlevel(self, ttype, value):
3333
return 1
3434
elif ttype is T.Punctuation and value == ')':
3535
return -1
36-
elif ttype not in T.Keyword: # if normal token return
36+
elif ttype not in T.Keyword: # if normal token return
3737
return 0
3838

3939
# Everything after here is ttype = T.Keyword

sqlparse/filters/aligned_indent.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,8 @@ def _split_kwds(self, tlist):
105105
# joins, group/order by are special case. only consider the first
106106
# word as aligner
107107
if (
108-
token.match(T.Keyword, self.join_words, regex=True) or
109-
token.match(T.Keyword, self.by_words, regex=True)
108+
token.match(T.Keyword, self.join_words, regex=True)
109+
or token.match(T.Keyword, self.by_words, regex=True)
110110
):
111111
token_indent = token.value.split()[0]
112112
else:

sqlparse/keywords.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,8 @@ def is_keyword(value):
6464
(r'[A-ZÀ-Ü]\w*(?=\()', tokens.Name), # side effect: change kw to func
6565
(r'-?0x[\dA-F]+', tokens.Number.Hexadecimal),
6666
(r'-?\d*(\.\d+)?E-?\d+', tokens.Number.Float),
67-
(r'(?![_A-ZÀ-Ü])-?(\d+(\.\d*)|\.\d+)(?![_A-ZÀ-Ü])', tokens.Number.Float),
67+
(r'(?![_A-ZÀ-Ü])-?(\d+(\.\d*)|\.\d+)(?![_A-ZÀ-Ü])',
68+
tokens.Number.Float),
6869
(r'(?![_A-ZÀ-Ü])-?\d+(?![_A-ZÀ-Ü])', tokens.Number.Integer),
6970
(r"'(''|\\\\|\\'|[^'])*'", tokens.String.Single),
7071
# not a real string literal in ANSI SQL:
@@ -84,7 +85,9 @@ def is_keyword(value):
8485
(r'DOUBLE\s+PRECISION\b', tokens.Name.Builtin),
8586
(r'GROUP\s+BY\b', tokens.Keyword),
8687
(r'ORDER\s+BY\b', tokens.Keyword),
87-
(r'(LATERAL\s+VIEW\s+)(EXPLODE|INLINE|PARSE_URL_TUPLE|POSEXPLODE|STACK)\b', tokens.Keyword),
88+
(r'(LATERAL\s+VIEW\s+)'
89+
r'(EXPLODE|INLINE|PARSE_URL_TUPLE|POSEXPLODE|STACK)\b',
90+
tokens.Keyword),
8891
(r"(AT|WITH')\s+TIME\s+ZONE\s+'[^']+'", tokens.Keyword.TZCast),
8992
(r'[0-9_A-ZÀ-Ü][_$#\w]*', is_keyword),
9093

sqlparse/sql.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -262,15 +262,14 @@ def token_first(self, skip_ws=True, skip_cm=False):
262262
ignored too.
263263
"""
264264
# this on is inconsistent, using Comment instead of T.Comment...
265-
funcs = lambda tk: not ((skip_ws and tk.is_whitespace)
266-
or (skip_cm and imt(tk,
267-
t=T.Comment, i=Comment)))
268-
return self._token_matching(funcs)[1]
265+
def matcher(tk):
266+
return not ((skip_ws and tk.is_whitespace)
267+
or (skip_cm and imt(tk, t=T.Comment, i=Comment)))
268+
return self._token_matching(matcher)[1]
269269

270270
def token_next_by(self, i=None, m=None, t=None, idx=-1, end=None):
271-
funcs = lambda tk: imt(tk, i, m, t)
272271
idx += 1
273-
return self._token_matching(funcs, idx, end)
272+
return self._token_matching(lambda tk: imt(tk, i, m, t), idx, end)
274273

275274
def token_not_matching(self, funcs, idx):
276275
funcs = (funcs,) if not isinstance(funcs, (list, tuple)) else funcs
@@ -300,10 +299,11 @@ def token_next(self, idx, skip_ws=True, skip_cm=False, _reverse=False):
300299
if idx is None:
301300
return None, None
302301
idx += 1 # alot of code usage current pre-compensates for this
303-
funcs = lambda tk: not ((skip_ws and tk.is_whitespace)
304-
or (skip_cm and imt(tk,
305-
t=T.Comment, i=Comment)))
306-
return self._token_matching(funcs, idx, reverse=_reverse)
302+
303+
def matcher(tk):
304+
return not ((skip_ws and tk.is_whitespace)
305+
or (skip_cm and imt(tk, t=T.Comment, i=Comment)))
306+
return self._token_matching(matcher, idx, reverse=_reverse)
307307

308308
def token_index(self, token, start=0):
309309
"""Return list index of token."""
@@ -490,7 +490,7 @@ def get_identifiers(self):
490490

491491

492492
class TypedLiteral(TokenList):
493-
"""A typed literal (?), such as "date '2001-09-28'" or "interval '2 hours'"."""
493+
"""A typed literal, such as "date '2001-09-28'" or "interval '2 hours'"."""
494494
M_OPEN = T.Name.Builtin, None
495495
M_CLOSE = T.String.Single, None
496496
M_EXTEND = T.Keyword, ("DAY", "MONTH", "YEAR", "HOUR", "MINUTE", "SECOND")

0 commit comments

Comments (0)