Commit 1992f6d — "Cleanup module code."
Parent: f7e07b7

8 files changed: +31 −31 lines.

sqlparse/engine/__init__.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -43,8 +43,8 @@ def run(self, sql, encoding=None):
         for filter_ in self.preprocess:
             stream = filter_.process(self, stream)
 
-        if (self.stmtprocess or self.postprocess or self.split_statements
-                or self._grouping):
+        if self.stmtprocess or self.postprocess or self.split_statements \
+                or self._grouping:
             splitter = StatementFilter()
             stream = splitter.process(self, stream)
 
```

sqlparse/engine/filter.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -23,8 +23,8 @@ def _reset(self):
     def _change_splitlevel(self, ttype, value):
         "Get the new split level (increase, decrease or remain equal)"
         # PostgreSQL
-        if (ttype == T.Name.Builtin
-                and value.startswith('$') and value.endswith('$')):
+        if ttype == T.Name.Builtin \
+                and value.startswith('$') and value.endswith('$'):
             if self._in_dbldollar:
                 self._in_dbldollar = False
                 return -1
@@ -64,8 +64,8 @@ def _change_splitlevel(self, ttype, value):
             self._is_create = True
             return 0
 
-        if (unified in ('IF', 'FOR')
-                and self._is_create and self._begin_depth > 0):
+        if unified in ('IF', 'FOR') \
+                and self._is_create and self._begin_depth > 0:
             return 1
 
         # Default
```

sqlparse/engine/grouping.py

Lines changed: 7 additions & 5 deletions
```diff
@@ -116,7 +116,7 @@ def group_as(tlist):
     def _right_valid(token):
         # Currently limited to DML/DDL. Maybe additional more non SQL reserved
         # keywords should appear here (see issue8).
-        return not token.ttype in (T.DML, T.DDL)
+        return token.ttype not in (T.DML, T.DDL)
 
     def _left_valid(token):
         if token.ttype is T.Keyword and token.value in ('NULL',):
@@ -191,7 +191,8 @@ def _next_token(tl, i):
 
         i1 = tl.token_index(t1, start=i) if t1 else None
         t2_end = None if i1 is None else i1 + 1
-        t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis), end=t2_end)
+        t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis),
+                                       end=t2_end)
 
         if t1 and t2:
             i2 = tl.token_index(t2, start=i)
@@ -219,9 +220,10 @@ def _next_token(tl, i):
         if identifier_tokens and identifier_tokens[-1].ttype is T.Whitespace:
             identifier_tokens = identifier_tokens[:-1]
         if not (len(identifier_tokens) == 1
-                and (isinstance(identifier_tokens[0], (sql.Function, sql.Parenthesis))
-                     or identifier_tokens[0].ttype in (T.Literal.Number.Integer,
-                                                      T.Literal.Number.Float))):
+                and (isinstance(identifier_tokens[0], (sql.Function,
+                                                       sql.Parenthesis))
+                     or identifier_tokens[0].ttype in (
+                         T.Literal.Number.Integer, T.Literal.Number.Float))):
             group = tlist.group_tokens(sql.Identifier, identifier_tokens)
             idx = tlist.token_index(group, start=idx) + 1
         else:
```
else:

sqlparse/filters.py

Lines changed: 4 additions & 7 deletions
```diff
@@ -7,7 +7,6 @@
 from sqlparse import sql, tokens as T
 from sqlparse.compat import u, text_type
 from sqlparse.engine import FilterStack
-from sqlparse.lexer import tokenize
 from sqlparse.pipeline import Pipeline
 from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
                              String, Whitespace)
@@ -144,8 +143,6 @@ def process(self, stack, stream):
 
         # Found file path to include
         if token_type in String.Symbol:
-            # if token_type in tokens.String.Symbol:
-
             # Get path of file to include
             path = join(self.dirpath, value[1:-1])
 
@@ -251,9 +248,9 @@ def _stripws_identifierlist(self, tlist):
         # Removes newlines before commas, see issue140
         last_nl = None
         for token in tlist.tokens[:]:
-            if (token.ttype is T.Punctuation
-                    and token.value == ','
-                    and last_nl is not None):
+            if token.ttype is T.Punctuation \
+                    and token.value == ',' \
+                    and last_nl is not None:
                 tlist.tokens.remove(last_nl)
             if token.is_whitespace():
                 last_nl = token
@@ -492,7 +489,7 @@ def _process(self, stack, group, stream):
             else:
                 self.line = token.value.splitlines()[-1]
         elif (token.is_group()
-              and not token.__class__ in self.keep_together):
+              and token.__class__ not in self.keep_together):
             token.tokens = self._process(stack, token, token.tokens)
         else:
             val = u(token)
```

sqlparse/formatter.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -106,8 +106,8 @@ def build_filter_stack(stack, options):
         stack.enable_grouping()
         stack.stmtprocess.append(filters.StripCommentsFilter())
 
-    if (options.get('strip_whitespace', False)
-            or options.get('reindent', False)):
+    if options.get('strip_whitespace', False) \
+            or options.get('reindent', False):
         stack.enable_grouping()
         stack.stmtprocess.append(filters.StripWhitespaceFilter())
 
```
113113

sqlparse/lexer.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -123,7 +123,7 @@ def _process_state(cls, unprocessed, processed, state):
                 for state in tdef2:
                     assert (state in unprocessed or
                             state in ('#pop', '#push')), \
-                           'unknown new state ' + state
+                        'unknown new state ' + state
                 new_state = tdef2
             else:
                 assert False, 'unknown new state def %r' % tdef2
```

sqlparse/sql.py

Lines changed: 9 additions & 8 deletions
```diff
@@ -232,7 +232,6 @@ def is_group(self):
         return True
 
     def get_sublists(self):
-        # return [x for x in self.tokens if isinstance(x, TokenList)]
         for x in self.tokens:
             if isinstance(x, TokenList):
                 yield x
@@ -347,9 +346,9 @@ def token_next(self, idx, skip_ws=True):
     def token_index(self, token, start=0):
         """Return list index of token."""
         if start > 0:
-            # Performing `index` manually is much faster when starting in the middle
-            # of the list of tokens and expecting to find the token near to the starting
-            # index.
+            # Performing `index` manually is much faster when starting
+            # in the middle of the list of tokens and expecting to find
+            # the token near to the starting index.
             for i in range(start, len(self.tokens)):
                 if self.tokens[i] == token:
                     return i
@@ -471,6 +470,7 @@ def _get_first_name(self, idx=None, reverse=False, keywords=False):
             return tok.get_name()
         return None
 
+
 class Statement(TokenList):
     """Represents a SQL statement."""
 
@@ -570,6 +570,7 @@ class SquareBrackets(TokenList):
     def _groupable_tokens(self):
         return self.tokens[1:-1]
 
+
 class Assignment(TokenList):
     """An assignment like 'var := val;'"""
     __slots__ = ('value', 'ttype', 'tokens')
@@ -672,10 +673,10 @@ def get_parameters(self):
         for t in parenthesis.tokens:
             if isinstance(t, IdentifierList):
                 return t.get_identifiers()
-            elif isinstance(t, Identifier) or \
-                 isinstance(t, Function) or \
-                 t.ttype in T.Literal:
-                return [t,]
+            elif (isinstance(t, Identifier) or
+                  isinstance(t, Function) or
+                  t.ttype in T.Literal):
+                return [t, ]
         return []
 
 
```

sqlparse/utils.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -73,7 +73,6 @@ def memoize_generator(func):
     cache = Cache()
 
     def wrapped_func(*args, **kwargs):
-        # params = (args, kwargs)
         params = (args, tuple(sorted(kwargs.items())))
 
         # Look if cached
@@ -120,6 +119,7 @@ def wrapped_func(*args, **kwargs):
 
 LINE_MATCH = re.compile(r'(\r\n|\r|\n)')
 
+
 def split_unquoted_newlines(text):
     """Split a string on all unquoted newlines.
@@ -134,4 +134,4 @@ def split_unquoted_newlines(text):
             outputlines.append('')
         else:
             outputlines[-1] += line
-    return outputlines
\ No newline at end of file
+    return outputlines
```

0 commit comments.