Skip to content

Commit 5002bfa

Browse files
committed
Normalize behavior between token_next and token_next_by
Both will now return the "next" token, rather than the token itself, when passed their own index.
1 parent 4f922d9 commit 5002bfa

File tree

5 files changed

+26
-26
lines changed

5 files changed

+26
-26
lines changed

sqlparse/engine/grouping.py

Lines changed: 12 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ def group_identifier(tlist):
118118
tidx, token = tlist.token_next_by(t=T_IDENT)
119119
while token:
120120
tlist.group_tokens(sql.Identifier, tidx, tidx)
121-
tidx, token = tlist.token_next_by(t=T_IDENT, idx=tidx + 1)
121+
tidx, token = tlist.token_next_by(t=T_IDENT, idx=tidx)
122122

123123

124124
def group_period(tlist):
@@ -140,7 +140,7 @@ def group_arrays(tlist):
140140
t=(T.Name, T.String.Symbol,)):
141141
tlist.group_tokens(sql.Identifier, pidx, tidx, extend=True)
142142
tidx = pidx
143-
tidx, token = tlist.token_next_by(i=sql.SquareBrackets, idx=tidx + 1)
143+
tidx, token = tlist.token_next_by(i=sql.SquareBrackets, idx=tidx)
144144

145145

146146
@recurse(sql.Identifier)
@@ -161,8 +161,7 @@ def group_operator(tlist):
161161
tlist.group_tokens(sql.Operation, pidx, nidx)
162162
tidx = pidx
163163

164-
tidx, token = tlist.token_next_by(t=(T.Operator, T.Wildcard),
165-
idx=tidx + 1)
164+
tidx, token = tlist.token_next_by(t=(T.Operator, T.Wildcard), idx=tidx)
166165

167166

168167
@recurse(sql.IdentifierList)
@@ -182,7 +181,7 @@ def group_identifier_list(tlist):
182181
if func(prev_) and func(next_):
183182
tlist.group_tokens(sql.IdentifierList, pidx, nidx, extend=True)
184183
tidx = pidx
185-
tidx, token = tlist.token_next_by(m=M_COMMA, idx=tidx + 1)
184+
tidx, token = tlist.token_next_by(m=M_COMMA, idx=tidx)
186185

187186

188187
def group_brackets(tlist):
@@ -198,20 +197,20 @@ def group_comments(tlist):
198197
tidx, token = tlist.token_next_by(t=T.Comment)
199198
while token:
200199
end = tlist.token_not_matching(
201-
lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx + 1)
200+
lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx)
202201
if end is not None:
203202
eidx = tlist.token_index(end)
204203
eidx, end = tlist.token_prev(eidx, skip_ws=False)
205204
tlist.group_tokens(sql.Comment, tidx, eidx)
206205

207-
tidx, token = tlist.token_next_by(t=T.Comment, idx=tidx + 1)
206+
tidx, token = tlist.token_next_by(t=T.Comment, idx=tidx)
208207

209208

210209
@recurse(sql.Where)
211210
def group_where(tlist):
212211
tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN)
213212
while token:
214-
eidx, end = tlist.token_next_by(m=sql.Where.M_CLOSE, idx=tidx + 1)
213+
eidx, end = tlist.token_next_by(m=sql.Where.M_CLOSE, idx=tidx)
215214

216215
if end is None:
217216
end = tlist._groupable_tokens[-1]
@@ -220,7 +219,7 @@ def group_where(tlist):
220219
# TODO: convert this to eidx instead of end token.
221220
# i think above values are len(tlist) and eidx-1
222221
tlist.group_tokens(sql.Where, tidx, end)
223-
tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN, idx=tidx + 1)
222+
tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN, idx=tidx)
224223

225224

226225
@recurse()
@@ -233,7 +232,7 @@ def group_aliased(tlist):
233232
nidx, next_ = tlist.token_next(tidx)
234233
if imt(next_, i=sql.Identifier):
235234
tlist.group_tokens(sql.Identifier, tidx, nidx, extend=True)
236-
tidx, token = tlist.token_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
235+
tidx, token = tlist.token_next_by(i=I_ALIAS, t=T.Number, idx=tidx)
237236

238237

239238
def group_typecasts(tlist):
@@ -257,7 +256,7 @@ def group_functions(tlist):
257256
nidx, next_ = tlist.token_next(tidx)
258257
if isinstance(next_, sql.Parenthesis):
259258
tlist.group_tokens(sql.Function, tidx, nidx)
260-
tidx, token = tlist.token_next_by(t=T.Name, idx=tidx + 1)
259+
tidx, token = tlist.token_next_by(t=T.Name, idx=tidx)
261260

262261

263262
def group_order(tlist):
@@ -268,7 +267,7 @@ def group_order(tlist):
268267
if imt(prev_, i=sql.Identifier, t=T.Number):
269268
tlist.group_tokens(sql.Identifier, pidx, tidx)
270269
tidx = pidx
271-
tidx, token = tlist.token_next_by(t=T.Keyword.Order, idx=tidx + 1)
270+
tidx, token = tlist.token_next_by(t=T.Keyword.Order, idx=tidx)
272271

273272

274273
@recurse()
@@ -279,7 +278,7 @@ def align_comments(tlist):
279278
if isinstance(prev_, sql.TokenList):
280279
tlist.group_tokens(sql.TokenList, pidx, tidx, extend=True)
281280
tidx = pidx
282-
tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx + 1)
281+
tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx)
283282

284283

285284
def group(stmt):

sqlparse/filters/aligned_indent.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -86,14 +86,14 @@ def _process_case(self, tlist):
8686
max_cond_width - condition_width[i]))
8787
tlist.insert_after(cond[-1], ws)
8888

89-
def _next_token(self, tlist, idx=0):
89+
def _next_token(self, tlist, idx=-1):
9090
split_words = T.Keyword, self.split_words, True
9191
tidx, token = tlist.token_next_by(m=split_words, idx=idx)
9292
# treat "BETWEEN x and y" as a single statement
9393
if token and token.normalized == 'BETWEEN':
94-
tidx, token = self._next_token(tlist, tidx + 1)
94+
tidx, token = self._next_token(tlist, tidx)
9595
if token and token.normalized == 'AND':
96-
tidx, token = self._next_token(tlist, tidx + 1)
96+
tidx, token = self._next_token(tlist, tidx)
9797
return tidx, token
9898

9999
def _split_kwds(self, tlist):
@@ -106,7 +106,7 @@ def _split_kwds(self, tlist):
106106
token_indent = text_type(token)
107107
tlist.insert_before(token, self.nl(token_indent))
108108
tidx += 1
109-
tidx, token = self._next_token(tlist, tidx + 1)
109+
tidx, token = self._next_token(tlist, tidx)
110110

111111
def _process_default(self, tlist):
112112
self._split_kwds(tlist)

sqlparse/filters/others.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ def _process(tlist):
9999
tidx += 1 # has to shift since token inserted before it
100100

101101
# assert tlist.token_index(token) == tidx
102-
tidx, token = tlist.token_next_by(t=ttypes, idx=tidx + 1)
102+
tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)
103103

104104
def process(self, stmt):
105105
[self.process(sgroup) for sgroup in stmt.get_sublists()]

sqlparse/filters/reindent.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -44,18 +44,18 @@ def _get_offset(self, token):
4444
def nl(self):
4545
return sql.Token(T.Whitespace, self.n + self.char * self.leading_ws)
4646

47-
def _next_token(self, tlist, idx=0):
47+
def _next_token(self, tlist, idx=-1):
4848
split_words = ('FROM', 'STRAIGHT_JOIN$', 'JOIN$', 'AND', 'OR',
4949
'GROUP', 'ORDER', 'UNION', 'VALUES',
5050
'SET', 'BETWEEN', 'EXCEPT', 'HAVING')
5151
m_split = T.Keyword, split_words, True
5252
tidx, token = tlist.token_next_by(m=m_split, idx=idx)
5353

5454
if token and token.normalized == 'BETWEEN':
55-
tidx, token = self._next_token(tlist, tidx + 1)
55+
tidx, token = self._next_token(tlist, tidx)
5656

5757
if token and token.normalized == 'AND':
58-
tidx, token = self._next_token(tlist, tidx + 1)
58+
tidx, token = self._next_token(tlist, tidx)
5959

6060
return tidx, token
6161

@@ -74,10 +74,11 @@ def _split_kwds(self, tlist):
7474
tlist.insert_before(tidx, self.nl())
7575
tidx += 1
7676

77-
tidx, token = self._next_token(tlist, tidx + 1)
77+
tidx, token = self._next_token(tlist, tidx)
7878

7979
def _split_statements(self, tlist):
80-
tidx, token = tlist.token_next_by(t=(T.Keyword.DDL, T.Keyword.DML))
80+
ttypes = T.Keyword.DML, T.Keyword.DDL
81+
tidx, token = tlist.token_next_by(t=ttypes)
8182
while token:
8283
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
8384
if prev_ and prev_.is_whitespace():
@@ -87,8 +88,7 @@ def _split_statements(self, tlist):
8788
if prev_:
8889
tlist.insert_before(tidx, self.nl())
8990
tidx += 1
90-
tidx, token = tlist.token_next_by(
91-
t=(T.Keyword.DDL, T.Keyword.DML), idx=tidx + 1)
91+
tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)
9292

9393
def _process(self, tlist):
9494
func_name = '_process_{cls}'.format(cls=type(tlist).__name__)

sqlparse/sql.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -240,8 +240,9 @@ def token_first(self, skip_ws=True, skip_cm=False):
240240
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
241241
return self._token_matching(funcs)[1]
242242

243-
def token_next_by(self, i=None, m=None, t=None, idx=0, end=None):
243+
def token_next_by(self, i=None, m=None, t=None, idx=-1, end=None):
244244
funcs = lambda tk: imt(tk, i, m, t)
245+
idx += 1
245246
return self._token_matching(funcs, idx, end)
246247

247248
def token_not_matching(self, funcs, idx):

0 commit comments

Comments (0)