Skip to content

Commit a36008a

Browse files
committed
Unify naming schema. Closes andialbrecht#283
1 parent ae05326 commit a36008a

10 files changed

Lines changed: 39 additions & 44 deletions

File tree

CHANGELOG

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
Development Version
22
-------------------
33

4-
Nothing, yet.
4+
Internal Changes
5+
6+
* `is_whitespace` and `is_group` changed into properties
57

68

79
Release 0.2.1 (Aug 13, 2016)

examples/extract_table_names.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919

2020
def is_subselect(parsed):
21-
if not parsed.is_group():
21+
if not parsed.is_group:
2222
return False
2323
for item in parsed.tokens:
2424
if item.ttype is DML and item.value.upper() == 'SELECT':

sqlparse/engine/grouping.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,13 @@ def _group_matching(tlist, cls):
2121
for idx, token in enumerate(list(tlist)):
2222
tidx = idx - tidx_offset
2323

24-
if token.is_whitespace():
24+
if token.is_whitespace:
2525
# ~50% of tokens will be whitespace. Will checking early
2626
# for them avoid 3 comparisons, but then add 1 more comparison
2727
# for the other ~50% of tokens...
2828
continue
2929

30-
if token.is_group() and not isinstance(token, cls):
30+
if token.is_group and not isinstance(token, cls):
3131
# Check inside previously grouped (ie. parenthesis) if group
3232
# of differnt type is inside (ie, case). though ideally should
3333
# should check for all open/close tokens at once to avoid recursion
@@ -246,7 +246,7 @@ def group_comments(tlist):
246246
tidx, token = tlist.token_next_by(t=T.Comment)
247247
while token:
248248
eidx, end = tlist.token_not_matching(
249-
lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx)
249+
lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace, idx=tidx)
250250
if end is not None:
251251
eidx, end = tlist.token_prev(eidx, skip_ws=False)
252252
tlist.group_tokens(sql.Comment, tidx, eidx)
@@ -372,10 +372,10 @@ def _group(tlist, cls, match,
372372
for idx, token in enumerate(list(tlist)):
373373
tidx = idx - tidx_offset
374374

375-
if token.is_whitespace():
375+
if token.is_whitespace:
376376
continue
377377

378-
if recurse and token.is_group() and not isinstance(token, cls):
378+
if recurse and token.is_group and not isinstance(token, cls):
379379
_group(token, cls, match, valid_prev, valid_next, post, extend)
380380

381381
if match(token):

sqlparse/filters/aligned_indent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def nl(self, offset=1):
3838
self._max_kwd_len + offset + indent + self.offset))
3939

4040
def _process_statement(self, tlist):
41-
if tlist.tokens[0].is_whitespace() and self.indent == 0:
41+
if tlist.tokens[0].is_whitespace and self.indent == 0:
4242
tlist.tokens.pop(0)
4343

4444
# process the main query body

sqlparse/filters/others.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ def get_next_comment():
2323
# Replace by whitespace if prev and next exist and if they're not
2424
# whitespaces. This doesn't apply if prev or next is a paranthesis.
2525
if (prev_ is None or next_ is None or
26-
prev_.is_whitespace() or prev_.match(T.Punctuation, '(') or
27-
next_.is_whitespace() or next_.match(T.Punctuation, ')')):
26+
prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
27+
next_.is_whitespace or next_.match(T.Punctuation, ')')):
2828
tlist.tokens.remove(token)
2929
else:
3030
tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
@@ -48,9 +48,9 @@ def _stripws_default(tlist):
4848
last_was_ws = False
4949
is_first_char = True
5050
for token in tlist.tokens:
51-
if token.is_whitespace():
51+
if token.is_whitespace:
5252
token.value = '' if last_was_ws or is_first_char else ' '
53-
last_was_ws = token.is_whitespace()
53+
last_was_ws = token.is_whitespace
5454
is_first_char = False
5555

5656
def _stripws_identifierlist(self, tlist):
@@ -59,25 +59,25 @@ def _stripws_identifierlist(self, tlist):
5959
for token in list(tlist.tokens):
6060
if last_nl and token.ttype is T.Punctuation and token.value == ',':
6161
tlist.tokens.remove(last_nl)
62-
last_nl = token if token.is_whitespace() else None
62+
last_nl = token if token.is_whitespace else None
6363

6464
# next_ = tlist.token_next(token, skip_ws=False)
65-
# if (next_ and not next_.is_whitespace() and
65+
# if (next_ and not next_.is_whitespace and
6666
# token.ttype is T.Punctuation and token.value == ','):
6767
# tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
6868
return self._stripws_default(tlist)
6969

7070
def _stripws_parenthesis(self, tlist):
71-
if tlist.tokens[1].is_whitespace():
71+
if tlist.tokens[1].is_whitespace:
7272
tlist.tokens.pop(1)
73-
if tlist.tokens[-2].is_whitespace():
73+
if tlist.tokens[-2].is_whitespace:
7474
tlist.tokens.pop(-2)
7575
self._stripws_default(tlist)
7676

7777
def process(self, stmt, depth=0):
7878
[self.process(sgroup, depth + 1) for sgroup in stmt.get_sublists()]
7979
self._stripws(stmt)
80-
if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace():
80+
if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
8181
stmt.tokens.pop(-1)
8282
return stmt
8383

sqlparse/filters/output.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def _process(self, stream, varname, has_nl):
4747
# Print the tokens on the quote
4848
for token in stream:
4949
# Token is a new line separator
50-
if token.is_whitespace() and '\n' in token.value:
50+
if token.is_whitespace and '\n' in token.value:
5151
# Close quote and add a new line
5252
yield sql.Token(T.Text, " '")
5353
yield sql.Token(T.Whitespace, '\n')
@@ -93,7 +93,7 @@ def _process(self, stream, varname, has_nl):
9393
# Print the tokens on the quote
9494
for token in stream:
9595
# Token is a new line separator
96-
if token.is_whitespace() and '\n' in token.value:
96+
if token.is_whitespace and '\n' in token.value:
9797
# Close quote and add a new line
9898
yield sql.Token(T.Text, ' ";')
9999
yield sql.Token(T.Whitespace, '\n')

sqlparse/filters/reindent.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def __init__(self, width=2, char=' ', wrap_after=0, n='\n'):
2323

2424
def _flatten_up_to_token(self, token):
2525
"""Yields all tokens up to token but excluding current."""
26-
if token.is_group():
26+
if token.is_group:
2727
token = next(token.flatten())
2828

2929
for t in self._curr_stmt.flatten():
@@ -65,7 +65,7 @@ def _split_kwds(self, tlist):
6565
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
6666
uprev = text_type(prev_)
6767

68-
if prev_ and prev_.is_whitespace():
68+
if prev_ and prev_.is_whitespace:
6969
del tlist.tokens[pidx]
7070
tidx -= 1
7171

@@ -80,7 +80,7 @@ def _split_statements(self, tlist):
8080
tidx, token = tlist.token_next_by(t=ttypes)
8181
while token:
8282
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
83-
if prev_ and prev_.is_whitespace():
83+
if prev_ and prev_.is_whitespace:
8484
del tlist.tokens[pidx]
8585
tidx -= 1
8686
# only break if it's not the first token

sqlparse/filters/right_margin.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,12 +23,12 @@ def __init__(self, width=79):
2323

2424
def _process(self, group, stream):
2525
for token in stream:
26-
if token.is_whitespace() and '\n' in token.value:
26+
if token.is_whitespace and '\n' in token.value:
2727
if token.value.endswith('\n'):
2828
self.line = ''
2929
else:
3030
self.line = token.value.splitlines()[-1]
31-
elif token.is_group() and type(token) not in self.keep_together:
31+
elif token.is_group and type(token) not in self.keep_together:
3232
token.tokens = self._process(token, token.tokens)
3333
else:
3434
val = text_type(token)

sqlparse/sql.py

Lines changed: 12 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -24,14 +24,17 @@ class Token(object):
2424
the type of the token.
2525
"""
2626

27-
__slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword')
27+
__slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword',
28+
'is_group', 'is_whitespace')
2829

2930
def __init__(self, ttype, value):
3031
value = text_type(value)
3132
self.value = value
3233
self.ttype = ttype
3334
self.parent = None
35+
self.is_group = False
3436
self.is_keyword = ttype in T.Keyword
37+
self.is_whitespace = self.ttype in T.Whitespace
3538
self.normalized = value.upper() if self.is_keyword else value
3639

3740
def __str__(self):
@@ -96,14 +99,6 @@ def match(self, ttype, values, regex=False):
9699

97100
return self.normalized in values
98101

99-
def is_group(self):
100-
"""Returns ``True`` if this object has children."""
101-
return False
102-
103-
def is_whitespace(self):
104-
"""Return ``True`` if this token is a whitespace token."""
105-
return self.ttype in T.Whitespace
106-
107102
def within(self, group_cls):
108103
"""Returns ``True`` if this token is within *group_cls*.
109104
@@ -145,6 +140,7 @@ def __init__(self, tokens=None):
145140
self.tokens = tokens or []
146141
[setattr(token, 'parent', self) for token in tokens]
147142
super(TokenList, self).__init__(None, text_type(self))
143+
self.is_group = True
148144

149145
def __str__(self):
150146
return ''.join(token.value for token in self.flatten())
@@ -173,7 +169,7 @@ def _pprint_tree(self, max_depth=None, depth=0, f=None):
173169
print("{indent}{idx:2d} {cls} {q}{value}{q}"
174170
.format(**locals()), file=f)
175171

176-
if token.is_group() and (max_depth is None or depth < max_depth):
172+
if token.is_group and (max_depth is None or depth < max_depth):
177173
token._pprint_tree(max_depth, depth + 1, f)
178174

179175
def get_token_at_offset(self, offset):
@@ -191,18 +187,15 @@ def flatten(self):
191187
This method is recursively called for all child tokens.
192188
"""
193189
for token in self.tokens:
194-
if token.is_group():
190+
if token.is_group:
195191
for item in token.flatten():
196192
yield item
197193
else:
198194
yield token
199195

200-
def is_group(self):
201-
return True
202-
203196
def get_sublists(self):
204197
for token in self.tokens:
205-
if token.is_group():
198+
if token.is_group:
206199
yield token
207200

208201
@property
@@ -241,7 +234,7 @@ def token_first(self, skip_ws=True, skip_cm=False):
241234
ignored too.
242235
"""
243236
# this on is inconsistent, using Comment instead of T.Comment...
244-
funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
237+
funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
245238
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
246239
return self._token_matching(funcs)[1]
247240

@@ -278,7 +271,7 @@ def token_next(self, idx, skip_ws=True, skip_cm=False, _reverse=False):
278271
if idx is None:
279272
return None, None
280273
idx += 1 # alot of code usage current pre-compensates for this
281-
funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
274+
funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
282275
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
283276
return self._token_matching(funcs, idx, reverse=_reverse)
284277

@@ -296,7 +289,7 @@ def group_tokens(self, grp_cls, start, end, include_end=True,
296289
end_idx = end + include_end
297290

298291
# will be needed later for new group_clauses
299-
# while skip_ws and tokens and tokens[-1].is_whitespace():
292+
# while skip_ws and tokens and tokens[-1].is_whitespace:
300293
# tokens = tokens[:-1]
301294

302295
if extend and isinstance(start, grp_cls):
@@ -471,7 +464,7 @@ def get_identifiers(self):
471464
Whitespaces and punctuations are not included in this generator.
472465
"""
473466
for token in self.tokens:
474-
if not (token.is_whitespace() or token.match(T.Punctuation, ',')):
467+
if not (token.is_whitespace or token.match(T.Punctuation, ',')):
475468
yield token
476469

477470

tests/test_grouping.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def test_grouping_identifiers():
5151

5252
s = "INSERT INTO `test` VALUES('foo', 'bar');"
5353
parsed = sqlparse.parse(s)[0]
54-
types = [l.ttype for l in parsed.tokens if not l.is_whitespace()]
54+
types = [l.ttype for l in parsed.tokens if not l.is_whitespace]
5555
assert types == [T.DML, T.Keyword, None, T.Keyword, None, T.Punctuation]
5656

5757
s = "select 1.0*(a+b) as col, sum(c)/sum(d) from myschema.mytable"

0 commit comments

Comments
 (0)