
Commit 974222b

Code cleanup and test coverage.

1 parent ac165c9
8 files changed, 77 insertions(+), 51 deletions(-)

.hgignore

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@ syntax: glob
 docs/build
 dist
 MANIFEST
+.coverage
 extras/appengine/django
 extras/appengine/django.zip
 extras/appengine/release

CHANGES

Lines changed: 1 addition & 0 deletions
@@ -6,6 +6,7 @@ In Development
 * Improved parsing of identifier lists (issue2).
 * Recursive recognition of AS (issue4) and CASE.
 * Improved support for UPDATE statements.
+* Code cleanup and better test coverage.
 
 
 Release 0.1.0

Makefile

Lines changed: 3 additions & 0 deletions
@@ -12,6 +12,9 @@ help:
 test:
 	$(PYTHON) tests/run_tests.py
 
+coverage:
+	nosetests --with-coverage --cover-inclusive --cover-package=sqlparse
+
 clean:
 	$(PYTHON) setup.py clean
 	find . -name '*.pyc' -delete
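
Usage note: with this target in place, running "make coverage" executes the test suite under nose's coverage plugin and reports line coverage for the sqlparse package only (--cover-package=sqlparse), including modules the tests never import (--cover-inclusive). This assumes the nose and coverage Python packages are installed; the Makefile itself does not declare them.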

sqlparse/engine/filter.py

Lines changed: 1 addition & 0 deletions
@@ -17,6 +17,7 @@ def process(self, stack, stream):
 class StatementFilter(TokenFilter):
 
     def __init__(self):
+        TokenFilter.__init__(self)
         self._in_declare = False
         self._in_dbldollar = False
         self._is_create = False
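
The one-line fix above matters because a subclass that defines its own __init__ silently skips the base class's initializer. A minimal sketch of the failure mode, assuming TokenFilter.__init__ sets up state that later processing relies on (the "stack" attribute here is hypothetical, not taken from the library):

    class TokenFilter(object):
        def __init__(self):
            # hypothetical base state a filter depends on later
            self.stack = None

    class StatementFilter(TokenFilter):
        def __init__(self):
            TokenFilter.__init__(self)  # without this call, base attributes are never set
            self._in_declare = False

    f = StatementFilter()
    f.stack  # would raise AttributeError if the base initializer were skipped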

sqlparse/formatter.py

Lines changed: 0 additions & 38 deletions
@@ -120,41 +120,3 @@ def build_filter_stack(stack, options):
     return stack
 
 
-def format(statement, **options):
-    import filters
-    lexer = Lexer()
-    # lexer.add_filter('whitespace')
-    lexer.add_filter(filters.GroupFilter())
-    if options.get('reindent', False):
-        lexer.add_filter(filters.StripWhitespaceFilter())
-        lexer.add_filter(filters.IndentFilter(
-            n_indents=options.get('n_indents', 2)))
-    if options.get('ltrim', False):
-        lexer.add_filter(filters.LTrimFilter())
-    keyword_case = options.get('keyword_case', None)
-    if keyword_case is not None:
-        assert keyword_case in ('lower', 'upper', 'capitalize')
-        lexer.add_filter(filters.KeywordCaseFilter(case=keyword_case))
-    identifier_case = options.get('identifier_case', None)
-    if identifier_case is not None:
-        assert identifier_case in ('lower', 'upper', 'capitalize')
-        lexer.add_filter(filters.IdentifierCaseFilter(case=identifier_case))
-    if options.get('strip_comments', False):
-        lexer.add_filter(filters.StripCommentsFilter())
-    right_margin = options.get('right_margin', None)
-    if right_margin is not None:
-        right_margin = int(right_margin)
-        assert right_margin > 0
-        lexer.add_filter(filters.RightMarginFilter(margin=right_margin))
-    lexer.add_filter(filters.UngroupFilter())
-    if options.get('output_format', None):
-        ofrmt = options['output_format']
-        assert ofrmt in ('sql', 'python', 'php')
-        if ofrmt == 'python':
-            lexer.add_filter(filters.OutputPythonFilter())
-        elif ofrmt == 'php':
-            lexer.add_filter(filters.OutputPHPFilter())
-    tokens = []
-    for ttype, value in lexer.get_tokens(unicode(statement)):
-        tokens.append((ttype, value))
-    return statement.__class__(tokens)
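
With this duplicate removed, formatting goes through the filter stack built by build_filter_stack. A sketch of the intended call path, assuming the package-level sqlparse.format() entry point accepts the same option names the removed helper did (keyword_case, reindent, and so on):

    import sqlparse
    print sqlparse.format('select * from foo where id = 1',
                          keyword_case='upper', reindent=True)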

sqlparse/sql.py

Lines changed: 13 additions & 13 deletions
@@ -118,19 +118,19 @@ def __str__(self):
     def _get_repr_name(self):
         return self.__class__.__name__
 
-    def _pprint_tree(self, max_depth=None, depth=0):
-        """Pretty-print the object tree."""
-        indent = ' '*(depth*2)
-        for token in self.tokens:
-            if token.is_group():
-                pre = ' | '
-            else:
-                pre = ' | '
-            print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(),
-                                     token._get_repr_value())
-            if (token.is_group() and max_depth is not None
-                and depth < max_depth):
-                token._pprint_tree(max_depth, depth+1)
+##    def _pprint_tree(self, max_depth=None, depth=0):
+##        """Pretty-print the object tree."""
+##        indent = ' '*(depth*2)
+##        for token in self.tokens:
+##            if token.is_group():
+##                pre = ' | '
+##            else:
+##                pre = ' | '
+##            print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(),
+##                                     token._get_repr_value())
+##            if (token.is_group() and max_depth is not None
+##                and depth < max_depth):
+##                token._pprint_tree(max_depth, depth+1)
 
     def flatten(self):
         """Generator yielding ungrouped tokens.

tests/test_grouping.py

Lines changed: 12 additions & 0 deletions
@@ -123,3 +123,15 @@ def test_alias(self):
         p = sqlparse.parse(s)[0]
         self.ndiffAssertEqual(s, p.to_unicode())
         self.assertEqual(p.tokens[4].get_alias(), 'view')
+
+
+
+class TestStatement(TestCaseBase):
+
+    def test_get_type(self):
+        f = lambda sql: sqlparse.parse(sql)[0]
+        self.assertEqual(f('select * from foo').get_type(), 'SELECT')
+        self.assertEqual(f('update foo').get_type(), 'UPDATE')
+        self.assertEqual(f(' update foo').get_type(), 'UPDATE')
+        self.assertEqual(f('\nupdate foo').get_type(), 'UPDATE')
+        self.assertEqual(f('foo').get_type(), 'UNKNOWN')
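
These tests pin down the behavior of Statement.get_type(): leading whitespace and newlines are skipped, the statement keyword is reported upper-cased, and anything unrecognized comes back as 'UNKNOWN'. A usage sketch grounded in the assertions above:

    import sqlparse
    stmt = sqlparse.parse('  update foo set bar = 1')[0]
    assert stmt.get_type() == 'UPDATE'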

tests/test_tokenize.py

Lines changed: 46 additions & 0 deletions
@@ -3,7 +3,9 @@
 import unittest
 import types
 
+import sqlparse
 from sqlparse import lexer
+from sqlparse import sql
 from sqlparse.tokens import *
 
 
@@ -38,3 +40,47 @@ def test_linebreaks(self): # issue1
         sql = 'foo\r\nbar\n'
         tokens = lexer.tokenize(sql)
         self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
+
+
+class TestToken(unittest.TestCase):
+
+    def test_str(self):
+        token = sql.Token(None, 'FoO')
+        self.assertEqual(str(token), 'FoO')
+
+    def test_repr(self):
+        token = sql.Token(Keyword, 'foo')
+        tst = "<Keyword 'foo' at 0x"
+        self.assertEqual(repr(token)[:len(tst)], tst)
+        token = sql.Token(Keyword, '1234567890')
+        tst = "<Keyword '123456...' at 0x"
+        self.assertEqual(repr(token)[:len(tst)], tst)
+
+    def test_flatten(self):
+        token = sql.Token(Keyword, 'foo')
+        gen = token.flatten()
+        self.assertEqual(type(gen), types.GeneratorType)
+        lgen = list(gen)
+        self.assertEqual(lgen, [token])
+
+
+class TestTokenList(unittest.TestCase):
+
+    def test_token_first(self):
+        p = sqlparse.parse(' select foo')[0]
+        first = p.token_first()
+        self.assertEqual(first.value, 'select')
+        self.assertEqual(p.token_first(ignore_whitespace=False).value, ' ')
+        self.assertEqual(sql.TokenList([]).token_first(), None)
+
+    def test_token_matching(self):
+        t1 = sql.Token(Keyword, 'foo')
+        t2 = sql.Token(Punctuation, ',')
+        x = sql.TokenList([t1, t2])
+        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
+                         t1)
+        self.assertEqual(x.token_matching(0,
+                                          [lambda t: t.ttype is Punctuation]),
+                         t2)
+        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
+                         None)
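
The token_matching tests encode its contract: given a start index and a list of predicates, return the first token at or after that index for which any predicate holds, else None. A hypothetical restatement of that contract, not the library's implementation:

    def token_matching(tokens, idx, funcs):
        # scan forward from idx; the first token satisfying any predicate wins
        for token in tokens[idx:]:
            if any(func(token) for func in funcs):
                return token
        return None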
