# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
14-
14+ from __future__ import absolute_import
15+ from __future__ import unicode_literals
1516import re
1617
17- from sqlparse import tokens
18- from sqlparse .keywords import KEYWORDS , KEYWORDS_COMMON
18+ from . import tokens
19+ from .keywords import KEYWORDS , KEYWORDS_COMMON
1920
2021
2122class include (str ):
@@ -79,7 +80,7 @@ def _process_state(cls, unprocessed, processed, state):
7980
8081 try :
8182 rex = re .compile (tdef [0 ], rflags ).match
82- except Exception , err :
83+ except Exception as err :
8384 raise ValueError (("uncompilable regex %r in state"
8485 " %r of %r: %s"
8586 % (tdef [0 ], state , cls , err )))
@@ -150,9 +151,7 @@ def __call__(cls, *args, **kwds):
150151 return type .__call__ (cls , * args , ** kwds )
151152
152153
153- class Lexer (object ):
154-
155- __metaclass__ = LexerMeta
154+ class Lexer (object , metaclass = LexerMeta ):
156155
157156 encoding = 'utf-8'
158157 stripall = False
@@ -209,7 +208,7 @@ def __init__(self):
209208 self .filters = []
210209
def add_filter(self, filter_, **options):
    """Append a filter to this lexer's filter pipeline.

    ``filter_`` is either an already-constructed ``Filter`` instance,
    which is registered as-is, or a filter class, which is first
    instantiated with the given keyword ``options`` (options are
    ignored for ready-made instances).
    """
    # Imported lazily so the filters module is only loaded when a
    # filter is actually installed (avoids a circular import at
    # module load time).
    from .filters import Filter
    instance = filter_ if isinstance(filter_, Filter) else filter_(**options)
    self.filters.append(instance)
@@ -223,7 +222,7 @@ def get_tokens(self, text, unfiltered=False):
223222 Also preprocess the text, i.e. expand tabs and strip it if
224223 wanted and applies registered filters.
225224 """
226- if not isinstance (text , unicode ):
225+ if not isinstance (text , str ):
227226 if self .encoding == 'guess' :
228227 try :
229228 text = text .decode ('utf-8' )
0 commit comments