Skip to content

Commit dafea85

Browse files
Issue #18873: The tokenize module, IDLE, 2to3, and the findnocoding.py script
now detect Python source code encoding only in comment lines.
1 parent 975fce3 commit dafea85

9 files changed

Lines changed: 44 additions & 22 deletions

File tree

Lib/idlelib/IOBinding.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@
6363
encoding = locale_encoding ### KBK 07Sep07 This is used all over IDLE, check!
6464
### 'encoding' is used below in encode(), check!
6565

66-
coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
66+
coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
6767

6868
def coding_spec(data):
6969
"""Return the encoding declaration according to PEP 263.
@@ -84,14 +84,16 @@ def coding_spec(data):
8484
lines = data
8585
# consider only the first two lines
8686
if '\n' in lines:
87-
lst = lines.split('\n')[:2]
87+
lst = lines.split('\n', 2)[:2]
8888
elif '\r' in lines:
89-
lst = lines.split('\r')[:2]
89+
lst = lines.split('\r', 2)[:2]
90+
else:
91+
lst = [lines]
92+
for line in lst:
93+
match = coding_re.match(line)
94+
if match is not None:
95+
break
9096
else:
91-
lst = list(lines)
92-
str = '\n'.join(lst)
93-
match = coding_re.search(str)
94-
if not match:
9597
return None
9698
name = match.group(1)
9799
try:

Lib/lib2to3/pgen2/tokenize.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -236,7 +236,7 @@ def compat(self, token, iterable):
236236
startline = False
237237
toks_append(tokval)
238238

239-
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
239+
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
240240

241241
def _get_normal_name(orig_enc):
242242
"""Imitates get_normal_name in tokenizer.c."""
@@ -281,11 +281,10 @@ def find_cookie(line):
281281
line_string = line.decode('ascii')
282282
except UnicodeDecodeError:
283283
return None
284-
285-
matches = cookie_re.findall(line_string)
286-
if not matches:
284+
match = cookie_re.match(line_string)
285+
if not match:
287286
return None
288-
encoding = _get_normal_name(matches[0])
287+
encoding = _get_normal_name(match.group(1))
289288
try:
290289
codec = lookup(encoding)
291290
except LookupError:
Lib/lib2to3/tests/data/false_encoding.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
#!/usr/bin/env python
2+
print '#coding=0'

Lib/lib2to3/tests/test_refactor.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -271,6 +271,10 @@ def test_file_encoding(self):
271271
fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
272272
self.check_file_refactoring(fn)
273273

274+
def test_false_file_encoding(self):
275+
fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
276+
data = self.check_file_refactoring(fn)
277+
274278
def test_bom(self):
275279
fn = os.path.join(TEST_DATA_DIR, "bom.py")
276280
data = self.check_file_refactoring(fn)

Lib/test/test_importlib/source/test_source_encoding.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
import unittest
1111

1212

13-
CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
13+
CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
1414

1515

1616
class EncodingTest(unittest.TestCase):
@@ -41,7 +41,7 @@ def run_test(self, source):
4141

4242
def create_source(self, encoding):
4343
encoding_line = "# coding={0}".format(encoding)
44-
assert CODING_RE.search(encoding_line)
44+
assert CODING_RE.match(encoding_line)
4545
source_lines = [encoding_line.encode('utf-8')]
4646
source_lines.append(self.source_line.encode(encoding))
4747
return b'\n'.join(source_lines)
@@ -50,7 +50,7 @@ def test_non_obvious_encoding(self):
5050
# Make sure that an encoding that has never been a standard one for
5151
# Python works.
5252
encoding_line = "# coding=koi8-r"
53-
assert CODING_RE.search(encoding_line)
53+
assert CODING_RE.match(encoding_line)
5454
source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
5555
self.run_test(source)
5656

Lib/test/test_tokenize.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -946,6 +946,13 @@ def test_short_files(self):
946946
readline = self.get_readline((b'# coding: bad\n',))
947947
self.assertRaises(SyntaxError, detect_encoding, readline)
948948

949+
def test_false_encoding(self):
950+
# Issue 18873: "Encoding" detected in non-comment lines
951+
readline = self.get_readline((b'print("#coding=fake")',))
952+
encoding, consumed_lines = detect_encoding(readline)
953+
self.assertEqual(encoding, 'utf-8')
954+
self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
955+
949956
def test_open(self):
950957
filename = support.TESTFN + '.py'
951958
self.addCleanup(support.unlink, filename)

Lib/tokenize.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
from codecs import lookup, BOM_UTF8
3232
import collections
3333
from io import TextIOWrapper
34-
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
34+
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
3535

3636
import token
3737
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
@@ -372,10 +372,10 @@ def find_cookie(line):
372372
msg = '{} for {!r}'.format(msg, filename)
373373
raise SyntaxError(msg)
374374

375-
matches = cookie_re.findall(line_string)
376-
if not matches:
375+
match = cookie_re.match(line_string)
376+
if not match:
377377
return None
378-
encoding = _get_normal_name(matches[0])
378+
encoding = _get_normal_name(match.group(1))
379379
try:
380380
codec = lookup(encoding)
381381
except LookupError:

Misc/NEWS

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,8 @@ Core and Builtins
6868
Library
6969
-------
7070

71+
- Issue #18873: The tokenize module now detects Python source code encoding
72+
only in comment lines.
7173

7274
- Issue #17324: Fix http.server's request handling case on trailing '/'. Patch
7375
contributed by Vajrasky Kok.
@@ -304,6 +306,9 @@ C API
304306
IDLE
305307
----
306308

309+
- Issue #18873: IDLE now detects Python source code encoding only in comment
310+
lines.
311+
307312
- Issue #18988: The "Tab" key now works when a word is already autocompleted.
308313

309314
- Issue #18489: Add tests for SearchEngine. Original patch by Phil Webster.
@@ -430,6 +435,9 @@ Documentation
430435
Tools/Demos
431436
-----------
432437

438+
- Issue #18873: 2to3 and the findnocoding.py script now detect Python source
439+
code encoding only in comment lines.
440+
433441
- Issue #18817: Fix a resource warning in Lib/aifc.py demo.
434442

435443
- Issue #18439: Make patchcheck work on Windows for ACKS, NEWS.

Tools/scripts/findnocoding.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,13 +32,13 @@ def walk_python_files(self, paths, *args, **kwargs):
3232
"no sophisticated Python source file search will be done.", file=sys.stderr)
3333

3434

35-
decl_re = re.compile(rb"coding[=:]\s*([-\w.]+)")
35+
decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
3636

3737
def get_declaration(line):
38-
match = decl_re.search(line)
38+
match = decl_re.match(line)
3939
if match:
4040
return match.group(1)
41-
return ''
41+
return b''
4242

4343
def has_correct_encoding(text, codec):
4444
try:

0 commit comments

Comments (0)