mirror of https://github.com/django/django.git
Refs #33002 -- Optimized Lexer.tokenize() by skipping computing lineno when not needed.
parent 65ed96fa39
commit 6242c22a2f
@@ -357,8 +357,8 @@ class Lexer:
         for bit in tag_re.split(self.template_string):
             if bit:
                 result.append(self.create_token(bit, None, lineno, in_tag))
+                lineno += bit.count('\n')
             in_tag = not in_tag
-            lineno += bit.count('\n')
         return result
 
     def create_token(self, token_string, position, lineno, in_tag):
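For context: tag_re.split() uses a capturing group, so its output alternates between literal text and tag markup, and many of those pieces are empty strings (for instance when the template starts with a tag or two tags sit back to back). An empty bit can never contain a newline, so counting newlines only matters for non-empty bits; moving the count inside the if bit: branch skips the redundant str.count() calls without changing lineno. Below is a minimal, self-contained sketch of the method after this change; the regex and the create_token() stand-in are simplified assumptions for illustration, not Django's actual implementation.

import re

# Simplified stand-in for Django's tag_re: matches {% ... %}, {{ ... }} and {# ... #} markers.
tag_re = re.compile(r"({%.*?%}|{{.*?}}|{#.*?#})")


class Lexer:
    def __init__(self, template_string):
        self.template_string = template_string

    def tokenize(self):
        """Return (token_string, lineno, in_tag) tuples for every non-empty bit."""
        in_tag = False
        lineno = 1
        result = []
        for bit in tag_re.split(self.template_string):
            if bit:
                result.append(self.create_token(bit, None, lineno, in_tag))
                # Moved into this branch by the commit above: empty bits have no
                # newlines, so only non-empty bits can ever advance lineno.
                lineno += bit.count('\n')
            # split() output alternates text/tag, so the flag flips for every bit,
            # empty or not.
            in_tag = not in_tag
        return result

    def create_token(self, token_string, position, lineno, in_tag):
        # Stand-in: real Django builds a Token object; a tuple is enough here.
        return (token_string, lineno, in_tag)


# The empty string produced before the leading tag is skipped entirely.
print(Lexer("{% if x %}hi\nthere{% endif %}").tokenize())
# [('{% if x %}', 1, True), ('hi\nthere', 1, False), ('{% endif %}', 2, True)]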