"""JsLex: a lexer for JavaScript"""
# Originally from https://bitbucket.org/ned/jslex

import re


class Tok:
    """
    A specification for a token class.
    """

    num = 0

    def __init__(self, name, regex, next=None):
        self.id = Tok.num
        Tok.num += 1
        self.name = name
        self.regex = regex
        self.next = next
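
# For instance (an illustrative sketch, not from the original module):
# Tok("ws", r"\s+") specifies a whitespace token, and
# Tok("keyword", r"if\b", next='reg') switches the lexer to the 'reg'
# state whenever it matches.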


def literals(choices, prefix="", suffix=""):
    """
    Create a regex from a space-separated list of literal `choices`.

    If provided, `prefix` and `suffix` will be attached to each choice
    individually.
    """
    return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
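
# A quick illustration (not part of the original module):
# literals("null true false", suffix=r"\b") returns the pattern
# r"null\b|true\b|false\b", ready to be used as a regex alternation.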


class Lexer:
    """
    A generic multi-state regex-based lexer.
    """

    def __init__(self, states, first):
        self.regexes = {}
        self.toks = {}

        for state, rules in states.items():
            parts = []
            for tok in rules:
                groupid = "t%d" % tok.id
                self.toks[groupid] = tok
                parts.append("(?P<%s>%s)" % (groupid, tok.regex))
            self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)

        self.state = first

    def lex(self, text):
        """
        Lexically analyze `text`.

        Yield pairs (`name`, `tokentext`).
        """
        end = len(text)
        state = self.state
        regexes = self.regexes
        toks = self.toks
        start = 0

        while start < end:
            for match in regexes[state].finditer(text, start):
                name = match.lastgroup
                tok = toks[name]
                toktext = match[name]
                start += len(toktext)
                yield (tok.name, toktext)

                if tok.next:
                    state = tok.next
                    break

        self.state = state
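
# A minimal usage sketch (illustrative, not part of the original module):
# a two-state lexer where matching a word switches to state 'b' and
# matching a number switches back to state 'a'.
#
#   demo = Lexer({
#       'a': [Tok("word", r"[a-z]+", next='b'), Tok("ws", r"\s+")],
#       'b': [Tok("num", r"[0-9]+", next='a'), Tok("ws", r"\s+")],
#   }, 'a')
#   list(demo.lex("abc 123"))
#   # -> [('word', 'abc'), ('ws', ' '), ('num', '123')]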


class JsLexer(Lexer):
    """
    A JavaScript lexer

    >>> lexer = JsLexer()
    >>> list(lexer.lex("a = 1"))
    [('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]

    This doesn't properly handle non-ASCII characters in the JavaScript
    source.
    """

    # Because these tokens are matched as alternatives in a regex, longer
    # possibilities must appear in the list before shorter ones, for example,
    # '>>' before '>'.
    #
    # Note that we don't have to detect malformed JavaScript, only properly
    # lex correct JavaScript, so much of this is simplified.

    # Details of JavaScript lexical structure are taken from
    # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf

    # A useful explanation of automatic semicolon insertion is at
    # http://inimino.org/~inimino/blog/javascript_semicolons

    both_before = [
        Tok("comment", r"/\*(.|\n)*?\*/"),
        Tok("linecomment", r"//.*?$"),
        Tok("ws", r"\s+"),
        Tok("keyword", literals("""
                                break case catch class const continue debugger
                                default delete do else enum export extends
                                finally for function if import in instanceof
                                new return super switch this throw try typeof
                                var void while with
                                """, suffix=r"\b"), next='reg'),
        Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
        Tok("id", r"""
                  ([a-zA-Z_$]|\\u[0-9a-fA-F]{4})      # first char
                  ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})*  # rest chars
                  """, next='div'),
        Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
        Tok("onum", r"0[0-7]+"),
        Tok("dnum", r"""
                    (   (0|[1-9][0-9]*)     # DecimalIntegerLiteral
                        \.                  # dot
                        [0-9]*              # DecimalDigits-opt
                        ([eE][-+]?[0-9]+)?  # ExponentPart-opt
                    |
                        \.                  # dot
                        [0-9]+              # DecimalDigits
                        ([eE][-+]?[0-9]+)?  # ExponentPart-opt
                    |
                        (0|[1-9][0-9]*)     # DecimalIntegerLiteral
                        ([eE][-+]?[0-9]+)?  # ExponentPart-opt
                    )
                    """, next='div'),
        Tok("punct", literals("""
                              >>>= === !== >>> <<= >>= <= >= == != << >> &&
                              || += -= *= %= &= |= ^=
                              """), next='reg'),
        Tok("punct", literals("++ -- ) ]"), next='div'),
        Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
        Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
        Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
    ]

    both_after = [
        Tok("other", r"."),
    ]

    states = {
        # slash will mean division
        'div': both_before + [
            Tok("punct", literals("/= /"), next='reg'),
        ] + both_after,

        # slash will mean regex
        'reg': both_before + [
            Tok("regex", r"""
                /                       # opening slash
                # First character is..
                (   [^*\\/[]            # anything but * \ / or [
                |   \\.                 # or an escape sequence
                |   \[                  # or a class, which has
                        (   [^\]\\]     # anything but \ or ]
                        |   \\.         # or an escape sequence
                        )*              # many times
                    \]
                )
                # Following characters are same, except for excluding a star
                (   [^\\/[]             # anything but \ / or [
                |   \\.                 # or an escape sequence
                |   \[                  # or a class, which has
                        (   [^\]\\]     # anything but \ or ]
                        |   \\.         # or an escape sequence
                        )*              # many times
                    \]
                )*                      # many times
                /                       # closing slash
                [a-zA-Z0-9]*            # trailing flags
                """, next='div'),
        ] + both_after,
    }
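
    # For example (illustrative, not from the original module): after an
    # identifier a slash means division, but after '=' a slash starts a
    # regex literal, which is why two lexer states are needed:
    #   list(JsLexer().lex("a = b / 2"))  # '/' comes out as a punct token
    #   list(JsLexer().lex("a = /b/g"))   # '/b/g' comes out as a regex token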

    def __init__(self):
        super().__init__(self.states, 'reg')


def prepare_js_for_gettext(js):
    """
    Convert the JavaScript source `js` into something resembling C for
    xgettext.

    What actually happens is that all the regex literals are replaced with
    "REGEX".
    """
    def escape_quotes(m):
        """Used in a regex to properly escape double quotes."""
        s = m[0]
        if s == '"':
            return r'\"'
        else:
            return s

    lexer = JsLexer()
    c = []
    for name, tok in lexer.lex(js):
        if name == 'regex':
            # C doesn't grok regexes, and they aren't needed for gettext,
            # so just output a string instead.
            tok = '"REGEX"'
        elif name == 'string':
            # C doesn't have single-quoted strings, so make all strings
            # double-quoted.
            if tok.startswith("'"):
                guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
                tok = '"' + guts + '"'
        elif name == 'id':
            # C can't deal with Unicode escapes in identifiers. We don't
            # need them for gettext anyway, so replace them with something
            # innocuous.
            tok = tok.replace("\\", "U")
        c.append(tok)
    return ''.join(c)
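

if __name__ == "__main__":
    # A small runnable demonstration (not part of the original module):
    # regex literals become "REGEX" and single-quoted strings become
    # double-quoted, which is the C-like form xgettext can parse.
    src = "var re = /abc/; gettext('hello');"
    print(prepare_js_for_gettext(src))
    # Prints: var re = "REGEX"; gettext("hello");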