2007-12-11 13:49:11 +08:00
|
|
|
"""HTML utilities suitable for global use."""
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2019-04-24 19:30:34 +08:00
|
|
|
import html
|
2017-10-13 02:59:38 +08:00
|
|
|
import json
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
import re
|
2017-02-18 08:45:34 +08:00
|
|
|
from html.parser import HTMLParser
|
2017-01-07 19:11:46 +08:00
|
|
|
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit
|
2007-07-16 13:28:13 +08:00
|
|
|
|
2019-07-03 01:32:17 +08:00
|
|
|
from django.utils.encoding import punycode
|
2017-02-01 23:39:32 +08:00
|
|
|
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
|
2014-06-26 22:55:36 +08:00
|
|
|
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
|
2019-10-26 22:42:32 +08:00
|
|
|
from django.utils.regex_helper import _lazy_re_compile
|
2019-02-05 22:38:29 +08:00
|
|
|
from django.utils.safestring import SafeData, SafeString, mark_safe
|
2011-07-29 18:22:25 +08:00
|
|
|
from django.utils.text import normalize_newlines
|
2005-07-13 09:25:57 +08:00
|
|
|
|
|
|
|
|
2022-02-21 16:46:39 +08:00
|
|
|
@keep_lazy(SafeString)
def escape(text):
    """
    Return the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.

    Always escape input, even if it's already escaped and marked as such.
    This may result in double-escaping. If this is a concern, use
    conditional_escape() instead.
    """
    # html.escape() (quote=True by default) encodes &, <, >, " and '.
    escaped = html.escape(str(text))
    return SafeString(escaped)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2016-11-13 01:11:23 +08:00
|
|
|
|
2012-09-18 18:28:49 +08:00
|
|
|
_js_escapes = {
|
|
|
|
ord("\\"): "\\u005C",
|
|
|
|
ord("'"): "\\u0027",
|
|
|
|
ord('"'): "\\u0022",
|
|
|
|
ord(">"): "\\u003E",
|
|
|
|
ord("<"): "\\u003C",
|
|
|
|
ord("&"): "\\u0026",
|
|
|
|
ord("="): "\\u003D",
|
|
|
|
ord("-"): "\\u002D",
|
|
|
|
ord(";"): "\\u003B",
|
2017-03-04 22:04:16 +08:00
|
|
|
ord("`"): "\\u0060",
|
2012-09-18 18:28:49 +08:00
|
|
|
ord("\u2028"): "\\u2028",
|
|
|
|
ord("\u2029"): "\\u2029",
|
|
|
|
}
|
2011-01-03 01:34:52 +08:00
|
|
|
|
|
|
|
# Escape every ASCII character with a value less than 32.
|
2012-09-18 18:28:49 +08:00
|
|
|
_js_escapes.update((ord("%c" % z), "\\u%04X" % z) for z in range(32))
|
2011-01-03 01:34:52 +08:00
|
|
|
|
2013-11-03 04:12:09 +08:00
|
|
|
|
2022-02-21 16:46:39 +08:00
|
|
|
@keep_lazy(SafeString)
def escapejs(value):
    """Hex encode characters for use in JavaScript strings."""
    # A single C-level translate pass replaces all unsafe characters at once.
    translated = str(value).translate(_js_escapes)
    return mark_safe(translated)
|
2011-01-03 01:34:52 +08:00
|
|
|
|
2013-11-03 04:12:09 +08:00
|
|
|
|
2017-10-13 02:59:38 +08:00
|
|
|
_json_script_escapes = {
|
|
|
|
ord(">"): "\\u003E",
|
|
|
|
ord("<"): "\\u003C",
|
|
|
|
ord("&"): "\\u0026",
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-11-19 23:52:57 +08:00
|
|
|
def json_script(value, element_id=None):
    """
    Escape all the HTML/XML special characters with their unicode escapes, so
    value is safe to be output anywhere except for inside a tag attribute. Wrap
    the escaped JSON in a script tag.
    """
    # Imported here to avoid a circular import at module load time.
    from django.core.serializers.json import DjangoJSONEncoder

    escaped = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes)
    # Already escaped above, so mark it safe to keep format_html from
    # double-escaping the payload.
    payload = mark_safe(escaped)
    if element_id:
        return format_html(
            '<script id="{}" type="application/json">{}</script>',
            element_id,
            payload,
        )
    return format_html('<script type="application/json">{}</script>', payload)
|
2017-10-13 02:59:38 +08:00
|
|
|
|
|
|
|
|
2012-06-30 23:41:51 +08:00
|
|
|
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.

    This function relies on the __html__ convention used both by Django's
    SafeData class and by third-party libraries like markupsafe.
    """
    # Resolve lazy objects first so the __html__ check runs against the
    # realized value, not the Promise wrapper.
    if isinstance(text, Promise):
        text = str(text)
    # Objects advertising __html__ are treated as already safe.
    if hasattr(text, "__html__"):
        return text.__html__()
    return escape(text)
|
2007-11-14 20:58:53 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2012-07-01 01:54:38 +08:00
|
|
|
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but pass all arguments through conditional_escape(),
    and call mark_safe() on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    escaped_args = [conditional_escape(arg) for arg in args]
    escaped_kwargs = {name: conditional_escape(val) for name, val in kwargs.items()}
    return mark_safe(format_string.format(*escaped_args, **escaped_kwargs))
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2012-07-01 01:54:38 +08:00
|
|
|
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    safe_sep = conditional_escape(sep)
    # Each fragment is escaped/marked-safe by format_html, so joining the
    # results with the escaped separator yields a safe string.
    fragments = (format_html(format_string, *args) for args in args_generator)
    return mark_safe(safe_sep.join(fragments))
|
2012-07-01 01:54:38 +08:00
|
|
|
|
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def linebreaks(value, autoescape=False):
    """Convert newlines into <p> and <br>s."""
    # Normalize \r\n and \r to \n so the splitting below sees one style.
    normalized = str(normalize_newlines(value))
    # Two or more consecutive newlines delimit paragraphs.
    paragraphs = re.split("\n{2,}", normalized)
    rendered = []
    for paragraph in paragraphs:
        if autoescape:
            paragraph = escape(paragraph)
        rendered.append("<p>%s</p>" % paragraph.replace("\n", "<br>"))
    return "\n\n".join(rendered)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2013-05-22 23:29:16 +08:00
|
|
|
|
|
|
|
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps text and entities but drops all markup."""

    def __init__(self):
        # convert_charrefs=False so entity and character references reach the
        # handle_entityref/handle_charref hooks and are preserved verbatim
        # instead of being converted to their text equivalents.
        super().__init__(convert_charrefs=False)
        self.reset()
        # Collected non-markup fragments, joined by get_data().
        self.fed = []

    def handle_data(self, data):
        # Plain text between tags.
        self.fed.append(data)

    def handle_entityref(self, name):
        # Re-emit named entities (e.g. &amp;) unchanged.
        self.fed.append("&%s;" % name)

    def handle_charref(self, name):
        # Re-emit numeric character references (e.g. &#38;) unchanged.
        self.fed.append("&#%s;" % name)

    def get_data(self):
        return "".join(self.fed)
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2014-03-20 23:50:50 +08:00
|
|
|
def _strip_once(value):
    """Internal tag stripping utility used by strip_tags (single parse pass)."""
    stripper = MLStripper()
    stripper.feed(value)
    # close() flushes any buffered input still held by the parser.
    stripper.close()
    return stripper.get_data()
|
2014-03-20 23:50:50 +08:00
|
|
|
|
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def strip_tags(value):
    """Return the given HTML with all tags stripped."""
    text = str(value)
    # Usually a single _strip_once pass suffices; repeat only while a pass
    # still removes tags (guards against markup revealed by a prior pass).
    # The loop condition is redundant but avoids needless parses.
    while "<" in text and ">" in text:
        stripped = _strip_once(text)
        if text.count("<") == stripped.count("<"):
            # _strip_once wasn't able to detect more tags.
            break
        text = stripped
    return text
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def strip_spaces_between_tags(value):
    """Return the given HTML with spaces between tags removed."""
    # Collapse any whitespace run that sits strictly between a closing '>'
    # and an opening '<'.
    text = str(value)
    return re.sub(r">\s+<", "><", text)
|
2006-01-15 09:51:30 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2012-01-08 02:15:28 +08:00
|
|
|
def smart_urlquote(url):
    """Quote a URL if it isn't already quoted."""

    def requote(segment):
        # Unquote first so an already-quoted segment isn't double-quoted.
        # Tilde is part of RFC3986 Unreserved Characters
        # https://tools.ietf.org/html/rfc3986#section-2.3
        # See also https://bugs.python.org/issue16285
        return quote(
            unquote(segment), safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + "~"
        )

    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return requote(url)
    try:
        netloc = punycode(netloc)  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return requote(url)

    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        pairs = [
            (unquote(key), unquote(val))
            for key, val in parse_qsl(query, keep_blank_values=True)
        ]
        # urlencode will take care of quoting
        query = urlencode(pairs)

    return urlunsplit((scheme, netloc, requote(path), query, requote(fragment)))
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2014-08-09 18:44:48 +08:00
|
|
|
|
2021-10-15 01:27:31 +08:00
|
|
|
class Urlizer:
    """
    Convert any URLs in text into clickable links.

    Work on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.
    """

    # Characters stripped from the end of a candidate link.
    trailing_punctuation_chars = ".,:;!"
    # (opening, closing) pairs stripped when they wrap a candidate link.
    wrapping_punctuation = [("(", ")"), ("[", "]")]

    # Matches scheme-qualified URLs (http/https), including bracketed IPv6 hosts.
    simple_url_re = _lazy_re_compile(r"^https?://\[?\w", re.IGNORECASE)
    # Matches scheme-less candidates: www.* or bare domains ending in one of
    # the original seven gTLDs.
    simple_url_2_re = _lazy_re_compile(
        r"^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$", re.IGNORECASE
    )
    # Split on whitespace and the characters that cannot appear inside a link.
    word_split_re = _lazy_re_compile(r"""([\s<>"']+)""")

    mailto_template = "mailto:{local}@{domain}"
    url_template = '<a href="{href}"{attrs}>{url}</a>'

    def __call__(self, text, trim_url_limit=None, nofollow=False, autoescape=False):
        """
        If trim_url_limit is not None, truncate the URLs in the link text
        longer than this limit to trim_url_limit - 1 characters and append an
        ellipsis.

        If nofollow is True, give the links a rel="nofollow" attribute.

        If autoescape is True, autoescape the link text and URLs.
        """
        safe_input = isinstance(text, SafeData)

        # word_split_re uses a capturing group, so the separators are kept in
        # the list and rejoined unchanged by the "".join below.
        words = self.word_split_re.split(str(text))
        return "".join(
            [
                self.handle_word(
                    word,
                    safe_input=safe_input,
                    trim_url_limit=trim_url_limit,
                    nofollow=nofollow,
                    autoescape=autoescape,
                )
                for word in words
            ]
        )

    def handle_word(
        self,
        word,
        *,
        safe_input,
        trim_url_limit=None,
        nofollow=False,
        autoescape=False,
    ):
        """Return `word` unchanged, escaped, or replaced by an <a> element."""
        # Quick rejection: a link candidate must contain '.', '@' or ':'.
        if "." in word or "@" in word or ":" in word:
            # lead: Punctuation trimmed from the beginning of the word.
            # middle: State of the word.
            # trail: Punctuation trimmed from the end of the word.
            lead, middle, trail = self.trim_punctuation(word)
            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ""
            if self.simple_url_re.match(middle):
                url = smart_urlquote(html.unescape(middle))
            elif self.simple_url_2_re.match(middle):
                # Scheme-less candidate: assume http://.
                url = smart_urlquote("http://%s" % html.unescape(middle))
            elif ":" not in middle and self.is_email_simple(middle):
                local, domain = middle.rsplit("@", 1)
                try:
                    domain = punycode(domain)
                except UnicodeError:
                    # Invalid IDN domain: leave the word untouched.
                    return word
                url = self.mailto_template.format(local=local, domain=domain)
                # mailto links never get rel="nofollow".
                nofollow_attr = ""
            # Make link.
            if url:
                trimmed = self.trim_url(middle, limit=trim_url_limit)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = self.url_template.format(
                    href=escape(url),
                    attrs=nofollow_attr,
                    url=trimmed,
                )
                return mark_safe(f"{lead}{middle}{trail}")
            else:
                # Looked like a candidate but wasn't recognized as a link.
                if safe_input:
                    return mark_safe(word)
                elif autoescape:
                    return escape(word)
        elif safe_input:
            return mark_safe(word)
        elif autoescape:
            return escape(word)
        return word

    def trim_url(self, x, *, limit):
        """Truncate `x` to limit - 1 characters plus an ellipsis, if needed."""
        if limit is None or len(x) <= limit:
            return x
        return "%s…" % x[: max(0, limit - 1)]

    def trim_punctuation(self, word):
        """
        Trim trailing and wrapping punctuation from `word`. Return the items of
        the new state.
        """
        lead, middle, trail = "", word, ""
        # Continue trimming until middle remains unchanged.
        trimmed_something = True
        while trimmed_something:
            trimmed_something = False
            # Trim wrapping punctuation.
            for opening, closing in self.wrapping_punctuation:
                if middle.startswith(opening):
                    middle = middle[len(opening) :]
                    lead += opening
                    trimmed_something = True
                # Keep parentheses at the end only if they're balanced.
                if (
                    middle.endswith(closing)
                    and middle.count(closing) == middle.count(opening) + 1
                ):
                    middle = middle[: -len(closing)]
                    trail = closing + trail
                    trimmed_something = True
            # Trim trailing punctuation (after trimming wrapping punctuation,
            # as encoded entities contain ';'). Unescape entities to avoid
            # breaking them by removing ';'.
            middle_unescaped = html.unescape(middle)
            stripped = middle_unescaped.rstrip(self.trailing_punctuation_chars)
            if middle_unescaped != stripped:
                # Count trimmed characters on the unescaped text, but slice
                # the original `middle` so entities stay intact.
                punctuation_count = len(middle_unescaped) - len(stripped)
                trail = middle[-punctuation_count:] + trail
                middle = middle[:-punctuation_count]
                trimmed_something = True
        return lead, middle, trail

    @staticmethod
    def is_email_simple(value):
        """Return True if value looks like an email address."""
        # An @ must be in the middle of the value.
        if "@" not in value or value.startswith("@") or value.endswith("@"):
            return False
        try:
            p1, p2 = value.split("@")
        except ValueError:
            # value contains more than one @.
            return False
        # Dot must be in p2 (e.g. example.com)
        if "." not in p2 or p2.startswith("."):
            return False
        return True
|
|
|
|
|
2012-01-09 03:42:14 +08:00
|
|
|
|
2021-10-15 01:27:31 +08:00
|
|
|
# Shared module-level instance used by urlize() below.
urlizer = Urlizer()
|
2012-01-09 03:42:14 +08:00
|
|
|
|
2021-10-15 01:27:31 +08:00
|
|
|
|
|
|
|
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """Convert URLs in text into clickable links (delegates to Urlizer)."""
    return urlizer(
        text,
        trim_url_limit=trim_url_limit,
        nofollow=nofollow,
        autoescape=autoescape,
    )
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2013-05-18 19:58:45 +08:00
|
|
|
def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    # U+00A0 NO-BREAK SPACE renders like a space but forbids a line break.
    nbsp = "\xa0"
    return value.replace(" ", nbsp)
|
2015-03-19 04:42:59 +08:00
|
|
|
|
|
|
|
|
|
|
|
def html_safe(klass):
    """
    A decorator that defines the __html__ method. This helps non-Django
    templates to detect classes whose __str__ methods return SafeString.
    """
    # Refuse classes that already implement the __html__ protocol.
    if "__html__" in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    # A class-local __str__ is required; an inherited one isn't enough.
    if "__str__" not in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it doesn't "
            "define __str__()." % klass.__name__
        )
    original_str = klass.__str__
    # Wrap __str__ so its result is marked safe, and expose it via __html__.
    klass.__str__ = lambda self: mark_safe(original_str(self))
    klass.__html__ = lambda self: str(self)
    return klass
|