"""HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
try:
from urllib.parse import quote, unquote, urlsplit, urlunsplit
except ImportError: # Python 2
from urllib import quote, unquote
from urlparse import urlsplit, urlunsplit
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_text, force_str
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.text import normalize_newlines
from .html_parser import HTMLParser, HTMLParseError
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(
    r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>'
    r'|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(
    r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(
        [re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')


def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.
    """
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type)


def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
        paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
        paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
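# Illustrative doctest-style example (default autoescape=False):
#
#     >>> linebreaks('line 1\nline 2\n\nparagraph 2')
#     '<p>line 1<br />line 2</p>\n\n<p>paragraph 2</p>'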
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
        self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
s = MLStripper()
try:
s.feed(value)
s.close()
except HTMLParseError:
return value
else:
return s.get_data()
strip_tags = allow_lazy(strip_tags)
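# Illustrative doctest-style example:
#
#     >>> strip_tags('<b>Joel</b> <button>is</button> a <span>slug</span>')
#     'Joel is a slug'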
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
    endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
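# Illustrative doctest-style example (the tags themselves are removed,
# their contents are kept):
#
#     >>> remove_tags('<b>Begin</b> <i>middle</i> end', 'b i')
#     'Begin middle end'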
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
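# Illustrative doctest-style example:
#
#     >>> strip_spaces_between_tags('<p> <b>bold</b> </p>')
#     '<p><b>bold</b></p>'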
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
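# Illustrative doctest-style example (named and numeric entities are both
# dropped, so two adjacent spaces remain below):
#
#     >>> strip_entities('fish &amp;&#38; chips')
#     'fish  chips'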
def fix_ampersands(value):
"""Returns the given HTML with all unencoded ampersands encoded correctly."""
    return unencoded_ampersands_re.sub('&amp;', force_text(value))
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
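# Illustrative doctest-style example (bare ampersands are encoded,
# already-encoded ones are left alone):
#
#     >>> fix_ampersands('Tom & Jerry &amp; Spike')
#     'Tom &amp; Jerry &amp; Spike'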
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
url = unquote(force_str(url))
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
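# Illustrative doctest-style example (reserved characters survive, unsafe
# ones such as spaces are percent-encoded; non-ASCII domains go through IDNA):
#
#     >>> smart_urlquote('http://example.com/path with spaces/?q=a&b=c')
#     'http://example.com/path%20with%20spaces/?q=a&b=c'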
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
match = None
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % middle)
            elif ':' not in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
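# Illustrative doctest-style example:
#
#     >>> urlize('Check out www.djangoproject.com today', nofollow=True)
#     'Check out <a href="http://www.djangoproject.com" rel="nofollow">www.djangoproject.com</a> today'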
def clean_html(text):
"""
Clean the given HTML. Specifically, do the following:
        * Convert <b> and <i> to <strong> and <em>.
        * Encode all ampersands correctly.
        * Remove all "target" attributes from <a> tags.
        * Remove extraneous HTML, such as presentational tags that open and
          immediately close and <br clear="all">.
        * Convert hard-coded bullets into HTML unordered lists.
        * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the
          bottom of the text.
    """
    text = normalize_newlines(force_text(text))
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Remove all target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim presentational HTML such as <br clear="all">.
text = html_gunk_re.sub('', text)
# Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            s = s.replace('<p>%s' % d, '<li>')
        return '<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)
    # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom
    # of the text.
    text = trailing_empty_content_re.sub('', text)
    return text
clean_html = allow_lazy(clean_html, six.text_type)


def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    return value.replace(" ", "\xa0")