2007-12-11 13:49:11 +08:00
|
|
|
"""HTML utilities suitable for global use."""
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2012-06-08 00:08:47 +08:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
import re
|
|
|
|
import string
|
2012-07-20 21:36:52 +08:00
|
|
|
try:
|
|
|
|
from urllib.parse import quote, urlsplit, urlunsplit
|
|
|
|
except ImportError: # Python 2
|
|
|
|
from urllib import quote
|
|
|
|
from urlparse import urlsplit, urlunsplit
|
2007-07-16 13:28:13 +08:00
|
|
|
|
2007-11-14 20:58:53 +08:00
|
|
|
from django.utils.safestring import SafeData, mark_safe
|
2012-08-29 02:59:56 +08:00
|
|
|
from django.utils.encoding import force_bytes, force_text
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
from django.utils.functional import allow_lazy
|
2012-07-20 20:48:51 +08:00
|
|
|
from django.utils import six
|
2011-07-29 18:22:25 +08:00
|
|
|
from django.utils.text import normalize_newlines
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2007-12-11 13:49:11 +08:00
|
|
|
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
# Pairs of (opening, closing) punctuation that may wrap a link. The last
# pair is the HTML-escaped angle brackets; the previous content had this
# entity-decoded into a duplicate of ('<', '>'), which made escaped input
# unhandled.
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('&lt;', '&gt;')]

# List of possible strings used for bullets in bulleted lists.
# The entity forms (&middot;, &#149;, &bull;, &#8226;) had been decoded to
# raw characters, collapsing the list into duplicates; restored here.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']

# Matches a '&' that does not already start an entity (&word; or &#123;).
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# Matches a '%' not followed by two hex digits, i.e. not a valid %-escape.
unquoted_percents_re = re.compile(r'%(?![0-9A-Fa-f]{2})')
# Captures the whitespace so ''.join() round-trips the input exactly.
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
# Empty trailing paragraphs: the '&nbsp;' alternative had been decoded away.
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
|
2012-08-03 22:10:04 +08:00
|
|
|
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2012-06-30 23:41:51 +08:00
|
|
|
def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.

    The result is marked safe so it is not escaped again by the template
    engine. Note that this always escapes, even for already-safe input; use
    conditional_escape() to skip pre-escaped strings.
    """
    # '&' must be replaced first, otherwise the ampersands introduced by the
    # later replacements would be double-escaped. The previous content had
    # these entity names decoded away (e.g. replace('&', '&')), turning the
    # whole function into a no-op.
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2012-09-18 18:28:49 +08:00
|
|
|
# Mapping of ordinal -> JavaScript escape sequence, consumed by
# str.translate() in escapejs() below.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    # NOTE(review): presumably escaped because U+2028/U+2029 act as line
    # terminators inside JavaScript string literals — confirm against the
    # ECMAScript spec.
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}

# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
|
2011-01-03 01:34:52 +08:00
|
|
|
|
|
|
|
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    # Single C-level pass over the string via the _js_escapes table.
    translated = force_text(value).translate(_js_escapes)
    return mark_safe(translated)
escapejs = allow_lazy(escapejs, six.text_type)
|
2011-01-03 01:34:52 +08:00
|
|
|
|
2012-06-30 23:41:51 +08:00
|
|
|
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped
    strings (anything already marked as SafeData is returned unchanged).
    """
    return text if isinstance(text, SafeData) else escape(text)
|
2007-11-14 20:58:53 +08:00
|
|
|
|
2012-07-01 01:54:38 +08:00
|
|
|
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but passes all arguments through conditional_escape,
    and calls 'mark_safe' on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    # Escape every positional and keyword argument before interpolation so
    # the final string is safe to mark as such.
    safe_args = [conditional_escape(arg) for arg in args]
    safe_kwargs = dict((key, conditional_escape(val))
                       for key, val in six.iteritems(kwargs))
    return mark_safe(format_string.format(*safe_args, **safe_kwargs))
|
|
|
|
|
|
|
|
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments
    that need to be formatted using the same format string, and then joined
    using 'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of
    'args' that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    escaped_sep = conditional_escape(sep)
    fragments = (format_html(format_string, *tuple(args))
                 for args in args_generator)
    return mark_safe(escaped_sep.join(fragments))
|
|
|
|
|
|
|
|
|
2007-11-14 20:58:53 +08:00
|
|
|
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    value = normalize_newlines(value)
    rendered = []
    # Two-or-more newlines separate paragraphs; single newlines become <br />.
    for para in re.split('\n{2,}', value):
        if autoescape:
            para = escape(para)
        rendered.append('<p>%s</p>' % para.replace('\n', '<br />'))
    return '\n\n'.join(rendered)
linebreaks = allow_lazy(linebreaks, six.text_type)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
|
|
|
def strip_tags(value):
    """
    Returns the given HTML with all tags stripped.

    NOTE: this is a simple regex strip, not a sanitizer — it must not be
    relied on to make untrusted HTML safe for output.
    """
    return re.sub(r'<[^>]*?>', '', force_text(value))
# Pass six.text_type for consistency with every other allow_lazy wrapper in
# this module (the argument was previously omitted here).
strip_tags = allow_lazy(strip_tags, six.text_type)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2012-08-18 20:53:22 +08:00
|
|
|
def remove_tags(html, tags):
    """Returns the given HTML with given tags removed.

    'tags' is a space-separated string of tag names.
    """
    group = '(%s)' % '|'.join(re.escape(tag) for tag in tags.split())
    # Opening tags may be self-closing ('<br/>') or carry attributes.
    open_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % group, re.U)
    close_re = re.compile('</%s>' % group)
    stripped = open_re.sub('', html)
    return close_re.sub('', stripped)
|
2012-08-18 23:47:21 +08:00
|
|
|
# Wrap so remove_tags also works on lazy strings (django.utils.functional).
remove_tags = allow_lazy(remove_tags, six.text_type)
|
2012-08-18 20:53:22 +08:00
|
|
|
|
2006-01-15 09:51:30 +08:00
|
|
|
def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    text = force_text(value)
    # Collapse any run of whitespace sitting between '>' and '<'.
    return re.sub(r'>\s+<', '><', text)
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
|
2006-01-15 09:51:30 +08:00
|
|
|
|
2005-07-13 09:25:57 +08:00
|
|
|
def strip_entities(value):
    """Returns the given HTML with all entities (&something;) stripped."""
    text = force_text(value)
    # Entities are '&name;' or numeric '&#123;' forms.
    return re.sub(r'&(?:\w+|#\d+);', '', text)
strip_entities = allow_lazy(strip_entities, six.text_type)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
|
|
|
def fix_ampersands(value):
    """Returns the given HTML with all unencoded ampersands encoded correctly."""
    # Replace every bare '&' (one not already starting an entity, per
    # unencoded_ampersands_re) with '&amp;'. The previous content had the
    # replacement entity-decoded to '&', making this a no-op.
    return unencoded_ampersands_re.sub('&amp;', force_text(value))
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2012-01-08 02:15:28 +08:00
|
|
|
def smart_urlquote(url):
    """Quotes a URL if it isn't already quoted.

    Also converts an internationalized domain name (IDN) in the netloc to
    its ASCII-compatible (ACE / punycode) form before quoting.
    """
    # Handle IDN before quoting.
    scheme, netloc, path, query, fragment = urlsplit(url)
    try:
        netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
    except UnicodeError: # invalid domain part
        pass
    else:
        url = urlunsplit((scheme, netloc, path, query, fragment))

    # A URL is considered unquoted if it contains no % characters or
    # contains a % not followed by two hexadecimal digits. See #9655.
    if '%' not in url or unquoted_percents_re.search(url):
        # See http://bugs.python.org/issue2637
        # quote() needs bytes on Python 2; the 'safe' set keeps URL
        # delimiters untouched so the overall structure is preserved.
        url = quote(force_bytes(url), safe=b'!*\'();:@&=+$,/?#[]~')

    return force_text(url)
|
2012-01-08 02:15:28 +08:00
|
|
|
|
2007-11-14 20:58:53 +08:00
|
|
|
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an
    ellipsis.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If autoescape is True, the link text and URLs will get autoescaped.
    """
    # Truncate x to 'limit' chars (with '...') when a limit is set.
    trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
    safe_input = isinstance(text, SafeData)
    # word_split_re captures the whitespace, so ''.join(words) round-trips.
    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        match = None  # NOTE(review): unused within this function.
        # Cheap pre-filter: only words that could possibly contain a URL or
        # email address get the full treatment.
        if '.' in word or '@' in word or ':' in word:
            # Deal with punctuation: split 'word' into lead + middle + trail
            # and only try to link the middle part.
            lead, middle, trail = '', word, ''
            for punctuation in TRAILING_PUNCTUATION:
                if middle.endswith(punctuation):
                    middle = middle[:-len(punctuation)]
                    trail = punctuation + trail
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead = lead + opening
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing)
                    and middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail

            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                url = smart_urlquote(middle)
            elif simple_url_2_re.match(middle):
                # Scheme-less link (e.g. 'www.example.com'): assume http.
                url = smart_urlquote('http://%s' % middle)
            elif not ':' in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    # IDN -> ASCII-compatible encoding; skip invalid domains.
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                # rel="nofollow" makes no sense on mailto: links.
                nofollow_attr = ''

            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                # No link found: escape/mark the original word as needed.
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
|
|
|
def clean_html(text):
    """
    Clean the given HTML. Specifically, do the following:
        * Convert <b> and <i> to <strong> and <em>.
        * Encode all ampersands correctly.
        * Remove all "target" attributes from <a> tags.
        * Remove extraneous HTML, such as presentational tags that open and
          immediately close and <br clear="all">.
        * Convert hard-coded bullets into HTML unordered lists.
        * Remove stuff like "<p> </p>", but only if it's at the
          bottom of the text.
    """
    # NOTE(review): redundant with the module-level import of
    # normalize_newlines; kept to leave the code byte-identical.
    from django.utils.text import normalize_newlines
    text = normalize_newlines(force_text(text))
    # Swap presentational <b>/<i> for their semantic equivalents.
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Remove all target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim stupid HTML such as <br clear="all">.
    text = html_gunk_re.sub('', text)
    # Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        # Each matched run of bullet paragraphs becomes one <ul>; the bullet
        # prefixes (from DOTS) are dropped as the <p>s turn into <li>s.
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            s = s.replace('<p>%s' % d, '<li>')
        return '<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)
    # Remove stuff like "<p> </p>", but only if it's at the bottom
    # of the text.
    text = trailing_empty_content_re.sub('', text)
    return text
clean_html = allow_lazy(clean_html, six.text_type)
|