"HTML utilities suitable for global use."
import re, string
# Configuration for urlize() function
LEADING_PUNCTUATION  = ['(', '<', '&lt;']
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;']
# list of possible strings used for bullets in bulleted lists
DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
    ('|'.join([re.escape(p) for p in LEADING_PUNCTUATION]), '|'.join([re.escape(p) for p in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(d) for d in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
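
# Illustrative behaviour of punctuation_re: it splits a word into leading
# punctuation, the core word and trailing punctuation, which urlize() below
# relies on (expected groups, shown as a sketch rather than a verified doctest):
#
#   >>> punctuation_re.match('(www.example.com),').groups()
#   ('(', 'www.example.com', '),')
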
def linebreaks(value):
    "Converts newlines into <p> and <br />s"
    value = re.sub(r'\r\n|\r|\n', '\n', value) # normalize newlines
    paras = re.split('\n{2,}', value)
    paras = ['<p>%s</p>' % p.strip().replace('\n', '<br />') for p in paras]
    return '\n\n'.join(paras)
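
# Illustrative usage of linebreaks() -- expected output, shown as a sketch
# rather than a verified doctest:
#
#   >>> linebreaks('First paragraph.\n\nSecond paragraph,\nsecond line.')
#   '<p>First paragraph.</p>\n\n<p>Second paragraph,<br />second line.</p>'
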
def strip_tags(value):
    "Returns the given HTML with all tags stripped"
    return re.sub(r'<[^>]*?>', '', value)
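
# Illustrative usage of strip_tags() -- expected output, not a verified doctest:
#
#   >>> strip_tags('<p>Hello <b>world</b></p>')
#   'Hello world'
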
def strip_spaces_between_tags(value):
    "Returns the given HTML with spaces between tags normalized to a single space"
    return re.sub(r'>\s+<', '> <', value)
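
# Illustrative usage of strip_spaces_between_tags() -- expected output, not a
# verified doctest:
#
#   >>> strip_spaces_between_tags('<ul>\n  <li>one</li>\n</ul>')
#   '<ul> <li>one</li> </ul>'
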
def strip_entities(value):
    "Returns the given HTML with all entities (&something;) stripped"
    return re.sub(r'&(?:\w+|#\d+);', '', value)
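
# Illustrative usage of strip_entities() -- only the entity itself is removed,
# so surrounding whitespace remains (expected output, not a verified doctest):
#
#   >>> strip_entities('Tom &amp; Jerry')
#   'Tom  Jerry'
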
def fix_ampersands(value):
    "Returns the given HTML with all unencoded ampersands encoded correctly"
    return unencoded_ampersands_re.sub('&amp;', value)
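
# Illustrative usage of fix_ampersands() -- bare ampersands are encoded while
# existing entities are left alone (expected output, not a verified doctest):
#
#   >>> fix_ampersands('Tom & Jerry &amp; friends')
#   'Tom &amp; Jerry &amp; friends'
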
def urlize(text, trim_url_limit=None, nofollow=False):
    """
    Converts any URLs in text into clickable links. Works on http://, https:// and
    www. links. Links can have trailing punctuation (periods, commas, close-parens)
    and leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text will be limited to
    trim_url_limit characters.

    If nofollow is True, the URLs in link text will get a rel="nofollow" attribute.
    """
    trim_url = lambda x, limit=trim_url_limit: limit is not None and (x[:limit] + (len(x) >= limit and '...' or '')) or x
    words = word_split_re.split(text)
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            if middle.startswith('www.') or ('@' not in middle and not middle.startswith('http://') and \
                    len(middle) > 0 and middle[0] in string.letters + string.digits and \
                    (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
                middle = '<a href="http://%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle))
            if middle.startswith('http://') or middle.startswith('https://'):
                middle = '<a href="%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle))
            if '@' in middle and not middle.startswith('www.') and not ':' in middle \
                    and simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return ''.join(words)
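
# Illustrative usage of urlize() -- roughly the expected output, shown here as a
# sketch rather than a verified doctest:
#
#   >>> urlize('See www.example.com or http://example.org, thanks.')
#   'See <a href="http://www.example.com">www.example.com</a> or <a href="http://example.org">http://example.org</a>, thanks.'
#
#   >>> urlize('Mail info@example.com')
#   'Mail <a href="mailto:info@example.com">info@example.com</a>'
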
def clean_html(text):
    """
    Cleans the given HTML. Specifically, it does the following:
        * Converts <b> and <i> to <strong> and <em>.
        * Encodes all ampersands correctly.
        * Removes all "target" attributes from <a> tags.
        * Removes extraneous HTML, such as presentational tags that open and
          immediately close and <br clear="all">.
        * Converts hard-coded bullets into HTML unordered lists.
        * Removes stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the
          bottom of the text.
    """
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Remove all "target" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim extraneous HTML such as <br clear="all">.
    text = html_gunk_re.sub('', text)
    # Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            s = s.replace('<p>%s' % d, '<li>')
        return '<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)
    # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom of the text.
    text = trailing_empty_content_re.sub('', text)
    return text