2017-01-07 19:11:46 +08:00
|
|
|
import html.entities
|
2005-07-13 09:25:57 +08:00
|
|
|
import re
|
2011-07-14 21:47:10 +08:00
|
|
|
import unicodedata
|
2019-04-24 21:10:28 +08:00
|
|
|
import warnings
|
2011-07-13 17:35:51 +08:00
|
|
|
from gzip import GzipFile
|
2012-05-06 01:47:03 +08:00
|
|
|
from io import BytesIO
|
2011-07-13 17:35:51 +08:00
|
|
|
|
2019-04-24 21:10:28 +08:00
|
|
|
from django.utils.deprecation import RemovedInDjango40Warning
|
2018-07-14 16:38:18 +08:00
|
|
|
from django.utils.functional import SimpleLazyObject, keep_lazy_text, lazy
|
2019-10-26 22:42:32 +08:00
|
|
|
from django.utils.regex_helper import _lazy_re_compile
|
2017-01-27 03:58:33 +08:00
|
|
|
from django.utils.translation import gettext as _, gettext_lazy, pgettext
|
2005-12-04 20:06:16 +08:00
|
|
|
|
2013-12-26 20:46:15 +08:00
|
|
|
|
2016-11-15 01:20:44 +08:00
|
|
|
@keep_lazy_text
def capfirst(x):
    """Capitalize the first letter of a string."""
    if not x:
        # Preserve falsy inputs (e.g. '' or None) unchanged.
        return x
    x = str(x)
    return x[0].upper() + x[1:]
|
2016-11-15 01:20:44 +08:00
|
|
|
|
2005-08-02 05:29:52 +08:00
|
|
|
|
2011-07-14 21:47:10 +08:00
|
|
|
# Set up regular expressions used by Truncator and helpers below.
# re_words: an HTML tag, or (group 1) a run of non-tag, non-whitespace text.
re_words = _lazy_re_compile(r'<[^>]+?>|([^<>\s]+)', re.S)
# re_chars: an HTML tag, or (group 1) any single character.
re_chars = _lazy_re_compile(r'<[^>]+?>|(.)', re.S)
# re_tag: decomposes a tag into (closing slash, tag name, self-closing slash).
re_tag = _lazy_re_compile(r'<(/)?(\S+?)(?:(\s*/)|\s.*?)?>', re.S)
re_newlines = _lazy_re_compile(r'\r\n|\r')  # Used in normalize_newlines
# re_camel_case: matches an uppercase letter that starts a new "word" inside
# a CamelCase identifier (used by camel_case_to_spaces).
re_camel_case = _lazy_re_compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
|
2011-07-14 21:47:10 +08:00
|
|
|
|
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks. Expects that
    existing line breaks are posix newlines.

    Preserve all white space except added line breaks consume the space on
    which they break the line.

    Don't wrap long words, thus the output text may have lines longer than
    ``width``.
    """
    def _generator():
        for line in text.splitlines(True):  # True keeps trailing linebreaks
            # The historical expression
            #     min((line.endswith('\n') and width + 1 or width), width)
            # always evaluates to width (min(width + 1, width) == width), so
            # the intended trailing-newline allowance was dead code. Use
            # width directly; behavior is unchanged. Note this means a line
            # of exactly `width` visible characters plus a trailing '\n' is
            # still wrapped (the '\n' counts toward the length check).
            while len(line) > width:
                # Break at the last space within the first width+1 chars.
                space = line[:width + 1].rfind(' ') + 1
                if space == 0:
                    # No space in the window: fall back to the first space
                    # anywhere, i.e. long words are never broken.
                    space = line.find(' ') + 1
                    if space == 0:
                        # No spaces at all: emit the long line unchanged.
                        yield line
                        line = ''
                        break
                # The space at the break point is consumed by the added '\n'.
                yield '%s\n' % line[:space - 1]
                line = line[space:]
            if line:
                yield line
    return ''.join(_generator())
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2010-08-07 22:57:24 +08:00
|
|
|
|
2011-07-14 21:47:10 +08:00
|
|
|
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.
    """
    def __init__(self, text):
        # Defer str() coercion of `text` until a truncation method runs.
        super().__init__(lambda: str(text))

    def add_truncation_text(self, text, truncate=None):
        # Combine `text` with the truncation marker. `truncate` may be a
        # template containing %(truncated_text)s; otherwise it is appended.
        if truncate is None:
            # Default marker: the translated ellipsis template.
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s…')
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)

    def chars(self, num, truncate=None, html=False):
        """
        Return the text truncated to be no longer than the specified number
        of characters.

        `truncate` specifies what should be used to notify that the string has
        been truncated, defaulting to a translatable string of an ellipsis.
        """
        self._setup()
        length = int(num)
        # NFC-normalize so combining sequences are counted consistently.
        text = unicodedata.normalize('NFC', self._wrapped)

        # Calculate the length to truncate to (max length - end_text length)
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            # Combining characters don't add to the visible length.
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        if html:
            return self._truncate_html(length, truncate, text, truncate_len, False)
        return self._text_chars(length, truncate, text, truncate_len)

    def _text_chars(self, length, truncate, text, truncate_len):
        """Truncate a string after a certain number of chars."""
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                # Remember where the truncation marker would be inserted.
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)

        # Return the original string since no truncation was necessary
        return text

    def words(self, num, truncate=None, html=False):
        """
        Truncate a string after a certain number of words. `truncate` specifies
        what should be used to notify that the string has been truncated,
        defaulting to ellipsis.
        """
        self._setup()
        length = int(num)
        if html:
            return self._truncate_html(length, truncate, self._wrapped, length, True)
        return self._text_words(length, truncate)

    def _text_words(self, length, truncate):
        """
        Truncate a string after a certain number of words.

        Strip newlines in the string.
        """
        # str.split() with no args collapses all whitespace (incl. newlines).
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)

    def _truncate_html(self, length, truncate, text, truncate_len, words):
        """
        Truncate HTML to a certain number of chars (not counting tags and
        comments), or, if words is True, then to a certain number of words.
        Close opened tags if they were correctly closed in the given HTML.

        Preserve newlines in the HTML.
        """
        if words and length <= 0:
            return ''

        # Void elements that never take a closing tag.
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )

        # Count non-HTML chars/words and keep note of open tags
        pos = 0
        end_text_pos = 0
        current_len = 0
        open_tags = []

        # re_words matches whole words; re_chars matches single characters.
        regex = re_words if words else re_chars

        while current_len <= length:
            m = regex.search(text, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m[1]:
                # It's an actual non-HTML word or char
                current_len += 1
                if current_len == truncate_len:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m[0])
            if not tag or current_len >= truncate_len:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)

        if current_len <= length:
            # Never reached the limit: return the input unmodified.
            return text
        out = text[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def get_valid_filename(s):
    """
    Turn the given value into a string usable as a clean filename: strip
    leading/trailing whitespace, turn interior spaces into underscores, and
    drop every character that isn't alphanumeric, a dash, an underscore, or
    a dot.

    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    cleaned = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy('or')):
    """
    Join the string forms of the items with commas, using ``last_word``
    before the final item.

    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    if not list_:
        return ''
    if len(list_) == 1:
        return str(list_[0])
    # Translators: This string is used as a separator between list elements
    separator = _(', ')
    head = separator.join(str(item) for item in list_[:-1])
    return '%s %s %s' % (head, str(last_word), str(list_[-1]))
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def normalize_newlines(text):
    """Normalize CRLF and CR newlines to just LF."""
    text = str(text)
    return re_newlines.sub('\n', text)
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def phone2numeric(phone):
    """Convert a phone number with letters into its numeric equivalent."""
    # Standard telephone keypad mapping; non-letters pass through unchanged.
    keypad = {
        'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4',
        'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6',
        'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8',
        'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
    }
    return ''.join(keypad.get(letter, letter) for letter in phone.lower())
|
2005-07-19 01:23:04 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2005-07-19 01:23:04 +08:00
|
|
|
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
|
|
|
|
# Used with permission.
|
|
|
|
def compress_string(s):
    """Gzip-compress the bytestring *s* and return the compressed bytes."""
    buf = BytesIO()
    # mtime=0 keeps the output deterministic for identical input.
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as gz:
        gz.write(s)
    return buf.getvalue()
|
2005-12-04 20:06:16 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2018-10-04 20:28:03 +08:00
|
|
|
class StreamingBuffer(BytesIO):
    """A BytesIO that drains its contents each time read() is called."""
    def read(self):
        # Grab everything written so far, then reset to an empty buffer.
        data = self.getvalue()
        self.seek(0)
        self.truncate()
        return data
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2012-10-20 23:40:14 +08:00
|
|
|
# Like compress_string, but for iterators of strings.
|
|
|
|
def compress_sequence(sequence):
    """
    Gzip-compress an iterable of bytestrings incrementally, yielding
    compressed chunks as they become available.
    """
    buf = StreamingBuffer()
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as gz_file:
        # Output headers...
        yield buf.read()
        for item in sequence:
            gz_file.write(item)
            chunk = buf.read()
            if chunk:
                yield chunk
    # Closing the GzipFile flushes the remaining compressed data.
    yield buf.read()
|
|
|
|
|
2005-12-04 20:06:16 +08:00
|
|
|
|
2009-04-10 12:13:27 +08:00
|
|
|
# Expression to match some_token and some_token="with spaces" (and similarly
|
|
|
|
# for single-quoted strings).
|
2019-10-26 22:42:32 +08:00
|
|
|
smart_split_re = _lazy_re_compile(r"""
|
2010-02-25 04:52:14 +08:00
|
|
|
((?:
|
|
|
|
[^\s'"]*
|
|
|
|
(?:
|
|
|
|
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
|
|
|
|
[^\s'"]*
|
|
|
|
)+
|
|
|
|
) | \S+)
|
|
|
|
""", re.VERBOSE)
|
2009-04-10 12:13:27 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2006-06-07 14:08:23 +08:00
|
|
|
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).

    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    normalized = str(text)
    for match in smart_split_re.finditer(normalized):
        yield match[0]
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2008-07-01 23:10:51 +08:00
|
|
|
def _replace_entity(match):
    """
    Return the character for a matched HTML entity reference, or the whole
    match unchanged if it isn't a recognizable entity.
    """
    text = match[1]
    if text[0] != '#':
        # Named entity, e.g. &amp;
        try:
            return chr(html.entities.name2codepoint[text])
        except KeyError:
            return match[0]
    # Numeric character reference: hex (&#x..;) or decimal (&#..;).
    number = text[1:]
    try:
        if number[0] in 'xX':
            codepoint = int(number[1:], 16)
        else:
            codepoint = int(number)
        return chr(codepoint)
    except ValueError:
        # Not a valid number or not a valid codepoint for chr().
        return match[0]
|
2008-07-01 23:10:51 +08:00
|
|
|
|
2016-11-13 01:11:23 +08:00
|
|
|
|
2019-10-26 22:42:32 +08:00
|
|
|
# Matches an HTML character reference: a named entity (&amp;), a decimal
# reference (&#38;), or a hex reference (&#x26; / &#X26;).
_entity_re = _lazy_re_compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
|
2008-07-01 23:10:51 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def unescape_entities(text):
    # Deprecated shim: emits the deprecation warning, then delegates to the
    # module-level entity regex/replacer.
    warnings.warn(
        'django.utils.text.unescape_entities() is deprecated in favor of '
        'html.unescape().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    text = str(text)
    return _entity_re.sub(_replace_entity, text)
|
2009-03-23 17:40:25 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::

        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"

    Raise ValueError if the input isn't a quoted string literal.
    """
    # The `not s` guard prevents s[0] from raising IndexError on an empty
    # string; an empty input now raises the documented ValueError instead.
    if not s or s[0] not in "\"'" or s[-1] != s[0]:
        raise ValueError("Not a string literal: %r" % s)
    quote = s[0]
    return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
|
2012-08-18 20:53:22 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2018-07-14 16:38:18 +08:00
|
|
|
@keep_lazy_text
def slugify(value, allow_unicode=False):
    """
    Turn the value into a URL slug: lowercase, with runs of whitespace and
    dashes collapsed to a single dash, and with every character removed that
    isn't alphanumeric, an underscore, or a hyphen. When 'allow_unicode' is
    False the value is first reduced to ASCII. Leading/trailing whitespace,
    dashes, and underscores are stripped.
    """
    value = str(value)
    if not allow_unicode:
        # Decompose accented characters, then drop everything non-ASCII.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    else:
        value = unicodedata.normalize('NFKC', value)
    value = re.sub(r'[^\w\s-]', '', value.lower())
    return re.sub(r'[-\s]+', '-', value).strip('-_')
|
2013-12-26 20:46:15 +08:00
|
|
|
|
|
|
|
|
|
|
|
def camel_case_to_spaces(value):
    """
    Split CamelCase and convert to lowercase. Strip surrounding whitespace.
    """
    spaced = re_camel_case.sub(r' \1', value)
    return spaced.strip().lower()
|
2016-08-25 00:18:17 +08:00
|
|
|
|
|
|
|
|
|
|
|
def _format_lazy(format_string, *args, **kwargs):
    """
    Apply str.format() on 'format_string', where any of format_string, args,
    and kwargs may be lazy objects.
    """
    result = format_string.format(*args, **kwargs)
    return result
|
2016-11-13 01:11:23 +08:00
|
|
|
|
|
|
|
|
2016-12-29 23:27:49 +08:00
|
|
|
# Lazy wrapper around _format_lazy(); lazy() (django.utils.functional)
# defers the format call, declaring str as the result type.
format_lazy = lazy(_format_lazy, str)
|