2013-06-22 04:59:33 +08:00
|
|
|
import base64
|
2011-03-01 22:28:06 +08:00
|
|
|
import calendar
|
|
|
|
import datetime
|
2009-03-22 15:58:29 +08:00
|
|
|
import re
|
2015-03-10 08:05:13 +08:00
|
|
|
import unicodedata
|
2013-06-22 04:59:33 +08:00
|
|
|
from binascii import Error as BinasciiError
|
2011-09-10 00:18:38 +08:00
|
|
|
from email.utils import formatdate
|
2017-01-07 19:11:46 +08:00
|
|
|
from urllib.parse import (
|
2017-03-14 22:46:53 +08:00
|
|
|
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
|
|
|
|
quote_plus, scheme_chars, unquote, unquote_plus,
|
|
|
|
urlencode as original_urlencode, uses_params,
|
2017-01-07 19:11:46 +08:00
|
|
|
)
|
2007-10-31 11:59:40 +08:00
|
|
|
|
2015-01-08 02:41:29 +08:00
|
|
|
from django.core.exceptions import TooManyFieldsSent
|
2011-04-22 20:01:41 +08:00
|
|
|
from django.utils.datastructures import MultiValueDict
|
2017-01-26 21:25:15 +08:00
|
|
|
from django.utils.encoding import force_bytes
|
2015-11-07 21:30:20 +08:00
|
|
|
from django.utils.functional import keep_lazy_text
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
|
2016-09-01 21:32:20 +08:00
|
|
|
# based on RFC 7232, Appendix C
# Matches one complete entity-tag: an optional weak prefix (W/) followed by a
# quoted opaque string. Group 1 captures the whole tag including the quotes
# (and the weak prefix, if present).
ETAG_MATCH = re.compile(r'''
    \A(      # start of string and capture group
    (?:W/)?  # optional weak indicator
    "        # opening quote
    [^"]*    # any sequence of non-quote characters
    "        # end quote
    )\Z      # end of string and capture group
''', re.X)
|
2009-03-22 15:58:29 +08:00
|
|
|
|
2011-03-01 22:28:06 +08:00
|
|
|
# Lowercase three-letter month abbreviations, indexed 0-11; used by
# parse_http_date() to turn a matched month name into a month number.
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Named-group regex fragments shared by the three HTTP date formats
# allowed by RFC 7231 section 7.1.1.1.
__D = r'(?P<day>\d{2})'                # two-digit day
__D2 = r'(?P<day>[ \d]\d)'             # day, possibly space-padded (asctime)
__M = r'(?P<mon>\w{3})'                # three-letter month name
__Y = r'(?P<year>\d{4})'               # four-digit year
__Y2 = r'(?P<year>\d{2})'              # two-digit year (RFC 850)
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
# e.g. 'Sun, 06 Nov 1994 08:49:37 GMT' (the preferred format)
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
# e.g. 'Sunday, 06-Nov-94 08:49:37 GMT' (obsolete RFC 850 format)
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
# e.g. 'Sun Nov  6 08:49:37 1994' (ANSI C asctime() format)
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
|
|
|
|
|
2017-01-20 17:20:53 +08:00
|
|
|
# Delimiter characters defined by RFC 3986 section 2.2: general delimiters
# separate URI components; sub-delimiters may appear within component data.
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
|
2014-06-26 22:55:36 +08:00
|
|
|
|
2015-01-08 02:41:29 +08:00
|
|
|
# Separators between query-string fields ('&' or ';'), used by
# limited_parse_qsl() to split the raw query string.
FIELDS_MATCH = re.compile('[&;]')
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def urlquote(url, safe='/'):
    """
    Legacy alias for urllib.parse.quote(), kept for backwards compatibility
    (it existed to handle unicode on Python 2).
    """
    quoted = quote(url, safe)
    return quoted
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def urlquote_plus(url, safe=''):
    """
    Legacy alias for urllib.parse.quote_plus(), kept for backwards
    compatibility (it existed to handle unicode on Python 2).
    """
    quoted = quote_plus(url, safe)
    return quoted
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def urlunquote(quoted_url):
    """
    Legacy alias for urllib.parse.unquote(), kept for backwards compatibility
    (it existed to handle unicode on Python 2).
    """
    unquoted = unquote(quoted_url)
    return unquoted
|
2012-01-29 17:00:12 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2015-11-07 21:30:20 +08:00
|
|
|
@keep_lazy_text
def urlunquote_plus(quoted_url):
    """
    Legacy alias for urllib.parse.unquote_plus(), kept for backwards
    compatibility (it existed to handle unicode on Python 2).
    """
    unquoted = unquote_plus(quoted_url)
    return unquoted
|
2012-01-29 17:00:12 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2017-01-26 21:25:15 +08:00
|
|
|
def urlencode(query, doseq=False):
    """
    A version of Python's urllib.parse.urlencode() function that can operate
    on MultiValueDict and non-string values.
    """
    # Normalize the input to an iterable of (key, value) pairs.
    if isinstance(query, MultiValueDict):
        pairs = query.lists()
    elif hasattr(query, 'items'):
        pairs = query.items()
    else:
        pairs = query
    processed = []
    for key, value in pairs:
        if isinstance(value, (str, bytes)):
            encoded_value = value
        else:
            try:
                iterator = iter(value)
            except TypeError:
                # Non-iterable scalar (int, None, ...): pass through as-is.
                encoded_value = value
            else:
                # Consume generators and iterators, even when doseq=True, to
                # work around https://bugs.python.org/issue31706.
                encoded_value = [
                    item if isinstance(item, bytes) else str(item)
                    for item in iterator
                ]
        processed.append((key, encoded_value))
    return original_urlencode(processed, doseq)
|
Merged Unicode branch into trunk (r4952:5608). This should be fully
backwards compatible for all practical purposes.
Fixed #2391, #2489, #2996, #3322, #3344, #3370, #3406, #3432, #3454, #3492, #3582, #3690, #3878, #3891, #3937, #4039, #4141, #4227, #4286, #4291, #4300, #4452, #4702
git-svn-id: http://code.djangoproject.com/svn/django/trunk@5609 bcc190cf-cafb-0310-a4f2-bffc1f526a37
2007-07-04 20:11:04 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2007-10-31 11:59:40 +08:00
|
|
|
def cookie_date(epoch_seconds=None):
    """
    Format the time to ensure compatibility with Netscape's cookie standard.

    `epoch_seconds` is a floating point number expressed in seconds since the
    epoch, in UTC - such as that outputted by time.time(). If set to None, it
    defaults to the current time.

    Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    stamp = formatdate(epoch_seconds)
    # formatdate() returns 'Wdy, DD Mon YYYY HH:MM:SS -0000'; splice in the
    # dashes and replace the numeric zone with the literal 'GMT' suffix.
    return '%s-%s-%s GMT' % (stamp[:7], stamp[8:11], stamp[12:25])
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2007-10-31 11:59:40 +08:00
|
|
|
def http_date(epoch_seconds=None):
    """
    Format the time to match the RFC1123 date format as specified by HTTP
    RFC7231 section 7.1.1.1.

    `epoch_seconds` is a floating point number expressed in seconds since the
    epoch, in UTC - such as that outputted by time.time(). If set to None, it
    defaults to the current time.

    Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    # formatdate() with usegmt=True already produces the exact RFC1123 form.
    return formatdate(epoch_seconds, usegmt=True)
|
2008-08-01 04:47:53 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2011-03-01 22:28:06 +08:00
|
|
|
def parse_http_date(date):
    """
    Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Return an integer expressed in seconds since the epoch, in UTC.

    Raise ValueError if the string matches none of the three formats or
    contains an out-of-range date component.
    """
    # email.utils.parsedate() does the job for RFC1123 dates; unfortunately
    # RFC7231 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # RFC 7231 section 7.1.1.1: a two-digit year that appears to be
            # more than 50 years in the future MUST be interpreted as the
            # most recent past year with the same last two digits (the old
            # fixed cutoff at 70 violated this once the calendar moved on).
            current_year = datetime.datetime.utcnow().year
            current_century = current_year - (current_year % 100)
            if year - (current_year % 100) > 50:
                year += current_century - 100
            else:
                year += current_century
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        minute = int(m.group('min'))  # renamed: don't shadow builtin min()
        second = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, minute, second)
        return calendar.timegm(result.utctimetuple())
    except Exception as exc:
        raise ValueError("%r is not a valid date" % date) from exc
|
2011-03-01 22:28:06 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2011-03-01 22:28:06 +08:00
|
|
|
def parse_http_date_safe(date):
    """Same as parse_http_date, but return None if the input is invalid."""
    try:
        return parse_http_date(date)
    except Exception:
        # Swallow any parsing error and signal failure with None.
        return None
|
2011-03-01 22:28:06 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2008-08-01 04:47:53 +08:00
|
|
|
# Base 36 functions: useful for generating compact URLs
|
|
|
|
|
|
|
|
def base36_to_int(s):
    """
    Convert a base 36 string to an int. Raise ValueError if the input won't
    fit into an int.
    """
    # 13 base36 digits are sufficient to encode any 64-bit integer, so reject
    # anything longer to prevent overconsumption of server resources.
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    return int(s, 36)
|
2008-08-01 04:47:53 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2008-08-01 04:47:53 +08:00
|
|
|
def int_to_base36(i):
    """Convert an integer to a base36 string."""
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if i < 36:
        # Single-digit fast path.
        return digits[i]
    # Collect digits least-significant first, then reverse.
    chunks = []
    while i != 0:
        i, remainder = divmod(i, 36)
        chunks.append(digits[remainder])
    return ''.join(reversed(chunks))
|
2009-03-22 15:58:29 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2013-06-22 04:59:33 +08:00
|
|
|
def urlsafe_base64_encode(s):
    """
    Encode a bytestring in base64 for use in URLs. Strip any trailing equal
    signs.
    """
    encoded = base64.urlsafe_b64encode(s)
    # Drop the '=' padding (and any trailing newline) to keep URLs compact.
    return encoded.rstrip(b'\n=')
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2013-06-22 04:59:33 +08:00
|
|
|
def urlsafe_base64_decode(s):
    """
    Decode a base64 encoded string. Add back any trailing equal signs that
    might have been stripped.
    """
    s = force_bytes(s)
    # Re-append the '=' padding removed by urlsafe_base64_encode().
    padded = s + b'=' * (len(s) % 4)
    try:
        return base64.urlsafe_b64decode(padded)
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2009-03-22 15:58:29 +08:00
|
|
|
def parse_etags(etag_str):
    """
    Parse a string of ETags given in an If-None-Match or If-Match header as
    defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
    should be matched.
    """
    if etag_str.strip() == '*':
        return ['*']
    # Validate each comma-separated ETag individually; silently drop any
    # that don't match the RFC 7232 grammar.
    etags = []
    for candidate in etag_str.split(','):
        match = ETAG_MATCH.match(candidate.strip())
        if match:
            etags.append(match.group(1))
    return etags
|
2009-03-22 15:58:29 +08:00
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2016-09-01 21:32:20 +08:00
|
|
|
def quote_etag(etag_str):
    """
    If the provided string is already a quoted ETag, return it. Otherwise,
    wrap the string in quotes, making it a strong ETag.
    """
    return etag_str if ETAG_MATCH.match(etag_str) else '"%s"' % etag_str
|
2016-01-05 15:09:10 +08:00
|
|
|
|
|
|
|
|
2015-03-17 17:52:55 +08:00
|
|
|
def is_same_domain(host, pattern):
    """
    Return ``True`` if the host is either an exact match or a match
    to the wildcard pattern.

    Any pattern beginning with a period matches a domain and all of its
    subdomains. (e.g. ``.example.com`` matches ``example.com`` and
    ``foo.example.com``). Anything else is an exact string match.
    """
    if not pattern:
        # An empty pattern never matches anything.
        return False

    pattern = pattern.lower()
    if pattern == host:
        return True
    # Leading-dot wildcard: match the domain itself or any subdomain.
    if pattern.startswith('.'):
        return host.endswith(pattern) or host == pattern[1:]
    return False
|
|
|
|
|
2013-11-03 07:53:29 +08:00
|
|
|
|
2017-09-03 08:12:27 +08:00
|
|
|
def is_safe_url(url, allowed_hosts=None, require_https=False):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host and uses a safe scheme).

    Always return ``False`` on an empty url.

    If ``require_https`` is ``True``, only 'https' will be considered a valid
    scheme, as opposed to 'http' and 'https' with the default, ``False``.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    if allowed_hosts is None:
        allowed_hosts = set()
    # Chrome treats \ completely as / in paths but it could be part of some
    # basic auth credentials so we need to check both URLs.
    if not _is_safe_url(url, allowed_hosts, require_https=require_https):
        return False
    backslashless = url.replace('\\', '/')
    return _is_safe_url(backslashless, allowed_hosts, require_https=require_https)
|
2016-02-23 05:47:01 +08:00
|
|
|
|
|
|
|
|
2017-03-14 22:46:53 +08:00
|
|
|
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    scheme, netloc, url, query, fragment = _urlsplit(url, scheme, allow_fragments)
    # Split out the ;params portion only for schemes known to use it.
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return _coerce_result(ParseResult(scheme, netloc, url, params, query, fragment))
|
|
|
|
|
|
|
|
|
|
|
|
# Copied from urllib.parse.urlsplit() with
|
|
|
|
# https://github.com/python/cpython/pull/661 applied.
|
|
|
|
def _urlsplit(url, scheme='', allow_fragments=True):
|
|
|
|
"""Parse a URL into 5 components:
|
|
|
|
<scheme>://<netloc>/<path>?<query>#<fragment>
|
|
|
|
Return a 5-tuple: (scheme, netloc, path, query, fragment).
|
|
|
|
Note that we don't break the components up in smaller bits
|
|
|
|
(e.g. netloc is a single string) and we don't expand % escapes."""
|
|
|
|
url, scheme, _coerce_result = _coerce_args(url, scheme)
|
|
|
|
allow_fragments = bool(allow_fragments)
|
|
|
|
netloc = query = fragment = ''
|
|
|
|
i = url.find(':')
|
|
|
|
if i > 0:
|
|
|
|
for c in url[:i]:
|
|
|
|
if c not in scheme_chars:
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
scheme, url = url[:i].lower(), url[i + 1:]
|
|
|
|
|
|
|
|
if url[:2] == '//':
|
|
|
|
netloc, url = _splitnetloc(url, 2)
|
|
|
|
if (('[' in netloc and ']' not in netloc) or
|
|
|
|
(']' in netloc and '[' not in netloc)):
|
|
|
|
raise ValueError("Invalid IPv6 URL")
|
|
|
|
if allow_fragments and '#' in url:
|
|
|
|
url, fragment = url.split('#', 1)
|
|
|
|
if '?' in url:
|
|
|
|
url, query = url.split('?', 1)
|
|
|
|
v = SplitResult(scheme, netloc, url, query, fragment)
|
|
|
|
return _coerce_result(v)
|
|
|
|
|
|
|
|
|
2016-07-27 11:45:07 +08:00
|
|
|
def _is_safe_url(url, allowed_hosts, require_https=False):
    """
    Helper for is_safe_url(): check one URL variant.

    `url` is assumed to be non-empty and stripped (the public wrapper
    guarantees this before calling). `allowed_hosts` is a collection of
    hostnames considered safe redirect targets. The order of the checks
    below is deliberate and security-sensitive.
    """
    # Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
    if url.startswith('///'):
        return False
    try:
        url_info = _urlparse(url)
    except ValueError:  # e.g. invalid IPv6 addresses
        return False
    # Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but, a path component. However,
    # Chrome will still consider example.com to be the hostname, so we must not
    # allow this syntax.
    if not url_info.netloc and url_info.scheme:
        return False
    # Forbid URLs that start with control characters. Some browsers (like
    # Chrome) ignore quite a few control characters at the start of a
    # URL and might consider the URL as scheme relative.
    if unicodedata.category(url[0])[0] == 'C':
        return False
    scheme = url_info.scheme
    # Consider URLs without a scheme (e.g. //example.com/p) to be http.
    if not url_info.scheme and url_info.netloc:
        scheme = 'http'
    valid_schemes = ['https'] if require_https else ['http', 'https']
    # Safe iff the host (when present) is allowed AND the scheme (when
    # present) is in the accepted set.
    return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
            (not scheme or scheme in valid_schemes))
|
2015-01-08 02:41:29 +08:00
|
|
|
|
|
|
|
|
|
|
|
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
                      errors='replace', fields_limit=None):
    """
    Return a list of key/value tuples parsed from query string.

    Copied from urlparse with an additional "fields_limit" argument.
    Copyright (C) 2013 Python Software Foundation (see LICENSE.python).

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings. A
        true value indicates that blanks should be retained as blank
        strings. The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    fields_limit: maximum number of fields parsed or an exception
        is raised. None means no limit and is the default.
    """
    if fields_limit:
        # Split at most fields_limit + 1 pieces so the overflow is detectable
        # without splitting the whole (possibly huge) string.
        pairs = FIELDS_MATCH.split(qs, fields_limit)
        if len(pairs) > fields_limit:
            raise TooManyFieldsSent(
                'The number of GET/POST parameters exceeded '
                'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
            )
    else:
        pairs = FIELDS_MATCH.split(qs)
    result = []
    for field in pairs:
        if not field:
            continue
        name, sep, value = field.partition('=')
        if not sep:
            # Handle case of a control-name with no equal sign.
            if not keep_blank_values:
                continue
            value = ''
        if value or keep_blank_values:
            name = unquote(name.replace('+', ' '), encoding=encoding, errors=errors)
            value = unquote(value.replace('+', ' '), encoding=encoding, errors=errors)
            result.append((name, value))
    return result
|