"""
|
|
|
|
Multi-part parsing for file uploads.
|
|
|
|
|
|
|
|
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
|
|
|
|
file upload handlers for processing.
|
|
|
|
"""
|
2012-08-15 02:51:50 +08:00
|
|
|
import base64
|
2013-09-30 23:55:14 +08:00
|
|
|
import binascii
|
2008-07-01 23:10:51 +08:00
|
|
|
import cgi
|
2019-01-15 21:11:02 +08:00
|
|
|
import collections
|
2019-04-24 21:10:28 +08:00
|
|
|
import html
|
2017-01-07 19:11:46 +08:00
|
|
|
from urllib.parse import unquote
|
2012-08-15 02:51:50 +08:00
|
|
|
|
2008-07-01 23:10:51 +08:00
|
|
|
from django.conf import settings
|
2015-01-08 02:41:29 +08:00
|
|
|
from django.core.exceptions import (
|
|
|
|
RequestDataTooBig,
|
|
|
|
SuspiciousMultipartForm,
|
|
|
|
TooManyFieldsSent,
|
2015-01-28 20:35:27 +08:00
|
|
|
)
|
|
|
|
from django.core.files.uploadhandler import SkipFile, StopFutureHandlers, StopUpload
|
2008-07-01 23:10:51 +08:00
|
|
|
from django.utils.datastructures import MultiValueDict
|
2017-01-26 17:08:08 +08:00
|
|
|
from django.utils.encoding import force_str
|
2008-07-01 23:10:51 +08:00
|
|
|
|
2008-07-22 11:26:25 +08:00
|
|
|
__all__ = ("MultiPartParser", "MultiPartParserError", "InputStreamExhausted")
|
2008-07-01 23:10:51 +08:00
|
|
|
|
2013-11-03 04:12:09 +08:00
|
|
|
|
2008-07-01 23:10:51 +08:00
|
|
|
class MultiPartParserError(Exception):
|
|
|
|
pass
|
|
|
|
|
2013-11-03 04:12:09 +08:00
|
|
|
|
2008-07-01 23:10:51 +08:00
|
|
|
class InputStreamExhausted(Exception):
|
|
|
|
"""
|
|
|
|
No more reads are allowed from this device.
|
|
|
|
"""
|
2022-02-08 19:09:55 +08:00
|
|
|
|
2008-07-01 23:10:51 +08:00
|
|
|
pass
|
|
|
|
|
2016-11-13 01:11:23 +08:00
|
|
|
|
2008-07-01 23:10:51 +08:00
|
|
|
RAW = "raw"
|
|
|
|
FILE = "file"
|
|
|
|
FIELD = "field"
|
|
|
|
|
2013-11-03 04:12:09 +08:00
|
|
|
|
2017-01-19 15:39:46 +08:00
|
|
|
class MultiPartParser:
|
2008-07-01 23:10:51 +08:00
|
|
|
"""
|
|
|
|
A rfc2388 multipart/form-data parser.
|
|
|
|
|
|
|
|
``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
|
2011-06-28 18:17:56 +08:00
|
|
|
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
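
    A minimal usage sketch (``request`` here is assumed to be a request-like
    object exposing ``META``, a file-like body, and ``upload_handlers``, which
    is how Django's ``HttpRequest`` drives this class internally)::

        parser = MultiPartParser(request.META, request, request.upload_handlers)
        post, files = parser.parse()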
    """

    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
        :encoding:
            The encoding with which to treat the incoming data.
        """
        # Content-Type should contain multipart and the boundary information.
        content_type = META.get("CONTENT_TYPE", "")
        if not content_type.startswith("multipart/"):
            raise MultiPartParserError("Invalid Content-Type: %s" % content_type)

        # Parse the header to get the boundary to split the parts.
        try:
            ctypes, opts = parse_header(content_type.encode("ascii"))
        except UnicodeEncodeError:
            raise MultiPartParserError(
                "Invalid non-ASCII Content-Type in multipart: %s"
                % force_str(content_type)
            )
        boundary = opts.get("boundary")
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError(
                "Invalid boundary in multipart: %s" % force_str(boundary)
            )

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get("CONTENT_LENGTH", 0))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, str):
            boundary = boundary.encode("ascii")
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2**31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.
        """
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # The HTTP spec says that Content-Length >= 0 is valid, so handle
        # Content-Length == 0 before continuing.
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None
        # Whether a file upload is finished.
        uploaded_file = True

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None
                    uploaded_file = True

                try:
                    disposition = meta_data["content-disposition"][1]
                    field_name = disposition["name"].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get("content-transfer-encoding")
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_str(field_name, encoding, errors="replace")

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (
                        settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                        and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys
                    ):
                        raise TooManyFieldsSent(
                            "The number of GET/POST parameters exceeded "
                            "settings.DATA_UPLOAD_MAX_NUMBER_FIELDS."
                        )

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = (
                            settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read
                        )

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == "base64":
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except binascii.Error:
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (
                        settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
                        and num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE
                    ):
                        raise RequestDataTooBig(
                            "Request body exceeded "
                            "settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
                        )

                    self._post.appendlist(
                        field_name, force_str(data, encoding, errors="replace")
                    )
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get("filename")
                    if file_name:
                        file_name = force_str(file_name, encoding, errors="replace")
                        file_name = self.sanitize_file_name(file_name)
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get(
                        "content-type", ("", {})
                    )
                    content_type = content_type.strip()
                    charset = content_type_extra.get("charset")

                    try:
                        content_length = int(meta_data.get("content-length")[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    uploaded_file = False
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name,
                                    file_name,
                                    content_type,
                                    content_length,
                                    charset,
                                    content_type_extra,
                                )
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == "base64":
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by
                                # multiple of 4, ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    if not over_chunk:
                                        break
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as exc:
                                    # Since this is only a chunk, any error is
                                    # an unfixable error.
                                    raise MultiPartParserError(
                                        "Could not decode base64 data."
                                    ) from exc

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            if not uploaded_file:
                for handler in handlers:
                    handler.upload_interrupted()
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        # any() shortcircuits if a handler's upload_complete() returns a value.
        any(handler.upload_complete() for handler in handlers)
        self._post._mutable = False
        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_str(old_field_name, self._encoding, errors="replace"),
                    file_obj,
                )
                break

    def sanitize_file_name(self, file_name):
        """
        Sanitize the filename of an upload.

        Remove all possible path separators, even though that might remove more
        than actually required by the target system. Filenames that could
        potentially cause problems (current/parent dir) are also discarded.

        It should be noted that this function could still return a "filepath"
        like "C:some_file.txt" which is handled later on by the storage layer.
        So while this function does sanitize filenames to some extent, the
        resulting filename should still be considered as untrusted user input.
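
        For illustration, with hypothetical filenames::

            self.sanitize_file_name("path/to/file.txt")  # -> "file.txt"
            self.sanitize_file_name("..")  # -> None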
        """
        file_name = html.unescape(file_name)
        file_name = file_name.rsplit("/")[-1]
        file_name = file_name.rsplit("\\")[-1]

        if file_name in {"", ".", ".."}:
            return None
        return file_name

    IE_sanitize = sanitize_file_name

    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that...
        # (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, "file"):
                handler.file.close()


class LazyStream:
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.

    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
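
    A small illustrative sketch::

        stream = LazyStream(iter([b"abcdef"]))
        stream.read(3)  # -> b"abc"
        stream.unget(b"abc")
        stream.read()  # -> b"abcdef"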
    """

    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.

        A producer is an iterable that yields a bytestring each time it is
        advanced.
        """
        self._producer = producer
        self._empty = False
        self._leftover = b""
        self.length = length
        self.position = 0
        self._remaining = length
        self._unget_history = []

    def tell(self):
        return self.position

    def read(self, size=None):
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b"".join(self)
                return

            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, "remaining bytes to read should never go negative"

                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting

        return b"".join(parts())

    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.

        Return whatever chunk is conveniently returned from the iterator.
        Useful to avoid unnecessary bookkeeping if performance is an issue.
        """
        if self._leftover:
            output = self._leftover
            self._leftover = b""
        else:
            output = next(self._producer)
            self._unget_history = []
        self.position += len(output)
        return output

    def close(self):
        """
        Used to invalidate/disable this lazy stream.

        Replace the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []

    def __iter__(self):
        return self

    def unget(self, bytes):
        """
        Place bytes back onto the front of the lazy stream.

        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = bytes + self._leftover

    def _update_unget_history(self, num_bytes):
        """
        Update the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len(
            [
                current_number
                for current_number in self._unget_history
                if current_number == num_bytes
            ]
        )

        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )


class ChunkIter:
"""
|
|
|
|
An iterable that will yield chunks of data. Given a file-like object as the
|
2017-01-25 05:23:56 +08:00
|
|
|
constructor, yield chunks of read operations from that object.
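
    A small illustrative sketch, using an in-memory file object::

        from io import BytesIO

        list(ChunkIter(BytesIO(b"a" * 5000), chunk_size=2048))
        # -> [b"a" * 2048, b"a" * 2048, b"a" * 904]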
    """

    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size

    def __next__(self):
        try:
            data = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            raise StopIteration()
        if data:
            return data
        else:
            raise StopIteration()

    def __iter__(self):
        return self


class InterBoundaryIter:
    """
    A Producer that will iterate over boundaries.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            raise StopIteration()


class BoundaryIter:
    """
    A Producer that is sensitive to boundaries.

    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.

    Future calls to next() after locating the boundary will raise a
    StopIteration exception.
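
    A small sketch of the behavior (note that the boundary is passed with its
    leading ``--``, as ``Parser`` does internally)::

        stream = LazyStream(iter([b"some data--boundarymore data"]))
        list(BoundaryIter(stream, b"--boundary"))  # -> [b"some data"]
        stream.read()  # -> b"more data"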
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6

        # Probe the stream: if it is already exhausted, raise
        # InputStreamExhausted before any boundary searching starts.
        unused_char = self._stream.read(1)
        if not unused_char:
            raise InputStreamExhausted()
        self._stream.unget(unused_char)

    def __iter__(self):
        return self

    def __next__(self):
        if self._done:
            raise StopIteration()

        stream = self._stream
        rollback = self._rollback

        bytes_read = 0
        chunks = []
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            self._done = True

        if not chunks:
            raise StopIteration()

        chunk = b"".join(chunks)
        boundary = self._find_boundary(chunk)

        if boundary:
            end, next = boundary
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]

    def _find_boundary(self, data):
        """
        Find a multipart boundary in data.

        Should no boundary exist in the data, return None. Otherwise, return
        a tuple containing the indices of the following:
         * the end of current encapsulation
         * the start of the next encapsulation
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last : last + 1] == b"\n":
                end -= 1
            last = max(0, end - 1)
            if data[last : last + 1] == b"\r":
                end -= 1
            return end, next


def exhaust(stream_or_iterable):
    """Exhaust an iterator or stream."""
    try:
        iterator = iter(stream_or_iterable)
    except TypeError:
        iterator = ChunkIter(stream_or_iterable, 16384)
    collections.deque(iterator, maxlen=0)  # consume iterator quickly.


def parse_boundary_stream(stream, max_header_size):
    """
    Parse one and exactly one stream that encapsulates a boundary.
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)

    # 'find' returns the start of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b"\r\n\r\n")

    def _parse_header(line):
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(":", 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)

    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)

    header = chunk[:header_end]

    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4 :])

    TYPE = RAW
    outdict = {}

    # Eliminate blank lines
    for line in header.split(b"\r\n"):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue

        if name == "content-disposition":
            TYPE = FIELD
            if params.get("filename"):
                TYPE = FILE

        outdict[name] = value, params

    if TYPE == RAW:
        stream.unget(chunk)

    return (TYPE, outdict, stream)


class Parser:
    def __init__(self, stream, boundary):
        self._stream = stream
        self._separator = b"--" + boundary

    def __iter__(self):
        boundarystream = InterBoundaryIter(self._stream, self._separator)
        for sub_stream in boundarystream:
            # Iterate over each part
            yield parse_boundary_stream(sub_stream, 1024)


def parse_header(line):
"""
|
|
|
|
Parse the header into a key-value.
|
|
|
|
|
2017-01-21 05:04:05 +08:00
|
|
|
Input (line): bytes, output: str for key/name, bytes for values which
|
2016-06-02 02:27:20 +08:00
|
|
|
will be decoded later.
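
    For example::

        parse_header(b"multipart/form-data; boundary=something")
        # -> ('multipart/form-data', {'boundary': b'something'})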
    """
    plist = _parse_header_params(b";" + line)
    key = plist.pop(0).lower().decode("ascii")
    pdict = {}
    for p in plist:
        i = p.find(b"=")
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode("ascii")
            if name.endswith("*"):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    has_encoding = True
            value = p[i + 1 :].strip()
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                value = value[1:-1]
                value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
            if has_encoding:
                encoding, lang, value = value.split(b"'")
                value = unquote(value.decode(), encoding=encoding.decode())
            pdict[name] = value
    return key, pdict


def _parse_header_params(s):
    plist = []
    while s[:1] == b";":
        s = s[1:]
        end = s.find(b";")
        while end > 0 and s.count(b'"', 0, end) % 2:
            end = s.find(b";", end + 1)
        if end < 0:
            end = len(s)
        f = s[:end]
        plist.append(f.strip())
        s = s[end:]
    return plist