2005-11-28 01:20:43 +08:00
|
|
|
"Misc. utility functions/classes for admin documentation generator."
|
2005-07-13 09:25:57 +08:00
|
|
|
|
|
|
|
import re
|
2011-09-10 00:18:38 +08:00
|
|
|
from email.errors import HeaderParseError
|
2015-01-28 20:35:27 +08:00
|
|
|
from email.parser import HeaderParser
|
2011-09-10 00:18:38 +08:00
|
|
|
|
2015-12-30 23:51:16 +08:00
|
|
|
from django.urls import reverse
|
2012-08-29 02:59:56 +08:00
|
|
|
from django.utils.encoding import force_bytes
|
2015-01-28 20:35:27 +08:00
|
|
|
from django.utils.safestring import mark_safe
|
|
|
|
|
2005-11-28 01:20:43 +08:00
|
|
|
# docutils is an optional dependency. Record whether it imported so the
# reST helpers below can be guarded (see the registration at module bottom).
try:
    import docutils.core
    import docutils.nodes
    import docutils.parsers.rst.roles
except ImportError:
    docutils_is_available = False
else:
    docutils_is_available = True
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2013-10-31 23:42:28 +08:00
|
|
|
|
2005-07-13 09:25:57 +08:00
|
|
|
def trim_docstring(docstring):
    """
    Uniformly trim leading/trailing whitespace from docstrings.

    Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    """
    if not docstring or not docstring.strip():
        return ''
    # Convert tabs to spaces and split into lines.
    lines = docstring.expandtabs().splitlines()
    # Compute the common indent over lines[1:], NOT all lines: the first line
    # sits right after the opening quotes and usually has no indentation, so
    # including it would force the indent to 0 and leave every other line
    # indented. default=0 covers one-line docstrings.
    indent = min(
        (len(line) - len(line.lstrip()) for line in lines[1:] if line.lstrip()),
        default=0,
    )
    trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
    return "\n".join(trimmed).strip()
|
|
|
|
|
2013-10-31 23:42:28 +08:00
|
|
|
|
2005-07-13 09:25:57 +08:00
|
|
|
def parse_docstring(docstring):
    """
    Parse out the parts of a docstring. Return (title, body, metadata).

    The docstring is split on blank lines; the first chunk is the title. If
    the final chunk parses as RFC 822-style headers, it becomes the metadata
    dict and is excluded from the body.
    """
    docstring = trim_docstring(docstring)
    parts = re.split(r'\n{2,}', docstring)
    title = parts[0]
    if len(parts) == 1:
        return title, '', {}
    try:
        parsed = HeaderParser().parsestr(parts[-1])
    except HeaderParseError:
        # Last chunk isn't header-like: everything after the title is body.
        return title, "\n\n".join(parts[1:]), {}
    metadata = dict(parsed.items())
    # Only strip the final chunk from the body if it actually held headers.
    body_chunks = parts[1:-1] if metadata else parts[1:]
    return title, "\n\n".join(body_chunks), metadata
|
|
|
|
|
2013-10-31 23:42:28 +08:00
|
|
|
|
2009-05-07 20:52:02 +08:00
|
|
|
def parse_rst(text, default_reference_context, thing_being_parsed=None):
    """
    Convert the string from reST to an XHTML fragment.
    """
    overrides = dict(
        doctitle_xform=True,
        initial_header_level=3,
        default_reference_context=default_reference_context,
        link_base=reverse('django-admindocs-docroot').rstrip('/'),
        raw_enabled=False,
        file_insertion_enabled=False,
    )
    if thing_being_parsed:
        thing_being_parsed = force_bytes('<%s>' % thing_being_parsed)
    # Wrap ``text`` in some reST that sets the default role to ``cmsreference``,
    # then restores it.
    source = """
.. default-role:: cmsreference

%s

.. default-role::
"""
    parts = docutils.core.publish_parts(
        source % text,
        source_path=thing_being_parsed,
        destination_path=None,
        writer_name='html',
        settings_overrides=overrides,
    )
    return mark_safe(parts['fragment'])
|
2005-07-13 09:25:57 +08:00
|
|
|
|
2016-11-13 01:11:23 +08:00
|
|
|
|
2005-08-03 03:59:51 +08:00
|
|
|
#
|
|
|
|
# reST roles
|
|
|
|
#
|
|
|
|
ROLES = {
    # Maps reST role name -> URL template. Each template is filled with
    # (link_base, lowercased role text); see create_reference_role() and
    # default_reference_role() below.
    'model': '%s/models/%s/',
    'view': '%s/views/%s/',
    'template': '%s/templates/%s/',
    'filter': '%s/filters/#%s',
    'tag': '%s/tags/#%s',
}
|
|
|
|
|
2013-10-31 23:42:28 +08:00
|
|
|
|
2005-07-13 09:25:57 +08:00
|
|
|
def create_reference_role(rolename, urlbase):
    """
    Register ``rolename`` as a canonical docutils role that renders its text
    as a reference whose URL is ``urlbase`` filled with the configured link
    base and the lowercased role text.
    """
    def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
        # docutils may pass None for these; normalize to fresh containers.
        if options is None:
            options = {}
        if content is None:
            content = []
        settings = inliner.document.settings
        refuri = urlbase % (settings.link_base, text.lower())
        node = docutils.nodes.reference(rawtext, text, refuri=refuri, **options)
        return [node], []

    docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
|
|
|
|
|
2013-10-31 23:42:28 +08:00
|
|
|
|
2006-06-03 21:37:34 +08:00
|
|
|
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """
    Role used when no explicit role is given: pick the URL template from
    ROLES according to the document's ``default_reference_context`` setting.
    """
    # docutils may pass None for these; normalize to fresh containers.
    if options is None:
        options = {}
    if content is None:
        content = []
    settings = inliner.document.settings
    context = settings.default_reference_context
    refuri = ROLES[context] % (settings.link_base, text.lower())
    node = docutils.nodes.reference(rawtext, text, refuri=refuri, **options)
    return [node], []
|
|
|
|
|
2016-11-13 01:11:23 +08:00
|
|
|
|
2005-12-07 13:11:19 +08:00
|
|
|
if docutils_is_available:
    # Bare `text` (no explicit role) resolves through the current default
    # reference context.
    docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)

    # Register one reference role per ROLES entry (:model:`...`, etc.).
    for rolename, base_url in ROLES.items():
        create_reference_role(rolename, base_url)
|
2016-05-30 23:14:00 +08:00
|
|
|
|
|
|
|
# Match the beginning of a named or unnamed group.
# named_group_matcher matches a '(?P<name>' opener, capturing '<name>'.
named_group_matcher = re.compile(r'\(\?P(<\w+>)')
# unnamed_group_matcher matches any literal '(' character.
unnamed_group_matcher = re.compile(r'\(')
|
|
|
|
|
|
|
|
|
|
|
|
def replace_named_groups(pattern):
    r"""
    Find named groups in `pattern` and replace them with the group name. E.g.,
    1. ^(?P<a>\w+)/b/(\w+)$ ==> ^<a>/b/(\w+)$
    2. ^(?P<a>\w+)/b/(?P<c>\w+)/$ ==> ^<a>/b/<c>/$
    3. ^(?P<a>\w+)/b/(?P<c>\w+) ==> ^<a>/b/<c>
    """
    # Tuples of (named capture group pattern, group name). The regex matches
    # a '(?P<name>' opener and captures '<name>' (inlined here so the
    # function is self-contained).
    group_pattern_and_name = []
    for m in re.finditer(r'\(\?P(<\w+>)', pattern):
        start, end, group_name = m.start(0), m.end(0), m.group(1)
        # Scan forward from the opener for the matching ')', tracking
        # nesting, e.g. '^(?P<a>(x|y))/b'.
        unmatched_open_brackets, prev_char = 1, None
        for idx, val in enumerate(pattern[end:]):
            # Unescaped '(' and ')' open/close a nested group.
            if val == '(' and prev_char != '\\':
                unmatched_open_brackets += 1
            elif val == ')' and prev_char != '\\':
                unmatched_open_brackets -= 1
            prev_char = val
            # Check the balance *after* processing the character so a group
            # whose ')' is the last character of the pattern is still
            # captured (checking before the update dropped trailing groups).
            if unmatched_open_brackets == 0:
                group_pattern_and_name.append((pattern[start:end + idx + 1], group_name))
                break
    # Replace the full text of each named capture group with its group name.
    # str.replace() affects every occurrence of the group text, preserving
    # the original behavior.
    for group_pattern, group_name in group_pattern_and_name:
        pattern = pattern.replace(group_pattern, group_name)
    return pattern
|
|
|
|
|
|
|
|
|
|
|
|
def replace_unnamed_groups(pattern):
    r"""
    Find unnamed groups in `pattern` and replace them with '<var>'. E.g.,
    1. ^(?P<a>\w+)/b/(\w+)$ ==> ^(?P<a>\w+)/b/<var>$
    2. ^(?P<a>\w+)/b/((x|y)\w+)$ ==> ^(?P<a>\w+)/b/<var>$
    3. ^(?P<a>\w+)/b/(\w+) ==> ^(?P<a>\w+)/b/<var>
    """
    # Start indices of unnamed capture groups: every '(' NOT followed by '?'.
    # The lookahead skips '(?...' specials (named groups, non-capturing
    # groups, lookarounds) so named groups survive, per example 1 above.
    unnamed_group_starts = [m.start(0) for m in re.finditer(r'\((?!\?)', pattern)]
    # (start, end) spans of unnamed groups; `end` is one past the ')'.
    group_spans = []
    for start in unnamed_group_starts:
        # Scan forward for the matching ')', tracking nesting, e.g.
        # '^b/((x|y)\w+)$'.
        unmatched_open_brackets, prev_char = 1, None
        for idx, val in enumerate(pattern[start + 1:]):
            # Unescaped '(' and ')' open/close a nested group.
            if val == '(' and prev_char != '\\':
                unmatched_open_brackets += 1
            elif val == ')' and prev_char != '\\':
                unmatched_open_brackets -= 1
            prev_char = val
            # Check the balance *after* processing the character so a group
            # whose ')' ends the pattern is still captured.
            if unmatched_open_brackets == 0:
                group_spans.append((start, start + 2 + idx))
                break
    # Keep only top-level spans: drop any span nested inside an earlier one,
    # e.g. the inner '(x|y)' of '((x|y)\w+)'. Adjacent groups (start ==
    # previous end) are distinct and must both be kept.
    top_level_spans = []
    prev_end = None
    for start, end in group_spans:
        if prev_end is None or start >= prev_end:
            top_level_spans.append((start, end))
            prev_end = end
    # Rebuild the pattern, substituting '<var>' for each top-level span.
    # Slicing the *original* string avoids the index-shifting problems of
    # in-place replacement.
    pieces, prev_end = [], 0
    for start, end in top_level_spans:
        pieces.append(pattern[prev_end:start])
        pieces.append('<var>')
        prev_end = end
    pieces.append(pattern[prev_end:])
    return ''.join(pieces)
|