Added django.template.Token.split_contents() and used it to add support for strings with spaces in {% ifequal %}
git-svn-id: http://code.djangoproject.com/svn/django/trunk@3112 bcc190cf-cafb-0310-a4f2-bffc1f526a37
parent 0f0560a9ac
commit 5edd1335b2
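Note: the new Token.split_contents() delegates to django.utils.text.smart_split, which breaks a tag's contents on whitespace while keeping quoted substrings (quotes included) as single bits. A minimal sketch of the behavior this commit relies on, assuming smart_split returns an iterator (hence the list() wrapper used in do_ifequal below):

    from django.utils.text import smart_split

    contents = 'ifequal a "test man"'

    # Plain str.split() breaks the quoted argument apart:
    contents.split()               # ['ifequal', 'a', '"test', 'man"']

    # smart_split() keeps it together, quotes and all:
    list(smart_split(contents))    # ['ifequal', 'a', '"test man"']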
@@ -56,9 +56,10 @@ times with multiple contexts)
 """
 import re
 from inspect import getargspec
-from django.utils.functional import curry
 from django.conf import settings
 from django.template.context import Context, RequestContext, ContextPopException
+from django.utils.functional import curry
+from django.utils.text import smart_split
 
 __all__ = ('Template', 'Context', 'RequestContext', 'compile_string')
 
@@ -163,16 +164,12 @@ class Token:
         self.token_type, self.contents = token_type, contents
 
     def __str__(self):
-        return '<%s token: "%s...">' % (
-            {TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
-            self.contents[:20].replace('\n', '')
-            )
+        return '<%s token: "%s...">' % \
+            ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
+            self.contents[:20].replace('\n', ''))
 
-    def __repr__(self):
-        return '<%s token: "%s">' % (
-            {TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
-            self.contents[:].replace('\n', '')
-            )
+    def split_contents(self):
+        return smart_split(self.contents)
 
 class Lexer(object):
     def __init__(self, template_string, origin):
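A hypothetical example (not part of this commit) of how a custom tag's compile function could switch from token.contents.split() to the new Token.split_contents(), so a quoted argument containing spaces arrives as a single bit; the tag name, node class, and quote-stripping below are the sketch's assumptions, not existing API:

    from django.template import Node, TemplateSyntaxError

    class GreetNode(Node):
        def __init__(self, name):
            self.name = name
        def render(self, context):
            return 'Hello, %s!' % self.name

    def do_greet(parser, token):
        # Hypothetical usage: {% greet "John Doe" %}
        bits = list(token.split_contents())    # ['greet', '"John Doe"'] -- quotes are kept
        if len(bits) != 2:
            raise TemplateSyntaxError, "%r takes one argument" % bits[0]
        name = bits[1]
        if name[0] == name[-1] and name[0] in ('"', "'"):
            name = name[1:-1]                  # strip the surrounding quotes
        return GreetNode(name)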
@@ -367,7 +364,6 @@ class DebugParser(Parser):
         if not hasattr(e, 'source'):
             e.source = token.source
 
-
 def lexer_factory(*args, **kwargs):
     if settings.TEMPLATE_DEBUG:
         return DebugLexer(*args, **kwargs)
@@ -380,7 +376,6 @@ def parser_factory(*args, **kwargs):
     else:
         return Parser(*args, **kwargs)
 
-
 class TokenParser:
     """
     Subclass this and implement the top() method to parse a template line. When
@@ -564,7 +559,7 @@ class FilterExpression(object):
     def args_check(name, func, provided):
         provided = list(provided)
         plen = len(provided)
-        (args, varargs, varkw, defaults) = getargspec(func)
+        args, varargs, varkw, defaults = getargspec(func)
         # First argument is filter input.
         args.pop(0)
         if defaults:
@@ -820,7 +815,7 @@ class Library(object):
         return func
 
     def simple_tag(self,func):
-        (params, xx, xxx, defaults) = getargspec(func)
+        params, xx, xxx, defaults = getargspec(func)
 
         class SimpleNode(Node):
             def __init__(self, vars_to_resolve):
@@ -837,7 +832,7 @@ class Library(object):
 
     def inclusion_tag(self, file_name, context_class=Context, takes_context=False):
         def dec(func):
-            (params, xx, xxx, defaults) = getargspec(func)
+            params, xx, xxx, defaults = getargspec(func)
             if takes_context:
                 if params[0] == 'context':
                     params = params[1:]
@@ -502,7 +502,7 @@ def do_ifequal(parser, token, negate):
             ...
         {% endifnotequal %}
     """
-    bits = token.contents.split()
+    bits = list(token.split_contents())
     if len(bits) != 3:
         raise TemplateSyntaxError, "%r takes two arguments" % bits[0]
     end_tag = 'end' + bits[0]
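The practical effect of the do_ifequal change, sketched with the standard Template/Context API (assumes Django settings are configured so the template machinery can run): a quoted argument containing a space now parses as one bit instead of splitting into four bits and raising the "takes two arguments" error.

    from django.template import Template, Context

    t = Template('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}')
    print t.render(Context({'a': 'test man'}))   # yes
    print t.render(Context({'a': 'foo'}))        # no
    # Before this change, token.contents.split() produced
    # ['ifequal', 'a', '"test', 'man"'] and the tag failed to compile.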
@@ -307,6 +307,18 @@ TEMPLATE_TESTS = {
     'ifequal09': ('{% ifequal a "test" %}yes{% else %}no{% endifequal %}', {}, "no"),
     'ifequal10': ('{% ifequal a b %}yes{% else %}no{% endifequal %}', {}, "yes"),
 
+    # SMART SPLITTING
+    'ifequal-split01': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {}, "no"),
+    'ifequal-split02': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {'a': 'foo'}, "no"),
+    'ifequal-split03': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {'a': 'test man'}, "yes"),
+    'ifequal-split04': ("{% ifequal a 'test man' %}yes{% else %}no{% endifequal %}", {'a': 'test man'}, "yes"),
+    'ifequal-split05': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': ''}, "no"),
+    'ifequal-split06': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': 'i "love" you'}, "yes"),
+    'ifequal-split07': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': 'i love you'}, "no"),
+    'ifequal-split08': (r"{% ifequal a 'I\'m happy' %}yes{% else %}no{% endifequal %}", {'a': "I'm happy"}, "yes"),
+    'ifequal-split09': (r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}", {'a': r"slash\man"}, "yes"),
+    'ifequal-split10': (r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}", {'a': r"slashman"}, "no"),
+
     ### IFNOTEQUAL TAG ########################################################
     'ifnotequal01': ("{% ifnotequal a b %}yes{% endifnotequal %}", {"a": 1, "b": 2}, "yes"),
     'ifnotequal02': ("{% ifnotequal a b %}yes{% endifnotequal %}", {"a": 1, "b": 1}, ""),