django1/django/utils/translation/trans_real.py


"""Translation helper functions."""
from __future__ import unicode_literals
from collections import OrderedDict
import os
import re
import sys
import gettext as gettext_module
from threading import local
import warnings
from django.apps import apps
from django.dispatch import receiver
from django.test.signals import setting_changed
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils._os import upath
from django.utils.safestring import mark_safe, SafeData
from django.utils import six, lru_cache
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning, trim_whitespace, LANGUAGE_SESSION_KEY
# Translations are cached in a dictionary for every language.
# The active translation is stored thread-locally (via threading.local below).
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache of settings.LANGUAGES in an OrderedDict for easy lookups by
# key
_supported = None
# magic gettext character used to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = re.compile(r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*$', re.IGNORECASE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
# some browsers use deprecated locales. refs #18419
_BROWSERS_DEPRECATED_LOCALES = {
'zh-cn': 'zh-hans',
'zh-tw': 'zh-hant',
}
_DJANGO_DEPRECATED_LOCALES = _BROWSERS_DEPRECATED_LOCALES
@receiver(setting_changed)
def reset_cache(**kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'):
global _supported
_supported = None
check_for_language.cache_clear()
get_supported_language_variant.cache_clear()
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower() + '_' + language[p + 1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p + 1:]) > 2:
return language[:p].lower() + '_' + language[p + 1].upper() + language[p + 2:].lower()
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
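# Illustrative examples (not part of the original module) of the round-trip
# between language names and locale names:
#
#     >>> to_locale('en-us')
#     'en_US'
#     >>> to_locale('sr-latn')    # subtags longer than two characters get title case
#     'sr_Latn'
#     >>> to_language('en_US')
#     'en-us'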
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for app_config in reversed(list(apps.get_app_configs())):
apppath = os.path.join(app_config.path, 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
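# Illustrative sketch of the resolution order (not part of the original module):
# translation('de') returns one DjangoTranslation whose catalog starts from
# Django's own 'de' catalog, is updated with each installed app's locale/
# catalog, and finally with each entry in settings.LOCALE_PATHS, so catalogs
# merged later win on conflicting msgids.
#
#     from django.utils.translation.trans_real import translation
#     t = translation('de')      # cached per language code in _translations
#     t.gettext('Yes')           # 'Ja' when a 'de' catalog provides it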
def activate(language):
"""
Fetches the translation object for a given language and installs it as the
current translation object for the current thread.
"""
if language in _DJANGO_DEPRECATED_LOCALES:
msg = ("The use of the language code '%s' is deprecated. "
"Please use the '%s' translation instead.")
warnings.warn(msg % (language, _DJANGO_DEPRECATED_LOCALES[language]),
RemovedInDjango19Warning, stacklevel=2)
_active.value = translation(language)
def deactivate():
"""
Uninstalls the currently active translation object so that further _ calls
will resolve against the default translation object again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
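# Illustrative example (assumes a configured settings module with a 'de'
# catalog available): switching the active translation for the current thread.
#
#     from django.utils.translation import activate, deactivate, get_language, ugettext
#     activate('de')
#     get_language()        # -> 'de'
#     ugettext('Yes')       # -> 'Ja' if the catalog provides it
#     deactivate()          # subsequent calls use settings.LANGUAGE_CODE again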
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() allows a bytestring message to remain a bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
Returns a string on Python 3 and a UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
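# Illustrative example: the msgid passed to gettext is "<context>\x04<message>",
# and an untranslated result still contains the separator, which is how the
# fallback above detects the miss and returns the bare message.
#
#     from django.utils.translation import pgettext
#     pgettext('month name', 'May')    # -> translated 'May', or 'May' itself if untranslated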
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
Returns a string on Python 3 and a UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
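# Illustrative example of plural handling: the catalog's Plural-Forms rule
# selects the singular or plural msgstr based on `number`.
#
#     from django.utils.translation import ungettext
#     ungettext('%(count)d item', '%(count)d items', 3) % {'count': 3}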
def all_locale_paths():
"""
Returns a list of paths to user-provided language files.
"""
from django.conf import settings
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
@lru_cache.lru_cache()
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if not language_code_re.search(lang_code):
return False
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
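# Illustrative example: a well-formed code is only accepted when a 'django'
# message catalog for it exists in one of the locale paths.
#
#     from django.utils.translation import check_for_language
#     check_for_language('de')        # True -- Django ships a 'de' catalog
#     check_for_language('xx-bogus')  # False -- well-formed, but no catalog found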
@lru_cache.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing is found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the requested code is not found.
lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
global _supported
if _supported is None:
from django.conf import settings
_supported = OrderedDict(settings.LANGUAGES)
if lang_code:
# some browsers use deprecated language codes -- #18419
replacement = _BROWSERS_DEPRECATED_LOCALES.get(lang_code)
if lang_code not in _supported and replacement in _supported:
return replacement
# if fr-ca is not supported, try fr.
generic_lang_code = lang_code.split('-')[0]
for code in (lang_code, generic_lang_code):
if code in _supported and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in _supported:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code)
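# Illustrative examples of the fallback chain (the LANGUAGES contents named
# here are assumptions, not defaults):
#
#     from django.utils.translation.trans_real import get_supported_language_variant
#     # assuming LANGUAGES lists 'fr' but not 'fr-ca':
#     get_supported_language_variant('fr-ca')              # -> 'fr'
#     # assuming LANGUAGES lists 'es-ar' but not plain 'es':
#     get_supported_language_variant('es')                 # -> 'es-ar'
#     get_supported_language_variant('es', strict=True)    # raises LookupError
#     get_supported_language_variant('xx')                 # raises LookupError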
def get_language_from_path(path, strict=False):
"""
Returns the language code if a valid one is found in `path`, otherwise None.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the requested code is not found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
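# Illustrative example (assumes 'de' is a supported language with a catalog):
#
#     from django.utils.translation.trans_real import get_language_from_path
#     get_language_from_path('/de/about/')   # -> 'de'
#     get_language_from_path('/about/')      # -> None ('about' is not a supported code)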
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
from django.conf import settings
global _supported
if _supported is None:
_supported = OrderedDict(settings.LANGUAGES)
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
# for backwards compatibility django_language is also checked (remove in 1.8)
lang_code = request.session.get(LANGUAGE_SESSION_KEY, request.session.get('django_language'))
if lang_code in _supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
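# Illustrative example (a sketch; the Accept-Language header is consulted last,
# after the URL path, session and cookie):
#
#     from django.test import RequestFactory
#     from django.utils.translation.trans_real import get_language_from_request
#     request = RequestFactory().get('/', HTTP_ACCEPT_LANGUAGE='de;q=0.9, en;q=0.8')
#     get_language_from_request(request)   # 'de' if supported, else 'en', else a LANGUAGE_CODE variant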
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
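# Illustrative example: each non-whitespace character is replaced one-for-one,
# so line and column offsets reported by xgettext still match the template.
#
#     >>> blankout('some text', 'X')
#     'XXXX XXXX'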
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.conf import settings
from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO()
message_context = None
intrans = False
inplural = False
trimmed = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
def join_tokens(tokens, trim=False):
message = ''.join(tokens)
if trim:
message = trim_whitespace(message)
return message
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (
message_context,
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
else:
out.write(' ngettext(%r, %r, count) ' % (
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (
message_context,
join_tokens(singular, trimmed)))
else:
out.write(' gettext(%r) ' % join_tokens(singular,
trimmed))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = one_percent_re.sub('%%', t.contents)
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = one_percent_re.sub('%%', g)
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
trimmed = 'trimmed' in t.split_contents()
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':', 1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return force_str(out.getvalue())
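# Illustrative example (output shown loosely; the exact repr of the msgid and
# the blanked-out filler depend on the Python version and surrounding markup):
#
#     from django.utils.translation.trans_real import templatize
#     templatize("{% trans 'Welcome' %} plain text")
#     # ->  " gettext('Welcome')  XXXXX XXXX" -- literal text is blanked with 'X'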
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i:i + 3]
if first:
return []
if priority:
try:
priority = float(priority)
except ValueError:
return []
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
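# Illustrative example: entries are lower-cased, missing q-values default to
# 1.0, and the result is sorted by descending quality.
#
#     >>> parse_accept_lang_header('en-AU;q=0.8, es;q=0.5, fr')
#     [('fr', 1.0), ('en-au', 0.8), ('es', 0.5)]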