diff options
author | Sébastien Dailly <sebastien@chimrod.com> | 2021-09-17 10:16:06 +0200 |
---|---|---|
committer | Sébastien Dailly <sebastien@chimrod.com> | 2021-11-18 14:46:42 +0100 |
commit | fa6a4da8d6f9cd85cd60e505e8eb5a9becc40ed6 (patch) | |
tree | 1b5a6b6d5c7d0c0b0de95731731b0a0707caf770 /plugins/typogrify/filters.py | |
parent | b10b8aaaa255f88ddc246c8cd0acd24f6d2b744b (diff) |
Update plugin configuration
Diffstat (limited to 'plugins/typogrify/filters.py')
-rwxr-xr-x | plugins/typogrify/filters.py | 386 |
1 file changed, 0 insertions, 386 deletions
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Typogrify filters: HTML-aware typographic enhancements.

Upstream typogrify filters plus site-local French spacing rules
(``french_insecable``/``localize``).  HTML entities in the regexes and
doctests (``&amp;``, ``&nbsp;``, ``&ldquo;``, ``&#8220;`` ...) are restored
here; the recovered copy had them decoded by an HTML rendering pass.
"""

import re
import locale

try:
    # Re-exported for API compatibility with upstream typogrify; nothing in
    # this module calls it, so its absence must not break the import (the
    # other external dependency, smartypants, is already imported lazily).
    from typogrify.titlecase import titlecase  # NOQA
except ImportError:
    titlecase = None


# tag_pattern from http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx
# It kinda sucks but it fixes the "standalone amps in attributes" bug: a full
# HTML tag is matched as prefix/suffix so attribute text is never rewritten.
TAG_PATTERN = r'</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>'


class TypogrifyError(Exception):
    """A base error class so we can catch or silence typogrify's errors in templates."""
    pass


def process_ignores(text, ignore_tags=None):
    """Split *text* into ``(fragment, should_process)`` tuples.

    Extra tags to skip can be supplied as a list in ``ignore_tags``;
    ``pre`` and ``code`` are always skipped.

    >>> process_ignores('<pre>processed</pre><p>processed</p>')
    [('<pre>processed</pre>', False), ('<p>processed</p>', True)]
    >>> process_ignores('<code>processed</code><p>processed<pre>processed</pre></p>')
    [('<code>processed</code>', False), ('<p>processed', True), ('<pre>processed</pre>', False), ('</p>', True)]
    >>> process_ignores('<code>processed</code><p>processed<pre>processed</pre></p>', ['p'])
    [('<code>processed</code>', False), ('<p>processed<pre>processed</pre></p>', False)]
    """
    position = 0
    sections = []
    if ignore_tags is None:
        ignore_tags = []

    ignore_tags = ignore_tags + ['pre', 'code']  # default tags
    # The \1 backreference ensures we close the same tag we opened.
    ignore_regex = r'<(%s)(?:\s.*?)?>.*?</(\1)>' % '|'.join(ignore_tags)
    ignore_finder = re.compile(ignore_regex, re.IGNORECASE | re.DOTALL)

    for section in ignore_finder.finditer(text):
        start, end = section.span()

        if position != start:
            # everything between the previous match and this one gets processed
            sections.append((text[position:start], True))

        # the matched section itself is ignored
        sections.append((text[start:end], False))
        position = end

    # the remainder of the text, which may be the entire string
    if position < len(text):
        sections.append((text[position:], True))

    return sections


def amp(text):
    """Wrap ampersands in HTML with ``<span class="amp">`` so they can be
    styled with CSS.  Ampersands are also normalized to ``&amp;``.  Requires
    ampersands to have whitespace or an ``&nbsp;`` on both sides.

    >>> amp('One & two')
    'One <span class="amp">&amp;</span> two'
    >>> amp('One &amp; two')
    'One <span class="amp">&amp;</span> two'
    >>> amp('One &#38; two')
    'One <span class="amp">&amp;</span> two'

    >>> amp('One&nbsp;&amp;&nbsp;two')
    'One&nbsp;<span class="amp">&amp;</span>&nbsp;two'

    It won't mess up & that are already wrapped, in entities or URLs

    >>> amp('One <span class="amp">&amp;</span> two')
    'One <span class="amp">&amp;</span> two'
    >>> amp('&ldquo;this&rdquo; & <a href="/?that&amp;test">that</a>')
    '&ldquo;this&rdquo; <span class="amp">&amp;</span> <a href="/?that&amp;test">that</a>'

    It should ignore standalone amps that are in attributes
    >>> amp('<link href="xyz.html" title="One &amp; Two">xyz</link>')
    '<link href="xyz.html" title="One &amp; Two">xyz</link>'
    """
    amp_finder = re.compile(r"(\s|&nbsp;)(&|&amp;|&\#38;)(\s|&nbsp;)")
    # Only the text *between* tags is rewritten; see TAG_PATTERN above.
    intra_tag_finder = re.compile(
        r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (TAG_PATTERN, TAG_PATTERN))

    def _amp_process(groups):
        # prefix/suffix are the surrounding tags, reproduced untouched
        prefix = groups.group('prefix') or ''
        text = amp_finder.sub(r"""\1<span class="amp">&amp;</span>\3""",
                              groups.group('text'))
        suffix = groups.group('suffix') or ''
        return prefix + text + suffix

    return intra_tag_finder.sub(_amp_process, text)


def caps(text):
    """Wrap multiple capital letters in ``<span class="caps">`` so they can
    be styled with CSS.

    >>> caps("A message from KU")
    'A message from <span class="caps">KU</span>'

    Uses the smartypants tokenizer to not screw with HTML or with tags it shouldn't.

    >>> caps("<PRE>CAPS</pre> more CAPS")
    '<PRE>CAPS</pre> more <span class="caps">CAPS</span>'

    >>> caps("A message from 2KU2 with digits")
    'A message from <span class="caps">2KU2</span> with digits'

    >>> caps("Dotted caps followed by spaces should never include them in the wrap D.O.T. like so.")
    'Dotted caps followed by spaces should never include them in the wrap <span class="caps">D.O.T.</span> like so.'

    All caps with apostrophes in them shouldn't break.  Only handles dumb apostrophes though.
    >>> caps("JIMMY'S")
    '<span class="caps">JIMMY\\'S</span>'

    >>> caps("<i>D.O.T.</i>HE34T<b>RFID</b>")
    '<i><span class="caps">D.O.T.</span></i><span class="caps">HE34T</span><b><span class="caps">RFID</span></b>'

    :raises TypogrifyError: if the ``smartypants`` library is not installed.
    """
    try:
        import smartypants
    except ImportError:
        raise TypogrifyError("Error in {% caps %} filter: The Python SmartyPants library isn't installed.")

    tokens = smartypants._tokenize(text)
    result = []
    in_skipped_tag = False

    cap_finder = re.compile(r"""(
                            (\b[A-Z\d]*        # Group 2: Any amount of caps and digits
                            [A-Z]\d*[A-Z]      # A cap string must at least include two caps (but they can have digits between them)
                            [A-Z\d']*\b)       # Any amount of caps and digits or dumb apostrophes
                            | (\b[A-Z]+\.\s?   # OR: Group 3: Some caps, followed by a '.' and an optional space
                            (?:[A-Z]+\.\s?)+)  # Followed by the same thing at least once more
                            (?:\s|\b|$))
                            """, re.VERBOSE)

    def _cap_wrapper(matchobj):
        """Keep dotted cap strings from picking up extra trailing spaces."""
        if matchobj.group(2):
            return """<span class="caps">%s</span>""" % matchobj.group(2)
        else:
            # group 3 may have swallowed one trailing space; put it back
            # outside the span.  (Renamed from `caps` to avoid shadowing
            # the enclosing function.)
            if matchobj.group(3)[-1] == " ":
                caps_text = matchobj.group(3)[:-1]
                tail = ' '
            else:
                caps_text = matchobj.group(3)
                tail = ''
            return """<span class="caps">%s</span>%s""" % (caps_text, tail)

    # Add additional tags whose content should be ignored here.
    # Note - <pre> and <code> tags are ignored by default (via
    # process_ignores) and therefore are not here.
    tags_to_skip_regex = re.compile(r"<(/)?(?:kbd|script)[^>]*>", re.IGNORECASE)

    for token in tokens:
        if token[0] == "tag":
            # Don't mess with tags.
            result.append(token[1])
            close_match = tags_to_skip_regex.match(token[1])
            # group(1) is None for an *opening* skip-tag, "/" for a closing one
            in_skipped_tag = bool(close_match and close_match.group(1) is None)
        else:
            if in_skipped_tag:
                result.append(token[1])
            else:
                result.append(cap_finder.sub(_cap_wrapper, token[1]))

    return "".join(result)


def initial_quotes(text):
    """Wrap initial quotes in ``class="dquo"`` for double quotes or
    ``class="quo"`` for single quotes.  Works in these block tags
    ``(h1-h6, p, li, dt, dd)`` and also accounts for potential opening
    inline elements ``a, em, strong, span, b, i``.

    >>> initial_quotes('"With primes"')
    '<span class="dquo">"</span>With primes"'
    >>> initial_quotes("'With single primes'")
    '<span class="quo">\\'</span>With single primes\\''

    >>> initial_quotes('<a href="#">"With primes and a link"</a>')
    '<a href="#"><span class="dquo">"</span>With primes and a link"</a>'

    >>> initial_quotes('&#8220;With smartypanted quotes&#8221;')
    '<span class="dquo">&#8220;</span>With smartypanted quotes&#8221;'
    """
    quote_finder = re.compile(r"""((<(p|h[1-6]|li|dt|dd)[^>]*>|^)             # start with an opening p, h1-6, li, dd, dt or the start of the string
                                  \s*                                         # optional white space!
                                  (<(a|em|span|strong|i|b)[^>]*>\s*)*)        # optional opening inline tags, with more optional white space for each.
                                  (("|&ldquo;|&\#8220;)|('|&lsquo;|&\#8216;)) # Find me a quote! (only need to find the left quotes and the primes)
                                                                              # double quotes are in group 7, singles in group 8
                                  """, re.VERBOSE)

    def _quote_wrapper(matchobj):
        if matchobj.group(7):
            classname = "dquo"
            quote = matchobj.group(7)
        else:
            classname = "quo"
            quote = matchobj.group(8)
        return """%s<span class="%s">%s</span>""" % (matchobj.group(1), classname, quote)

    return quote_finder.sub(_quote_wrapper, text)


def smartypants(text):
    """Apply SmartyPants to curl quotes.

    >>> smartypants('The "Green" man')
    'The &#8220;Green&#8221; man'

    :raises TypogrifyError: if the ``smartypants`` library is not installed.
    """
    try:
        import smartypants
    except ImportError:
        raise TypogrifyError("Error in {% smartypants %} filter: The Python smartypants library isn't installed.")
    else:
        return smartypants.smartypants(text)


def french_insecable(text):
    """Replace the space before each double-sign punctuation mark with a
    non-breaking thin space, per French typographic rules.

    NOTE(review): the character wrapped by the span renders as a plain
    space in the recovered source; upstream may have intended a narrow
    no-break space (U+202F) -- confirm against the rendered site.

    >>> french_insecable('Foo !')
    'Foo<span style="white-space:nowrap"> </span>!'

    >>> french_insecable('Foo ?')
    'Foo<span style="white-space:nowrap"> </span>?'

    >>> french_insecable('Foo : bar')
    'Foo<span style="white-space:nowrap"> </span>: bar'

    >>> french_insecable('Foo ; bar')
    'Foo<span style="white-space:nowrap"> </span>; bar'

    >>> french_insecable('\u00ab bar \u00bb')
    '\u00ab<span style="white-space:nowrap"> </span>bar<span style="white-space:nowrap"> </span>\u00bb'

    >>> french_insecable('123 456')
    '123<span style="white-space:nowrap"> </span>456'

    >>> french_insecable('123 %')
    '123<span style="white-space:nowrap"> </span>%'

    Space inside attributes should be preserved:

    >>> french_insecable('<a title="foo !">')
    '<a title="foo !">'
    """
    intra_tag_finder = re.compile(
        r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (TAG_PATTERN, TAG_PATTERN))

    nnbsp = '<span style="white-space:nowrap"> </span>'
    space_finder = re.compile(r"""(?:
                              (\w\s[:;!\?\xbb])|    # Group 1, space before double punctuation or closing guillemet
                              ([\xab]\s\w)|         # space after opening guillemet
                              ([0-9]\s[0-9])|       # thousands separator between digits
                              ([0-9]\s\%)           # space before percent sign
                              )""", re.VERBOSE)

    def _insecable_wrapper(groups):
        """Rewrite only inter-tag text, leaving tag attributes untouched."""
        def substitute(matchobj):
            return matchobj.group(0).replace(" ", nnbsp)

        prefix = groups.group('prefix') or ''
        text = space_finder.sub(substitute, groups.group('text'))
        suffix = groups.group('suffix') or ''
        return prefix + text + suffix

    return intra_tag_finder.sub(_insecable_wrapper, text)


def localize(text):
    """Return *text* processed with the filter for the system locale.

    Only ``fr_FR`` currently gets extra treatment; any other locale is a
    pass-through.
    """
    table = {"fr_FR": french_insecable}

    # NOTE(review): locale.getdefaultlocale() is deprecated since Python
    # 3.11; kept for behavioural compatibility with the original.
    lang = locale.getdefaultlocale()[0]
    processor = table.get(lang, lambda x: x)

    return processor(text)


def widont(text):
    """Replace the space between the last two words in a string with
    ``&nbsp;``.  Works in these block tags ``(h1-h6, p, li, dd, dt)`` and
    also accounts for potential closing inline elements
    ``a, em, strong, span, b, i``.

    >>> widont('A very simple test')
    'A very simple&nbsp;test'

    Single word items shouldn't be changed
    >>> widont('Test')
    'Test'
    >>> widont(' Test')
    ' Test'
    >>> widont('<ul><li>Test</p></li><ul>')
    '<ul><li>Test</p></li><ul>'
    >>> widont('<ul><li> Test</p></li><ul>')
    '<ul><li> Test</p></li><ul>'

    >>> widont('<p>In a couple of paragraphs</p><p>paragraph two</p>')
    '<p>In a couple of&nbsp;paragraphs</p><p>paragraph&nbsp;two</p>'

    >>> widont('<h1><a href="#">In a link inside a heading</i> </a></h1>')
    '<h1><a href="#">In a link inside a&nbsp;heading</i> </a></h1>'

    >>> widont('<h1><a href="#">In a link</a> followed by other text</h1>')
    '<h1><a href="#">In a link</a> followed by other&nbsp;text</h1>'

    Empty HTMLs shouldn't error
    >>> widont('<h1><a href="#"></a></h1>')
    '<h1><a href="#"></a></h1>'

    >>> widont('<div>Divs get no love!</div>')
    '<div>Divs get no love!</div>'

    >>> widont('<pre>Neither do PREs</pre>')
    '<pre>Neither do PREs</pre>'

    >>> widont('<div><p>But divs with paragraphs do!</p></div>')
    '<div><p>But divs with paragraphs&nbsp;do!</p></div>'
    """
    widont_finder = re.compile(r"""((?:</?(?:a|em|span|strong|i|b)[^>]*>)|[^<>\s]) # must be preceded by an approved inline opening or closing tag or a nontag/nonspace
                                   \s+                                             # the space to replace
                                   ([^<>\s]+                                       # must be followed by non-tag non-space characters
                                   \s*                                             # optional white space!
                                   (</(a|em|span|strong|i|b)>\s*)*                 # optional closing inline tags with optional white space after each
                                   ((</(p|h[1-6]|li|dt|dd)>)|$))                   # end with a closing p, h1-6, li or the end of the string
                                   """, re.VERBOSE)
    return widont_finder.sub(r'\1&nbsp;\2', text)


def applyfilters(text):
    """Apply the filters: amp, smartypants, caps, initial_quotes, localize.

    >>> typogrify('<h2>"Jayhawks" & KU fans act extremely obnoxiously</h2>')
    '<h2><span class="dquo">&#8220;</span>Jayhawks&#8221; <span class="amp">&amp;</span> <span class="caps">KU</span> fans act extremely obnoxiously</h2>'
    """
    text = amp(text)
    text = smartypants(text)
    text = caps(text)
    text = initial_quotes(text)
    text = localize(text)

    return text


def typogrify(text, ignore_tags=None):
    """The super typography filter.

    Applies filters to text that is not inside tags contained in the
    ``ignore_tags`` list (``pre`` and ``code`` are always ignored).
    """
    section_list = process_ignores(text, ignore_tags)

    rendered_text = ""
    for text_item, should_process in section_list:
        if should_process:
            rendered_text += applyfilters(text_item)
        else:
            rendered_text += text_item

    # apply widont at the end, as it's already smart about tags. Hopefully.
    return widont(rendered_text)


def _test():
    """Run the module doctests verbosely."""
    import doctest
    doctest.testmod(verbose=True)


if __name__ == "__main__":
    _test()