From 237ae081dbade817f4a2033b6aa2d3cdeb15b8b2 Mon Sep 17 00:00:00 2001
From: Sébastien Dailly
Date: Sat, 15 Nov 2014 21:22:30 +0100
Subject: Moved typogrify as plugin

---
 plugins/typogrify/filters.py | 386 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 386 insertions(+)
 create mode 100755 plugins/typogrify/filters.py

diff --git a/plugins/typogrify/filters.py b/plugins/typogrify/filters.py
new file mode 100755
index 0000000..e2a145c
--- /dev/null
+++ b/plugins/typogrify/filters.py
@@ -0,0 +1,386 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from titlecase import titlecase  # NOQA
+import locale
+
+class TypogrifyError(Exception):
+    """ A base error class so we can catch or silence typogrify's errors in templates """
+    pass
+
+def process_ignores(text, ignore_tags=None):
+    """ Creates a list of tuples based on tags to be ignored.
+    Tags can be added as a list in the `ignore_tags`.
+    Returns in the following format:
+
+    [
+        ('Text here', <should text be processed? True|False>),
+        ('Text here', <should text be processed? True|False>),
+    ]
+
+    >>> process_ignores('<pre>processed</pre><p>processed</p>')
+    [('<pre>processed</pre>', False), ('<p>processed</p>', True)]
+    >>> process_ignores('<code>processed</code><p>processed<pre>processed</pre></p>')
+    [('<code>processed</code>', False), ('<p>processed', True), ('<pre>processed</pre>', False), ('</p>', True)]
+    >>> process_ignores('<code>processed</code><p>processed<pre>processed</pre></p>', ['p'])
+    [('<code>processed</code>', False), ('<p>processed<pre>processed</pre></p>', False)]
+    """
+
+    position = 0
+    sections = []
+    if ignore_tags is None:
+        ignore_tags = []
+
+    ignore_tags = ignore_tags + ['pre', 'code']  # default tags
+    ignore_regex = r'<(%s)(?:\s.*?)?>.*?</\1>' % '|'.join(ignore_tags)
+    ignore_finder = re.compile(ignore_regex, re.IGNORECASE | re.DOTALL)
+
+    for section in ignore_finder.finditer(text):
+        start, end = section.span()
+
+        if position != start:
+            # if the current position isn't the match we
+            # need to process everything in between
+            sections.append((text[position:start], True))
+
+        # now we mark the matched section as ignored
+        sections.append((text[start:end], False))
+
+        position = end
+
+    # match the rest of the text if necessary
+    # (this could in fact be the entire string)
+    if position < len(text):
+        sections.append((text[position:len(text)], True))
+
+    return sections
+
+def amp(text):
+    """Wraps ampersands in HTML with ``<span class="amp">`` so they can be
+    styled with CSS. Ampersands are also normalized to ``&amp;``. Requires
+    ampersands to have whitespace or an ``&nbsp;`` on both sides.
+
+    >>> amp('One & two')
+    'One <span class="amp">&amp;</span> two'
+    >>> amp('One &amp; two')
+    'One <span class="amp">&amp;</span> two'
+    >>> amp('One &#38; two')
+    'One <span class="amp">&amp;</span> two'
+
+    >>> amp('One&nbsp;&amp;&nbsp;two')
+    'One&nbsp;<span class="amp">&amp;</span>&nbsp;two'
+
+    It won't mess up ampersands that are already wrapped, in entities or URLs
+
+    >>> amp('One <span class="amp">&amp;</span> two')
+    'One <span class="amp">&amp;</span> two'
+    >>> amp('&ldquo;this&rdquo; &amp; <a href="/?that&amp;test">that</a>')
+    '&ldquo;this&rdquo; <span class="amp">&amp;</span> <a href="/?that&amp;test">that</a>'
+
+    It should ignore standalone amps that are in attributes
+    >>> amp('<link href="xyz.html" title="One & Two">xyz</link>')
+    '<link href="xyz.html" title="One & Two">xyz</link>'
+    """
+    # tag_pattern from http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx
+    # it kinda sucks but it fixes the standalone amps in attributes bug
+    tag_pattern = '</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>'
+    amp_finder = re.compile(r"(\s|&nbsp;)(&|&amp;|&\#38;)(\s|&nbsp;)")
+    intra_tag_finder = re.compile(r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (tag_pattern, tag_pattern))
+
+    def _amp_process(groups):
+        prefix = groups.group('prefix') or ''
+        text = amp_finder.sub(r"""\1<span class="amp">&amp;</span>\3""", groups.group('text'))
+        suffix = groups.group('suffix') or ''
+        return prefix + text + suffix
+
+    output = intra_tag_finder.sub(_amp_process, text)
+    return output
+
+
+def caps(text):
+    """Wraps multiple capital letters in ``<span class="caps">``
+    so they can be styled with CSS.
+
+    >>> caps("A message from KU")
+    'A message from <span class="caps">KU</span>'
+
+    Uses the smartypants tokenizer to not screw with HTML or with tags it shouldn't.
+
+    >>> caps("<PRE>CAPS</pre> more CAPS")
+    '<PRE>CAPS</pre> more <span class="caps">CAPS</span>'
+
+    >>> caps("A message from 2KU2 with digits")
+    'A message from <span class="caps">2KU2</span> with digits'
+
+    >>> caps("Dotted caps followed by spaces should never include them in the wrap D.O.T.   like so.")
+    'Dotted caps followed by spaces should never include them in the wrap <span class="caps">D.O.T.</span>  like so.'
+
+    All caps with apostrophes in them shouldn't break. Only handles dumb apostrophes though.
+    >>> caps("JIMMY'S")
+    '<span class="caps">JIMMY\\'S</span>'
+
+    >>> caps("<i>D.O.T.</i>HE34TRFID")
+    '<i><span class="caps">D.O.T.</span></i><span class="caps">HE34TRFID</span>'
+    """
+    try:
+        import smartypants
+    except ImportError:
+        raise TypogrifyError("Error in {% caps %} filter: The Python SmartyPants library isn't installed.")
+
+    tokens = smartypants._tokenize(text)
+    result = []
+    in_skipped_tag = False
+
+    cap_finder = re.compile(r"""(
+                            (\b[A-Z\d]*          # Group 2: Any amount of caps and digits
+                            [A-Z]\d*[A-Z]        # A cap string must at least include two caps (but they can have digits between them)
+                            [A-Z\d']*\b)         # Any amount of caps and digits or dumb apostrophes
+                            | (\b[A-Z]+\.\s?     # OR: Group 3: Some caps, followed by a '.' and an optional space
+                            (?:[A-Z]+\.\s?)+)    # Followed by the same thing at least once more
+                            (?:\s|\b|$))
+                            """, re.VERBOSE)
+
+    def _cap_wrapper(matchobj):
+        """This is necessary to keep dotted cap strings from picking up extra spaces"""
+        if matchobj.group(2):
+            return """<span class="caps">%s</span>""" % matchobj.group(2)
+        else:
+            if matchobj.group(3)[-1] == " ":
+                caps = matchobj.group(3)[:-1]
+                tail = ' '
+            else:
+                caps = matchobj.group(3)
+                tail = ''
+            return """<span class="caps">%s</span>%s""" % (caps, tail)
+
+    # Add additional tags whose content should be
+    # ignored here. Note - <pre> and <code> tags are
+    # ignored by default and therefore are not here
+    tags_to_skip_regex = re.compile("<(/)?(?:kbd|script)[^>]*>", re.IGNORECASE)
+
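+    # Editor's sketch, not part of the committed file: with the regex above,
+    #     caps("<kbd>CAPS</kbd> and CAPS")
+    # leaves the first CAPS alone (the loop below marks everything between
+    # <kbd> tags as skipped) and wraps only the second one:
+    #     '<kbd>CAPS</kbd> and <span class="caps">CAPS</span>'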
+    for token in tokens:
+        if token[0] == "tag":
+            # Don't mess with tags.
+            result.append(token[1])
+            close_match = tags_to_skip_regex.match(token[1])
+            if close_match and close_match.group(1) is None:
+                in_skipped_tag = True
+            else:
+                in_skipped_tag = False
+        else:
+            if in_skipped_tag:
+                result.append(token[1])
+            else:
+                result.append(cap_finder.sub(_cap_wrapper, token[1]))
+    output = "".join(result)
+    return output
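+
+# Editor's sketch, not part of the committed file: amp() and caps() only
+# rewrite the text between tags, so they compose cleanly when chained:
+#
+#     >>> caps(amp('KU & KSU'))
+#     '<span class="caps">KU</span> <span class="amp">&amp;</span> <span class="caps">KSU</span>'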
+
+
+def initial_quotes(text):
+    """Wraps initial quotes in ``class="dquo"`` for double quotes or
+    ``class="quo"`` for single quotes. Works in these block tags ``(h1-h6, p, li, dt, dd)``
+    and also accounts for potential opening inline elements ``a, em, strong, span, b, i``
+
+    >>> initial_quotes('"With primes"')
+    '<span class="dquo">"</span>With primes"'
+    >>> initial_quotes("'With single primes'")
+    '<span class="quo">\\'</span>With single primes\\''
+
+    >>> initial_quotes('<a href="#">"With primes and a link"</a>')
+    '<a href="#"><span class="dquo">"</span>With primes and a link"</a>'
+
+    >>> initial_quotes('&#8220;With smartypanted quotes&#8221;')
+    '<span class="dquo">&#8220;</span>With smartypanted quotes&#8221;'
+    """
+    quote_finder = re.compile(r"""((<(p|h[1-6]|li|dt|dd)[^>]*>|^)              # start with an opening p, h1-6, li, dd, dt or the start of the string
+                                  \s*                                          # optional white space!
+                                  (<(a|em|span|strong|i|b)[^>]*>\s*)*)         # optional opening inline tags, with more optional white space for each.
+                                  (("|&ldquo;|&\#8220;)|('|&lsquo;|&\#8216;))  # Find me a quote! (only need to find the left quotes and the primes)
+                                                                               # double quotes are in group 7, singles in group 8
+                                  """, re.VERBOSE)
+
+    def _quote_wrapper(matchobj):
+        if matchobj.group(7):
+            classname = "dquo"
+            quote = matchobj.group(7)
+        else:
+            classname = "quo"
+            quote = matchobj.group(8)
+        return """%s<span class="%s">%s</span>""" % (matchobj.group(1), classname, quote)
+    output = quote_finder.sub(_quote_wrapper, text)
+    return output
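+
+# Editor's note, not part of the committed file: the entity alternates in
+# quote_finder mean initial_quotes() keeps working after smartypants() has
+# curled the quotes, which is the order applyfilters() uses below:
+#
+#     >>> initial_quotes(smartypants('"Real" quotes'))
+#     '<span class="dquo">&#8220;</span>Real&#8221; quotes'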
+
+
+def smartypants(text):
+    """Applies smarty pants to curl quotes.
+
+    >>> smartypants('The "Green" man')
+    'The &#8220;Green&#8221; man'
+    """
+    try:
+        import smartypants
+    except ImportError:
+        raise TypogrifyError("Error in {% smartypants %} filter: The Python smartypants library isn't installed.")
+    else:
+        output = smartypants.smartypants(text)
+        return output
+
+def french_insecable(text):
+    """Replace the space between each double sign punctuation by a thin
+    non-breaking space.
+
+    This conform with the french typographic rules.
+
+    >>> french_insecable('Foo !')
+    u'Foo&#8239;!'
+
+    >>> french_insecable('Foo ?')
+    u'Foo&#8239;?'
+
+    >>> french_insecable('Foo : bar')
+    u'Foo&#8239;: bar'
+
+    >>> french_insecable('Foo ; bar')
+    u'Foo&#8239;; bar'
+
+    >>> french_insecable(u'\xab bar \xbb')
+    u'\\xab&#8239;bar&#8239;\\xbb'
+
+    >>> french_insecable('123 456')
+    u'123&#8239;456'
+
+    >>> french_insecable('123 %')
+    u'123&#8239;%'
+
+    Space inside attributes should be preserved:
+
+    >>> french_insecable('<a title="Foo !"></a>')
+    '<a title="Foo !"></a>'
+    """
+
+    tag_pattern = '</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>'
+    intra_tag_finder = re.compile(r'(?P(%s)?)(?P([^<]*))(?P(%s)?)' % (tag_pattern, tag_pattern))
+
+    nnbsp = u'&#8239;'
+    space_finder = re.compile(r"""(?:
+                            (\w\s[:;!\?\xbb])|       # Group 1, space before double punctuation or a closing guillemet
+                            ([\xab]\s\w)|            # Group 2, space after an opening guillemet
+                            ([0-9]\s[0-9])|          # Group 3, space used as a thousands separator
+                            ([0-9]\s\%)              # Group 4, space before a percent sign
+                            )""", re.VERBOSE)
+
+    def _insecable_wrapper(groups):
+        """This is necessary to keep dotted cap strings to pick up extra spaces"""
+        def substitute(matchobj):
+            return matchobj.group(0).replace(" ", nnbsp)
+
+        prefix = groups.group('prefix') or ''
+        text = space_finder.sub(substitute, groups.group('text'))
+        suffix = groups.group('suffix') or ''
+        return prefix + text + suffix
+
+    output = intra_tag_finder.sub(_insecable_wrapper, text)
+    return output
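+
+# Editor's sketch, not part of the committed file: a quick end-to-end check
+# of french_insecable() on plain running text:
+#
+#     >>> french_insecable('Vraiment ? Oui !')
+#     u'Vraiment&#8239;? Oui&#8239;!'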
+
+def localize(text):
+    """ Return the text processed with the appropriate system locale
+    """
+    table = {"fr_FR": french_insecable}
+
+    lang = locale.getdefaultlocale()[0]
+    processor = table.get(lang, lambda x: x)
+
+    return processor(text)
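+
+# Editor's note, not part of the committed file: localize() keys on
+# locale.getdefaultlocale(), which reads the LANG/LC_* environment, so the
+# French rules only fire when the system locale is fr_FR:
+#
+#     localize('Foo !')   # -> u'Foo&#8239;!' when LANG=fr_FR.UTF-8
+#     localize('Foo !')   # -> 'Foo !' under any other locale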
+
+def widont(text):
+    """Replaces the space between the last two words in a string with `` ``
+    Works in these block tags ``(h1-h6, p, li, dd, dt)`` and also accounts for
+    potential closing inline elements ``a, em, strong, span, b, i``
+
+    >>> widont('A very simple test')
+    'A very simple&nbsp;test'
+
+    Single word items shouldn't be changed
+    >>> widont('Test')
+    'Test'
+    >>> widont(' Test')
+    ' Test'
+    >>> widont('<ul><li>Test</p>')
+    '<ul><li>Test</p>'
+    >>> widont('<ul><li> Test</p>')
+    '<ul><li> Test</p>'
+
+    >>> widont('<p>In a couple of paragraphs</p><p>paragraph two</p>')
+    '<p>In a couple of&nbsp;paragraphs</p><p>paragraph&nbsp;two</p>'
+
+    >>> widont('<h1><a href="#">In a link inside a heading</i> </a></h1>')
+    '<h1><a href="#">In a link inside a&nbsp;heading</i> </a></h1>'
+
+    >>> widont('<h1><a href="#">In a link</a> followed by other text</h1>')
+    '<h1><a href="#">In a link</a> followed by other&nbsp;text</h1>'
+
+    Empty HTMLs shouldn't error
+    >>> widont('<h1><a href="#"></a></h1>')
+    '<h1><a href="#"></a></h1>'
+
+    >>> widont('<div>Divs get no love!</div>')
+    '<div>Divs get no love!</div>'
+
+    >>> widont('<pre>Neither do PREs</pre>')
+    '<pre>Neither do PREs</pre>'
+
+    >>> widont('<div><p>But divs with paragraphs do!</p></div>')
+    '<div><p>But divs with paragraphs&nbsp;do!</p></div>'
+    """
+
+    widont_finder = re.compile(r"""((?:</?(?:a|em|span|strong|i|b)[^>]*>)|[^<>\s]) # must be preceded by an approved inline opening or closing tag or a nontag/nonspace
+                                   \s+                                             # the space to replace
+                                   ([^<>\s]+                                       # must be followed by non-tag non-space characters
+                                   \s*                                             # optional white space!
+                                   (</(a|em|span|strong|i|b)>\s*)*                 # optional closing inline tags with optional white space after each
+                                   ((</(p|h[1-6]|li|dt|dd)>)|$))                   # end with a closing p, h1-6, li or the end of the string
+                                   """, re.VERBOSE)
+    output = widont_finder.sub(r'\1&nbsp;\2', text)
+
+    return output
+
+def applyfilters(text):
+    """Applies the following filters: amp, smartypants, caps, initial_quotes, localize
+
+    >>> typogrify('<h2>"Jayhawks" & KU fans act extremely obnoxiously</h2>')
+    '<h2><span class="dquo">&#8220;</span>Jayhawks&#8221; <span class="amp">&amp;</span> <span class="caps">KU</span> fans act extremely&nbsp;obnoxiously</h2>'
+    """
+    text = amp(text)
+    text = smartypants(text)
+    text = caps(text)
+    text = initial_quotes(text)
+    text = localize(text)
+
+    return text
+
+def typogrify(text, ignore_tags=None):
+    """The super typography filter
+
+    Applies filters to text that are not in tags contained in the
+    ignore_tags list.
+    """
+
+    section_list = process_ignores(text, ignore_tags)
+
+    rendered_text = ""
+    for text_item, should_process in section_list:
+        if should_process:
+            rendered_text += applyfilters(text_item)
+        else:
+            rendered_text += text_item
+
+    # apply widont at the end, as it's already smart about tags. Hopefully.
+    return widont(rendered_text)
+
+def _test():
+    import doctest
+    doctest.testmod(verbose=True)
+
+if __name__ == "__main__":
+    _test()
--
cgit v1.2.3