From 237ae081dbade817f4a2033b6aa2d3cdeb15b8b2 Mon Sep 17 00:00:00 2001
From: Sébastien Dailly processed processed processedprocessed
processed
', False), ('processed
processed
processed
', False), ('processed', True), ('
processed', False), ('', True)] + >>> process_ignores('
processed
processed
processed',['p']) + [('
processed
', False), ('processed
processed', False)] + """ + + position = 0 + sections = [] + if ignore_tags is None: + ignore_tags = [] + + ignore_tags = ignore_tags + ['pre', 'code'] # default tags + ignore_regex = r'<(%s)(?:\s.*?)?>.*?(\1)>' % '|'.join(ignore_tags) + ignore_finder = re.compile(ignore_regex, re.IGNORECASE | re.DOTALL) + + for section in ignore_finder.finditer(text): + start, end = section.span() + + if position != start: + # if the current position isn't the match we + # need to process everything in between + sections.append((text[position:start], True)) + + # now we mark the matched section as ignored + sections.append((text[start:end], False)) + + position = end + + # match the rest of the text if necessary + # (this could in fact be the entire string) + if position < len(text): + sections.append((text[position:len(text)], True)) + + return sections + +def amp(text): + """Wraps apersands in HTML with ```` so they can be + styled with CSS. Apersands are also normalized to ``&``. Requires + ampersands to have whitespace or an `` `` on both sides. + + >>> amp('One & two') + 'One & two' + >>> amp('One & two') + 'One & two' + >>> amp('One & two') + 'One & two' + + >>> amp('One & two') + 'One & two' + + It won't mess up & that are already wrapped, in entities or URLs + + >>> amp('One & two') + 'One & two' + >>> amp('“this” & that') + '“this” & that' + + It should ignore standalone amps that are in attributes + >>> amp('xyz') + 'xyz' + """ + # tag_pattern from http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx + # it kinda sucks but it fixes the standalone amps in attributes bug + tag_pattern = '?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>' + amp_finder = re.compile(r"(\s| )(&|&|&\#38;)(\s| )") + intra_tag_finder = re.compile(r'(?P
CAPSmore CAPS") + '
CAPSmore CAPS' + + >>> caps("A message from 2KU2 with digits") + 'A message from 2KU2 with digits' + + >>> caps("Dotted caps followed by spaces should never include them in the wrap D.O.T. like so.") + 'Dotted caps followed by spaces should never include them in the wrap D.O.T. like so.' + + All caps with with apostrophes in them shouldn't break. Only handles dump apostrophes though. + >>> caps("JIMMY'S") + 'JIMMY\\'S' + + >>> caps("D.O.T.HE34TRFID") + 'D.O.T.HE34TRFID' + """ + try: + import smartypants + except ImportError: + raise TypogrifyError("Error in {% caps %} filter: The Python SmartyPants library isn't installed.") + + tokens = smartypants._tokenize(text) + result = [] + in_skipped_tag = False + + cap_finder = re.compile(r"""( + (\b[A-Z\d]* # Group 2: Any amount of caps and digits + [A-Z]\d*[A-Z] # A cap string much at least include two caps (but they can have digits between them) + [A-Z\d']*\b) # Any amount of caps and digits or dumb apostsrophes + | (\b[A-Z]+\.\s? # OR: Group 3: Some caps, followed by a '.' and an optional space + (?:[A-Z]+\.\s?)+) # Followed by the same thing at least once more + (?:\s|\b|$)) + """, re.VERBOSE) + + def _cap_wrapper(matchobj): + """This is necessary to keep dotted cap strings to pick up extra spaces""" + if matchobj.group(2): + return """%s""" % matchobj.group(2) + else: + if matchobj.group(3)[-1] == " ": + caps = matchobj.group(3)[:-1] + tail = ' ' + else: + caps = matchobj.group(3) + tail = '' + return """%s%s""" % (caps, tail) + + # Add additional tags whose content should be + # ignored here. Note -
and tag are
+ # ignored by default and therefore are not here
+ tags_to_skip_regex = re.compile("<(/)?(?:kbd|script)[^>]*>", re.IGNORECASE)
+
+ for token in tokens:
+ if token[0] == "tag":
+ # Don't mess with tags.
+ result.append(token[1])
+ close_match = tags_to_skip_regex.match(token[1])
+ if close_match and close_match.group(1) == None:
+ in_skipped_tag = True
+ else:
+ in_skipped_tag = False
+ else:
+ if in_skipped_tag:
+ result.append(token[1])
+ else:
+ result.append(cap_finder.sub(_cap_wrapper, token[1]))
+ output = "".join(result)
+ return output
+
+
def initial_quotes(text):
    """Wraps initial quotes in ``class="dquo"`` for double quotes or
    ``class="quo"`` for single quotes. Works in these block tags ``(h1-h6, p, li, dt, dd)``
    and also accounts for potential opening inline elements ``a, em, strong, span, b, i``

    >>> initial_quotes('"With primes"')
    '"With primes"'
    >>> initial_quotes("'With single primes'")
    '\\'With single primes\\''

    >>> initial_quotes('"With primes and a link"')
    '"With primes and a link"'

    >>> initial_quotes('“With smartypanted quotes”')
    '“With smartypanted quotes”'
    """
    # NOTE(review): the doctest expectations above appear to have lost their
    # <span class="dquo">…</span> wrapping when this patch was rendered to
    # HTML; confirm them against upstream typogrify before trusting them.

    # Match: an opening block tag (p, h1-6, li, dt, dd) or start-of-string,
    # optional whitespace, any number of opening inline tags, then a leading
    # double quote (captured in group 7) or single quote (group 8).
    quote_finder = re.compile(r"""((<(p|h[1-6]|li|dt|dd)[^>]*>|^) # start with an opening p, h1-6, li, dd, dt or the start of the string
                                  \s* # optional white space!
                                  (<(a|em|span|strong|i|b)[^>]*>\s*)* # optional opening inline tags, with more optional white space for each.
                                  (("|“|&\#8220;)|('|‘|&\#8216;)) # Find me a quote! (only need to find the left quotes and the primes)
                                  # double quotes are in group 7, singles in group 8
                                  """, re.VERBOSE)

    def _quote_wrapper(matchobj):
        # Group 7 set -> leading double quote; otherwise group 8 -> single quote.
        if matchobj.group(7):
            classname = "dquo"
        	quote = matchobj.group(7)
        else:
            classname = "quo"
            quote = matchobj.group(8)
        # NOTE(review): only two %s placeholders for three arguments — as
        # written this raises "TypeError: not all arguments converted during
        # string formatting". The template was almost certainly
        # '%s<span class="%s">%s</span>' before the markup was stripped from
        # this rendered patch; restore from upstream.
        return """%s%s""" % (matchobj.group(1), classname, quote)
    output = quote_finder.sub(_quote_wrapper, text)
    return output
+
+
def smartypants(text):
    """Curl straight quotes into typographic ones via the SmartyPants library.

    >>> smartypants('The "Green" man')
    'The “Green” man'
    """
    # Imported lazily so the rest of the module works without the dependency.
    try:
        import smartypants as _sp
    except ImportError:
        raise TypogrifyError("Error in {% smartypants %} filter: The Python smartypants library isn't installed.")
    return _sp.smartypants(text)
+
def french_insecable(text):
    """Replace the space between each double sign punctuation by a thin
    non-breaking space.

    This conform with the french typographic rules.

    >>> french_insecable('Foo !')
    u'Foo !'

    >>> french_insecable('Foo ?')
    u'Foo ?'

    >>> french_insecable('Foo : bar')
    u'Foo : bar'

    >>> french_insecable('Foo ; bar')
    u'Foo ; bar'

    >>> french_insecable(u'\xab bar \xbb')
    u'\\xab bar \\xbb'

    >>> french_insecable('123 456')
    u'123 456'

    >>> french_insecable('123 %')
    u'123 %'

    Space inside attributes should be preserved :

    >>> french_insecable('')
    ''
    """

    # NOTE(review): this pattern reads '?\w+…' — the leading '<' of '</?\w+…'
    # was evidently eaten when this patch was rendered to HTML, so as written
    # it can never match a tag. Restore from the identical pattern upstream.
    tag_pattern = '?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>'
    # NOTE(review): the named groups were stripped too — '(?P(' is a regex
    # syntax error; it must have been (?P<prefix>…)(?P<text>…)(?P<suffix>…),
    # matching the group('prefix'/'text'/'suffix') calls below. Splitting the
    # input this way keeps substitutions away from tags and attributes.
    intra_tag_finder = re.compile(r'(?P(%s)?)(?P([^<]*))(?P(%s)?)' % (tag_pattern, tag_pattern))

    # NOTE(review): presumably the narrow no-break space U+202F — verify the
    # literal survived the patch transport; it renders as a plain space here.
    nnbsp = u' '
    # Spots that take the non-breaking space: before double punctuation and
    # closing guillemets, after opening guillemets, and inside digit groups.
    space_finder = re.compile(r"""(?:
    (\w\s[:;!\?\xbb])| # Group 1, space before punctuation
    ([\xab]\s\w)|
    ([0-9]\s[0-9])|
    ([0-9]\s\%)
    )""", re.VERBOSE)

    def _insecable_wrapper(groups):
        """Apply the space substitutions to the text part of a tag/text/tag
        match, leaving the surrounding tags untouched."""
        def substitute(matchobj):
            return matchobj.group(0).replace(" ", nnbsp)

        prefix = groups.group('prefix') or ''
        text = space_finder.sub(substitute, groups.group('text'))
        suffix = groups.group('suffix') or ''
        return prefix + text + suffix

    output = intra_tag_finder.sub(_insecable_wrapper, text)
    return output
+
def localize(text):
    """Apply locale-specific typographic post-processing.

    Looks up the system's default locale and, when a dedicated filter exists
    for it (currently only fr_FR), runs the text through it; any other locale
    leaves the text untouched.
    """
    # Map locale codes to their post-processing filter.
    filters = {"fr_FR": french_insecable}

    lang = locale.getdefaultlocale()[0]
    if lang in filters:
        return filters[lang](text)
    return text
+
def widont(text):
    """Replaces the space between the last two words in a string with `` ``
    Works in these block tags ``(h1-h6, p, li, dd, dt)`` and also accounts for
    potential closing inline elements ``a, em, strong, span, b, i``

    >>> widont('A very simple test')
    'A very simple test'

    Single word items shouldn't be changed
    >>> widont('Test')
    'Test'
    >>> widont(' Test')
    ' Test'
    >>> widont('- Test
')
    '- Test
'
    >>> widont('- Test
')
    '- Test
'

    >>> widont('In a couple of paragraphs
paragraph two
')
    'In a couple of paragraphs
paragraph two
'

    >>> widont('In a link inside a heading
')
    'In a link inside a heading
'

    >>> widont('In a link followed by other text
')
    'In a link followed by other text
'

    Empty HTMLs shouldn't error
    >>> widont('
')
    '
'

    >>> widont('Divs get no love!')
    'Divs get no love!'

    >>> widont('
Neither do PREs
')
    'Neither do PREs
'

    >>> widont('But divs with paragraphs do!
')
    'But divs with paragraphs do!
'
    """
    # NOTE(review): the doctests above lost their HTML tags (and the expected
    # '&nbsp;' entities) when this patch was rendered; restore from upstream.

    # NOTE(review): the pattern below reads '((?:?(?:a|em…' — the '</' of
    # '((?:</?(?:a|em…' was stripped (as were the '<' of the closing-tag
    # alternatives further down), which makes this an invalid regex
    # ('(?:?' = nothing to repeat). Restore from upstream before use.
    widont_finder = re.compile(r"""((?:?(?:a|em|span|strong|i|b)[^>]*>)|[^<>\s]) # must be proceeded by an approved inline opening or closing tag or a nontag/nonspace
                                   \s+ # the space to replace
                                   ([^<>\s]+ # must be flollowed by non-tag non-space characters
                                   \s* # optional white space!
                                   ((a|em|span|strong|i|b)>\s*)* # optional closing inline tags with optional white space after each
                                   (((p|h[1-6]|li|dt|dd)>)|$)) # end with a closing p, h1-6, li or the end of the string
                                   """, re.VERBOSE)
    # NOTE(review): the replacement reads r'\1 \2'; given the docstring this
    # was presumably r'\1&nbsp;\2' before the entity was rendered to a plain
    # space. Verify against upstream.
    output = widont_finder.sub(r'\1 \2', text)

    return output
+
def applyfilters(text):
    """Run the non-tag-aware filters in order: amp, smartypants, caps,
    initial_quotes, then the locale-dependent localize pass.

    >>> typogrify('"Jayhawks" & KU fans act extremely obnoxiously
')
    '“Jayhawks” & KU fans act extremely obnoxiously
'
    """
    # Order matters: amp normalizes entities before smartypants curls quotes,
    # and initial_quotes expects the curled quotes to already be in place.
    for text_filter in (amp, smartypants, caps, initial_quotes, localize):
        text = text_filter(text)
    return text
+
def typogrify(text, ignore_tags=None):
    """The super typography filter.

    Splits ``text`` with process_ignores() and runs the full filter chain
    only over the sections that are not inside one of the ``ignore_tags``;
    ignored sections pass through verbatim.
    """
    pieces = []
    for chunk, should_process in process_ignores(text, ignore_tags):
        pieces.append(applyfilters(chunk) if should_process else chunk)

    # apply widont at the end, as its already smart about tags. Hopefully.
    return widont("".join(pieces))
+
def _test():
    """Run this module's doctests verbosely (``python filters.py``)."""
    import doctest
    doctest.testmod(verbose=True)


if __name__ == "__main__":
    _test()
diff --git a/plugins/typogrify/titlecase/__init__.py b/plugins/typogrify/titlecase/__init__.py
new file mode 100755
index 0000000..aeaca97
--- /dev/null
+++ b/plugins/typogrify/titlecase/__init__.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# titlecase v0.5.1
+# Copyright (C) 2008-2010, Stuart Colville.
+# https://pypi.python.org/pypi/titlecase
+
+"""
+Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008
+Python version by Stuart Colville http://muffinresearch.co.uk
+License: http://www.opensource.org/licenses/mit-license.php
+"""
+
+import re
+
__all__ = ['titlecase']
__version__ = '0.5.1'

# Words that stay lower-case inside a title (New York Times style, plus v/vs).
SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?'
# Punctuation that may surround a word without being part of it.
PUNCT = r"""!"#$%&'‘()*+,\-./:;?@[\\\]_`{|}~"""

SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I)
INLINE_PERIOD = re.compile(r'[a-z][.][a-z]', re.I)              # example.com, del.icio.us
UC_ELSEWHERE = re.compile(r'[%s]*?[a-zA-Z]+[A-Z]+?' % PUNCT)    # iTunes, McCarthy: caps past 1st letter
CAPFIRST = re.compile(r"^[%s]*?([A-Za-z])" % PUNCT)             # first letter, skipping leading punctuation
SMALL_FIRST = re.compile(r'^([%s]*)(%s)\b' % (PUNCT, SMALL), re.I)
SMALL_LAST = re.compile(r'\b(%s)[%s]?$' % (SMALL, PUNCT), re.I)
SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL)             # small word opening a sub-phrase
APOS_SECOND = re.compile(r"^[dol]{1}['‘]{1}[a-z]+$", re.I)      # d'artagnan, o'reilly, l'oreal
ALL_CAPS = re.compile(r'^[A-Z\s%s]+$' % PUNCT)
UC_INITIALS = re.compile(r"^(?:[A-Z]{1}\.{1}|[A-Z]{1}\.{1}[A-Z]{1})+$")  # A.B / A.B.
MAC_MC = re.compile(r"^([Mm]a?c)(\w+)")                         # McTavish, MacDonalds


def titlecase(text):

    """
    Titlecases input text

    This filter changes all words to Title Caps, and attempts to be clever
    about *un*capitalizing SMALL words like a/an/the in the input.

    The list of "SMALL words" which are not capped comes from
    the New York Times Manual of Style, plus 'vs' and 'v'.

    """

    lines = re.split('[\r\n]+', text)
    processed = []
    for line in lines:
        all_caps = ALL_CAPS.match(line)
        words = re.split('[\t ]', line)
        tc_line = []
        for word in words:
            if all_caps:
                if UC_INITIALS.match(word):
                    # Keep dotted initials (D.C.) intact even on all-caps lines.
                    tc_line.append(word)
                    continue
                else:
                    # Lower a shouty line first, then titlecase it normally.
                    word = word.lower()

            if APOS_SECOND.match(word):
                # d'artagnan -> D'Artagnan: upcase the 1st and 3rd characters.
                # BUGFIX: the original used str.replace(), which upcases every
                # occurrence of those characters ("d'artagnan" -> "D'ArtAgnAn").
                word = word[0].upper() + word[1] + word[2].upper() + word[3:]
                tc_line.append(word)
                continue
            if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word):
                # Leave domains (example.com) and mixed-case words (iTunes) alone.
                tc_line.append(word)
                continue
            if SMALL_WORDS.match(word):
                tc_line.append(word.lower())
                continue

            match = MAC_MC.match(word)
            if match:
                # Mc/Mac prefix: capitalize both the prefix and the remainder.
                tc_line.append("%s%s" % (match.group(1).capitalize(),
                                         match.group(2).capitalize()))
                continue

            # Default: capitalize the first letter of each hyphen-separated part.
            hyphenated = []
            for item in word.split('-'):
                hyphenated.append(CAPFIRST.sub(lambda m: m.group(0).upper(), item))
            tc_line.append("-".join(hyphenated))


        result = " ".join(tc_line)

        # The first word of a line is always capped, even when it is small...
        result = SMALL_FIRST.sub(lambda m: '%s%s' % (
            m.group(1),
            m.group(2).capitalize()
        ), result)

        # ...and so is the last word...
        result = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), result)

        # ...and the first word of any sub-phrase after :.;?! punctuation.
        result = SUBPHRASE.sub(lambda m: '%s%s' % (
            m.group(1),
            m.group(2).capitalize()
        ), result)

        processed.append(result)

    return "\n".join(processed)
+
diff --git a/plugins/typogrify/titlecase/tests.py b/plugins/typogrify/titlecase/tests.py
new file mode 100644
index 0000000..97a45e4
--- /dev/null
+++ b/plugins/typogrify/titlecase/tests.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Tests for titlecase"""
+
+
+import os
+import sys
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
+
+from titlecase import titlecase
+
# (input, expected_output) pairs consumed by the test_input_output generator.
TEST_DATA = (
    (
        "Q&A with steve jobs: 'that's what happens in technology'",
        "Q&A With Steve Jobs: 'That's What Happens in Technology'"
    ),
    (
        "What is AT&T's problem?",
        "What Is AT&T's Problem?"
    ),
    (
        "Apple deal with AT&T falls through",
        "Apple Deal With AT&T Falls Through"
    ),
    (
        "this v that",
        "This v That"
    ),
    (
        "this v. that",
        "This v. That"
    ),
    (
        "this vs that",
        "This vs That"
    ),
    (
        "this vs. that",
        "This vs. That"
    ),
    (
        "The SEC's Apple probe: what you need to know",
        "The SEC's Apple Probe: What You Need to Know"
    ),
    (
        "'by the Way, small word at the start but within quotes.'",
        "'By the Way, Small Word at the Start but Within Quotes.'"
    ),
    (
        "Small word at end is nothing to be afraid of",
        "Small Word at End Is Nothing to Be Afraid Of"
    ),
    (
        "Starting Sub-Phrase With a Small Word: a Trick, Perhaps?",
        "Starting Sub-Phrase With a Small Word: A Trick, Perhaps?"
    ),
    (
        "Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'",
        "Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'"
    ),
    (
        'sub-phrase with a small word in quotes: "a trick, perhaps?"',
        'Sub-Phrase With a Small Word in Quotes: "A Trick, Perhaps?"'
    ),
    (
        '"Nothing to Be Afraid of?"',
        '"Nothing to Be Afraid Of?"'
    ),
    (
        '"Nothing to be Afraid Of?"',
        '"Nothing to Be Afraid Of?"'
    ),
    (
        'a thing',
        'A Thing'
    ),
    (
        "2lmc Spool: 'gruber on OmniFocus and vapo(u)rware'",
        "2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'"
    ),
    (
        'this is just an example.com',
        'This Is Just an example.com'
    ),
    (
        'this is something listed on del.icio.us',
        'This Is Something Listed on del.icio.us'
    ),
    (
        'iTunes should be unmolested',
        'iTunes Should Be Unmolested'
    ),
    (
        'reading between the lines of steve jobs’s ‘thoughts on music’',
        'Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’'
    ),
    (
        'seriously, ‘repair permissions’ is voodoo',
        'Seriously, ‘Repair Permissions’ Is Voodoo'
    ),
    (
        'generalissimo francisco franco: still dead; kieren McCarthy: still a jackass',
        'Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass'
    ),
    (
        "O'Reilly should be untouched",
        "O'Reilly Should Be Untouched"
    ),
    (
        "my name is o'reilly",
        "My Name Is O'Reilly"
    ),
    (
        "WASHINGTON, D.C. SHOULD BE FIXED BUT MIGHT BE A PROBLEM",
        "Washington, D.C. Should Be Fixed but Might Be a Problem"
    ),
    (
        "THIS IS ALL CAPS AND SHOULD BE ADDRESSED",
        "This Is All Caps and Should Be Addressed"
    ),
    (
        "Mr McTavish went to MacDonalds",
        "Mr McTavish Went to MacDonalds"
    ),
    (
        "this shouldn't\nget mangled",
        "This Shouldn't\nGet Mangled"
    ),
    (
        "this is http://foo.com",
        "This Is http://foo.com"
    )
)
+
def test_all_caps_regex():
    """ALL_CAPS should match a line made only of capitals and spaces."""
    from titlecase import ALL_CAPS
    match = ALL_CAPS.match('THIS IS ALL CAPS')
    assert bool(match) is True
+
def test_initials_regex():
    """UC_INITIALS should match dotted initials without a trailing dot (A.B)."""
    from titlecase import UC_INITIALS
    match = UC_INITIALS.match('A.B')
    assert bool(match) is True
+
def test_initials_regex_2():
    """UC_INITIALS should match fully dotted initials (A.B.)."""
    from titlecase import UC_INITIALS
    match = UC_INITIALS.match('A.B.')
    assert bool(match) is True
+
def test_initials_regex_3():
    """UC_INITIALS must not match a plain run of capitals (ABCD)."""
    from titlecase import UC_INITIALS
    match = UC_INITIALS.match('ABCD')
    assert bool(match) is False
+
def check_input_matches_expected_output(in_, out):
    """Assert titlecase(in_) == out, printing the mismatch before failing.

    Yielded by the test generator below, one call per TEST_DATA entry.
    """
    actual = titlecase(in_)
    if actual != out:
        # Surface the mismatch in nose's output before the assertion fires.
        print("%s != %s" % (actual, out))
    assert actual == out
+
+
def test_input_output():
    """Yield one (checker, input, expected) triple per TEST_DATA entry
    for nose's generator-test protocol."""
    for in_, expected in TEST_DATA:
        yield check_input_matches_expected_output, in_, expected


if __name__ == "__main__":
    import nose
    nose.main()
+
diff --git a/plugins/typogrify/typogrify.py b/plugins/typogrify/typogrify.py
new file mode 100644
index 0000000..e3bc268
--- /dev/null
+++ b/plugins/typogrify/typogrify.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pelican import signals
+from filters import typogrify
+
def apply(data):
    """Run the typogrify filters over a Pelican content object, in place.

    Mutates the object's private ``_content`` and ``_summary`` attributes
    and its ``title`` metadata entry.
    """
    # NOTE(review): 'apply' shadows the Python 2 builtin of the same name;
    # harmless here since it is only referenced via the signal, but worth renaming.

    # Nothing to do for objects without a body.
    if not data._content:
        return


    data._content = typogrify(data._content)

    metadata = data.metadata
    # NOTE(review): assumes every content object carries a 'title' key —
    # a page without one would raise KeyError here. TODO confirm with Pelican.
    metadata['title'] = typogrify(metadata['title'])
    # NOTE(review): data.summary may be derived from the already-typogrified
    # _content, in which case the summary is filtered twice — presumably the
    # filters are idempotent enough; verify.
    data._summary = typogrify(data.summary)
+
def register():
    """Hook the filter into Pelican: run apply() whenever a content object is initialized."""
    signals.content_object_init.connect(apply)
--
cgit v1.2.3