-rwxr-xr-x | content/Glyphes/2021-01-03-calligraphie.rst (renamed from content/Informatique/2021-01-03-calligraphie.rst) |   0
-rw-r--r-- | content/Perso/2013-05-18-traitment.rst  |   1
-rw-r--r-- | content/Perso/2013-06-01-marelle.rst    |   1
-rwxr-xr-x | pelicanconf.py                          |   4
-rw-r--r-- | plugins/my_typogrify/__init__.py        |   1
-rwxr-xr-x | plugins/my_typogrify/mytypogrify.py     |  96
-rw-r--r-- | plugins/typogrify/__init__.py           |   1
-rwxr-xr-x | plugins/typogrify/filters.py            | 386
-rwxr-xr-x | plugins/typogrify/titlecase/__init__.py | 101
-rw-r--r-- | plugins/typogrify/titlecase/tests.py    | 174
-rwxr-xr-x | plugins/typogrify/typogrify.py          |  19
-rwxr-xr-x | readme.rst                              |   4
12 files changed, 103 insertions, 685 deletions
diff --git a/content/Informatique/2021-01-03-calligraphie.rst b/content/Glyphes/2021-01-03-calligraphie.rst
index 3fd6e30..3fd6e30 100755
--- a/content/Informatique/2021-01-03-calligraphie.rst
+++ b/content/Glyphes/2021-01-03-calligraphie.rst
diff --git a/content/Perso/2013-05-18-traitment.rst b/content/Perso/2013-05-18-traitment.rst
index 14a6298..b5613d3 100644
--- a/content/Perso/2013-05-18-traitment.rst
+++ b/content/Perso/2013-05-18-traitment.rst
@@ -9,6 +9,7 @@ Cachez-moi ce menu que je ne saurais voir
 :tags: Humeur
 :summary: |summary|
 :logo: {static}/images/traitement/logo.jpg
+:status: hidden
 
 .. figure:: https://farm1.staticflickr.com/214/4555895229_880a76beb7_q.jpg
 
diff --git a/content/Perso/2013-06-01-marelle.rst b/content/Perso/2013-06-01-marelle.rst
index f658be9..98cc359 100644
--- a/content/Perso/2013-06-01-marelle.rst
+++ b/content/Perso/2013-06-01-marelle.rst
@@ -9,6 +9,7 @@ La marelle romaine : les solutions
 :tags: Jeux
 :logo: /images/marelle/logo.jpg
 :summary: |summary|
+:status: hidden
 
 .. image:: {static}/images/marelle/extract.jpeg
    :width: 150
diff --git a/pelicanconf.py b/pelicanconf.py
index 93b13f6..68081b7 100755
--- a/pelicanconf.py
+++ b/pelicanconf.py
@@ -47,7 +47,7 @@ THEME_TEMPLATES_OVERRIDES = \
     [ 'theme/custom'
     ]
 
-LINKS= ( ("git","http://git.chimrod.com")
+LINKS= ( ("Git", "http://git.chimrod.com")
        , )
 HOME_HIDE_TAGS=False
 I18N_TEMPLATES_LANG = "fr_FR"
@@ -78,7 +78,7 @@ PLUGIN_PATHS = ['plugins']
 
 PLUGINS = \
     ( 'related_posts'
-    , 'typogrify'
+    , 'my_typogrify'
    , 'i18n_subsites'
    , 'render_math'
    )
diff --git a/plugins/my_typogrify/__init__.py b/plugins/my_typogrify/__init__.py
new file mode 100644
index 0000000..b445852
--- /dev/null
+++ b/plugins/my_typogrify/__init__.py
@@ -0,0 +1 @@
+from .mytypogrify import *
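Note: the PLUGINS entry switches from the bundled typogrify plugin to the new my_typogrify package, and the one-line __init__.py above re-exports everything from mytypogrify so Pelican can import the plugin by the name listed in PLUGINS. A rough sketch of that loading path (not Pelican's actual source; assumes the virtualenv from the readme so pelican and the PyPI typogrify package import cleanly, and the repository root as working directory):

    # Sketch only: roughly what Pelican's legacy plugin loader does with the
    # values set in pelicanconf.py.
    import sys

    PLUGIN_PATHS = ['plugins']           # from pelicanconf.py
    sys.path[0:0] = PLUGIN_PATHS

    plugin = __import__('my_typogrify')  # resolves thanks to the __init__.py above
    plugin.register()                    # connects content_object_init to apply()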
diff --git a/plugins/my_typogrify/mytypogrify.py b/plugins/my_typogrify/mytypogrify.py
new file mode 100755
index 0000000..490e82b
--- /dev/null
+++ b/plugins/my_typogrify/mytypogrify.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pelican import signals
+import typogrify.filters as filters
+import re
+
+def french_insecable(text):
+    """Replace the space between each double sign punctuation by a thin
+    non-breaking space.
+
+    This conform with the french typographic rules.
+
+    >>> french_insecable('Foo !')
+    u'Foo<span style="white-space:nowrap"> </span>!'
+
+    >>> french_insecable('Foo ?')
+    u'Foo<span style="white-space:nowrap"> </span>?'
+
+    >>> french_insecable('Foo : bar')
+    u'Foo<span style="white-space:nowrap"> </span>: bar'
+
+    >>> french_insecable('Foo ; bar')
+    u'Foo<span style="white-space:nowrap"> </span>; bar'
+
+    >>> french_insecable(u'\xab bar \xbb')
+    u'\\xab<span style="white-space:nowrap"> </span>bar<span style="white-space:nowrap"> </span>\\xbb'
+
+    >>> french_insecable('123 456')
+    u'123<span style="white-space:nowrap"> </span>456'
+
+    >>> french_insecable('123 %')
+    u'123<span style="white-space:nowrap"> </span>%'
+
+    Space inside attributes should be preserved :
+
+    >>> french_insecable('<a title="foo !">')
+    '<a title="foo !">'
+    """
+
+    tag_pattern = '</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>'
+    intra_tag_finder = re.compile(r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (tag_pattern, tag_pattern))
+
+    nnbsp = u'<span style="white-space:nowrap"> </span>'
+    space_finder = re.compile(r"""(?:
+                              (\w\s[:;!\?\xbb])|   # Group 1, space before punctuation
+                              ([\xab]\s\w)|
+                              ([0-9]\s[0-9])|
+                              ([0-9]\s\%)
+                              )""", re.VERBOSE)
+
+    def _insecable_wrapper(groups):
+        """This is necessary to keep dotted cap strings to pick up extra spaces"""
+        def substitute(matchobj):
+            return matchobj.group(0).replace(" ", nnbsp)
+
+        prefix = groups.group('prefix') or ''
+        text = space_finder.sub(substitute, groups.group('text'))
+        suffix = groups.group('suffix') or ''
+        return prefix + text + suffix
+
+    output = intra_tag_finder.sub(_insecable_wrapper, text)
+    return output
+
+def typogrify(text, ignore_tags=None):
+    """The super typography filter
+
+    Applies filters to text that are not in tags contained in the
+    ignore_tags list.
+    """
+
+    section_list = filters.process_ignores(text, ignore_tags)
+
+    rendered_text = ""
+    for text_item, should_process in section_list:
+        if should_process:
+            rendered_text += french_insecable(filters.applyfilters(text_item))
+        else:
+            rendered_text += text_item
+
+    # apply widont at the end, as its already smart about tags. Hopefully.
+    return filters.widont(rendered_text)
+
+def apply(data):
+
+    if not data._content:
+        return
+
+
+    data._content = typogrify(data._content)
+
+    metadata = data.metadata
+    metadata['title'] = typogrify(metadata['title'])
+
+def register():
+    signals.content_object_init.connect(apply)
diff --git a/plugins/typogrify/__init__.py b/plugins/typogrify/__init__.py
deleted file mode 100644
index 8511890..0000000
--- a/plugins/typogrify/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .typogrify import *
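Note: unlike the vendored copy removed below, the new module keeps only the French thin-space rule locally and delegates the other filters (amp, caps, initial quotes, smartypants, widont) to the typogrify package from PyPI. A quick manual check of the new filter, as a sketch assuming the readme's virtualenv (pelican, smartypants and typogrify installed) and the repository root as working directory:

    import sys
    sys.path.insert(0, "plugins")        # same directory as PLUGIN_PATHS

    from my_typogrify.mytypogrify import french_insecable, typogrify

    # Per the doctests above, the breaking space before '!' is replaced by a
    # space wrapped in a white-space:nowrap span.
    print(french_insecable("Foo !"))
    # Foo<span style="white-space:nowrap"> </span>!

    # The full pipeline applies the stock typogrify filters first, then the
    # French spacing, then widont.
    print(typogrify('<p>Il a dit : \xab oui \xbb !</p>'))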
diff --git a/plugins/typogrify/filters.py b/plugins/typogrify/filters.py deleted file mode 100755 index 41d108d..0000000 --- a/plugins/typogrify/filters.py +++ /dev/null @@ -1,386 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import re -from typogrify.titlecase import titlecase # NOQA -import locale - -class TypogrifyError(Exception): - """ A base error class so we can catch or scilence typogrify's errors in templates """ - pass - -def process_ignores(text, ignore_tags=None): - """ Creates a list of tuples based on tags to be ignored. - Tags can be added as a list in the `ignore_tags`. - Returns in the following format: - - [ - ('Text here', <should text be processed? True|False>), - ('Text here', <should text be processed? True|False>), - ] - - >>> process_ignores('<pre>processed</pre><p>processed</p>') - [('<pre>processed</pre>', False), ('<p>processed</p>', True)] - >>> process_ignores('<code>processed</code><p>processed<pre>processed</pre></p>') - [('<code>processed</code>', False), ('<p>processed', True), ('<pre>processed</pre>', False), ('</p>', True)] - >>> process_ignores('<code>processed</code><p>processed<pre>processed</pre></p>',['p']) - [('<code>processed</code>', False), ('<p>processed<pre>processed</pre></p>', False)] - """ - - position = 0 - sections = [] - if ignore_tags is None: - ignore_tags = [] - - ignore_tags = ignore_tags + ['pre', 'code'] # default tags - ignore_regex = r'<(%s)(?:\s.*?)?>.*?</(\1)>' % '|'.join(ignore_tags) - ignore_finder = re.compile(ignore_regex, re.IGNORECASE | re.DOTALL) - - for section in ignore_finder.finditer(text): - start, end = section.span() - - if position != start: - # if the current position isn't the match we - # need to process everything in between - sections.append((text[position:start], True)) - - # now we mark the matched section as ignored - sections.append((text[start:end], False)) - - position = end - - # match the rest of the text if necessary - # (this could in fact be the entire string) - if position < len(text): - sections.append((text[position:len(text)], True)) - - return sections - -def amp(text): - """Wraps apersands in HTML with ``<span class="amp">`` so they can be - styled with CSS. Apersands are also normalized to ``&``. Requires - ampersands to have whitespace or an `` `` on both sides. 
- - >>> amp('One & two') - 'One <span class="amp">&</span> two' - >>> amp('One & two') - 'One <span class="amp">&</span> two' - >>> amp('One & two') - 'One <span class="amp">&</span> two' - - >>> amp('One & two') - 'One <span class="amp">&</span> two' - - It won't mess up & that are already wrapped, in entities or URLs - - >>> amp('One <span class="amp">&</span> two') - 'One <span class="amp">&</span> two' - >>> amp('“this” & <a href="/?that&test">that</a>') - '“this” <span class="amp">&</span> <a href="/?that&test">that</a>' - - It should ignore standalone amps that are in attributes - >>> amp('<link href="xyz.html" title="One & Two">xyz</link>') - '<link href="xyz.html" title="One & Two">xyz</link>' - """ - # tag_pattern from http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx - # it kinda sucks but it fixes the standalone amps in attributes bug - tag_pattern = '</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>' - amp_finder = re.compile(r"(\s| )(&|&|&\#38;)(\s| )") - intra_tag_finder = re.compile(r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (tag_pattern, tag_pattern)) - - def _amp_process(groups): - prefix = groups.group('prefix') or '' - text = amp_finder.sub(r"""\1<span class="amp">&</span>\3""", groups.group('text')) - suffix = groups.group('suffix') or '' - return prefix + text + suffix - - output = intra_tag_finder.sub(_amp_process, text) - return output - - -def caps(text): - """Wraps multiple capital letters in ``<span class="caps">`` - so they can be styled with CSS. - - >>> caps("A message from KU") - 'A message from <span class="caps">KU</span>' - - Uses the smartypants tokenizer to not screw with HTML or with tags it shouldn't. - - >>> caps("<PRE>CAPS</pre> more CAPS") - '<PRE>CAPS</pre> more <span class="caps">CAPS</span>' - - >>> caps("A message from 2KU2 with digits") - 'A message from <span class="caps">2KU2</span> with digits' - - >>> caps("Dotted caps followed by spaces should never include them in the wrap D.O.T. like so.") - 'Dotted caps followed by spaces should never include them in the wrap <span class="caps">D.O.T.</span> like so.' - - All caps with with apostrophes in them shouldn't break. Only handles dump apostrophes though. - >>> caps("JIMMY'S") - '<span class="caps">JIMMY\\'S</span>' - - >>> caps("<i>D.O.T.</i>HE34T<b>RFID</b>") - '<i><span class="caps">D.O.T.</span></i><span class="caps">HE34T</span><b><span class="caps">RFID</span></b>' - """ - try: - import smartypants - except ImportError: - raise TypogrifyError("Error in {% caps %} filter: The Python SmartyPants library isn't installed.") - - tokens = smartypants._tokenize(text) - result = [] - in_skipped_tag = False - - cap_finder = re.compile(r"""( - (\b[A-Z\d]* # Group 2: Any amount of caps and digits - [A-Z]\d*[A-Z] # A cap string much at least include two caps (but they can have digits between them) - [A-Z\d']*\b) # Any amount of caps and digits or dumb apostsrophes - | (\b[A-Z]+\.\s? # OR: Group 3: Some caps, followed by a '.' 
and an optional space - (?:[A-Z]+\.\s?)+) # Followed by the same thing at least once more - (?:\s|\b|$)) - """, re.VERBOSE) - - def _cap_wrapper(matchobj): - """This is necessary to keep dotted cap strings to pick up extra spaces""" - if matchobj.group(2): - return """<span class="caps">%s</span>""" % matchobj.group(2) - else: - if matchobj.group(3)[-1] == " ": - caps = matchobj.group(3)[:-1] - tail = ' ' - else: - caps = matchobj.group(3) - tail = '' - return """<span class="caps">%s</span>%s""" % (caps, tail) - - # Add additional tags whose content should be - # ignored here. Note - <pre> and <code> tag are - # ignored by default and therefore are not here - tags_to_skip_regex = re.compile("<(/)?(?:kbd|script)[^>]*>", re.IGNORECASE) - - for token in tokens: - if token[0] == "tag": - # Don't mess with tags. - result.append(token[1]) - close_match = tags_to_skip_regex.match(token[1]) - if close_match and close_match.group(1) == None: - in_skipped_tag = True - else: - in_skipped_tag = False - else: - if in_skipped_tag: - result.append(token[1]) - else: - result.append(cap_finder.sub(_cap_wrapper, token[1])) - output = "".join(result) - return output - - -def initial_quotes(text): - """Wraps initial quotes in ``class="dquo"`` for double quotes or - ``class="quo"`` for single quotes. Works in these block tags ``(h1-h6, p, li, dt, dd)`` - and also accounts for potential opening inline elements ``a, em, strong, span, b, i`` - - >>> initial_quotes('"With primes"') - '<span class="dquo">"</span>With primes"' - >>> initial_quotes("'With single primes'") - '<span class="quo">\\'</span>With single primes\\'' - - >>> initial_quotes('<a href="#">"With primes and a link"</a>') - '<a href="#"><span class="dquo">"</span>With primes and a link"</a>' - - >>> initial_quotes('“With smartypanted quotes”') - '<span class="dquo">“</span>With smartypanted quotes”' - """ - quote_finder = re.compile(r"""((<(p|h[1-6]|li|dt|dd)[^>]*>|^) # start with an opening p, h1-6, li, dd, dt or the start of the string - \s* # optional white space! - (<(a|em|span|strong|i|b)[^>]*>\s*)*) # optional opening inline tags, with more optional white space for each. - (("|“|&\#8220;)|('|‘|&\#8216;)) # Find me a quote! (only need to find the left quotes and the primes) - # double quotes are in group 7, singles in group 8 - """, re.VERBOSE) - - def _quote_wrapper(matchobj): - if matchobj.group(7): - classname = "dquo" - quote = matchobj.group(7) - else: - classname = "quo" - quote = matchobj.group(8) - return """%s<span class="%s">%s</span>""" % (matchobj.group(1), classname, quote) - output = quote_finder.sub(_quote_wrapper, text) - return output - - -def smartypants(text): - """Applies smarty pants to curl quotes. - - >>> smartypants('The "Green" man') - 'The “Green” man' - """ - try: - import smartypants - except ImportError: - raise TypogrifyError("Error in {% smartypants %} filter: The Python smartypants library isn't installed.") - else: - output = smartypants.smartypants(text) - return output - -def french_insecable(text): - """Replace the space between each double sign punctuation by a thin - non-breaking space. - - This conform with the french typographic rules. - - >>> french_insecable('Foo !') - u'Foo<span style="white-space:nowrap"> </span>!' - - >>> french_insecable('Foo ?') - u'Foo<span style="white-space:nowrap"> </span>?' 
- - >>> french_insecable('Foo : bar') - u'Foo<span style="white-space:nowrap"> </span>: bar' - - >>> french_insecable('Foo ; bar') - u'Foo<span style="white-space:nowrap"> </span>; bar' - - >>> french_insecable(u'\xab bar \xbb') - u'\\xab<span style="white-space:nowrap"> </span>bar<span style="white-space:nowrap"> </span>\\xbb' - - >>> french_insecable('123 456') - u'123<span style="white-space:nowrap"> </span>456' - - >>> french_insecable('123 %') - u'123<span style="white-space:nowrap"> </span>%' - - Space inside attributes should be preserved : - - >>> french_insecable('<a title="foo !">') - '<a title="foo !">' - """ - - tag_pattern = '</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>' - intra_tag_finder = re.compile(r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (tag_pattern, tag_pattern)) - - nnbsp = u'<span style="white-space:nowrap"> </span>' - space_finder = re.compile(r"""(?: - (\w\s[:;!\?\xbb])| # Group 1, space before punctuation - ([\xab]\s\w)| - ([0-9]\s[0-9])| - ([0-9]\s\%) - )""", re.VERBOSE) - - def _insecable_wrapper(groups): - """This is necessary to keep dotted cap strings to pick up extra spaces""" - def substitute(matchobj): - return matchobj.group(0).replace(" ", nnbsp) - - prefix = groups.group('prefix') or '' - text = space_finder.sub(substitute, groups.group('text')) - suffix = groups.group('suffix') or '' - return prefix + text + suffix - - output = intra_tag_finder.sub(_insecable_wrapper, text) - return output - -def localize(text): - """ Return the text processed with the appropriate system locale - """ - table = {"fr_FR" : lambda x : french_insecable(x)} - - lang = locale.getdefaultlocale()[0] - processor = table.get(lang, lambda x : x) - - return processor(text) - -def widont(text): - """Replaces the space between the last two words in a string with `` `` - Works in these block tags ``(h1-h6, p, li, dd, dt)`` and also accounts for - potential closing inline elements ``a, em, strong, span, b, i`` - - >>> widont('A very simple test') - 'A very simple test' - - Single word items shouldn't be changed - >>> widont('Test') - 'Test' - >>> widont(' Test') - ' Test' - >>> widont('<ul><li>Test</p></li><ul>') - '<ul><li>Test</p></li><ul>' - >>> widont('<ul><li> Test</p></li><ul>') - '<ul><li> Test</p></li><ul>' - - >>> widont('<p>In a couple of paragraphs</p><p>paragraph two</p>') - '<p>In a couple of paragraphs</p><p>paragraph two</p>' - - >>> widont('<h1><a href="#">In a link inside a heading</i> </a></h1>') - '<h1><a href="#">In a link inside a heading</i> </a></h1>' - - >>> widont('<h1><a href="#">In a link</a> followed by other text</h1>') - '<h1><a href="#">In a link</a> followed by other text</h1>' - - Empty HTMLs shouldn't error - >>> widont('<h1><a href="#"></a></h1>') - '<h1><a href="#"></a></h1>' - - >>> widont('<div>Divs get no love!</div>') - '<div>Divs get no love!</div>' - - >>> widont('<pre>Neither do PREs</pre>') - '<pre>Neither do PREs</pre>' - - >>> widont('<div><p>But divs with paragraphs do!</p></div>') - '<div><p>But divs with paragraphs do!</p></div>' - """ - - widont_finder = re.compile(r"""((?:</?(?:a|em|span|strong|i|b)[^>]*>)|[^<>\s]) # must be proceeded by an approved inline opening or closing tag or a nontag/nonspace - \s+ # the space to replace - ([^<>\s]+ # must be flollowed by non-tag non-space characters - \s* # optional white space! 
- (</(a|em|span|strong|i|b)>\s*)* # optional closing inline tags with optional white space after each - ((</(p|h[1-6]|li|dt|dd)>)|$)) # end with a closing p, h1-6, li or the end of the string - """, re.VERBOSE) - output = widont_finder.sub(r'\1 \2', text) - - return output - -def applyfilters(text): - """Applies the following filters: smartypants, caps, amp, initial_quotes - - >>> typogrify('<h2>"Jayhawks" & KU fans act extremely obnoxiously</h2>') - '<h2><span class="dquo">“</span>Jayhawks” <span class="amp">&</span> <span class="caps">KU</span> fans act extremely obnoxiously</h2>' - """ - text = amp(text) - text = smartypants(text) - text = caps(text) - text = initial_quotes(text) - text = localize(text) - - return text - -def typogrify(text, ignore_tags=None): - """The super typography filter - - Applies filters to text that are not in tags contained in the - ignore_tags list. - """ - - section_list = process_ignores(text, ignore_tags) - - rendered_text = "" - for text_item, should_process in section_list: - if should_process: - rendered_text += applyfilters(text_item) - else: - rendered_text += text_item - - # apply widont at the end, as its already smart about tags. Hopefully. - return widont(rendered_text) - -def _test(): - import doctest - doctest.testmod(verbose=True) - -if __name__ == "__main__": - _test() diff --git a/plugins/typogrify/titlecase/__init__.py b/plugins/typogrify/titlecase/__init__.py deleted file mode 100755 index aeaca97..0000000 --- a/plugins/typogrify/titlecase/__init__.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# titlecase v0.5.1 -# Copyright (C) 2008-2010, Stuart Colville. -# https://pypi.python.org/pypi/titlecase - -""" -Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008 -Python version by Stuart Colville http://muffinresearch.co.uk -License: http://www.opensource.org/licenses/mit-license.php -""" - -import re - -__all__ = ['titlecase'] -__version__ = '0.5.1' - -SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?' -PUNCT = r"""!"#$%&'‘()*+,\-./:;?@[\\\]_`{|}~""" - -SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I) -INLINE_PERIOD = re.compile(r'[a-z][.][a-z]', re.I) -UC_ELSEWHERE = re.compile(r'[%s]*?[a-zA-Z]+[A-Z]+?' % PUNCT) -CAPFIRST = re.compile(r"^[%s]*?([A-Za-z])" % PUNCT) -SMALL_FIRST = re.compile(r'^([%s]*)(%s)\b' % (PUNCT, SMALL), re.I) -SMALL_LAST = re.compile(r'\b(%s)[%s]?$' % (SMALL, PUNCT), re.I) -SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL) -APOS_SECOND = re.compile(r"^[dol]{1}['‘]{1}[a-z]+$", re.I) -ALL_CAPS = re.compile(r'^[A-Z\s%s]+$' % PUNCT) -UC_INITIALS = re.compile(r"^(?:[A-Z]{1}\.{1}|[A-Z]{1}\.{1}[A-Z]{1})+$") -MAC_MC = re.compile(r"^([Mm]a?c)(\w+)") - -def titlecase(text): - - """ - Titlecases input text - - This filter changes all words to Title Caps, and attempts to be clever - about *un*capitalizing SMALL words like a/an/the in the input. - - The list of "SMALL words" which are not capped comes from - the New York Times Manual of Style, plus 'vs' and 'v'. 
- - """ - - lines = re.split('[\r\n]+', text) - processed = [] - for line in lines: - all_caps = ALL_CAPS.match(line) - words = re.split('[\t ]', line) - tc_line = [] - for word in words: - if all_caps: - if UC_INITIALS.match(word): - tc_line.append(word) - continue - else: - word = word.lower() - - if APOS_SECOND.match(word): - word = word.replace(word[0], word[0].upper()) - word = word.replace(word[2], word[2].upper()) - tc_line.append(word) - continue - if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word): - tc_line.append(word) - continue - if SMALL_WORDS.match(word): - tc_line.append(word.lower()) - continue - - match = MAC_MC.match(word) - if match: - tc_line.append("%s%s" % (match.group(1).capitalize(), - match.group(2).capitalize())) - continue - - hyphenated = [] - for item in word.split('-'): - hyphenated.append(CAPFIRST.sub(lambda m: m.group(0).upper(), item)) - tc_line.append("-".join(hyphenated)) - - - result = " ".join(tc_line) - - result = SMALL_FIRST.sub(lambda m: '%s%s' % ( - m.group(1), - m.group(2).capitalize() - ), result) - - result = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), result) - - result = SUBPHRASE.sub(lambda m: '%s%s' % ( - m.group(1), - m.group(2).capitalize() - ), result) - - processed.append(result) - - return "\n".join(processed) - diff --git a/plugins/typogrify/titlecase/tests.py b/plugins/typogrify/titlecase/tests.py deleted file mode 100644 index 97a45e4..0000000 --- a/plugins/typogrify/titlecase/tests.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -"""Tests for titlecase""" - - -import os -import sys -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../')) - -from titlecase import titlecase - -TEST_DATA = ( - ( - "Q&A with steve jobs: 'that's what happens in technology'", - "Q&A With Steve Jobs: 'That's What Happens in Technology'" - ), - ( - "What is AT&T's problem?", - "What Is AT&T's Problem?" - ), - ( - "Apple deal with AT&T falls through", - "Apple Deal With AT&T Falls Through" - ), - ( - "this v that", - "This v That" - ), - ( - "this v. that", - "This v. That" - ), - ( - "this vs that", - "This vs That" - ), - ( - "this vs. that", - "This vs. That" - ), - ( - "The SEC's Apple probe: what you need to know", - "The SEC's Apple Probe: What You Need to Know" - ), - ( - "'by the Way, small word at the start but within quotes.'", - "'By the Way, Small Word at the Start but Within Quotes.'" - ), - ( - "Small word at end is nothing to be afraid of", - "Small Word at End Is Nothing to Be Afraid Of" - ), - ( - "Starting Sub-Phrase With a Small Word: a Trick, Perhaps?", - "Starting Sub-Phrase With a Small Word: A Trick, Perhaps?" 
- ), - ( - "Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'", - "Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'" - ), - ( - 'sub-phrase with a small word in quotes: "a trick, perhaps?"', - 'Sub-Phrase With a Small Word in Quotes: "A Trick, Perhaps?"' - ), - ( - '"Nothing to Be Afraid of?"', - '"Nothing to Be Afraid Of?"' - ), - ( - '"Nothing to be Afraid Of?"', - '"Nothing to Be Afraid Of?"' - ), - ( - 'a thing', - 'A Thing' - ), - ( - "2lmc Spool: 'gruber on OmniFocus and vapo(u)rware'", - "2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'" - ), - ( - 'this is just an example.com', - 'This Is Just an example.com' - ), - ( - 'this is something listed on del.icio.us', - 'This Is Something Listed on del.icio.us' - ), - ( - 'iTunes should be unmolested', - 'iTunes Should Be Unmolested' - ), - ( - 'reading between the lines of steve jobs’s ‘thoughts on music’', - 'Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’' - ), - ( - 'seriously, ‘repair permissions’ is voodoo', - 'Seriously, ‘Repair Permissions’ Is Voodoo' - ), - ( - 'generalissimo francisco franco: still dead; kieren McCarthy: still a jackass', - 'Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass' - ), - ( - "O'Reilly should be untouched", - "O'Reilly Should Be Untouched" - ), - ( - "my name is o'reilly", - "My Name Is O'Reilly" - ), - ( - "WASHINGTON, D.C. SHOULD BE FIXED BUT MIGHT BE A PROBLEM", - "Washington, D.C. Should Be Fixed but Might Be a Problem" - ), - ( - "THIS IS ALL CAPS AND SHOULD BE ADDRESSED", - "This Is All Caps and Should Be Addressed" - ), - ( - "Mr McTavish went to MacDonalds", - "Mr McTavish Went to MacDonalds" - ), - ( - "this shouldn't\nget mangled", - "This Shouldn't\nGet Mangled" - ), - ( - "this is http://foo.com", - "This Is http://foo.com" - ) -) - -def test_all_caps_regex(): - """Test - all capitals regex""" - from titlecase import ALL_CAPS - assert bool(ALL_CAPS.match('THIS IS ALL CAPS')) is True - -def test_initials_regex(): - """Test - uppercase initals regex with A.B""" - from titlecase import UC_INITIALS - assert bool(UC_INITIALS.match('A.B')) is True - -def test_initials_regex_2(): - """Test - uppercase initals regex with A.B.""" - from titlecase import UC_INITIALS - assert bool(UC_INITIALS.match('A.B.')) is True - -def test_initials_regex_3(): - """Test - uppercase initals regex with ABCD""" - from titlecase import UC_INITIALS - assert bool(UC_INITIALS.match('ABCD')) is False - -def check_input_matches_expected_output(in_, out): - """Function yielded by test generator""" - try : - assert titlecase(in_) == out - except AssertionError: - print("%s != %s" % (titlecase(in_), out)) - raise - - -def test_input_output(): - """Generated tests""" - for data in TEST_DATA: - yield check_input_matches_expected_output, data[0], data[1] - - -if __name__ == "__main__": - import nose - nose.main() - diff --git a/plugins/typogrify/typogrify.py b/plugins/typogrify/typogrify.py deleted file mode 100755 index 7f5f568..0000000 --- a/plugins/typogrify/typogrify.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from pelican import signals -from typogrify.filters import typogrify - -def apply(data): - - if not data._content: - return - - - data._content = typogrify(data._content) - - metadata = data.metadata - metadata['title'] = typogrify(metadata['title']) - -def register(): - signals.content_object_init.connect(apply) @@ -1,5 +1,5 @@ -Ce dépôt contient le blog hébergé à l'adresse suivante : http://blog.chimrod.com/ +Ce 
dépôt contient le blog hébergé à l'adresse suivante : https://blog.chimrod.com/
 
 Installation
 ============
 
@@ -13,7 +13,7 @@ Installer pélican dans un environnement virtuel :
 
     mkvirtualenv pelican
     workon pelican
-    pip3 install pelican i18n_subsites3 smartypants pygments
+    pip3 install pelican smartypants pygments typogrify
 
 Copie du dépôt
 --------------
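Note: the pip line now installs typogrify from PyPI, which is what plugins/my_typogrify/mytypogrify.py imports, and no longer installs i18n_subsites3 this way. A small sketch (an assumed verification step, not part of the repository) to confirm the installed package exposes the helpers the plugin calls:

    # Run inside the 'pelican' virtualenv created in the steps above.
    import typogrify.filters as filters
    import smartypants  # imported by typogrify's smart-quote filter

    for name in ("process_ignores", "applyfilters", "widont"):
        assert hasattr(filters, name), "missing typogrify.filters.%s" % name
    print("typogrify.filters OK")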