X-Git-Url: https://git.ralfj.de/rust-101.git/blobdiff_plain/7df9cdaae2db9969c8b83c4c69ccc21eb0973eb4..392c35cc88157a46418467782782ea95b91ba849:/pycco-rs

diff --git a/pycco-rs b/pycco-rs
index d250b20..c884311 100755
--- a/pycco-rs
+++ b/pycco-rs
@@ -11,16 +11,16 @@ def patch_html(source, marker, new_text):
     return source.replace(marker, marker + new_text, 1)
 
 # now, monkey-patch pycco for Rust support
-pycco.main.languages[".rs"] = { "name": "rust", "symbol": "//"}
-for ext, l in pycco.main.languages.items():
+pycco.main.supported_languages[".rs"] = { "name": "rust", "comment_symbol": "//"}
+for ext, l in pycco.main.supported_languages.items():
     # Does the line begin with a comment?
-    l["comment_matcher"] = re.compile(r"^\s*" + l["symbol"] + "\s?")
+    l["comment_matcher"] = re.compile(r"^\s*" + l["comment_symbol"] + "\s?")
     # The dividing token we feed into Pygments, to delimit the boundaries between
     # sections.
-    l["divider_text"] = "\n" + l["symbol"] + "DIVIDER\n"
+    l["divider_text"] = "\n" + l["comment_symbol"] + "DIVIDER\n"
     # The mirror of `divider_text` that we expect Pygments to return. We can split
     # on this to recover the original sections.
-    l["divider_html"] = re.compile(r'\n*' + l["symbol"] + 'DIVIDER\n*')
+    l["divider_html"] = re.compile(r'\n*' + l["comment_symbol"] + 'DIVIDER\n*')
     # Get the Pygments Lexer for this language.
     l["lexer"] = lexers.get_lexer_by_name(l["name"])
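
Note (not part of the diff above): the fields this loop installs are what pycco uses to separate documentation comments from code before and after running Pygments. The following is a minimal, self-contained sketch of how those patched fields behave on a Rust snippet; it uses only the standard re module, and the rust_lang dict is illustrative glue mirroring the ".rs" entry from the diff, not an object taken from pycco's API.

import re

# Illustrative stand-in for the ".rs" entry the patch adds (key names taken
# from the diff; this dict is not imported from pycco).
rust_lang = {"name": "rust", "comment_symbol": "//"}
rust_lang["comment_matcher"] = re.compile(r"^\s*" + rust_lang["comment_symbol"] + r"\s?")
rust_lang["divider_text"] = "\n" + rust_lang["comment_symbol"] + "DIVIDER\n"
rust_lang["divider_html"] = re.compile(r"\n*" + rust_lang["comment_symbol"] + "DIVIDER\n*")

source = '// Prints a greeting.\nfn main() {\n    println!("Hello, world!");\n}\n'

# comment_matcher: decide, line by line, whether a line opens a doc comment.
for line in source.splitlines():
    kind = "doc " if rust_lang["comment_matcher"].match(line) else "code"
    print(kind, "|", line)

# divider_text is the token inserted between code sections before highlighting;
# divider_html is the regex used to split the highlighted output back apart.
highlighted = "fn main() {}" + rust_lang["divider_text"] + "fn other() {}"
print(rust_lang["divider_html"].split(highlighted))
# -> ['fn main() {}', 'fn other() {}']

Running this prints which lines would be treated as prose versus code, and shows the divider token surviving a round trip through the split regex.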