# readme_writer.py — script/build_readmes (The-Glosscubator, Cat's Eye Technologies)

# SPDX-FileCopyrightText: Chris Pressey, the original author of this work, has dedicated it to the public domain.
# For more information, please refer to <https://unlicense.org/>
# SPDX-License-Identifier: Unlicense

"""
Handles the generation and writing of README files
"""
import os
import re

from formatters import (
    format_see_also_link, format_see_also_links,
    format_webpage, format_book, format_repo, format_paper, format_entry
)


# Markdown/HTML comment header emitted at the top of every generated README.
# Assembled with .format() so the literal SPDX tag text never appears verbatim
# in this source file (presumably to keep REUSE/license scanners from reading
# these embedded strings as this file's own license declaration — TODO confirm).
UNLICENSE_HEADER = """<!--
{}-FileCopyrightText: Chris Pressey, the original author of this work, has dedicated it to the public domain.

{}-License-Identifier: CC0-1.0
-->
""".format('SPDX', 'SPDX')


def _write_section(f, heading, primary, secondary, fmt):
    """Write one "### heading" section listing primary entries followed by
    secondary entries (grouped by the topic they were borrowed from).

    Skips the section entirely when both collections are empty.
    """
    if not (primary or secondary):
        return
    f.write("\n### {}\n\n".format(heading))
    for entry in primary:
        f.write(format_entry(entry, fmt))
    # Secondary entries come from other topics; sort by source topic for a
    # stable, deterministic ordering.
    for source_topic, topic_entries in sorted(secondary.items()):
        for entry in topic_entries:
            f.write(format_entry(entry, fmt, source_topic=source_topic))


def write_readme_file(base_dir, topic, topic_section, entries, secondary_entries):
    """Generate by-topic/<topic>/README.md for one topic.

    base_dir: repository root containing the by-topic/ tree
    topic: directory name of the topic (also the default title)
    topic_section: dict whose "properties" may carry "title" and a
        comma-separated "see-also" list of related topics
    entries: dict with "webpages", "repos", "books", "papers" lists
    secondary_entries: same four keys, each mapping source topic -> entry list
    """
    props = topic_section["properties"]
    title = props.get("title", topic)

    see_also_raw = props.get("see-also", "")
    see_also_topics = [s.strip() for s in see_also_raw.split(",")] if see_also_raw else []
    see_also = (
        " | _See also: {}_".format(format_see_also_links(see_also_topics))
        if see_also_topics else ""
    )

    # Section order is deliberate: web resources first, books last.
    sections = [
        ("Web resources", entries["webpages"], secondary_entries["webpages"], format_webpage),
        ("Repositories", entries["repos"], secondary_entries["repos"], format_repo),
        ("Papers", entries["papers"], secondary_entries["papers"], format_paper),
        ("Books", entries["books"], secondary_entries["books"], format_book),
    ]

    with open(os.path.join(base_dir, "by-topic", topic, "README.md"), 'w') as f:
        f.write(title + "\n")
        f.write("-" * len(title) + "\n")
        f.write("\n{}\n".format(UNLICENSE_HEADER))
        f.write("[(Up)](../../README.md#topics){}\n".format(see_also))
        f.write("""
- - - -

""")
        for heading, primary, secondary, fmt in sections:
            _write_section(f, heading, primary, secondary, fmt)


def dump_at_rating(f, c, entries, target_rating, formatter):
    """Write, grouped by topic, every entry whose rating equals target_rating.

    f: open text file to receive markdown output
    c: config-like object exposing `topic_dirs` (iterable of topic names)
    entries: dict mapping each topic name to a list of entry dicts
    target_rating: rating string to select; entries with no "rating"
        property are treated as "TODO"
    formatter: callable rendering one entry dict to a markdown fragment

    Returns the number of entries written.
    """
    count = 0
    for topic in sorted(c.topic_dirs):
        selected = []
        for entry in entries[topic]:
            props = entry["properties"]
            # Headings and Wikipedia links are never listed on rating pages.
            if props.get("is-heading"):
                continue
            if props.get("url", "").startswith("https://en.wikipedia"):
                continue
            # Check the rating *before* formatting, so the formatter only
            # runs for entries that will actually be written (the original
            # formatted every candidate, matching or not).
            if props.get("rating", "TODO") == target_rating:
                selected.append("*   {}".format(formatter(entry)))
        if selected:
            # Only emit the topic heading when it has at least one entry.
            f.write("\n### {}\n\n".format(topic))
            for line in selected:
                f.write(line)
                f.write("\n")
            count += len(selected)
    return count


def dump_rating_page(c, rating, rating_name, webpages, repos, books, papers):
    """Write by-rating/<rating_name>.md listing every resource at `rating`.

    Emits four sections — webpages, books, papers, repositories — each
    delegated to dump_at_rating, and returns the total number of entries
    written across all of them.
    """
    # Section order is deliberate: webpages, books, papers, repositories.
    sections = [
        ("Webpages", webpages, format_webpage),
        ("Books", books, format_book),
        ("Papers", papers, format_paper),
        ("Repositories", repos, format_repo),
    ]
    total = 0
    page_path = os.path.join(c.base_dir, "by-rating", rating_name + ".md")
    with open(page_path, "w") as f:
        heading = rating_name + " Resources"
        f.write(heading + "\n")
        f.write("=" * len(heading) + "\n")
        f.write("\n{}".format(UNLICENSE_HEADER))
        for label, collection, formatter in sections:
            f.write("\n{} {}\n--------------\n".format(rating_name, label))
            total += dump_at_rating(f, c, collection, rating, formatter=formatter)
    return total


def update_main_readme(base_dir, totals, topics):
    """Splice fresh totals and topics text into the top-level README.md.

    Replaces only the first region between each marker pair
    (<!-- TOTALS --> ... <!-- /TOTALS --> and <!-- TOPICS --> ... <!-- /TOPICS -->),
    leaving the rest of the README untouched.
    """
    readme_path = os.path.join(base_dir, "README.md")
    with open(readme_path, "r") as f:
        contents = f.read()

    for tag, body in (("TOTALS", totals), ("TOPICS", topics)):
        # DOTALL lets .*? span the (multi-line) region between the markers.
        pattern = r"\<\!-- {0} --\>.*?\<\!-- \/{0} --\>".format(tag)
        replacement = "<!-- {0} -->\n\n{1}\n\n<!-- /{0} -->".format(tag, body)
        contents = re.sub(pattern, replacement, contents, count=1, flags=re.DOTALL)

    with open(readme_path, "w") as f:
        f.write(contents)