Update page.py with new ipo, use date from filename

parent 5b014b97da
commit bc6c3aa16e

4 changed files with 53 additions and 85 deletions
@@ -1,6 +1,5 @@
 title: SVG + makefile = slide deck
 summary: "You don't need LibreOffice to prepare your presentation -- or: an introduction to makefiles"
-published: 2020-06-11
 ---
 
 
@@ -1,6 +1,5 @@
 title: Get started using feeds
 summary: Don't subscribe to our email newsletter
-published: 2020-06-02
 ---
 
 
@@ -1,6 +1,5 @@
 title: Get started with IRC
 summary: Pick a client, join some channels, maybe register with NickServ
-published: 2020-06-02
 ---
 
 
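The three hunks above drop the published: key from the frontmatter of existing posts; the date now has to be encoded in each post's filename, where the new date_from_filename helper in page.py (below) picks it up. A quick check of that helper's regex with a hypothetical filename (the real post filenames are not shown in this diff):

    import re

    filename = "blog/2020-06-11-svg-makefile-slide-deck.md"  # hypothetical name
    match = re.search(r"(?:^|/)([0-9]{4}-[0-9]{2}-[0-9]{2})[^/]+$", filename)
    print(match.group(1))  # prints: 2020-06-11

So post filenames are expected to start with a YYYY-MM-DD date.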
page.py (135 lines changed)
@@ -3,34 +3,10 @@
 import sys
 import re
 from markdown import markdown
-import strictyaml
-from ipo import read, write, map, dictmap, starstarmap, join, sort, ipo
-
-
-md_to_html = join("\n") | ipo(markdown)
-yaml_to_dict = join("\n") | ipo(strictyaml.load) | ipo(lambda x: x.data)
-
-
-@ipo
-def parted(data, regex):
-    """
-    Two parts of data, delimited by the first element on which regex fullmatches.
-
-    If a delimiter is found, the second part is an iterator over the remaining elements. Else it is None.
-    """
-    first = []
-
-    it = iter(data)
-    try:
-        while True:
-            item = next(it)
-            if re.fullmatch(regex, item):
-                break
-            first.append(item)
-    except StopIteration:
-        return (first, None)
-
-    return (first, it)
+from strictyaml import load as load_yaml
+from pymaybe import maybe as maybe
+from functools import partial as p
+from ipo import read, write, dictmap, starstarmap, ipo, all_before, all_after
 
 
 LIST_ITEM_TEMPLATE = """
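The hand-rolled parted() helper removed above is replaced by ipo's all_before and all_after. Their implementation isn't part of this diff; presumably they split an iterable around the first element a predicate matches, roughly like this plain-Python sketch (an assumption about the library, not its actual code):

    import re
    from functools import partial
    from itertools import dropwhile, takewhile

    def all_before(predicate, data):
        # Everything up to, but not including, the first element the predicate matches.
        return takewhile(lambda item: not predicate(item), data)

    def all_after(predicate, data):
        # Everything after the first matching element; the delimiter itself is skipped.
        rest = dropwhile(lambda item: not predicate(item), data)
        next(rest, None)
        return rest

    lines = ["title: hello", "---", "body text"]
    print(list(all_before(partial(re.fullmatch, "---+"), lines)))  # ['title: hello']
    print(list(all_after(partial(re.fullmatch, "---+"), lines)))   # ['body text']

The functools.partial alias p imported above is what lets the new code curry these helpers inside a pipeline, as in p(all_before, p(re.fullmatch, "---+")).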
@@ -45,48 +21,32 @@ def blog_page_template():
         return fh.read()
 
 
-# FIXME what a mess
-def blog_page(file, read_body=True):
-    """
-    Keep the file open if you want to be able to read the body.
-    Good:
-    >>> with open(…) as file:
-    >>> metadata, body = blog_page(file)
-    >>> [smthng(line) for line in body]
-    >>> print("\n".join(hello))
-
-    Bad:
-    >>> with open(…) as file:
-    >>> metadata, body = blog_page(file)
-    >>> hello = (smthng(line) for line in body)  # Lazy generator, the body wasn't read from file yet
-    >>> print("\n".join(hello))
-    """
-    if isinstance(file, str):
-        assert not read_body, "Can't read body when giving a filename, need a file for that."
-        filename = file
-        file = open(filename)
-    else:
-        filename = None
-
-    try:
-        metadata_yaml, body_md = read(file) | parted("---+")
-
-        metadata = {
-            **(metadata_yaml | yaml_to_dict),
-        }
-        if not read_body:
-            metadata["path"] = re.sub(r".md$", "", filename)
-
-        body = (
-            body_md | md_to_html(extensions=["abbr", "toc", "smarty", "fenced_code", "codehilite"])
-            if read_body else None
-        )
-
-        return (metadata, body)
-
-    finally:
-        if filename:
-            file.close()
+def date_from_filename(filename):
+    return maybe(
+        re.search(r"(?:^|/)([0-9]{4}-[0-9]{2}-[0-9]{2})[^/]+$", filename)
+    ).group(1).or_none()
+
+
+def blog_page_metadata(filename):
+    with open(filename) as file:
+        metadata_from_file = (
+            read(file=file) | p(all_before, p(re.fullmatch, "---+")) | "\n".join |
+            load_yaml | (lambda x: x.data)
+        ).data
+
+    return {
+        **metadata_from_file,
+        "path": re.sub(r".md$", "", filename),
+        "published": date_from_filename(filename)
+    }
+
+
+def blog_page_body(filename):
+    with open(filename) as file:
+        return (
+            read(file=file) | p(all_after, p(re.fullmatch, "---+")) | "\n".join |
+            p(markdown, extensions=["abbr", "toc", "smarty", "fenced_code", "codehilite"])
+        ).data
 
 
 def safe_metadata(metadata):
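Stripped of the ipo pipeline syntax and the pymaybe wrapper, the new blog_page_metadata boils down to roughly this plain-Python paraphrase (an approximation that assumes read(file=...) yields the file's lines and that the trailing .data unwraps the pipeline's result; the lookup from date_from_filename is inlined):

    import re
    from strictyaml import load as load_yaml

    def blog_page_metadata_plain(filename):
        # Everything before the first "---" line is the YAML frontmatter.
        frontmatter = []
        with open(filename) as file:
            for line in file:
                line = line.rstrip("\n")
                if re.fullmatch("---+", line):
                    break
                frontmatter.append(line)

        # Inlined equivalent of date_from_filename: None when the filename has no date.
        match = re.search(r"(?:^|/)([0-9]{4}-[0-9]{2}-[0-9]{2})[^/]+$", filename)

        return {
            **load_yaml("\n".join(frontmatter)).data,
            "path": re.sub(r".md$", "", filename),
            "published": match.group(1) if match else None,
        }

blog_page_body is the mirror image: it keeps everything after the delimiter and runs it through markdown() with the same extension list the old md_to_html helper used.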
@@ -100,21 +60,32 @@ def safe_metadata(metadata):
     }
 
 
-if sys.argv[1] == "--index":
-    blog_post_list = (
-        sys.argv[4:] |
-        map(lambda filename: {
-            **blog_page(filename, read_body=False)[0],
-        }) |
-        sort(key=lambda x: x["published"]) |
-        map(safe_metadata) |
-        map(lambda metadata: LIST_ITEM_TEMPLATE.format(**metadata)) |
-        join("")
-    )
-
-    with open(sys.argv[2]) as file_in, open(sys.argv[3], "w") as file_out:
-        file_in.read().format(blog_posts=blog_post_list) | write(file=file_out)
-else:
-    with open(sys.argv[1]) as file_in, open(sys.argv[2], "w") as file_out:
-        metadata, body = blog_page(file_in)
-        blog_page_template().format(**safe_metadata(metadata), body=body) | write(file=file_out)
+def main():
+    if sys.argv[1] == "--index":
+        blog_post_list = (
+            ipo(sys.argv[4:]) |
+            p(map, blog_page_metadata) |
+            p(sorted, key=lambda x: x["published"]) |
+            p(map, safe_metadata) |
+            p(map, lambda metadata: LIST_ITEM_TEMPLATE.format(**metadata)) |
+            "".join
+        )
+
+        with open(sys.argv[2]) as file_in, open(sys.argv[3], "w") as file_out:
+            print(
+                file_in.read().format(blog_posts=blog_post_list),
+                end="", file=file_out
+            )
+
+    else:
+        metadata = safe_metadata(blog_page_metadata(sys.argv[1]))
+        body = blog_page_body(sys.argv[1])
+
+        with open(sys.argv[2], "w") as file_out:
+            print(
+                blog_page_template().format(**metadata, body=body),
+                end="", file=file_out
+            )
+
+if __name__ == '__main__':
+    main()
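For readers unfamiliar with ipo, the blog_post_list pipeline in main() is equivalent to roughly the following (a paraphrase with a hypothetical helper name; blog_page_metadata, safe_metadata and LIST_ITEM_TEMPLATE are the ones defined in page.py above):

    def build_blog_post_list(filenames):
        # Metadata per post, sorted by publication date, templated, concatenated.
        pages = sorted(
            (blog_page_metadata(f) for f in filenames),
            key=lambda page: page["published"],
        )
        return "".join(
            LIST_ITEM_TEMPLATE.format(**safe_metadata(page)) for page in pages
        )

Judging by the argv handling, the script is presumably still invoked as page.py --index <template> <output> <post files...> to build the index, and as page.py <post.md> <output.html> for a single page; the makefile that drives it is not part of this commit.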