diff options
author | ng0 <ng0@n0.is> | 2019-11-12 21:36:28 +0000 |
---|---|---|
committer | ng0 <ng0@n0.is> | 2019-11-12 21:36:28 +0000 |
commit | 4500a4e0bde4939eb3f49ea54efd4feb18d6ec4c (patch) | |
tree | 798f97d58daf7072f1a4ff3a1f217b2405354594 /inc | |
parent | ab19fc81090ebbbecf471b010bc41f5a43ec858f (diff) | |
download | www-4500a4e0bde4939eb3f49ea54efd4feb18d6ec4c.tar.gz www-4500a4e0bde4939eb3f49ea54efd4feb18d6ec4c.zip |
move inc to www_shared repo
Diffstat (limited to 'inc')
-rw-r--r-- | inc/fileproc.py | 98 | ||||
-rw-r--r-- | inc/i18nfix.py | 41 | ||||
-rw-r--r-- | inc/news.macro.j2 | 13 | ||||
-rw-r--r-- | inc/site.py | 153 | ||||
-rw-r--r-- | inc/sitemap.py | 21 | ||||
-rw-r--r-- | inc/sum.py | 33 | ||||
-rw-r--r-- | inc/textproc.py | 39 |
7 files changed, 0 insertions, 398 deletions
diff --git a/inc/fileproc.py b/inc/fileproc.py deleted file mode 100644 index bf6cc8df..00000000 --- a/inc/fileproc.py +++ /dev/null | |||
@@ -1,98 +0,0 @@ | |||
1 | from pathlib import Path | ||
2 | import os | ||
3 | import shutil | ||
4 | |||
def copy_tree(source, destination):
    """Recursively copy directory *source* into *destination*.

    Both arguments are Path objects. Destination directories are created
    as needed; regular files are copied with metadata (shutil.copy2).
    """
    destination.mkdir(parents=True, exist_ok=True)
    for entry in os.listdir(source):
        src_child = source / entry
        dst_child = destination / entry
        if src_child.is_dir():
            copy_tree(src_child, dst_child)
        else:
            shutil.copy2(str(src_child), str(dst_child))
14 | |||
15 | |||
def copy_files(kind, conf, locale, inlist, ptarget):
    """Copy configured files into the localized and plain output trees.

    kind: source directory (string) the input files live under.
    conf: site configuration mapping; conf[inlist] is a list of entries,
          each with a "file" name and a list of "targets".
    locale: locale subdirectory (e.g. "en") for the localized copy.
    inlist: key into conf selecting the list of file entries.
    ptarget: root of the output tree (target parent dirs must exist).
    """
    out_root = Path(ptarget)
    for item in conf[inlist]:
        src = Path(kind) / item["file"]
        for target in item["targets"]:
            localized_dest = out_root / locale / target
            plain_dest = out_root / target
            # Idiom fix: was "if src.is_file() is not False".
            if src.is_file():
                # Read once instead of once per destination.
                content = src.read_text()
                localized_dest.write_text(content)
                print(f"copied {src} to {localized_dest}...")
                plain_dest.write_text(content)
                print(f"copied {src} to {plain_dest}...")
30 | |||
31 | |||
def rm_rf(directory):
    """Recursively delete every regular file under *directory*.

    Directory entries themselves are left in place -- the final rmdir
    was deliberately disabled in the original (see the trailing comment).
    """
    for entry in Path(directory).glob('*'):
        if entry.is_file():
            entry.unlink()
        else:
            rm_rf(entry)
    # directory.rmdir()
40 | |||
41 | |||
def fileop(infile, outfile, action):
    """
    Copy or symlink *infile* to *outfile*.

    infile: input file path (str or Path)
    outfile: output file path (str or Path)
    action: "copy" writes infile's text into outfile;
            "link" creates outfile as a symlink to infile.
            Any other action, or a non-file infile, is a silent no-op.

    NOTE(review): because of the is_file() guard, passing a directory --
    as generation_dir() does for its "link" call -- silently does
    nothing; confirm whether directories should be linkable.
    """
    src = Path(infile)
    dst = Path(outfile)
    # Removed dead local: outdir = Path("rendered") was never used.
    if src.is_file():
        if action == "copy":
            dst.write_text(src.read_text())
        elif action == "link":
            dst.symlink_to(src)
57 | |||
58 | |||
def write_name(filename, infile, locale, replacer):
    """Build the rendered output path for a template.

    filename: unused -- kept for interface compatibility with callers.
    infile: template path (string), e.g. "team/index.html.j2".
    locale: locale directory component.
    replacer: prefix to strip from infile (e.g. "team/").

    Bug fix: the original used rstrip(".j2"), which strips any trailing
    run of the characters '.', 'j', '2' -- e.g. "oj2.j2" became "o".
    Now exactly one trailing ".j2" suffix is removed.
    """
    name = infile.replace(replacer, '')
    if name.endswith(".j2"):
        name = name[:-3]
    return "./rendered/" + locale + "/" + name
63 | |||
def localized(filename, locale, *args, **kwargs):
    """Return a relative URL for *filename* in *locale*.

    Plain call (no extra arguments): "../<locale>/<filename>".
    With kwargs['ext'] set: prefer the locale-specific variant
    "<filename>.<locale>.<ext>" when it exists on disk, except for "en",
    which always gets the plain "<filename>.<ext>".

    Bug fix: the original signature lacked **kwargs yet referenced
    `kwargs`, so any call with extra arguments raised NameError.
    Accepting **kwargs is backward-compatible (such calls previously
    failed outright).
    """
    if not args and not kwargs:
        return "../" + locale + "/" + filename
    ext = kwargs.get('ext', None)
    if ext is not None:
        lf = filename + "." + locale + "." + ext
        lp = Path(lf)
        if locale == "en" or not lp.is_file():
            return "../" + filename + "." + ext
        else:
            return "../" + lf
    # Extra positional args without an ext: fall through with None,
    # exactly as the original's implicit return did.
    return None
75 | |||
76 | |||
77 | # This generates and switches sites generations, preventing | ||
78 | # in-place modification of the website. | ||
79 | # * save old generation directory name | ||
80 | # * jinja2 creates content in "rendered" (happened before calling this function) | ||
81 | # * calculate sum of "rendered" | ||
82 | # * move "rendered" to out/$sum | ||
83 | # * remove symlink "html_dir" | ||
84 | # * symlink out/$sum to "html_dir" | ||
85 | # * delete old generation directory | ||
def generation_dir(htmldir):
    # Switch the live site symlink to a freshly rendered generation.
    #
    # htmldir: path of the "html_dir" symlink currently pointing at the
    #          live generation directory.
    #
    # NOTE(review): walksum is neither defined nor imported in this module
    # (it lives in inc/sum.py) -- as written this raises NameError; confirm
    # whether a "from inc.sum import walksum" was lost.
    oldgen = Path(htmldir).resolve()  # follow the symlink to the old generation
    # precondition: jinja2 has created the files in "rendered".
    newgen = Path("rendered")
    newgen_sum = walksum(newgen)  # content checksum names the new generation
    outdir = Path("out")
    outdir.mkdir(parents=True, exist_ok=True)
    newgen_target = Path("out") / newgen_sum
    newgen.rename(newgen_target)  # move rendered/ to out/<sum>
    html = Path(htmldir)
    html.unlink()  # drop the old symlink before re-linking
    # NOTE(review): fileop() only acts when its input is a regular file, and
    # newgen was just renamed away -- this link call looks like a no-op;
    # presumably newgen_target was intended. Verify before relying on it.
    fileop(newgen, html, "link")
    rm_rf(oldgen)  # delete the previous generation's files
diff --git a/inc/i18nfix.py b/inc/i18nfix.py deleted file mode 100644 index 69fe177d..00000000 --- a/inc/i18nfix.py +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | #!/usr/bin/env python3 | ||
2 | # | ||
3 | # Copyright (C) 2017, 2018 GNUnet e.V. | ||
4 | # | ||
5 | # Copying and distribution of this file, with or without modification, | ||
6 | # are permitted in any medium without royalty provided the copyright | ||
7 | # notice and this notice are preserved. This file is offered as-is, | ||
8 | # without any warranty. | ||
9 | """ | ||
10 | Extract translations from a Jinja2 template, stripping leading newlines. | ||
11 | |||
12 | @author Florian Dold | ||
13 | """ | ||
14 | |||
15 | import re | ||
16 | import jinja2.ext | ||
17 | |||
18 | |||
def normalize(message):
    """Return *message* trimmed, with every whitespace run (newlines
    included) collapsed to a single space.

    Bug fix: the regex is now a raw string; "\\s+" as a plain literal is a
    DeprecationWarning (invalid escape sequence) on modern Python.
    """
    message = message.strip()
    # collapse whitespaces (including newlines) into one space.
    message = re.sub(r"\s+", " ", message)
    return message
24 | |||
25 | |||
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction hook: delegate to jinja2's extractor, emitting
    each message whitespace-normalized."""
    extracted = jinja2.ext.babel_extract(fileobj, keywords, comment_tags,
                                         options)
    for lineno, funcname, message, comments in extracted:
        yield lineno, funcname, normalize(message), comments
31 | |||
32 | |||
def wrap_gettext(f):
    """
    Return a wrapper around gettext callable *f* that normalizes the
    message's whitespace first (same transform as normalize(): strip,
    then collapse all whitespace runs into single spaces).
    """
    def wrapper(message):
        normalized = re.sub(r"\s+", " ", message.strip())
        return f(normalized)

    return wrapper
diff --git a/inc/news.macro.j2 b/inc/news.macro.j2 deleted file mode 100644 index 0ed9972b..00000000 --- a/inc/news.macro.j2 +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
{#-
  newspreview(name): render a compact preview card for one news post.
  `name` is a news entry mapping with keys 'title', 'date', 'abstract'
  and 'page' (target URL of the full article). An optional keyword
  argument `prefix` is prepended to the article link. The title is
  HTML-escaped; the abstract is emitted as-is.
-#}
{% macro newspreview(name) -%}
<section class="item-preview">
<header>
<h3>{{ name['title']|e }}</h3>
<p class="item-date">
{{ name['date'] }}
</p>
</header>
<p class="item-abstract">
{{ name['abstract'] }} [<a href="{% if kwargs|length > 0 %}{{ kwargs['prefix'] }}{% endif %}{{ name['page'] }}" title="{{ name['date']}}">{{ _("read more") }}</a>]
</p>
</section>
{% endmacro -%}
diff --git a/inc/site.py b/inc/site.py deleted file mode 100644 index 9fd9ac87..00000000 --- a/inc/site.py +++ /dev/null | |||
@@ -1,153 +0,0 @@ | |||
1 | import os | ||
2 | import os.path | ||
3 | import sys | ||
4 | import re | ||
5 | import gettext | ||
6 | import glob | ||
7 | import codecs | ||
8 | import jinja2 | ||
9 | from pathlib import Path, PurePosixPath, PurePath | ||
10 | from ruamel.yaml import YAML | ||
11 | import inc.i18nfix as i18nfix | ||
12 | from inc.textproc import cut_news_text | ||
13 | from inc.fileproc import copy_files, copy_tree | ||
14 | |||
15 | |||
class gen_site:
    """Site generator: loads the YAML config, copies static trees,
    produces news abstracts, and renders every ``*.j2`` template once
    per locale into ``rendered/<locale>/``."""

    def __init__(self, debug):
        # debug: falsy = quiet, truthy = progress output, >1 also dumps
        # the news configuration.
        self.debug = debug

    def load_config(self, name="www.yml"):
        """Load and return the YAML site configuration from *name*."""
        yaml = YAML(typ='safe')
        site_configfile = Path(name)
        return yaml.load(site_configfile)

    def copy_trees(self, directory):
        """ Take a directory name (string) and pass it to copy_tree() as Path object. """
        i = Path(directory)
        o = Path("rendered/" + directory)
        copy_tree(i, o)

    def gen_abstract(self, conf, name, member, pages, length):
        """For each entry in conf[name], store a *length*-character
        abstract of its news page (key *pages*) under key *member*."""
        if self.debug:
            print("generating abstracts...")
        for item in conf[name]:
            item[member] = cut_news_text(item[pages], length)
        if self.debug:
            print("cwd: " + str(Path.cwd()))
        if self.debug > 1:
            print(conf["newsposts"])
        if self.debug:
            print("[done] generating abstracts")

    @staticmethod
    def _strip_j2(path):
        """Remove exactly one trailing ".j2" suffix.

        Bug fix: the original used str.rstrip(".j2"), which strips any
        trailing run of the characters '.', 'j', '2' and can eat parts
        of the real filename.
        """
        return path[:-3] if path.endswith(".j2") else path

    def run(self, root, conf, env):
        """Render every *root*/*.j2 template for each locale under locale/.

        root: template subdirectory; "news" output gets an extra path level.
        conf: loaded site configuration mapping.
        env:  jinja2 Environment with the i18n extension enabled.
        """
        if self.debug:
            templates = list(Path(".").glob("**/*.j2"))
            print(templates)
        for in_file in Path(".").glob(root + "/*.j2"):
            in_file = str(in_file)
            if self.debug:
                print(in_file)
            # Split "<base>.<ext>" out of the template name (minus ".j2").
            name, ext = re.match(r"(.*)\.([^.]+)$",
                                 self._strip_j2(in_file)).groups()
            tmpl = env.get_template(in_file)

            # The helpers below close over `locale`, which is (re)bound in
            # the loop further down; they are only called during
            # tmpl.render(), after locale is set.
            def self_localized(other_locale):
                """
                Return URL for the current page in another locale.
                """
                return "../" + other_locale + "/" + self._strip_j2(
                    in_file.replace(root + '/', ''))

            def url_localized(filename):
                if root == "news":
                    return "../../" + locale + "/" + filename
                else:
                    return "../" + locale + "/" + filename

            def url_static(filename):
                if root == "news":
                    return "../../static/" + filename
                else:
                    return "../static/" + filename

            def url_dist(filename):
                if root == "news":
                    return "../../dist/" + filename
                else:
                    return "../dist/" + filename

            def svg_localized(filename):
                # Prefer the locale-specific SVG when it exists; "en"
                # always uses the plain file.
                lf = filename + "." + locale + ".svg"
                if locale == "en" or not Path(lf).is_file():
                    return "../" + filename + ".svg"
                else:
                    return "../" + lf

            def url(x):
                # TODO: look at the app root environment variable
                # TODO: check if file exists
                return "../" + x

            # https://bugs.python.org/issue22276 -- glob("locale/*/") can
            # yield non-directories, hence the is_dir() filter.
            for locale_dir in (x for x in Path(".").glob("locale/*/")
                               if x.is_dir()):
                locale = str(PurePath(locale_dir).name)
                if self.debug:
                    print(locale)

                tr = gettext.translation("messages",
                                         localedir="locale",
                                         languages=[locale])
                # Normalize whitespace in translated messages.
                tr.gettext = i18nfix.wrap_gettext(tr.gettext)
                env.install_gettext_translations(tr, newstyle=True)

                content = tmpl.render(lang=locale,
                                      lang_full=conf["langs_full"][locale],
                                      url=url,
                                      meetingnotesdata=conf["meetingnotes"],
                                      newsdata=conf["newsposts"],
                                      videosdata=conf["videoslist"],
                                      self_localized=self_localized,
                                      url_localized=url_localized,
                                      url_static=url_static,
                                      url_dist=url_dist,
                                      svg_localized=svg_localized,
                                      filename=name + "." + ext)

                base_name = self._strip_j2(in_file.replace(root + '/', ''))
                if root == "news":
                    out_name = "./rendered/" + locale + "/" + root + "/" + base_name
                    langdir = Path("rendered") / locale / root
                else:
                    out_name = "./rendered/" + locale + "/" + base_name
                    langdir = Path("rendered") / locale

                try:
                    langdir.mkdir(parents=True, exist_ok=True)
                except FileNotFoundError as e:
                    # Bug fix: original read "except e as FileNotFoundError",
                    # which raises NameError instead of catching.
                    print(e)

                with codecs.open(out_name, "w", encoding='utf-8') as f:
                    try:
                        if self.debug:
                            print(Path.cwd())
                        f.write(content)
                    except OSError as e:
                        # Bug fix: original read "except e as Error".
                        print(e)
diff --git a/inc/sitemap.py b/inc/sitemap.py deleted file mode 100644 index 5ccf7447..00000000 --- a/inc/sitemap.py +++ /dev/null | |||
@@ -1,21 +0,0 @@ | |||
1 | import os | ||
2 | from pathlib import Path, PurePosixPath | ||
3 | |||
4 | |||
def sitemap_tree(path):
    """Build a nested sitemap dict for *path*.

    Returns {'name': <basename>, 'children': [...]}; directory children
    recurse, file children are {'name': <filename>}. An unreadable or
    missing path yields an empty children list.
    """
    node = dict(name=PurePosixPath(path).name, children=[])
    try:
        entries = os.listdir(path)
    except OSError:
        entries = []
    for entry in entries:
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            node['children'].append(sitemap_tree(full))
        else:
            leaf = os.path.join(entry)
            # Defensive: strip a leading slash from the display name.
            if leaf.startswith('/'):
                leaf = leaf[1:]
            node['children'].append(dict(name=leaf))
    return node
diff --git a/inc/sum.py b/inc/sum.py deleted file mode 100644 index fff7a814..00000000 --- a/inc/sum.py +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
def sha256sum(_):
    """Return the hex SHA-256 digest of the file at path *_*.

    Bug fix: this module never imported hashlib or io, so every call
    raised NameError; import them locally to keep the function
    self-contained. (The parameter is kept as "_" for interface
    compatibility.)
    """
    import hashlib
    import io

    sha256 = hashlib.sha256()
    with io.open(_, mode="rb") as fd:
        content = fd.read()
        sha256.update(content)
    return sha256.hexdigest()
7 | |||
8 | |||
def walksum(_):
    """Return a hex SHA-256 checksum over all files under directory *_*.

    Each file is read in 4096-byte chunks; the hexdigest of each chunk is
    folded (as bytes) into a running digest. File names are processed in
    sorted order per directory. Returns -1 if the path does not exist and
    -2 on an unexpected error.

    Bug fixes vs. the original:
    - hashlib/os/Path were never imported in this module (NameError).
    - sha256.update() was fed the hexdigest *str*, raising TypeError and
      making every call return -2; the digest is now encoded to bytes.
    - the inner except closed a file handle that was never bound when
      open() itself failed.
    """
    import hashlib
    import os
    from pathlib import Path

    sha256 = hashlib.sha256()
    x = Path(_)
    if not x.exists():
        return -1
    try:
        for root, directories, files in os.walk(_):
            for name in sorted(files):
                filepath = os.path.join(root, name)
                try:
                    fl = open(filepath, 'rb')
                except OSError:
                    # Unreadable file: skip it, as the original intended.
                    continue
                with fl:
                    while True:
                        buf = fl.read(4096)
                        if not buf:
                            break
                        sha256.update(
                            hashlib.sha256(buf).hexdigest().encode())
    except Exception:
        import traceback
        traceback.print_exc()
        return -2
    return sha256.hexdigest()
diff --git a/inc/textproc.py b/inc/textproc.py deleted file mode 100644 index f3b97d3d..00000000 --- a/inc/textproc.py +++ /dev/null | |||
@@ -1,39 +0,0 @@ | |||
1 | import html.parser | ||
2 | from bs4 import BeautifulSoup | ||
3 | |||
4 | |||
class extractText(html.parser.HTMLParser):
    """HTML parser that collects text nodes; retrieve them via text_in()."""

    def __init__(self):
        super().__init__()
        # Accumulates each text chunk in document order.
        self.result = []

    def handle_data(self, data):
        self.result.append(data)

    def text_in(self):
        """Return all collected text joined into one string."""
        return ''.join(self.result)
15 | |||
16 | |||
def html2text(html):
    """Strip markup from the *html* string and return its text content."""
    parser = extractText()
    parser.feed(html)
    return parser.text_in()
21 | |||
22 | |||
def cut_text(filename, count):
    """Return the text of the second <p> of HTML file *filename*,
    truncated to *count* characters.

    Truncated texts end in "..."; texts already short enough get ".."
    appended (original convention, preserved).

    NOTE(review): find_all('p')[1] deliberately takes the SECOND
    paragraph (presumably the first holds metadata such as the date) and
    raises IndexError on pages with fewer than two -- confirm against
    the news templates.
    """
    # Explicit encoding instead of the platform default.
    with open(filename, encoding="utf-8") as html:
        soup = BeautifulSoup(html, features="lxml")
        # Drop script/style subtrees so only real content remains.
        for script in soup(["script", "style"]):
            script.extract()
        # find_all replaces the deprecated findAll alias.
        parts = [str(e) for e in soup.find_all('p')[1]]
        text = html2text(''.join(parts).replace("\n", ""))
        if len(text) > count:
            return text[:count] + '...'
        return text + '..'
36 | |||
37 | |||
def cut_news_text(filename, count):
    """Truncate the news article "news/<filename>.j2" to *count* chars."""
    template_path = "news/{}.j2".format(filename)
    return cut_text(template_path, count)