#!/usr/bin/env python3
# -*- encoding: utf-8 -*-

# Standard library
import configparser
import json
import math
import shutil
import sys
import warnings
import zipfile
from hashlib import sha256 as hashalgo
from pathlib import Path

# Third-party
import htmlmin
import jinja2
from markupsafe import Markup
from PIL import Image
|
|
Image.MAX_IMAGE_PIXELS = None
|
|
|
|
jinjaEnvironment = jinja2.Environment(
|
|
loader=jinja2.FileSystemLoader(
|
|
str(Path('tpl').absolute())
|
|
)
|
|
)
|
|
|
|
|
|
def treat_split(l): return list(filter(len, map(str.strip, l)))
|
|
|
|
|
|
def jsonp_dumps(obj, padding='callback'):
|
|
return Markup(f'{padding}({json.dumps(obj, sort_keys=True)});')
|
|
|
|
|
|
def jsonv_dumps(obj, var='variable'):
|
|
return Markup(f'var {var} = {json.dumps(obj, sort_keys=True)};')
|
|
|
|
|
|
def json_dumps(obj):
|
|
return Markup(f'{json.dumps(obj, sort_keys=True)}')
|
|
|
|
|
|
def digest_for(bytes):
|
|
m = hashalgo()
|
|
m.update(bytes)
|
|
return m.hexdigest()
|
|
|
|
|
|
class LeveledItem(object):
|
|
def __init__(self, item, *children):
|
|
self.item = item
|
|
self.children = LeveledList(children)
|
|
|
|
def __str__(self):
|
|
return repr(self)
|
|
|
|
def __repr__(self):
|
|
return f'{type(self).__name__}({repr(self.item)}, *{repr(self.children)})'
|
|
|
|
def to_jsonable(self, depth=1):
|
|
return {
|
|
'item': self.item,
|
|
'children': self.children.to_jsonable(depth),
|
|
}
|
|
|
|
def transform(self, dct: dict):
|
|
return type(self)(dct[self.item], *self.children.transform(dct))
|
|
|
|
def leveled_transform(self, *dcts):
|
|
head, *tail = dcts
|
|
return type(self)(head[self.item], *self.children.leveled_transform(*tail))
|
|
|
|
def linearify(self):
|
|
return LeveledList([self.item, *self.children.linearify()])
|
|
|
|
|
|
class LeveledList(list):
|
|
def __new__(cls, base=None):
|
|
return list.__new__(cls, base)
|
|
|
|
def to_jsonable(self, depth=2):
|
|
return [
|
|
child if depth <= 1 else child.to_jsonable(depth-1)
|
|
for child in self
|
|
]
|
|
|
|
def transform(self, dct: dict):
|
|
return type(self)([
|
|
i.transform(dct) if isinstance(i, LeveledItem) else dct[i]
|
|
for i in self
|
|
])
|
|
|
|
def leveled_transform(self, *dcts):
|
|
head, *_ = dcts
|
|
return type(self)([
|
|
i.leveled_transform(*dcts) if isinstance(i,
|
|
LeveledItem) else head[i]
|
|
for i in self
|
|
])
|
|
|
|
def linearify(self):
|
|
return type(self)(self._linearify())
|
|
|
|
def _linearify(self):
|
|
for l in [
|
|
i.linearify() if isinstance(i, (LeveledList, LeveledItem)) else [i]
|
|
for i in self
|
|
]:
|
|
yield from l
|
|
yield from []
|
|
|
|
|
|
def parse_leveled_list(file: Path) -> LeveledList:
|
|
k2l = dict()
|
|
lst = LeveledList()
|
|
lines = treat_split(file.read_text().splitlines())
|
|
for lnumb, line in enumerate(lines):
|
|
itm = line[1:]
|
|
if line[0] == '-':
|
|
lst.append(LeveledItem(itm))
|
|
elif line[0] == '=':
|
|
lst[-1].children.append(itm)
|
|
else:
|
|
raise RuntimeError('cannot determine nestedfulness level')
|
|
k2l[itm] = lnumb
|
|
return lst, k2l
|
|
|
|
|
|
class Objectify(object):
|
|
def __init__(self, obj=None, **kwargs):
|
|
if obj is None:
|
|
obj = kwargs
|
|
else:
|
|
obj = {**obj, **kwargs}
|
|
self.__dict__ = obj
|
|
|
|
def __str__(self):
|
|
return str(self.__dict__)
|
|
|
|
def __repr__(self):
|
|
return f'{type(self).__name__}({repr(self.__dict__)})'
|
|
|
|
def __getitem__(self, idx):
|
|
return self.__dict__[idx]
|
|
|
|
def __setitem__(self, idx, val):
|
|
self.__dict__[idx] = val
|
|
|
|
|
|
class ImageScalingDefinition(object):
|
|
def __init__(self, x, y):
|
|
self.x = x
|
|
self.y = y
|
|
|
|
def resize(self, img: Image):
|
|
raise NotImplementedError
|
|
|
|
|
|
class ImageThumbnailerDefinition(ImageScalingDefinition):
|
|
def get_resize_res(self, img: Image):
|
|
x = self.x
|
|
y = self.y
|
|
if x is not None or y is not None:
|
|
dx, dy = img.size
|
|
if x is None or y is None:
|
|
if x is None:
|
|
if dy >= y:
|
|
x = dx
|
|
else:
|
|
x = math.ceil(dx*(y/dy))
|
|
elif y is None:
|
|
if dx >= x:
|
|
y = dy
|
|
else:
|
|
y = math.ceil(dy*(x/dx))
|
|
if not (x >= dx and y >= dy):
|
|
return (x, y)
|
|
|
|
def resize(self, img: Image):
|
|
tpl = self.get_resize_res(img)
|
|
if tpl is not None:
|
|
img.thumbnail(tpl)
|
|
|
|
|
|
def remove_transparency(im, bg_colour=(0xF5, 0xF5, 0xF5)):
|
|
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
|
|
alpha = im.convert('RGBA').split()[-1]
|
|
bg = Image.new("RGB", im.size, bg_colour)
|
|
bg.paste(im, mask=alpha)
|
|
im.close()
|
|
return bg
|
|
else:
|
|
return im
|
|
|
|
|
|
def get_images(images_directory, output_directory):
|
|
print('Setting image paths...', flush=True)
|
|
images_source = list(images_directory.rglob('*.*'))
|
|
images_sizes = dict(
|
|
thumb=ImageThumbnailerDefinition(300, 300),
|
|
small=ImageThumbnailerDefinition(900, 900),
|
|
dcent=ImageThumbnailerDefinition(1440, 1440),
|
|
large=ImageThumbnailerDefinition(2560, 2560),
|
|
full=ImageThumbnailerDefinition(None, None)
|
|
)
|
|
images_source = {
|
|
image.name: (
|
|
image,
|
|
digest_for(image.read_bytes())
|
|
)
|
|
for image in images_source
|
|
}
|
|
images_source = {
|
|
k: Objectify(
|
|
source=i,
|
|
files=Objectify(
|
|
thumb=output_directory.joinpath(f'thumbnail_{d}.jpg'),
|
|
small=output_directory.joinpath(f'small_{d}.jpg'),
|
|
dcent=output_directory.joinpath(f'decent_{d}.jpg'),
|
|
large=output_directory.joinpath(f'large_{d}.jpg'),
|
|
full=output_directory.joinpath(f'full_{d}{i.suffix}')
|
|
),
|
|
formats=Objectify(),
|
|
dimens=Objectify(),
|
|
sizes=Objectify()
|
|
)
|
|
for k, (i, d) in images_source.items()
|
|
}
|
|
print('Ensuring all images were converted...', flush=True)
|
|
for image_name, image_source in images_source.items():
|
|
print(f'Current image: {image_name}', flush=True)
|
|
for size_name, image_size in images_sizes.items():
|
|
image_destination = image_source.files[size_name]
|
|
if not image_destination.exists():
|
|
convert_image(
|
|
image_source,
|
|
image_size,
|
|
size_name,
|
|
image_destination,
|
|
image_name
|
|
)
|
|
image_source.dimens = Objectify({
|
|
size_name: Image.open(image_source.files[size_name]).size
|
|
for size_name in images_sizes
|
|
})
|
|
image_source.sizes = Objectify({
|
|
size_name: image_source.files[size_name].stat().st_size
|
|
for size_name in images_sizes
|
|
})
|
|
image_source.formats = Objectify({
|
|
size_name: image_source.files[size_name].suffix[1:].upper()
|
|
for size_name in images_sizes
|
|
})
|
|
return images_source
|
|
|
|
|
|
def convert_image(
|
|
image_source,
|
|
image_size,
|
|
size_name,
|
|
image_destination,
|
|
image_name
|
|
):
|
|
img = Image.open(image_source.source)
|
|
resz = image_size.get_resize_res(img)
|
|
# These prints are necessary for the CI runner to not kill the process
|
|
print(f'[{size_name}] ', end='')
|
|
if (
|
|
resz is None
|
|
and
|
|
image_source.source.suffix == image_destination.suffix
|
|
):
|
|
print('did not resize: ', end='')
|
|
shutil.copyfile(
|
|
image_source.source,
|
|
image_destination
|
|
)
|
|
resz = img.size
|
|
else:
|
|
print('got resized [', end='')
|
|
print('x'.join(list(map(str, img.size))), end='')
|
|
image_size.resize(img)
|
|
print('::', end='')
|
|
print('x'.join(list(map(str, img.size))), end='')
|
|
print(']', end='')
|
|
img = remove_transparency(img)
|
|
img.save(image_destination)
|
|
resz = img.size
|
|
print(': ', end='')
|
|
print(f'{image_name}', flush=True)
|
|
img.close()
|
|
del img
|
|
|
|
|
|
def parse_art_info(
|
|
images_description: configparser.ConfigParser,
|
|
tags_description: configparser.ConfigParser,
|
|
artists_description: configparser.ConfigParser,
|
|
social_description: configparser.ConfigParser
|
|
):
|
|
print('Parsing art info...', flush=True)
|
|
tags = dict()
|
|
social = dict()
|
|
images = dict()
|
|
artists = dict()
|
|
pending_tags = list(set(tags_description.sections())-{'DEFAULT'})
|
|
pending_social = list(set(social_description.sections())-{'DEFAULT'})
|
|
pending_images = list(set(images_description.sections())-{'DEFAULT'})
|
|
pending_artists = list(set(artists_description.sections())-{'DEFAULT'})
|
|
delayed_images = set()
|
|
|
|
def failsafe_set(this, image_def, attr, hard=True, default=None):
|
|
this[attr] = image_def[attr].replace("''", "'") if attr in image_def else (
|
|
this[attr] if hard else this.get(attr, default))
|
|
while len(pending_images) > 0:
|
|
image_name, *pending_images = pending_images
|
|
image_def = images_description[image_name]
|
|
if(
|
|
image_def.get('ignore', 'false') == 'false'
|
|
and
|
|
image_name not in images
|
|
):
|
|
this = dict()
|
|
parent = None
|
|
if 'partof' in image_def:
|
|
parent = image_def['partof']
|
|
elif 'sameas' in image_def:
|
|
parent = image_def['sameas']
|
|
if parent is not None and parent not in images:
|
|
if image_name in delayed_images:
|
|
raise ValueError(
|
|
f'Parent dependency of {repr(image_name)} ({repr(parent)}) cannot be ignored'
|
|
)
|
|
pending_images = [parent, image_name, *pending_images]
|
|
delayed_images.add(image_name)
|
|
continue
|
|
elif parent is not None:
|
|
this = images[parent].copy()
|
|
failsafe_set(this, image_def, 'name')
|
|
failsafe_set(this, image_def, 'date')
|
|
failsafe_set(this, image_def, 'artist')
|
|
failsafe_set(this, image_def, 'technology')
|
|
failsafe_set(this, image_def, 'tags', False, '')
|
|
failsafe_set(this, image_def, 'link', False, None)
|
|
if isinstance(this['date'], str):
|
|
this['date'] = list(
|
|
map(int, treat_split(this['date'].split('-'))))
|
|
if isinstance(this['tags'], str):
|
|
this['tags'] = treat_split(this['tags'].split(','))
|
|
pending_artists.append(this['artist'])
|
|
pending_tags.append(this['technology'])
|
|
pending_tags += this['tags']
|
|
images[image_name] = this
|
|
pending_tags = list(set(pending_tags))
|
|
pending_artists = list(set(pending_artists))
|
|
print('Parsing artist info...', flush=True)
|
|
for pending_artist in pending_artists:
|
|
this = dict()
|
|
if pending_artist in artists_description:
|
|
artist_def = artists_description[pending_artist]
|
|
sns = list()
|
|
this['name'] = pending_artist
|
|
failsafe_set(this, artist_def, 'name')
|
|
for sn in artist_def:
|
|
if sn != 'name':
|
|
this[sn] = artist_def[sn]
|
|
sns.append(sn)
|
|
pending_social.append(sn)
|
|
this['@'] = sorted(sns)
|
|
else:
|
|
warnings.warn(
|
|
f'Artist {repr(pending_artist)} not found in INI file')
|
|
this['name'] = pending_artist
|
|
artists[pending_artist] = this
|
|
pending_social = list(set(pending_social))
|
|
print('Parsing tag info...', flush=True)
|
|
for pending_tag in pending_tags:
|
|
this = dict()
|
|
if pending_tag in tags_description:
|
|
tag_def = tags_description[pending_tag]
|
|
if 'name' in tag_def:
|
|
failsafe_set(this, tag_def, 'name')
|
|
if 'icon' in tag_def:
|
|
failsafe_set(this, tag_def, 'icon')
|
|
else:
|
|
warnings.warn(
|
|
f'Tag {repr(pending_tag)} not found in INI file')
|
|
if 'name' not in this:
|
|
this['name'] = pending_tag
|
|
if 'icon' not in this:
|
|
this['icon'] = tags_description['DEFAULT']['icon']
|
|
tags[pending_tag] = this
|
|
print('Parsing social network info...', flush=True)
|
|
for pending_s in pending_social:
|
|
this = dict()
|
|
if pending_s in social_description:
|
|
social_def = social_description[pending_s]
|
|
if 'name' in social_def:
|
|
failsafe_set(this, social_def, 'name')
|
|
if 'icon' in social_def:
|
|
failsafe_set(this, social_def, 'icon')
|
|
else:
|
|
warnings.warn(
|
|
f'Social Platform {repr(pending_s)} not found in INI file')
|
|
if 'name' not in this:
|
|
this['name'] = pending_s
|
|
if 'icon' not in this:
|
|
this['icon'] = social_description['DEFAULT']['icon']
|
|
social[pending_s] = this
|
|
return tags, social, images, artists
|
|
|
|
|
|
def run(configfile):
|
|
config = get_config_parser_from_file(configfile)
|
|
output_directory = Path(config['cfg']['output_directory'])
|
|
images_directory = Path(config['cfg']['images_directory'])
|
|
template_directory = get_config_parser_from_file(
|
|
Path(config['cfg']['template_directory']))
|
|
images_description = get_config_parser_from_file(
|
|
Path(config['cfg']['images_description']))
|
|
tags_description = get_config_parser_from_file(
|
|
Path(config['cfg']['tags_description']))
|
|
artists_description = get_config_parser_from_file(
|
|
Path(config['cfg']['artists_description']))
|
|
social_description = get_config_parser_from_file(
|
|
Path(config['cfg']['social_description']))
|
|
pages = treat_split(config['cfg']['pages'].split(','))
|
|
tags, social, images_, artists = parse_art_info(
|
|
images_description,
|
|
tags_description,
|
|
artists_description,
|
|
social_description
|
|
)
|
|
all_images = get_images(images_directory, output_directory)
|
|
image_stereotypes = 'thumb,small,dcent,large,full'.split(',')
|
|
imgsrc2imgurl = Objectify({
|
|
s: {
|
|
k: v.files[s].name
|
|
for k, v in all_images.items()
|
|
}
|
|
for s in image_stereotypes
|
|
})
|
|
# print(all_images)
|
|
js_tags = jsonv_dumps(tags, 'tags')
|
|
js_social = jsonv_dumps(social, 'social')
|
|
js_artists = jsonv_dumps(artists, 'artists')
|
|
print('Processing pages...', flush=True)
|
|
for page in pages:
|
|
name = config[page]['name'].strip()
|
|
language = config[page]['language'].strip()
|
|
print(f'Processing page {repr(name)}...', flush=True)
|
|
template = jinjaEnvironment.get_template(
|
|
f"{config[page].get('template', page)}.html"
|
|
)
|
|
imgorg, key2line = parse_leveled_list(
|
|
Path(config[page]['sorted'].strip()))
|
|
# print(jsonp_dumps(imgorg.transform(key2line).linearify()))
|
|
|
|
images__ = {l: images_[k] for k, l in key2line.items()}
|
|
images = list(map(
|
|
lambda a: a[1],
|
|
sorted(list(images__.items()))
|
|
))
|
|
js_images = jsonv_dumps(images, 'images')
|
|
img_formats = [
|
|
all_images[i].formats.__dict__ for i in imgorg.linearify()]
|
|
img_dimens = [
|
|
all_images[i].dimens.__dict__ for i in imgorg.linearify()]
|
|
img_sizes = [
|
|
all_images[i].sizes.__dict__ for i in imgorg.linearify()]
|
|
|
|
org_hierarchical = imgorg.transform(key2line).to_jsonable()
|
|
org_linear = imgorg.transform(imgsrc2imgurl.small).linearify()
|
|
imgs_hierarchical = imgorg.leveled_transform(
|
|
imgsrc2imgurl.small, imgsrc2imgurl.thumb).linearify()
|
|
imgs_thumb = imgorg.transform(imgsrc2imgurl.thumb).linearify()
|
|
imgs_small = imgorg.transform(imgsrc2imgurl.small).linearify()
|
|
imgs_dcent = imgorg.transform(imgsrc2imgurl.dcent).linearify()
|
|
imgs_large = imgorg.transform(imgsrc2imgurl.large).linearify()
|
|
imgs_full = imgorg.transform(imgsrc2imgurl.full).linearify()
|
|
js_org_hierarchical = jsonv_dumps(org_hierarchical, 'org_hierarchical')
|
|
js_org_linear = jsonv_dumps(org_linear, 'org_linear')
|
|
js_imgs_hierarchical = jsonv_dumps(
|
|
imgs_hierarchical, 'imgs_hierarchical')
|
|
js_imgs_thumb = jsonv_dumps(imgs_thumb, 'imgs_thumb')
|
|
js_imgs_small = jsonv_dumps(imgs_small, 'imgs_small')
|
|
js_imgs_dcent = jsonv_dumps(imgs_dcent, 'imgs_dcent')
|
|
js_imgs_large = jsonv_dumps(imgs_large, 'imgs_large')
|
|
js_imgs_full = jsonv_dumps(imgs_full, 'imgs_full')
|
|
js_formats = jsonv_dumps(img_formats, 'formats')
|
|
js_dimens = jsonv_dumps(img_dimens, 'dimens')
|
|
js_sizes = jsonv_dumps(img_sizes, 'sizes')
|
|
js_name = jsonv_dumps(name, 'name')
|
|
print(f'Generating ZIP thumbs for {repr(name)}...', flush=True)
|
|
zip_thumb = zipFiles(
|
|
output_directory.joinpath(page+'_thumb.zip'),
|
|
*[output_directory.joinpath(img) for img in imgs_thumb]
|
|
)
|
|
print(f'Generating ZIP smalls for {repr(name)}...', flush=True)
|
|
zip_small = zipFiles(
|
|
output_directory.joinpath(page+'_small.zip'),
|
|
*[output_directory.joinpath(img) for img in imgs_small]
|
|
)
|
|
print(f'Generating ZIP dcents for {repr(name)}...', flush=True)
|
|
zip_dcent = zipFiles(
|
|
output_directory.joinpath(page+'_dcent.zip'),
|
|
*[output_directory.joinpath(img) for img in imgs_dcent]
|
|
)
|
|
print(f'Generating ZIP larges for {repr(name)}...', flush=True)
|
|
zip_large = zipFiles(
|
|
output_directory.joinpath(page+'_large.zip'),
|
|
*[output_directory.joinpath(img) for img in imgs_large]
|
|
)
|
|
print(f'Generating ZIP fulls for {repr(name)}...', flush=True)
|
|
zip_full = zipFiles(
|
|
output_directory.joinpath(page+'_full.zip'),
|
|
*[output_directory.joinpath(img) for img in imgs_full]
|
|
)
|
|
zips = [zip_thumb, zip_small, zip_dcent, zip_large, zip_full]
|
|
zips = [dict(name=zp.name, size=zp.stat().st_size) for zp in zips]
|
|
for zp in zips:
|
|
zp['fmtd'] = sizeFmt(zp['size'])
|
|
js_zips = jsonv_dumps(zips, 'zips')
|
|
print(f'Rendering page {repr(name)}...', flush=True)
|
|
rendered = template.render(
|
|
language=language,
|
|
name=name,
|
|
org_hierarchical=org_hierarchical,
|
|
org_linear=org_linear,
|
|
imgs_hierarchical=imgs_hierarchical,
|
|
imgs_thumb=imgs_thumb,
|
|
imgs_small=imgs_small,
|
|
imgs_dcent=imgs_dcent,
|
|
imgs_large=imgs_large,
|
|
imgs_full=imgs_full,
|
|
formats=img_formats,
|
|
dimens=img_dimens,
|
|
sizes=img_sizes,
|
|
artists=artists,
|
|
images=images,
|
|
social=social,
|
|
tags=tags,
|
|
zips=zips,
|
|
js_org_hierarchical=js_org_hierarchical,
|
|
js_org_linear=js_org_linear,
|
|
js_imgs_hierarchical=js_imgs_hierarchical,
|
|
js_imgs_thumb=js_imgs_thumb,
|
|
js_imgs_small=js_imgs_small,
|
|
js_imgs_dcent=js_imgs_dcent,
|
|
js_imgs_large=js_imgs_large,
|
|
js_imgs_full=js_imgs_full,
|
|
js_formats=js_formats,
|
|
js_dimens=js_dimens,
|
|
js_sizes=js_sizes,
|
|
js_artists=js_artists,
|
|
js_images=js_images,
|
|
js_social=js_social,
|
|
js_tags=js_tags,
|
|
js_name=js_name,
|
|
js_zips=js_zips
|
|
)
|
|
minified = rendered
|
|
# minified = htmlmin.minify(rendered, remove_empty_space=True)
|
|
output_directory.joinpath(f"{page}.html").write_text(minified)
|
|
print('All done!', flush=True)
|
|
|
|
|
|
def get_config_parser_from_file(file):
|
|
config = configparser.ConfigParser()
|
|
config.read(file)
|
|
return config
|
|
|
|
|
|
def printusage():
|
|
print("Usage:", file=sys.stderr)
|
|
print(
|
|
f" python{sys.version.major} -m {sys.argv[0]} <config.ini>",
|
|
file=sys.stderr
|
|
)
|
|
|
|
|
|
def zipFiles(out, *inputs):
|
|
if out.exists():
|
|
return out
|
|
zf = zipfile.ZipFile(
|
|
out,
|
|
mode='w',
|
|
compression=zipfile.ZIP_DEFLATED,
|
|
allowZip64=True,
|
|
compresslevel=9
|
|
)
|
|
for i, p in enumerate(inputs):
|
|
zf.writestr(
|
|
'%04d%s' % (i, p.suffix),
|
|
p.read_bytes(),
|
|
compress_type=zipfile.ZIP_DEFLATED,
|
|
compresslevel=9
|
|
)
|
|
zf.close()
|
|
return out
|
|
|
|
|
|
def sizeFmt(bytecount):
|
|
scale = ('B', 'KB', 'MB', 'GB', 'TB')
|
|
magnitude = 0
|
|
while bytecount > 2048:
|
|
magnitude += 1
|
|
bytecount /= 1024
|
|
return "%0.2f %s" % (bytecount, scale[magnitude])
|
|
|
|
|
|
def main():
|
|
if len(sys.argv) == 2:
|
|
configfile = Path(sys.argv[1])
|
|
if configfile.exists():
|
|
run(configfile)
|
|
else:
|
|
printusage()
|
|
else:
|
|
printusage()
|