# site_fursona_refs/trivial_static_image_gallery/__init__.py
#
# 937 lines
# 35 KiB
# Python

#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import configparser
import json
import math
import shutil
import sys
import time
import warnings
import zipfile
from hashlib import sha256 as hashalgo
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import jinja2
from markupsafe import Markup
from PIL import Image
import htmlmin
from flaskext.markdown import Markdown
class Objectify(object):
    """A thin attribute-and-index view over a dict.

    Keyword arguments override entries of *obj*; attribute access and
    ``[]`` access both read/write the same underlying mapping.
    """

    def __init__(self, obj=None, **kwargs):
        merged = dict(kwargs) if obj is None else {**obj, **kwargs}
        self.__dict__ = merged

    def __str__(self):
        return str(self.__dict__)

    def __repr__(self):
        return f'{type(self).__name__}({self.__dict__!r})'

    def __getitem__(self, idx):
        return self.__dict__[idx]

    def __setitem__(self, idx, val):
        self.__dict__[idx] = val
# Lift PIL's decompression-bomb guard: the gallery processes trusted local
# originals that can exceed the default pixel limit.
Image.MAX_IMAGE_PIXELS = None
# Jinja templates are loaded from ./tpl relative to the current working
# directory (absolute-ized once at import time).
jinjaEnvironment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        str(Path('tpl').absolute())
    )
)
# HACK: Flask-Markdown is handed an Objectify stand-in instead of a real
# Flask app — presumably only `app.jinja_env` is touched, registering the
# `markdown` filter on jinjaEnvironment as a side effect. TODO confirm no
# other app attributes are accessed.
md = Markdown(app=Objectify(dict(jinja_env=jinjaEnvironment)),
              extensions=['fenced_code'],
              safe_mode=True)
def treat_split(l):
    """Strip every element of *l* and drop the ones that end up empty."""
    return [piece for piece in (s.strip() for s in l) if piece]
def jsonp_dumps(obj, padding='callback'):
    """Serialize *obj* as a JSONP invocation of *padding*, marked HTML-safe."""
    payload = json.dumps(obj, sort_keys=True)
    return Markup(f'{padding}({payload});')
def jsonv_dumps(obj, var='variable'):
    """Serialize *obj* as a ``var <name> = ...;`` JS statement, marked HTML-safe."""
    payload = json.dumps(obj, sort_keys=True)
    return Markup(f'var {var} = {payload};')
def json_dumps(obj):
    """Serialize *obj* as key-sorted JSON, marked HTML-safe."""
    return Markup(json.dumps(obj, sort_keys=True))
def digest_for(bts):
    """Hex digest of *bts* under the module-wide hash algorithm (sha256)."""
    return hashalgo(bts).hexdigest()
class LeveledItem(object):
    """A tree node: one value plus a LeveledList of children."""

    def __init__(self, item, *children):
        self.item = item
        self.children = LeveledList(children)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return f'{type(self).__name__}({self.item!r}, *{self.children!r})'

    def to_jsonable(self, depth=1):
        """Plain-dict form, recursing *depth* levels into the children."""
        return dict(item=self.item, children=self.children.to_jsonable(depth))

    def transform(self, dct: dict):
        """New node with every value replaced via dct[value]."""
        return type(self)(dct[self.item], *self.children.transform(dct))

    def leveled_transform(self, *dcts):
        """Like transform(), but each nesting level uses its own mapping."""
        first, *rest = dcts
        return type(self)(first[self.item],
                          *self.children.leveled_transform(*rest))

    def linearify(self):
        """Flatten this node and its children into one LeveledList."""
        flat = [self.item]
        flat.extend(self.children.linearify())
        return LeveledList(flat)
class LeveledList(list):
    """A list that may hold LeveledItem nodes, forming a shallow tree."""

    def __new__(cls, base=None):
        # Mirrors list construction; list.__new__ ignores the extra arg and
        # list.__init__ then populates from *base*.
        return list.__new__(cls, base)

    def to_jsonable(self, depth=2):
        """Plain nested lists/dicts, recursing at most *depth* levels."""
        if depth <= 1:
            return [child for child in self]
        return [child.to_jsonable(depth - 1) for child in self]

    def transform(self, dct: dict):
        """New list with every leaf value replaced via dct[value]."""
        out = []
        for entry in self:
            if isinstance(entry, LeveledItem):
                out.append(entry.transform(dct))
            else:
                out.append(dct[entry])
        return type(self)(out)

    def leveled_transform(self, *dcts):
        """Like transform(), but items nested deeper use later mappings."""
        head = dcts[0]
        return type(self)([
            entry.leveled_transform(*dcts)
            if isinstance(entry, LeveledItem) else head[entry]
            for entry in self
        ])

    def linearify(self):
        """Flatten nested items/lists into one flat LeveledList."""
        return type(self)(self._linearify())

    def _linearify(self):
        for entry in self:
            if isinstance(entry, (LeveledList, LeveledItem)):
                yield from entry.linearify()
            else:
                yield entry
def parse_leveled_list(file: Path) -> Tuple[LeveledList, Dict[str, int]]:
    """Parse a two-level outline file into a LeveledList.

    Lines starting with '-' open a top-level item; lines starting with '='
    append a child to the most recent '-' item. Blank/whitespace lines are
    dropped first.

    Returns:
        (tree, key_to_line): the tree, plus a mapping from each item text
        to its zero-based position among the cleaned lines.

    Raises:
        RuntimeError: on a line with an unknown prefix, or an '=' line
            that appears before any '-' line.
    """
    k2l: Dict[str, int] = dict()
    lst = LeveledList()
    lines = treat_split(file.read_text().splitlines())
    for lnumb, line in enumerate(lines):
        itm = line[1:]
        if line[0] == '-':
            lst.append(LeveledItem(itm))
        elif line[0] == '=':
            # BUG FIX: an '=' line before any '-' line used to crash with a
            # bare IndexError on lst[-1]; report it as a parse error instead.
            if not lst:
                raise RuntimeError(
                    f'child line {repr(line)} has no parent item')
            lst[-1].children.append(itm)
        else:
            raise RuntimeError('cannot determine nestedfulness level')
        k2l[itm] = lnumb
    return lst, k2l
class ImageScalingDefinition(object):
    """Abstract target bounding box for image scaling.

    x, y: maximum width/height (None means unconstrained). Concrete
    subclasses implement resize().
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def resize(self, img: Image):
        """Resize *img* in place; must be overridden by subclasses."""
        raise NotImplementedError
class ImageThumbnailerDefinition(ImageScalingDefinition):
    # Shrink-only scaler: computes a bounding box for Image.thumbnail and
    # never upscales (thumbnail itself preserves aspect ratio within the box).

    def get_resize_res(self, img: Image):
        """Return the (x, y) box to shrink *img* into, or None when no
        resize is needed (no constraint configured, or the image already
        fits inside the box)."""
        x = self.x
        y = self.y
        if x is not None or y is not None:
            dx, dy = img.size
            if x is None or y is None:
                # Only one axis constrained: synthesize the other bound.
                if x is None:
                    if dy >= y:
                        # Shrinking case: width bound may stay at current
                        # width — Image.thumbnail fits within (dx, y)
                        # keeping aspect ratio.
                        x = dx
                    else:
                        # Image already shorter than the target: scale the
                        # width bound up so the fit-check below yields None.
                        x = math.ceil(dx*(y/dy))
                elif y is None:
                    if dx >= x:
                        y = dy
                    else:
                        y = math.ceil(dy*(x/dx))
            # If the box is at least as large as the image on both axes,
            # fall through and return None (no-op).
            if not (x >= dx and y >= dy):
                return (x, y)

    def resize(self, img: Image):
        """Shrink *img* in place to fit the configured box (no-op if it fits)."""
        tpl = self.get_resize_res(img)
        if tpl is not None:
            img.thumbnail(tpl)
def remove_transparency(im, bg_colour=(0xF5, 0xF5, 0xF5)):
    """Flatten any alpha channel of *im* onto a solid background colour.

    Opaque images are returned unchanged; transparent ones are composited
    onto an RGB canvas (and the original handle is closed).
    """
    has_alpha = im.mode in ('RGBA', 'LA') or (
        im.mode == 'P' and 'transparency' in im.info)
    if not has_alpha:
        return im
    mask = im.convert('RGBA').split()[-1]
    flattened = Image.new("RGB", im.size, bg_colour)
    flattened.paste(im, mask=mask)
    im.close()
    return flattened
def get_images(images_directory, output_directory):
    """Ensure every source image exists in all five output sizes and
    collect per-image metadata.

    Scans *images_directory* recursively (skipping .git), converts any
    missing size variant into *output_directory*, and returns a dict of
    image basename -> Objectify(source, files, formats, dimens, sizes).
    NOTE(review): keyed by basename — two files with the same name in
    different subdirectories would collide; confirm inputs are flat/unique.
    """
    print('Setting image paths...', flush=True)
    images_source = list(filter(lambda x: x.name != '.git',
                                images_directory.rglob('*.*')))
    # Bounding boxes per output stereotype; `full` keeps the original size.
    images_sizes = dict(
        thumb=ImageThumbnailerDefinition(300, 300),
        small=ImageThumbnailerDefinition(900, 900),
        dcent=ImageThumbnailerDefinition(1440, 1440),
        large=ImageThumbnailerDefinition(2560, 2560),
        full=ImageThumbnailerDefinition(None, None)
    )
    # name -> (source path, content digest); the digest becomes part of the
    # output filename, so edited images regenerate under a new name.
    images_source = {
        image.name: (
            image,
            digest_for(image.read_bytes())
        )
        for image in images_source
    }
    images_source = {
        k: Objectify(
            source=i,
            files=Objectify(
                thumb=output_directory.joinpath(f'thumbnail_{d}.jpg'),
                small=output_directory.joinpath(f'small_{d}.jpg'),
                dcent=output_directory.joinpath(f'decent_{d}.jpg'),
                large=output_directory.joinpath(f'large_{d}.jpg'),
                full=output_directory.joinpath(f'full_{d}{i.suffix}')
            ),
            formats=Objectify(),
            dimens=Objectify(),
            sizes=Objectify()
        )
        for k, (i, d) in images_source.items()
    }
    print('Ensuring all images were converted...', flush=True)
    showProfiler = False
    profiled_images = list()
    for image_name, image_source in images_source.items():
        already_printed = False
        start = time.time()
        for size_name, image_size in images_sizes.items():
            image_destination = image_source.files[size_name]
            # Conversion is incremental: only missing outputs are produced.
            if not image_destination.exists():
                if not already_printed:
                    print(f'Current image: {image_name}', flush=True)
                    already_printed = True
                convert_image(
                    image_source,
                    image_size,
                    size_name,
                    image_destination,
                    image_name
                )
        end = time.time()
        # Images that took more than a second trigger the report below.
        if end-start > 1:
            showProfiler = True
            profiled_images.append(((end-start), image_name))
        # NOTE(review): the Image.open handles below are never explicitly
        # closed; they are only used to read .size.
        image_source.dimens = Objectify({
            size_name: Image.open(image_source.files[size_name]).size
            for size_name in images_sizes
        })
        image_source.sizes = Objectify({
            size_name: image_source.files[size_name].stat().st_size
            for size_name in images_sizes
        })
        image_source.formats = Objectify({
            size_name: image_source.files[size_name].suffix[1:].upper()
            for size_name in images_sizes
        })
    if showProfiler:
        # Report slowest conversions first, padded into rough columns.
        profiled_images.sort()
        profiled_images.reverse()
        print('Profiled conversion times:', flush=True)
        for i, (d, p) in enumerate(profiled_images):
            s = ' %03d: % 6.2fs - %s ' % (i+1, d, p)
            if len(s) < 70:
                s += ' '*(70-len(s))
            s += 'x'.join(list(map(str, images_source[p].dimens.full)))
            print(s, flush=True)
        total = sum(list(map(lambda a: a[0], profiled_images)))
        print(' Total: %.3fs' % total, flush=True)
    return images_source
def convert_image(image_source,
                  image_size,
                  size_name,
                  image_destination,
                  image_name
                  ):
    """Write one sized variant (*size_name*) of *image_source*.

    When the scaler requests no resize and the source suffix already matches
    the destination, the original file is copied verbatim; otherwise the
    image is shrunk in place, flattened onto an opaque background, and
    saved to *image_destination*.
    """
    img = Image.open(image_source.source)
    resz = image_size.get_resize_res(img)
    # These prints are necessary for the CI runner to not kill the process
    print(f'[{size_name}] ', end='')
    if (
        resz is None
        and
        image_source.source.suffix == image_destination.suffix
    ):
        print('did not resize: ', end='')
        shutil.copyfile(
            image_source.source,
            image_destination
        )
        # NOTE(review): resz is reassigned here and below but never read
        # after this function's prints — appears vestigial.
        resz = img.size
    else:
        print('got resized [', end='')
        print('x'.join(list(map(str, img.size))), end='')
        image_size.resize(img)
        print('::', end='')
        print('x'.join(list(map(str, img.size))), end='')
        print(']', end='')
        img = remove_transparency(img)
        img.save(image_destination)
        resz = img.size
    print(': ', end='')
    print(f'{image_name}', flush=True)
    img.close()
    del img
def parse_art_info(images_description: configparser.ConfigParser,
                   tags_description: configparser.ConfigParser,
                   tag_groups_description: configparser.ConfigParser,
                   artists_description: configparser.ConfigParser,
                   social_description: configparser.ConfigParser,
                   bodyparts_description: configparser.ConfigParser
                   ):
    """Resolve the six INI descriptions into cross-linked plain dicts.

    Images may inherit from a parent image via ``partof``/``sameas``.
    Tags, artists, tag groups, social platforms and body parts that are
    referenced by images but missing from their INI files are filled in
    from each INI's DEFAULT section (with a warning).

    Returns:
        (tags, tag_groups, social, images, artists, bodyparts), each a
        dict keyed by the corresponding INI section name.

    Raises:
        ValueError: when an image's parent dependency is itself ignored
            or missing so the image can never be resolved.
    """
    print('Parsing art info...', flush=True)
    tags = dict()
    social = dict()
    images: Dict[str, Any] = dict()
    artists = dict()
    bodyparts = dict()
    tag_groups = dict()
    # Work queues: seeded from the INI sections, then grown by whatever
    # the images actually reference.
    pending_tags = list(set(tags_description.sections())-{'DEFAULT'})
    pending_social = list(set(social_description.sections())-{'DEFAULT'})
    pending_images = list(set(images_description.sections())-{'DEFAULT'})
    pending_artists = list(set(artists_description.sections())-{'DEFAULT'})
    pending_bodyparts = list(set(bodyparts_description.sections())-{'DEFAULT'})
    pending_tag_groups = list(
        set(tag_groups_description.sections())-{'DEFAULT'})
    delayed_images = set()

    def interpret_tag_operations(tag_sequence):
        # '!tag' retracts an inherited/earlier tag; anything else is added
        # once, preserving first-seen order.
        result = []
        for tag in tag_sequence:
            if tag.startswith('!'):
                removed = tag[1:]
                result = [t for t in result if t != removed]
            elif tag not in result:
                result.append(tag)
        return result

    def failsafe_set(this, image_def, attr, hard=True, default=None, mapper=lambda a: a):
        # Take the INI value when present (un-escaping doubled single
        # quotes); otherwise keep the inherited value (hard=True) or fall
        # back to *default* (hard=False).
        this[attr] = mapper(image_def[attr].replace("''", "'") if attr in image_def else (
            this[attr] if hard else this.get(attr, default)))

    def getBodyPart(bp):
        # Resolve and memoize a body part's display name.
        if bp not in bodyparts:
            if bp in bodyparts_description:
                bp_def = bodyparts_description[bp]
                bodyparts[bp] = bp_def.get('name', bp).strip()
            else:
                bodyparts[bp] = bp.strip()
                warnings.warn(
                    f'Body part {repr(bp)} not found in INI file')
        return bp

    for bp in pending_bodyparts:
        getBodyPart(bp)
    while len(pending_images) > 0:
        image_name, *pending_images = pending_images
        image_def = images_description[image_name]
        if (
            image_def.get('ignore', 'false') == 'false'
            and
            image_name not in images
        ):
            this = dict()
            parent = None
            if 'partof' in image_def:
                parent = image_def['partof']
            elif 'sameas' in image_def:
                parent = image_def['sameas']
            oldtags = []
            if parent is not None and parent not in images:
                # Parent not parsed yet: requeue the parent first, then this
                # image; hitting the same image twice means the parent can
                # never be resolved (ignored or missing).
                if image_name in delayed_images:
                    raise ValueError(
                        f'Parent dependency of {repr(image_name)} ({repr(parent)}) cannot be ignored'
                    )
                pending_images = [parent, image_name, *pending_images]
                delayed_images.add(image_name)
                continue
            elif parent is not None:
                this = images[parent].copy()
                oldtags = this['tags'].copy()
            failsafe_set(this, image_def, 'name')
            failsafe_set(this, image_def, 'date')
            failsafe_set(this, image_def, 'artist')
            failsafe_set(this, image_def, 'technology')
            failsafe_set(this, image_def, 'tags', False, '')
            failsafe_set(this, image_def, 'link', False, None)
            failsafe_set(this, image_def, 'notes', False, '')
            cd = this.get('color', dict()).copy()
            for key in image_def:
                if key.startswith('color.'):
                    # Keys look like 'color.<appropriate>[@<on-image>]'.
                    value: Optional[str] = image_def[key].strip()
                    set_unknown = False
                    if value in ('?', '??', '???'):
                        set_unknown = True
                    if value in ('', '-', '?', '??', '???'):
                        value = None
                    key2 = key[6:].split('@')
                    getBodyPart(key2[0])
                    getBodyPart(key2[-1])
                    k = key2[-1]
                    cd[k] = None if value is None else dict(
                        targetOnImage=key2[-1],
                        targetAppropriate=key2[0],
                        palleteChoiceAccurate=key2[0] == key2[-1],
                        colorUsed=value.upper(),
                        colorIsLight=determineLightOrDark(value.upper())
                    )
                    if set_unknown:
                        # '?' marks the colour as unknown: drop even an
                        # inherited entry ('-' keeps an explicit None).
                        cd.pop(k, None)
            if isinstance(this['date'], str):
                # 'YYYY-MM-DD' -> [YYYY, MM, DD]; blank segments dropped.
                this['date'] = [int(part)
                                for part in this['date'].split('-')
                                if part.strip()]
            if isinstance(this['tags'], str):
                this['tags'] = [tag.strip()
                                for tag in this['tags'].split(',')
                                if tag.strip()]
            this['tags'] = interpret_tag_operations(oldtags+this['tags'])
            pending_artists.append(this['artist'])
            pending_tags.append(this['technology'])
            pending_tags += this['tags']
            this['color'] = cd
            images[image_name] = this
    pending_tags = list(set(pending_tags))
    pending_artists = list(set(pending_artists))
    print('Parsing artist info...', flush=True)
    for pending_artist in pending_artists:
        this = dict()
        if pending_artist in artists_description:
            artist_def = artists_description[pending_artist]
            sns = list()
            this['name'] = pending_artist
            failsafe_set(this, artist_def, 'name')
            for sn in artist_def:
                if sn != 'name':
                    # Every non-name key is a social-network handle.
                    this[sn] = artist_def[sn]
                    sns.append(sn)
                    pending_social.append(sn)
            this['@'] = sorted(sns)
        else:
            warnings.warn(
                f'Artist {repr(pending_artist)} not found in INI file')
            this['name'] = pending_artist
        artists[pending_artist] = this
    pending_social = list(set(pending_social))
    print('Parsing tag info...', flush=True)
    for pending_tag in pending_tags:
        this = dict()
        if pending_tag in tags_description:
            tag_def = tags_description[pending_tag]
            if 'name' in tag_def:
                failsafe_set(this, tag_def, 'name')
            if 'icon' in tag_def:
                failsafe_set(this, tag_def, 'icon')
            if 'taggroup' in tag_def:
                failsafe_set(this, tag_def, 'taggroup')
            if 'position' in tag_def:
                failsafe_set(this, tag_def, 'position', mapper=int)
        else:
            warnings.warn(
                f'Tag {repr(pending_tag)} not found in INI file')
        if 'name' not in this:
            this['name'] = pending_tag
        if 'icon' not in this:
            this['icon'] = tags_description['DEFAULT']['icon']
        if 'taggroup' not in this:
            this['taggroup'] = 'DEFAULT'
        if 'position' not in this:
            # BUG FIX: this guard previously re-tested 'taggroup' (which is
            # always present by this point), so tags missing from the INI
            # never received a default position and later sorts crashed
            # with KeyError('position').
            this['position'] = int(tags_description['DEFAULT']['position'])
        pending_tag_groups.append(this['taggroup'])
        tags[pending_tag] = this
    pending_tag_groups = list(set(pending_tag_groups))
    print('Parsing tag group info...', flush=True)
    for pending_tag_group in [*pending_tag_groups, 'DEFAULT']:
        this = dict()
        if pending_tag_group in tag_groups_description:
            tag_def = tag_groups_description[pending_tag_group]
            if 'name' in tag_def:
                failsafe_set(this, tag_def, 'name')
            if 'required' in tag_def:
                failsafe_set(this, tag_def, 'required',
                             mapper=lambda a: a == 'True')
            if 'single' in tag_def:
                failsafe_set(this, tag_def, 'single',
                             mapper=lambda a: a == 'True')
            if 'position' in tag_def:
                failsafe_set(this, tag_def, 'position', mapper=int)
        else:
            warnings.warn(
                f'Tag group {repr(pending_tag_group)} not found in INI file')
        if 'name' not in this:
            this['name'] = pending_tag_group
        if 'required' not in this:
            this['required'] = tag_groups_description['DEFAULT']['required'] == 'True'
        if 'single' not in this:
            this['single'] = tag_groups_description['DEFAULT']['single'] == 'True'
        if 'position' not in this:
            this['position'] = int(
                tag_groups_description['DEFAULT']['position'])
        tag_groups[pending_tag_group] = this
    print('Parsing social network info...', flush=True)
    for pending_s in pending_social:
        this = dict()
        if pending_s in social_description:
            social_def = social_description[pending_s]
            if 'name' in social_def:
                failsafe_set(this, social_def, 'name')
            if 'icon' in social_def:
                failsafe_set(this, social_def, 'icon')
        else:
            warnings.warn(
                f'Social Platform {repr(pending_s)} not found in INI file')
        if 'name' not in this:
            this['name'] = pending_s
        if 'icon' not in this:
            this['icon'] = social_description['DEFAULT']['icon']
        social[pending_s] = this
    return tags, tag_groups, social, images, artists, bodyparts
def run(configfile):
    """Build the whole gallery described by *configfile*.

    Parses the INI descriptions, guarantees a notes file per image, checks
    tag-group policies, converts every image into all output sizes, then
    renders one minified HTML page (plus a webmanifest JSON and ZIP
    bundles of every size stereotype) per entry in cfg.pages.
    """
    configfolder = configfile.parent
    config = get_config_parser_from_file(configfile)
    # Optional indirection: a config may just point at another config file.
    if config['cfg'].get('lookAt', False):
        return run(configfolder.joinpath(config['cfg']['lookAt']))
    output_directory = Path(config['cfg']['output_directory'])
    images_directory = Path(config['cfg']['images_directory'])
    image_notes_path = Path(config['cfg']['image_notes_directory'])
    # Filenames present on disk vs. the ones the per-page outlines actually
    # reference; the difference is reported at the end.
    input_images_files = set(map(lambda path: path.name, filter(
        Path.is_file, images_directory.glob('*'))))
    used_images_files = set()
    webmanifest_template = json.loads(
        Path(config['cfg']['webmanifest']).read_text())
    # template_directory = get_config_parser_from_file(
    #     configfolder.joinpath(config['cfg']['template_directory']))
    images_description = get_config_parser_from_file(
        configfolder.joinpath(config['cfg']['images_description']))
    tags_description = get_config_parser_from_file(
        configfolder.joinpath(config['cfg']['tags_description']))
    tag_groups_description = get_config_parser_from_file(
        configfolder.joinpath(config['cfg']['tag_groups_description']))
    artists_description = get_config_parser_from_file(
        configfolder.joinpath(config['cfg']['artists_description']))
    social_description = get_config_parser_from_file(
        configfolder.joinpath(config['cfg']['social_description']))
    bodyparts_description = get_config_parser_from_file(
        configfolder.joinpath(config['cfg']['bodyparts_description']))
    pages = treat_split(config['cfg']['pages'].split(','))
    tags, tag_groups, social, images_, artists, bodyparts = parse_art_info(
        images_description,
        tags_description,
        tag_groups_description,
        artists_description,
        social_description,
        bodyparts_description
    )
    sorted_artists = list(map(lambda a: a[0], sorted(
        artists.items(), key=lambda a: a[1]['name'])))
    # Image notes: ensure a (possibly blank) markdown file exists for every
    # image, then fold its contents into the image record.
    image_notes_path.mkdir(parents=True, exist_ok=True)
    for img in images_.keys():
        imgnotepath = image_notes_path.joinpath(f'{img}.md')
        if not imgnotepath.exists():
            imgnotepath.touch(exist_ok=True)
            imgnotepath.write_text('\n')
    image_notes = dict(map(
        lambda img: (img, image_notes_path.joinpath(f'{img}.md').read_text()),
        images_.keys()))
    for k, img in images_.items():
        img['notes'] = image_notes[k].rstrip()
    del image_notes
    # Grouping and sorting tags
    all_tags_by_group = dict(
        map(lambda a: (a['taggroup'], list()), tags.values()))
    all_tags_by_group['DEFAULT'] = list()
    for tag_id, tag in tags.items():
        group_id = tag['taggroup']
        all_tags_by_group[group_id].append(tag_id)
    # Sort each group's tags (and each image's tags) by group position,
    # then tag position, then tag name.
    for k, groups in all_tags_by_group.items():
        all_tags_by_group[k] = sorted(
            groups,
            key=lambda i: (tag_groups[tags[i]['taggroup']]
                           ['position'], tags[i]['position'], tags[i]['name'])
        )
    for image in images_.values():
        image['tags'] = sorted(
            image['tags'],
            key=lambda i: (tag_groups[tags[i]['taggroup']]
                           ['position'], tags[i]['position'], tags[i]['name'])
        )
    tag_groups_sorted = sorted(
        tag_groups.keys(),
        key=lambda g: (tag_groups[g]['position'], tag_groups[g]['name']))
    tag_groups_nonempty_sorted = list(filter(
        lambda g: len(all_tags_by_group[g]),
        sorted(tag_groups.keys(),
               key=lambda g: (tag_groups[g]['position'], tag_groups[g]['name']))))
    # Ensuring tag policies: advisory only — violations are printed, not
    # raised, so one bad image does not stop the build.
    for image_file, image_data in sorted(images_.items()):
        image_tags = [image_data['technology']]+image_data['tags']
        image_tags_grouped = dict(
            map(lambda a: (a, list()), tag_groups.keys()))
        for image_tag in image_tags:
            image_tags_grouped[tags[image_tag]['taggroup']].append(image_tag)
        for tag_group_name, tag_group in sorted(tag_groups.items()):
            itg = image_tags_grouped[tag_group_name]
            if tag_group_name == 'DEFAULT' and len(itg) > 0:
                print(f'On image {repr(image_file)}: ')
                print(f'  Using group {repr(tag_group_name)} is discouraged')
                print(f'  Found: {itg}')
                print(f'  Aceptable: {all_tags_by_group[tag_group_name]}')
            elif tag_group_name == 'DEFAULT':
                continue
            elif len(itg) == 0 and tag_group['required']:
                print(f'On image {repr(image_file)}:')
                print(
                    f'  At least one tag of group {repr(tag_group_name)} is required')
                print(f'  Found: {itg}')
                print(f'  Aceptable: {all_tags_by_group[tag_group_name]}')
            elif len(itg) > 1 and tag_group['single']:
                print(f'On image {repr(image_file)}:')
                print(
                    f'  Up to one tag of group {repr(tag_group_name)} is admitted')
                print(f'  Found: {itg}')
                print(f'  Aceptable: {all_tags_by_group[tag_group_name]}')
    all_images = get_images(images_directory, output_directory)
    image_stereotypes = 'thumb,small,dcent,large,full'.split(',')
    # Per stereotype: source image name -> generated output filename.
    imgsrc2imgurl = Objectify({
        s: {
            k: v.files[s].name
            for k, v in all_images.items()
        }
        for s in image_stereotypes
    })
    # print(all_images)
    js_tags = jsonv_dumps(tags, 'tags')
    js_social = jsonv_dumps(social, 'social')
    js_artists = jsonv_dumps(artists, 'artists')
    print('Processing pages...', flush=True)
    for page in pages:
        mes = treat_split(config[page].get('me', '').split('||'))
        name = config[page]['name'].strip()
        language = config[page]['language'].strip()
        keywords = config[page].get('keywords', '').strip()
        description = config[page].get('description', '').strip()
        if not keywords:
            keywords = None
        if not description:
            description = None
        # Page palette: 'color.<id>' entries are either '#RRGGBB' literals
        # or '@<other-id>' aliases to a previously declared colour.
        color_list = treat_split(config[page].get(
            'colors', '').strip().split(','))
        canonical_colors = dict()
        color_repeated = {True: 0, False: 0}
        for clr in color_list:
            c = config[page]['color.'+clr].strip()
            if c.startswith('#'):
                canonical_colors[clr] = dict(
                    color=c,
                    thisColor=clr,
                    rootColor=clr,
                    repeated=False,
                    colorIsLight=determineLightOrDark(c)
                )
                color_repeated[False] += 1
            elif c.startswith('@'):
                # Alias: resolve to the referenced colour's value and root.
                c2 = c
                c = canonical_colors[c2[1:]]['color']
                r = canonical_colors[c2[1:]]['rootColor']
                canonical_colors[clr] = dict(
                    color=c,
                    thisColor=clr,
                    rootColor=r,
                    repeated=True,
                    colorIsLight=determineLightOrDark(c)
                )
                color_repeated[True] += 1
            else:
                raise NotImplementedError
        print(f'Processing page {repr(name)}...', flush=True)
        template = jinjaEnvironment.get_template(
            f"{config[page].get('template', page)}.html"
        )
        # The page's 'sorted' outline file defines which images appear and
        # in what (possibly nested) order.
        imgorg, key2line = parse_leveled_list(
            Path(config[page]['sorted'].strip()))
        for i in imgorg.linearify():
            used_images_files.add(i)
        # print(jsonp_dumps(imgorg.transform(key2line).linearify()))
        # Keyed by outline line number so plain sorting restores file order.
        images__ = {l: images_[k] for k, l in key2line.items()}
        images = list(map(
            lambda a: a[1],
            sorted(list(images__.items()))
        ))
        js_images = jsonv_dumps(images, 'images')
        img_formats = [
            all_images[i].formats.__dict__ for i in imgorg.linearify()]
        img_dimens = [
            all_images[i].dimens.__dict__ for i in imgorg.linearify()]
        img_sizes = [
            all_images[i].sizes.__dict__ for i in imgorg.linearify()]
        js_canonical_colors = jsonv_dumps(canonical_colors, 'canonical_colors')
        js_color_repeated = jsonv_dumps(color_repeated, 'color_repeated')
        js_color_list = jsonv_dumps(color_list, 'color_list')
        js_bodyparts = jsonv_dumps(bodyparts, 'bodyparts')
        org_hierarchical = imgorg.transform(key2line).to_jsonable()
        org_linear = imgorg.transform(imgsrc2imgurl.small).linearify()
        imgs_hierarchical = imgorg.leveled_transform(
            imgsrc2imgurl.small, imgsrc2imgurl.thumb).linearify()
        imgs_thumb = imgorg.transform(imgsrc2imgurl.thumb).linearify()
        imgs_small = imgorg.transform(imgsrc2imgurl.small).linearify()
        imgs_dcent = imgorg.transform(imgsrc2imgurl.dcent).linearify()
        imgs_large = imgorg.transform(imgsrc2imgurl.large).linearify()
        imgs_full = imgorg.transform(imgsrc2imgurl.full).linearify()
        js_org_hierarchical = jsonv_dumps(org_hierarchical, 'org_hierarchical')
        js_org_linear = jsonv_dumps(org_linear, 'org_linear')
        js_imgs_hierarchical = jsonv_dumps(
            imgs_hierarchical, 'imgs_hierarchical')
        js_imgs_thumb = jsonv_dumps(imgs_thumb, 'imgs_thumb')
        js_imgs_small = jsonv_dumps(imgs_small, 'imgs_small')
        js_imgs_dcent = jsonv_dumps(imgs_dcent, 'imgs_dcent')
        js_imgs_large = jsonv_dumps(imgs_large, 'imgs_large')
        js_imgs_full = jsonv_dumps(imgs_full, 'imgs_full')
        js_formats = jsonv_dumps(img_formats, 'formats')
        js_dimens = jsonv_dumps(img_dimens, 'dimens')
        js_sizes = jsonv_dumps(img_sizes, 'sizes')
        js_name = jsonv_dumps(name, 'name')
        js_mes = jsonv_dumps(mes, 'mes')
        js_tag_groups_sorted = jsonv_dumps(
            tag_groups_sorted, 'tag_groups_sorted')
        js_all_tags_by_group = jsonv_dumps(
            all_tags_by_group, 'all_tags_by_group')
        js_tag_groups = jsonv_dumps(tag_groups, 'tag_groups')
        js_tag_groups_nonempty_sorted = jsonv_dumps(
            tag_groups_nonempty_sorted, 'tag_groups_nonempty_sorted')
        js_sorted_artists = jsonv_dumps(sorted_artists, 'sorted_artists')
        # One downloadable ZIP per size stereotype (zipFiles is a no-op
        # when the archive already exists).
        print(f'Generating ZIP thumbs for {repr(name)}...', flush=True)
        zip_thumb = zipFiles(
            output_directory.joinpath(page+'_thumb.zip'),
            *[output_directory.joinpath(img) for img in imgs_thumb]
        )
        print(f'Generating ZIP smalls for {repr(name)}...', flush=True)
        zip_small = zipFiles(
            output_directory.joinpath(page+'_small.zip'),
            *[output_directory.joinpath(img) for img in imgs_small]
        )
        print(f'Generating ZIP dcents for {repr(name)}...', flush=True)
        zip_dcent = zipFiles(
            output_directory.joinpath(page+'_dcent.zip'),
            *[output_directory.joinpath(img) for img in imgs_dcent]
        )
        print(f'Generating ZIP larges for {repr(name)}...', flush=True)
        zip_large = zipFiles(
            output_directory.joinpath(page+'_large.zip'),
            *[output_directory.joinpath(img) for img in imgs_large]
        )
        print(f'Generating ZIP fulls for {repr(name)}...', flush=True)
        zip_full = zipFiles(
            output_directory.joinpath(page+'_full.zip'),
            *[output_directory.joinpath(img) for img in imgs_full]
        )
        zips = [zip_thumb, zip_small, zip_dcent, zip_large, zip_full]
        zips = [dict(name=zp.name, size=zp.stat().st_size) for zp in zips]
        for zp in zips:
            zp['fmtd'] = sizeFmt(zp['size'])
        js_zips = jsonv_dumps(zips, 'zips')
        print(f'Rendering page {repr(name)}...', flush=True)
        webmanifest = {
            **webmanifest_template,
            'short_name': name,
            'name': name,
            'description': description,
            'start_url': f'./{page}.html',
        }
        js_webmanifest = jsonv_dumps(webmanifest, 'webmanifest')
        rendered = template.render(
            description=description,
            keywords=keywords,
            language=language,
            name=name,
            org_hierarchical=org_hierarchical,
            org_linear=org_linear,
            imgs_hierarchical=imgs_hierarchical,
            imgs_thumb=imgs_thumb,
            imgs_small=imgs_small,
            imgs_dcent=imgs_dcent,
            imgs_large=imgs_large,
            imgs_full=imgs_full,
            formats=img_formats,
            dimens=img_dimens,
            sizes=img_sizes,
            artists=artists,
            images=images,
            social=social,
            tags=tags,
            zips=zips,
            page=page,
            mes=mes,
            canonical_colors=canonical_colors,
            color_repeated=color_repeated,
            color_list=color_list,
            bodyparts=bodyparts,
            webmanifest=webmanifest,
            tag_groups_sorted=tag_groups_sorted,
            all_tags_by_group=all_tags_by_group,
            tag_groups=tag_groups,
            sorted_artists=sorted_artists,
            tag_groups_nonempty_sorted=tag_groups_nonempty_sorted,
            js_tag_groups_nonempty_sorted=js_tag_groups_nonempty_sorted,
            js_sorted_artists=js_sorted_artists,
            js_tag_groups_sorted=js_tag_groups_sorted,
            js_all_tags_by_group=js_all_tags_by_group,
            js_tag_groups=js_tag_groups,
            js_org_hierarchical=js_org_hierarchical,
            js_org_linear=js_org_linear,
            js_imgs_hierarchical=js_imgs_hierarchical,
            js_imgs_thumb=js_imgs_thumb,
            js_imgs_small=js_imgs_small,
            js_imgs_dcent=js_imgs_dcent,
            js_imgs_large=js_imgs_large,
            js_imgs_full=js_imgs_full,
            js_formats=js_formats,
            js_dimens=js_dimens,
            js_sizes=js_sizes,
            js_artists=js_artists,
            js_images=js_images,
            js_social=js_social,
            js_tags=js_tags,
            js_name=js_name,
            js_zips=js_zips,
            js_mes=js_mes,
            js_canonical_colors=js_canonical_colors,
            js_color_repeated=js_color_repeated,
            js_color_list=js_color_list,
            js_bodyparts=js_bodyparts,
            js_webmanifest=js_webmanifest
        )
        # NOTE(review): the first assignment is immediately overwritten by
        # the minified result — appears vestigial.
        minified = rendered
        minified = htmlmin.minify(rendered, remove_empty_space=True)
        output_directory.joinpath(f"{page}.html").write_text(minified)
        output_directory.joinpath(f"{page}.json").write_text(
            json_dumps(webmanifest))
    if len(unused_images_files := (input_images_files.difference(used_images_files))) > 0:
        print(f'Unused images: {sorted(list(unused_images_files))}')
    print('All done!', flush=True)
def get_config_parser_from_file(file):
    """Load *file* into a fresh ConfigParser.

    Missing/unreadable files are silently skipped by ConfigParser.read,
    yielding an empty parser.
    """
    parser = configparser.ConfigParser()
    parser.read(file)
    return parser
def printusage():
    """Print invocation help to stderr."""
    usage = f" python{sys.version_info.major} -m {sys.argv[0]} <config.ini>"
    print("Usage:", file=sys.stderr)
    print(usage, file=sys.stderr)
def determineLightOrDark(color):  # True: light, False: dark
    '''Classify a '#RRGGBB' colour as perceptually light (True) or dark
    (False) using luma coefficients:
    https://en.wikipedia.org/wiki/Grayscale#Luma_coding_in_video_systems'''
    if not (len(color) == 7 and color.startswith('#')):
        raise ValueError(f'Color {repr(color)} should be in #RRGGBB format')
    red = int(color[1:3], 16)
    green = int(color[3:5], 16)
    blue = int(color[5:7], 16)
    # Root-mean of channel squares weighted by perceived brightness.
    luma = (0.2126*red**2 + 0.7152*green**2 + 0.0722*blue**2)**0.5
    return luma > 127
def zipFiles(out, *inputs):
    """Create a deflated ZIP at *out* containing *inputs* renamed to
    sequential '0000.<ext>' entries; return *out*.

    If *out* already exists it is returned untouched (incremental builds).
    """
    if out.exists():
        return out
    # `with` guarantees the archive is closed (and its central directory
    # flushed) even if reading one of the inputs raises; the original
    # leaked the open handle on error.
    with zipfile.ZipFile(
        out,
        mode='w',
        compression=zipfile.ZIP_DEFLATED,
        allowZip64=True,
        compresslevel=9
    ) as zf:
        for i, p in enumerate(inputs):
            zf.writestr(
                '%04d%s' % (i, p.suffix),
                p.read_bytes(),
                compress_type=zipfile.ZIP_DEFLATED,
                compresslevel=9
            )
    return out
def sizeFmt(bytecount):
    """Format a byte count as a human-readable string, e.g. '4.00 KB'.

    Values up to 2048 stay in the current unit (matching the original
    threshold, which avoids sub-1 readings like '0.98 KB').
    """
    scale = ('B', 'KB', 'MB', 'GB', 'TB')
    magnitude = 0
    # BUG FIX: bound the magnitude so counts beyond TB no longer overrun
    # `scale` (the original raised IndexError above ~2 PB).
    while bytecount > 2048 and magnitude < len(scale) - 1:
        magnitude += 1
        bytecount /= 1024
    return "%0.2f %s" % (bytecount, scale[magnitude])
def main():
    """CLI entry point: expects exactly one argument, the config file path."""
    if len(sys.argv) != 2:
        printusage()
        return
    configfile = Path(sys.argv[1])
    if configfile.exists():
        run(configfile)
    else:
        printusage()