site_fursona_refs/trivial_static_image_gallery/__init__.py

#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import configparser
import json
import math
import shutil
import sys
import time
import warnings
import zipfile
from hashlib import sha256 as hashalgo
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import jinja2
from markupsafe import Markup
from PIL import Image
import htmlmin
from flaskext.markdown import Markdown
class Objectify(object):
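    '''Thin dict wrapper exposing keys as attributes as well as items;
    used below to stand in for a Flask app when initialising Flask-Markdown.'''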
def __init__(self, obj=None, **kwargs):
if obj is None:
obj = kwargs
else:
obj = {**obj, **kwargs}
self.__dict__ = obj
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return f'{type(self).__name__}({repr(self.__dict__)})'
def __getitem__(self, idx):
return self.__dict__[idx]
def __setitem__(self, idx, val):
self.__dict__[idx] = val
Image.MAX_IMAGE_PIXELS = None
jinjaEnvironment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
str(Path('tpl').absolute())
)
)
md = Markdown(app=Objectify(dict(jinja_env=jinjaEnvironment)),
extensions=['fenced_code'],
safe_mode=True)
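# Small helpers: a whitespace-stripping splitter and JSON serialisers
# (JSONP, "var x = ...;" and plain) returned as template-safe Markup.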
def treat_split(l): return list(filter(len, map(str.strip, l)))
def jsonp_dumps(obj, padding='callback'):
return Markup(f'{padding}({json.dumps(obj, sort_keys=True)});')
def jsonv_dumps(obj, var='variable'):
return Markup(f'var {var} = {json.dumps(obj, sort_keys=True)};')
def json_dumps(obj):
return Markup(f'{json.dumps(obj, sort_keys=True)}')
def digest_for(bts):
m = hashalgo()
m.update(bts)
return m.hexdigest()
class LeveledItem(object):
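    '''One node of a two-level outline: an item plus a LeveledList of its children.'''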
def __init__(self, item, *children):
self.item = item
self.children = LeveledList(children)
def __str__(self):
return repr(self)
def __repr__(self):
return f'{type(self).__name__}({repr(self.item)}, *{repr(self.children)})'
def to_jsonable(self, depth=1):
return {
'item': self.item,
'children': self.children.to_jsonable(depth),
}
def transform(self, dct: dict):
return type(self)(dct[self.item], *self.children.transform(dct))
def leveled_transform(self, *dcts):
head, *tail = dcts
return type(self)(head[self.item], *self.children.leveled_transform(*tail))
def linearify(self):
return LeveledList([self.item, *self.children.linearify()])
class LeveledList(list):
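    '''List of items and/or LeveledItems with helpers to map (transform),
    flatten (linearify) and JSON-serialise the nested structure.'''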
def __new__(cls, base=None):
return list.__new__(cls, base)
def to_jsonable(self, depth=2):
return [
child if depth <= 1 else child.to_jsonable(depth-1)
for child in self
]
def transform(self, dct: dict):
return type(self)([
i.transform(dct) if isinstance(i, LeveledItem) else dct[i]
for i in self
])
def leveled_transform(self, *dcts):
head, *_ = dcts
return type(self)([
            i.leveled_transform(*dcts) if isinstance(i, LeveledItem) else head[i]
for i in self
])
def linearify(self):
return type(self)(self._linearify())
def _linearify(self):
for l in [
i.linearify() if isinstance(i, (LeveledList, LeveledItem)) else [i]
for i in self
]:
yield from l
yield from []
def parse_leveled_list(file: Path) -> Tuple[LeveledList, Dict[str, int]]:
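    '''Read a plain-text outline: lines starting with '-' open a new top-level
    item and lines starting with '=' attach to the previous item. Returns the
    LeveledList plus a mapping from item text to its source line index.'''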
k2l: Dict[str, int] = dict()
lst = LeveledList()
lines = treat_split(file.read_text().splitlines())
for lnumb, line in enumerate(lines):
itm = line[1:]
if line[0] == '-':
lst.append(LeveledItem(itm))
elif line[0] == '=':
lst[-1].children.append(itm)
else:
            raise RuntimeError('cannot determine nesting level')
k2l[itm] = lnumb
return lst, k2l
class ImageScalingDefinition(object):
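    '''Target bounding box (x, y) for one output size; subclasses implement resize().'''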
def __init__(self, x, y):
self.x = x
self.y = y
    def resize(self, img: Image.Image):
raise NotImplementedError
class ImageThumbnailerDefinition(ImageScalingDefinition):
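    '''Shrink-to-fit sizing: get_resize_res() returns the box to hand to
    Image.thumbnail, or None when the source already fits or no box is set.'''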
    def get_resize_res(self, img: Image.Image):
x = self.x
y = self.y
if x is not None or y is not None:
dx, dy = img.size
if x is None or y is None:
if x is None:
if dy >= y:
x = dx
else:
x = math.ceil(dx*(y/dy))
elif y is None:
if dx >= x:
y = dy
else:
y = math.ceil(dy*(x/dx))
if not (x >= dx and y >= dy):
return (x, y)
    def resize(self, img: Image.Image):
tpl = self.get_resize_res(img)
if tpl is not None:
img.thumbnail(tpl)
def remove_transparency(im, bg_colour=(0xF5, 0xF5, 0xF5)):
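    '''Flatten RGBA/LA/paletted-transparent images onto an opaque background
    colour and return the RGB result; other modes are returned unchanged.'''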
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
alpha = im.convert('RGBA').split()[-1]
bg = Image.new("RGB", im.size, bg_colour)
bg.paste(im, mask=alpha)
im.close()
return bg
else:
return im
def get_images(images_directory, output_directory):
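    '''Hash every source image, derive per-size output paths, run any missing
    conversions (profiling slow ones) and record each variant's format,
    dimensions and byte size.'''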
print('Setting image paths...', flush=True)
images_source = list(filter(lambda x: x.name != '.git',
images_directory.rglob('*.*')))
images_sizes = dict(
thumb=ImageThumbnailerDefinition(300, 300),
small=ImageThumbnailerDefinition(900, 900),
dcent=ImageThumbnailerDefinition(1440, 1440),
large=ImageThumbnailerDefinition(2560, 2560),
full=ImageThumbnailerDefinition(None, None)
)
images_source = {
image.name: (
image,
digest_for(image.read_bytes())
)
for image in images_source
}
images_source = {
k: Objectify(
source=i,
files=Objectify(
thumb=output_directory.joinpath(f'thumbnail_{d}.jpg'),
small=output_directory.joinpath(f'small_{d}.jpg'),
dcent=output_directory.joinpath(f'decent_{d}.jpg'),
large=output_directory.joinpath(f'large_{d}.jpg'),
full=output_directory.joinpath(f'full_{d}{i.suffix}')
),
formats=Objectify(),
dimens=Objectify(),
sizes=Objectify()
)
for k, (i, d) in images_source.items()
}
print('Ensuring all images were converted...', flush=True)
showProfiler = False
profiled_images = list()
for image_name, image_source in images_source.items():
already_printed = False
start = time.time()
for size_name, image_size in images_sizes.items():
image_destination = image_source.files[size_name]
if not image_destination.exists():
if not already_printed:
print(f'Current image: {image_name}', flush=True)
already_printed = True
convert_image(
image_source,
image_size,
size_name,
image_destination,
image_name
)
end = time.time()
if end-start > 1:
showProfiler = True
profiled_images.append(((end-start), image_name))
image_source.dimens = Objectify({
size_name: Image.open(image_source.files[size_name]).size
for size_name in images_sizes
})
image_source.sizes = Objectify({
size_name: image_source.files[size_name].stat().st_size
for size_name in images_sizes
})
image_source.formats = Objectify({
size_name: image_source.files[size_name].suffix[1:].upper()
for size_name in images_sizes
})
if showProfiler:
profiled_images.sort()
profiled_images.reverse()
print('Profiled conversion times:', flush=True)
for i, (d, p) in enumerate(profiled_images):
s = ' %03d: % 6.2fs - %s ' % (i+1, d, p)
if len(s) < 70:
s += ' '*(70-len(s))
s += 'x'.join(list(map(str, images_source[p].dimens.full)))
print(s, flush=True)
total = sum(list(map(lambda a: a[0], profiled_images)))
print(' Total: %.3fs' % total, flush=True)
return images_source
def convert_image(image_source,
image_size,
size_name,
image_destination,
image_name
):
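    '''Produce one derived file: copy the original when no resize is needed and
    the suffix already matches, otherwise thumbnail it, flatten transparency
    and save it to the destination path.'''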
img = Image.open(image_source.source)
resz = image_size.get_resize_res(img)
# These prints are necessary for the CI runner to not kill the process
print(f'[{size_name}] ', end='')
if (
resz is None
and
image_source.source.suffix == image_destination.suffix
):
print('did not resize: ', end='')
shutil.copyfile(
image_source.source,
image_destination
)
resz = img.size
else:
print('got resized [', end='')
print('x'.join(list(map(str, img.size))), end='')
image_size.resize(img)
print('::', end='')
print('x'.join(list(map(str, img.size))), end='')
print(']', end='')
img = remove_transparency(img)
img.save(image_destination)
resz = img.size
print(': ', end='')
print(f'{image_name}', flush=True)
img.close()
del img
def parse_art_info(images_description: configparser.ConfigParser,
tags_description: configparser.ConfigParser,
tag_groups_description: configparser.ConfigParser,
artists_description: configparser.ConfigParser,
social_description: configparser.ConfigParser,
bodyparts_description: configparser.ConfigParser
):
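    '''Resolve images, tags, tag groups, artists, social platforms and body
    parts from their INI descriptions, following partof/sameas parents and
    warning about entries that are missing from the INI files.'''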
print('Parsing art info...', flush=True)
tags = dict()
social = dict()
images: Dict[str, Any] = dict()
artists = dict()
bodyparts = dict()
tag_groups = dict()
pending_tags = list(set(tags_description.sections())-{'DEFAULT'})
pending_social = list(set(social_description.sections())-{'DEFAULT'})
pending_images = list(set(images_description.sections())-{'DEFAULT'})
pending_artists = list(set(artists_description.sections())-{'DEFAULT'})
pending_bodyparts = list(set(bodyparts_description.sections())-{'DEFAULT'})
pending_tag_groups = list(
set(tag_groups_description.sections())-{'DEFAULT'})
delayed_images = set()
def interpret_tag_operations(tag_sequence):
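        '''Apply tags in order; a tag prefixed with '!' removes an earlier occurrence.'''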
tags = []
for tag in tag_sequence:
if tag.startswith('!'):
tag2 = tag[1:]
tags = [t for t in tags if t != tag2]
elif tag not in tags:
tags.append(tag)
return tags
def failsafe_set(this, image_def, attr, hard=True, default=None, mapper=lambda a: a):
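        '''Copy attr from image_def into this (un-escaping doubled single quotes);
        when absent, keep this[attr] (hard=True) or fall back to default.
        mapper post-processes whichever value was chosen.'''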
        this[attr] = mapper(
            image_def[attr].replace("''", "'")
            if attr in image_def
            else (this[attr] if hard else this.get(attr, default)))
def getBodyPart(bp):
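        '''Register a body part (warning when it has no INI entry) and return its key.'''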
if bp not in bodyparts:
if bp in bodyparts_description:
bp_def = bodyparts_description[bp]
bodyparts[bp] = bp_def.get('name', bp).strip()
else:
bodyparts[bp] = bp.strip()
warnings.warn(
f'Body part {repr(bp)} not found in INI file')
return bp
for bp in pending_bodyparts:
getBodyPart(bp)
while len(pending_images) > 0:
image_name, *pending_images = pending_images
image_def = images_description[image_name]
if(
image_def.get('ignore', 'false') == 'false'
and
image_name not in images
):
this = dict()
parent = None
if 'partof' in image_def:
parent = image_def['partof']
elif 'sameas' in image_def:
parent = image_def['sameas']
oldtags = []
if parent is not None and parent not in images:
if image_name in delayed_images:
raise ValueError(
f'Parent dependency of {repr(image_name)} ({repr(parent)}) cannot be ignored'
)
pending_images = [parent, image_name, *pending_images]
delayed_images.add(image_name)
continue
elif parent is not None:
this = images[parent].copy()
oldtags = this['tags'].copy()
failsafe_set(this, image_def, 'name')
failsafe_set(this, image_def, 'date')
failsafe_set(this, image_def, 'artist')
failsafe_set(this, image_def, 'technology')
failsafe_set(this, image_def, 'tags', False, '')
failsafe_set(this, image_def, 'link', False, None)
failsafe_set(this, image_def, 'notes', False, '')
cd = this.get('color', dict()).copy()
for key in image_def:
if key.startswith('color.'):
value: Optional[str] = image_def[key].strip()
set_unknown = False
if value in ('?', '??', '???'):
set_unknown = True
if value in ('', '-', '?', '??', '???'):
value = None
key2 = key[6:].split('@')
getBodyPart(key2[0])
getBodyPart(key2[-1])
k = key2[-1]
cd[k] = None if value is None else dict(
targetOnImage=key2[-1],
targetAppropriate=key2[0],
palleteChoiceAccurate=key2[0] == key2[-1],
colorUsed=value.upper(),
colorIsLight=determineLightOrDark(value.upper())
)
if set_unknown:
cd.pop(k, None)
if isinstance(this['date'], str):
this['date'] = list(
map(int, treat_split(this['date'].split('-'))))
if isinstance(this['tags'], str):
this['tags'] = treat_split(this['tags'].split(','))
this['tags'] = interpret_tag_operations(oldtags+this['tags'])
pending_artists.append(this['artist'])
pending_tags.append(this['technology'])
pending_tags += this['tags']
this['color'] = cd
images[image_name] = this
pending_tags = list(set(pending_tags))
pending_artists = list(set(pending_artists))
print('Parsing artist info...', flush=True)
for pending_artist in pending_artists:
this = dict()
if pending_artist in artists_description:
artist_def = artists_description[pending_artist]
sns = list()
this['name'] = pending_artist
failsafe_set(this, artist_def, 'name')
for sn in artist_def:
if sn != 'name':
this[sn] = artist_def[sn]
sns.append(sn)
pending_social.append(sn)
this['@'] = sorted(sns)
else:
warnings.warn(
f'Artist {repr(pending_artist)} not found in INI file')
this['name'] = pending_artist
artists[pending_artist] = this
pending_social = list(set(pending_social))
print('Parsing tag info...', flush=True)
for pending_tag in pending_tags:
this = dict()
if pending_tag in tags_description:
tag_def = tags_description[pending_tag]
if 'name' in tag_def:
failsafe_set(this, tag_def, 'name')
if 'icon' in tag_def:
failsafe_set(this, tag_def, 'icon')
if 'taggroup' in tag_def:
failsafe_set(this, tag_def, 'taggroup')
if 'position' in tag_def:
failsafe_set(this, tag_def, 'position', mapper=int)
else:
warnings.warn(
f'Tag {repr(pending_tag)} not found in INI file')
if 'name' not in this:
this['name'] = pending_tag
if 'icon' not in this:
this['icon'] = tags_description['DEFAULT']['icon']
if 'taggroup' not in this:
this['taggroup'] = 'DEFAULT'
        if 'position' not in this:
            this['position'] = int(tags_description['DEFAULT']['position'])
pending_tag_groups.append(this['taggroup'])
tags[pending_tag] = this
pending_tag_groups = list(set(pending_tag_groups))
print('Parsing tag group info...', flush=True)
for pending_tag_group in [*pending_tag_groups, 'DEFAULT']:
this = dict()
if pending_tag_group in tag_groups_description:
tag_def = tag_groups_description[pending_tag_group]
if 'name' in tag_def:
failsafe_set(this, tag_def, 'name')
if 'required' in tag_def:
failsafe_set(this, tag_def, 'required',
mapper=lambda a: a == 'True')
if 'single' in tag_def:
failsafe_set(this, tag_def, 'single',
mapper=lambda a: a == 'True')
if 'position' in tag_def:
failsafe_set(this, tag_def, 'position', mapper=int)
else:
warnings.warn(
f'Tag group {repr(pending_tag_group)} not found in INI file')
if 'name' not in this:
this['name'] = pending_tag_group
if 'required' not in this:
this['required'] = tag_groups_description['DEFAULT']['required'] == 'True'
if 'single' not in this:
this['single'] = tag_groups_description['DEFAULT']['single'] == 'True'
if 'position' not in this:
this['position'] = int(
tag_groups_description['DEFAULT']['position'])
tag_groups[pending_tag_group] = this
print('Parsing social network info...', flush=True)
for pending_s in pending_social:
this = dict()
if pending_s in social_description:
social_def = social_description[pending_s]
if 'name' in social_def:
failsafe_set(this, social_def, 'name')
if 'icon' in social_def:
failsafe_set(this, social_def, 'icon')
else:
warnings.warn(
f'Social Platform {repr(pending_s)} not found in INI file')
if 'name' not in this:
this['name'] = pending_s
if 'icon' not in this:
this['icon'] = social_description['DEFAULT']['icon']
social[pending_s] = this
return tags, tag_groups, social, images, artists, bodyparts
def run(configfile):
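    '''Full site build: load the INI config (optionally redirecting via lookAt),
    parse art metadata, convert images, check tag-group policies, build ZIP
    bundles and render every configured page plus its webmanifest.'''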
configfolder = configfile.parent
config = get_config_parser_from_file(configfile)
if config['cfg'].get('lookAt', False):
return run(configfolder.joinpath(config['cfg']['lookAt']))
output_directory = Path(config['cfg']['output_directory'])
images_directory = Path(config['cfg']['images_directory'])
image_notes_path = Path(config['cfg']['image_notes_directory'])
input_images_files = set(map(lambda path: path.name, filter(
Path.is_file, images_directory.glob('*'))))
used_images_files = set()
webmanifest_template = json.loads(
Path(config['cfg']['webmanifest']).read_text())
# template_directory = get_config_parser_from_file(
# configfolder.joinpath(config['cfg']['template_directory']))
images_description = get_config_parser_from_file(
configfolder.joinpath(config['cfg']['images_description']))
tags_description = get_config_parser_from_file(
configfolder.joinpath(config['cfg']['tags_description']))
tag_groups_description = get_config_parser_from_file(
configfolder.joinpath(config['cfg']['tag_groups_description']))
artists_description = get_config_parser_from_file(
configfolder.joinpath(config['cfg']['artists_description']))
social_description = get_config_parser_from_file(
configfolder.joinpath(config['cfg']['social_description']))
bodyparts_description = get_config_parser_from_file(
configfolder.joinpath(config['cfg']['bodyparts_description']))
pages = treat_split(config['cfg']['pages'].split(','))
tags, tag_groups, social, images_, artists, bodyparts = parse_art_info(
images_description,
tags_description,
tag_groups_description,
artists_description,
social_description,
bodyparts_description
)
sorted_artists = list(map(lambda a: a[0], sorted(
artists.items(), key=lambda a: a[1]['name'])))
# Image notes
image_notes_path.mkdir(parents=True, exist_ok=True)
for img in images_.keys():
imgnotepath = image_notes_path.joinpath(f'{img}.md')
if not imgnotepath.exists():
imgnotepath.touch(exist_ok=True)
imgnotepath.write_text('\n')
image_notes = dict(map(
lambda img: (img, image_notes_path.joinpath(f'{img}.md').read_text()),
images_.keys()))
for k, img in images_.items():
img['notes'] = image_notes[k].rstrip()
del image_notes
# Grouping and sorting tags
all_tags_by_group = dict(
map(lambda a: (a['taggroup'], list()), tags.values()))
all_tags_by_group['DEFAULT'] = list()
for tag_id, tag in tags.items():
group_id = tag['taggroup']
all_tags_by_group[group_id].append(tag_id)
for k, groups in all_tags_by_group.items():
all_tags_by_group[k] = sorted(
groups,
key=lambda i: (tag_groups[tags[i]['taggroup']]
['position'], tags[i]['position'], tags[i]['name'])
)
for image in images_.values():
image['tags'] = sorted(
image['tags'],
key=lambda i: (tag_groups[tags[i]['taggroup']]
['position'], tags[i]['position'], tags[i]['name'])
)
tag_groups_sorted = sorted(
tag_groups.keys(),
key=lambda g: (tag_groups[g]['position'], tag_groups[g]['name']))
tag_groups_nonempty_sorted = list(filter(
lambda g: len(all_tags_by_group[g]),
sorted(tag_groups.keys(),
key=lambda g: (tag_groups[g]['position'], tag_groups[g]['name']))))
# Ensuring tag policies
for image_file, image_data in sorted(images_.items()):
image_tags = [image_data['technology']]+image_data['tags']
image_tags_grouped = dict(
map(lambda a: (a, list()), tag_groups.keys()))
for image_tag in image_tags:
image_tags_grouped[tags[image_tag]['taggroup']].append(image_tag)
for tag_group_name, tag_group in sorted(tag_groups.items()):
itg = image_tags_grouped[tag_group_name]
if tag_group_name == 'DEFAULT' and len(itg) > 0:
print(f'On image {repr(image_file)}: ')
print(f' Using group {repr(tag_group_name)} is discouraged')
print(f' Found: {itg}')
                print(f' Acceptable: {all_tags_by_group[tag_group_name]}')
elif tag_group_name == 'DEFAULT':
continue
elif len(itg) == 0 and tag_group['required']:
print(f'On image {repr(image_file)}:')
print(
f' At least one tag of group {repr(tag_group_name)} is required')
print(f' Found: {itg}')
                print(f' Acceptable: {all_tags_by_group[tag_group_name]}')
elif len(itg) > 1 and tag_group['single']:
print(f'On image {repr(image_file)}:')
print(
f' Up to one tag of group {repr(tag_group_name)} is admitted')
print(f' Found: {itg}')
                print(f' Acceptable: {all_tags_by_group[tag_group_name]}')
all_images = get_images(images_directory, output_directory)
image_stereotypes = 'thumb,small,dcent,large,full'.split(',')
imgsrc2imgurl = Objectify({
s: {
k: v.files[s].name
for k, v in all_images.items()
}
for s in image_stereotypes
})
# print(all_images)
js_tags = jsonv_dumps(tags, 'tags')
js_social = jsonv_dumps(social, 'social')
js_artists = jsonv_dumps(artists, 'artists')
print('Processing pages...', flush=True)
for page in pages:
mes = treat_split(config[page].get('me', '').split('||'))
name = config[page]['name'].strip()
language = config[page]['language'].strip()
keywords = config[page].get('keywords', '').strip()
description = config[page].get('description', '').strip()
if not keywords:
keywords = None
if not description:
description = None
color_list = treat_split(config[page].get(
'colors', '').strip().split(','))
canonical_colors = dict()
color_repeated = {True: 0, False: 0}
for clr in color_list:
c = config[page]['color.'+clr].strip()
if c.startswith('#'):
canonical_colors[clr] = dict(
color=c,
thisColor=clr,
rootColor=clr,
repeated=False,
colorIsLight=determineLightOrDark(c)
)
color_repeated[False] += 1
elif c.startswith('@'):
c2 = c
c = canonical_colors[c2[1:]]['color']
r = canonical_colors[c2[1:]]['rootColor']
canonical_colors[clr] = dict(
color=c,
thisColor=clr,
rootColor=r,
repeated=True,
colorIsLight=determineLightOrDark(c)
)
color_repeated[True] += 1
else:
raise NotImplementedError
print(f'Processing page {repr(name)}...', flush=True)
template = jinjaEnvironment.get_template(
f"{config[page].get('template', page)}.html"
)
imgorg, key2line = parse_leveled_list(
Path(config[page]['sorted'].strip()))
for i in imgorg.linearify():
used_images_files.add(i)
# print(jsonp_dumps(imgorg.transform(key2line).linearify()))
images__ = {l: images_[k] for k, l in key2line.items()}
images = list(map(
lambda a: a[1],
sorted(list(images__.items()))
))
js_images = jsonv_dumps(images, 'images')
img_formats = [
all_images[i].formats.__dict__ for i in imgorg.linearify()]
img_dimens = [
all_images[i].dimens.__dict__ for i in imgorg.linearify()]
img_sizes = [
all_images[i].sizes.__dict__ for i in imgorg.linearify()]
js_canonical_colors = jsonv_dumps(canonical_colors, 'canonical_colors')
js_color_repeated = jsonv_dumps(color_repeated, 'color_repeated')
js_color_list = jsonv_dumps(color_list, 'color_list')
js_bodyparts = jsonv_dumps(bodyparts, 'bodyparts')
org_hierarchical = imgorg.transform(key2line).to_jsonable()
org_linear = imgorg.transform(imgsrc2imgurl.small).linearify()
imgs_hierarchical = imgorg.leveled_transform(
imgsrc2imgurl.small, imgsrc2imgurl.thumb).linearify()
imgs_thumb = imgorg.transform(imgsrc2imgurl.thumb).linearify()
imgs_small = imgorg.transform(imgsrc2imgurl.small).linearify()
imgs_dcent = imgorg.transform(imgsrc2imgurl.dcent).linearify()
imgs_large = imgorg.transform(imgsrc2imgurl.large).linearify()
imgs_full = imgorg.transform(imgsrc2imgurl.full).linearify()
js_org_hierarchical = jsonv_dumps(org_hierarchical, 'org_hierarchical')
js_org_linear = jsonv_dumps(org_linear, 'org_linear')
js_imgs_hierarchical = jsonv_dumps(
imgs_hierarchical, 'imgs_hierarchical')
js_imgs_thumb = jsonv_dumps(imgs_thumb, 'imgs_thumb')
js_imgs_small = jsonv_dumps(imgs_small, 'imgs_small')
js_imgs_dcent = jsonv_dumps(imgs_dcent, 'imgs_dcent')
js_imgs_large = jsonv_dumps(imgs_large, 'imgs_large')
js_imgs_full = jsonv_dumps(imgs_full, 'imgs_full')
js_formats = jsonv_dumps(img_formats, 'formats')
js_dimens = jsonv_dumps(img_dimens, 'dimens')
js_sizes = jsonv_dumps(img_sizes, 'sizes')
js_name = jsonv_dumps(name, 'name')
js_mes = jsonv_dumps(mes, 'mes')
js_tag_groups_sorted = jsonv_dumps(
tag_groups_sorted, 'tag_groups_sorted')
js_all_tags_by_group = jsonv_dumps(
all_tags_by_group, 'all_tags_by_group')
js_tag_groups = jsonv_dumps(tag_groups, 'tag_groups')
js_tag_groups_nonempty_sorted = jsonv_dumps(
tag_groups_nonempty_sorted, 'tag_groups_nonempty_sorted')
js_sorted_artists = jsonv_dumps(sorted_artists, 'sorted_artists')
print(f'Generating ZIP thumbs for {repr(name)}...', flush=True)
zip_thumb = zipFiles(
output_directory.joinpath(page+'_thumb.zip'),
*[output_directory.joinpath(img) for img in imgs_thumb]
)
print(f'Generating ZIP smalls for {repr(name)}...', flush=True)
zip_small = zipFiles(
output_directory.joinpath(page+'_small.zip'),
*[output_directory.joinpath(img) for img in imgs_small]
)
print(f'Generating ZIP dcents for {repr(name)}...', flush=True)
zip_dcent = zipFiles(
output_directory.joinpath(page+'_dcent.zip'),
*[output_directory.joinpath(img) for img in imgs_dcent]
)
print(f'Generating ZIP larges for {repr(name)}...', flush=True)
zip_large = zipFiles(
output_directory.joinpath(page+'_large.zip'),
*[output_directory.joinpath(img) for img in imgs_large]
)
print(f'Generating ZIP fulls for {repr(name)}...', flush=True)
zip_full = zipFiles(
output_directory.joinpath(page+'_full.zip'),
*[output_directory.joinpath(img) for img in imgs_full]
)
zips = [zip_thumb, zip_small, zip_dcent, zip_large, zip_full]
zips = [dict(name=zp.name, size=zp.stat().st_size) for zp in zips]
for zp in zips:
zp['fmtd'] = sizeFmt(zp['size'])
js_zips = jsonv_dumps(zips, 'zips')
print(f'Rendering page {repr(name)}...', flush=True)
webmanifest = {
**webmanifest_template,
'short_name': name,
'name': name,
'description': description,
'start_url': f'./{page}.html',
}
js_webmanifest = jsonv_dumps(webmanifest, 'webmanifest')
rendered = template.render(
description=description,
keywords=keywords,
language=language,
name=name,
org_hierarchical=org_hierarchical,
org_linear=org_linear,
imgs_hierarchical=imgs_hierarchical,
imgs_thumb=imgs_thumb,
imgs_small=imgs_small,
imgs_dcent=imgs_dcent,
imgs_large=imgs_large,
imgs_full=imgs_full,
formats=img_formats,
dimens=img_dimens,
sizes=img_sizes,
artists=artists,
images=images,
social=social,
tags=tags,
zips=zips,
page=page,
mes=mes,
canonical_colors=canonical_colors,
color_repeated=color_repeated,
color_list=color_list,
bodyparts=bodyparts,
webmanifest=webmanifest,
tag_groups_sorted=tag_groups_sorted,
all_tags_by_group=all_tags_by_group,
tag_groups=tag_groups,
sorted_artists=sorted_artists,
tag_groups_nonempty_sorted=tag_groups_nonempty_sorted,
js_tag_groups_nonempty_sorted=js_tag_groups_nonempty_sorted,
js_sorted_artists=js_sorted_artists,
js_tag_groups_sorted=js_tag_groups_sorted,
js_all_tags_by_group=js_all_tags_by_group,
js_tag_groups=js_tag_groups,
js_org_hierarchical=js_org_hierarchical,
js_org_linear=js_org_linear,
js_imgs_hierarchical=js_imgs_hierarchical,
js_imgs_thumb=js_imgs_thumb,
js_imgs_small=js_imgs_small,
js_imgs_dcent=js_imgs_dcent,
js_imgs_large=js_imgs_large,
js_imgs_full=js_imgs_full,
js_formats=js_formats,
js_dimens=js_dimens,
js_sizes=js_sizes,
js_artists=js_artists,
js_images=js_images,
js_social=js_social,
js_tags=js_tags,
js_name=js_name,
js_zips=js_zips,
js_mes=js_mes,
js_canonical_colors=js_canonical_colors,
js_color_repeated=js_color_repeated,
js_color_list=js_color_list,
js_bodyparts=js_bodyparts,
js_webmanifest=js_webmanifest
)
        minified = htmlmin.minify(rendered, remove_empty_space=True)
output_directory.joinpath(f"{page}.html").write_text(minified)
output_directory.joinpath(f"{page}.json").write_text(
json_dumps(webmanifest))
if len(unused_images_files := (input_images_files.difference(used_images_files))) > 0:
print(f'Unused images: {sorted(list(unused_images_files))}')
print('All done!', flush=True)
def get_config_parser_from_file(file):
config = configparser.ConfigParser()
config.read(file)
return config
def printusage():
print("Usage:", file=sys.stderr)
print(
f" python{sys.version_info.major} -m {sys.argv[0]} <config.ini>",
file=sys.stderr
)
def determineLightOrDark(color): # True: light, False: dark
'''https://en.wikipedia.org/wiki/Grayscale#Luma_coding_in_video_systems'''
yr = 0.2126
yg = 0.7152
yb = 0.0722
if not (len(color) == 7 and color.startswith('#')):
raise ValueError(f'Color {repr(color)} should be in #RRGGBB format')
r = int(color[1:3], 16)
g = int(color[3:5], 16)
b = int(color[5:7], 16)
hsp = (
yr*r**2 +
yg*g**2 +
yb*b**2)**0.5
return hsp > 127
def zipFiles(out, *inputs):
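    '''Write the given files into a ZIP at out (skipped when it already exists),
    storing entries under a zero-padded index plus the original suffix.'''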
if out.exists():
return out
zf = zipfile.ZipFile(
out,
mode='w',
compression=zipfile.ZIP_DEFLATED,
allowZip64=True,
compresslevel=9
)
for i, p in enumerate(inputs):
zf.writestr(
'%04d%s' % (i, p.suffix),
p.read_bytes(),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=9
)
zf.close()
return out
def sizeFmt(bytecount):
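    '''Human-readable byte count, stepping up a unit while the value exceeds 2048.'''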
scale = ('B', 'KB', 'MB', 'GB', 'TB')
magnitude = 0
while bytecount > 2048:
magnitude += 1
bytecount /= 1024
return "%0.2f %s" % (bytecount, scale[magnitude])
def main():
if len(sys.argv) == 2:
configfile = Path(sys.argv[1])
if configfile.exists():
run(configfile)
else:
printusage()
else:
printusage()