extract to json and create def from json
parent 2ee206c2d5
commit 44fed694e9
2 changed files with 100 additions and 107 deletions
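For orientation, a minimal sketch of the per-DEF JSON this commit introduces: extract_def writes it next to the extracted frames (as <name>.json beside <name>.dir/), and makedef reads it back. The name "example" and all values below are illustrative, not taken from a real file:

{
    "type": 66,
    "format": 1,
    "sequences": [
        {"group": 0, "frames": ["example.dir/00_00.png", "example.dir/00_01.png"]},
        {"group": 1, "frames": ["example.dir/01_00.png"]}
    ]
}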
@@ -7,6 +7,7 @@ import struct
from PIL import Image, ImageDraw
from collections import defaultdict
import os
import json
from common import crc24_func, font, sanitize_filename

def get_color(fname):
@@ -31,7 +32,6 @@ def extract_def(infile,outdir,shred=True):
palette.extend((r,g,b))

offsets = defaultdict(list)
k = 0 # for naming bogus filename entries
for i in range(blocks):
# bid - block id
# entries - number of images in this block
@@ -41,23 +41,18 @@ def extract_def(infile,outdir,shred=True):
# a list of 13 character long filenames
for j in range(entries):
name, = struct.unpack("13s", f.read(13))
name = sanitize_filename(name)
# if nothing remains, create bogus name
if len(name) == 0:
num = "%02d"%k
if len(bn)+len(num) > 9: # truncate name
name = bn[:9-len(num)]+num
else:
name = bn+num
k+=1
names.append(name)
# a list of offsets
for n in names:
for j in range(entries):
offs, = struct.unpack("<I", f.read(4))
offsets[bid].append((n,offs))
offsets[bid].append(offs)

os.mkdir(os.path.join(outdir,"%s.dir"%bn))

out_json = {"sequences":[],"type":t,"format":-1}

for bid,l in offsets.items():
for j,(n,offs) in enumerate(l):
frames=[]
for j,offs in enumerate(l):
f.seek(offs)
pixeldata = ""
# first entry is the size which is unused
@@ -66,10 +61,17 @@ def extract_def(infile,outdir,shred=True):
# w,h - width and height, w must be a multiple of 16
# lm,tm - left and top margin
_,fmt,fw,fh,w,h,lm,tm = struct.unpack("<IIIIIIii", f.read(32))
n = os.path.splitext(n)[0]
outname = "%s"%outdir+os.sep+"%02d_%s_%02d_%02d_%s_%d.png"%(t,bn,bid,j,n,fmt)
outname = os.path.join(outdir,"%s.dir"%bn,"%02d_%02d.png"%(bid,j))
print "writing to %s"%outname

if out_json["format"] == -1:
out_json["format"] = fmt
elif out_json["format"] != fmt:
print "format %d of this frame does not match of last frame %d"%(fmt,global_fmt)
return False

frames.append(os.path.join("%s.dir"%bn,"%02d_%02d.png"%(bid,j)))

if w != 0 and h != 0:
if fmt == 0:
pixeldata = f.read(w*h)
@@ -135,13 +137,8 @@ def extract_def(infile,outdir,shred=True):
return False
im = Image.fromstring('P', (w,h),pixeldata)
else: # either width or height is zero
if w == 0:
w = 1
if h == 0:
h = 1
# TODO: encode this information correctly and dont create a fake 1px image
im = Image.new('P', (w,h))
if shred:
im = None
if im and shred:
#im = Image.new("P", (w*3,h*3), get_color(bn))
#draw = ImageDraw.Draw(im)
#tw,th = draw.textsize("%d%s"%(j,bn),font=font)
@@ -162,11 +159,15 @@ def extract_def(infile,outdir,shred=True):
im = Image.fromarray(pixels)
imo = Image.new('P', (fw,fh))
imo.putpalette(palette)
if im:
imo.paste(im,(lm,tm))
#draw = ImageDraw.Draw(imo)
#tw,th = draw.textsize(bn,font=font)
#draw.text(((fw-tw)/2,(fh-th)/2),bn,255,font=font)
imo.save(outname)
out_json["sequences"].append({"group":bid,"frames":frames})
with open(os.path.join(outdir,"%s.json"%bn),"w+") as o:
json.dump(out_json,o,indent=4)
return True

if __name__ == '__main__':
makedef.py (114 changed lines)
@@ -1,14 +1,16 @@
#!/usr/bin/env python

import os
import re
import struct
import json
from collections import defaultdict
from PIL import Image
ushrtmax = (1<<16)-1

def encode0(im):
return ''.join([chr(i) for i in list(im.getdata())])
data = ''.join([chr(i) for i in list(im.getdata())])
size = len(data)
return data,size

# greedy RLE
# for each pixel, test which encoding manages to encode most data, then apply
@@ -16,7 +18,7 @@ def encode0(im):
def encode1(im):
pixels = im.load()
w,h = im.size
result = []
data = []
# these function return a tuple of the compressed string and the amount of
# pixels compressed
def rle_comp(x,y):
@@ -56,8 +58,10 @@ def encode1(im):
else:
r += rawc
x += rawl
result.append(r)
return result
data.append(r)
# 4*height bytes for lineoffsets
size = 4*h+sum([len(d) for d in data])
return data,size

def encode23chunk(s,e,pixels,y):
r = ''
@@ -115,38 +119,52 @@ def encode23chunk(s,e,pixels,y):
def encode2(im):
pixels = im.load()
w,h = im.size
result = []
data = []
for y in range(h):
result.append(encode23chunk(0,w,pixels,y))
return result
data.append(encode23chunk(0,w,pixels,y))
# 2*height bytes for lineoffsets plus two unknown bytes
size = 2*h+2+sum(len(d) for d in data)
return data,size

# this is like encode2 but limited to only encoding blocks of 32 pixels at a time
def encode3(im):
pixels = im.load()
w,h = im.size
result = []
data = []
for y in range(h):
res = []
# encode each row in 32 pixel blocks
for i in range(w/32):
res.append(encode23chunk(i*32, (i+1)*32, pixels, y))
result.append(res)
return result
data.append(res)
# width/16 bytes per line as offset header
size = (w/16)*h+sum(sum([len(e) for e in d]) for d in data)
return data,size

fmtencoders = [encode0,encode1,encode2,encode3]

def makedef(indir, outdir):
def makedef(infile, outdir):
infiles = defaultdict(list)
sig = None

with open(infile) as f:
in_json = json.load(f)

t = in_json["type"]
fmt = in_json["format"]
p = os.path.basename(infile)
p = os.path.splitext(p)[0].lower()
d = os.path.dirname(infile)

# sanity checks and fill infiles dict
for f in os.listdir(indir):
m = re.match('(\d+)_([a-z0-9_]+)_(\d+)_(\d+)_([A-Za-z0-9_]+)_([0-3]).png', f)
if not m:
continue
t,p,bid,j,fn,fmt = m.groups()
t,bid,j,fmt = int(t),int(bid),int(j),int(fmt)
im = Image.open(os.sep.join([indir,f]))
for seq in in_json["sequences"]:
bid = seq["group"]
for f in seq["frames"]:
im = Image.open(os.path.join(d,f))
fw,fh = im.size
if fmt == 2 and (fw != 32 or fh != 32):
print "format 2 must have width and height 32"
return False
lm,tm,rm,bm = im.getbbox() or (0,0,0,0)
# format 3 has to have width and lm divisible by 32
if fmt == 3 and lm%32 != 0:
@@ -161,7 +179,7 @@ def makedef(indir, outdir):
if im.mode != 'P':
print "input images must have a palette"
return False
cursig =(t,p,fw,fh,im.getpalette(),fmt)
cursig =(fw,fh,im.getpalette())
if not sig:
sig = cursig
else:
@@ -170,24 +188,14 @@ def makedef(indir, outdir):
print sig
print cursig
return False
if len(fn) > 9:
print "filename can't be longer than 9 bytes"
return False
data = fmtencoders[fmt](im)
infiles[bid].append((im,t,p,j,fn,lm,tm,fmt,data))
data,size = fmtencoders[fmt](im)
infiles[bid].append((w,h,lm,tm,data,size))

if len(infiles) == 0:
print "no input files detected"
return False

# check if j values for all bids are correct and sort them in j order in the process
for bid in infiles:
infiles[bid].sort(key=lambda t: t[3])
for k,(_,_,_,j,_,_,_,_,_) in enumerate(infiles[bid]):
if k != j:
print "incorrect j value %d for bid %d should be %d"%(j,bid,k)

t,p,fw,fh,pal,fmt = cursig
fw,fh,pal = cursig
outname = os.path.join(outdir,p)+".def"
print "writing to %s"%outname
outf = open(outname, "w+")
@@ -209,41 +217,27 @@ def makedef(indir, outdir):
# the last two values have unknown meaning
outf.write(struct.pack("<IIII",bid,len(l),0,0))
# write filenames
for _,_,_,_,fn,_,_,_,_ in l:
outf.write(struct.pack("13s", fn+".pcx"))
for i,_ in enumerate(l):
fn = "%02d_%03d.pcx"%(bid,i)
outf.write(struct.pack("13s", fn))
# write data offsets
for im,_,_,_,_,_,_,fmt,data in l:
for w,h,_,_,data,size in l:
outf.write(struct.pack("<I",curoffset))
w,h = im.size
# every image occupies size depending on its format plus 32 byte header
if fmt == 0:
curoffset += 32+len(data)
elif fmt == 1:
# 4*height bytes for lineoffsets
curoffset += 32+4*h+sum(len(d) for d in data)
elif fmt == 2:
# 2*height bytes for lineoffsets plus two unknown bytes
curoffset += 32+2*h+2+sum(len(d) for d in data)
elif fmt == 3:
# width/16 bytes per line as offset header
curoffset += 32+(w/16)*h+sum(sum([len(e) for e in d]) for d in data)
curoffset += 32+size

for bid,l in infiles.items():
for im,_,p,j,_,lm,tm,fmt,data in l:
w,h = im.size
for w,h,lm,tm,data,size in l:
# size
# format
# full width and full height
# width and height
# left and top margin
if fmt == 0:
s = len(data)
outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
buf = ''.join([chr(i) for i in list(im.getdata())])
outf.write(buf)
outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
outf.write(data)
elif fmt == 1:
s = 4*h+sum(len(d) for d in data)
outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
lineoffs = []
acc = 4*h
for d in data:
@@ -253,8 +247,7 @@ def makedef(indir, outdir):
for i in data:
outf.write(i)
elif fmt == 2:
s = 2*h+2+sum(len(d) for d in data)
outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
lineoffs = []
acc = 0
for d in data:
@@ -269,8 +262,7 @@ def makedef(indir, outdir):
for i in data:
outf.write(i)
elif fmt == 3:
s = (w/16)*h+sum(sum([len(e) for e in d]) for d in data)
outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
# store the offsets for all 32 pixel blocks
acc = 0
lineoffs = []
@@ -291,7 +283,7 @@ def makedef(indir, outdir):
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print "usage: %s indir outdir"%sys.argv[0]
print "usage: %s infile.json outdir"%sys.argv[0]
exit(1)
ret = makedef(sys.argv[1], sys.argv[2])
exit(0 if ret else 1)
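After this change makedef.py takes the extractor's JSON file instead of an input directory, per the updated usage string above. A hypothetical invocation (file and directory names are illustrative):

python makedef.py extracted/example.json rebuilt_defs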