extract to json and create def from json

parent 2ee206c2d5
commit 44fed694e9

2 changed files with 100 additions and 107 deletions
@@ -7,6 +7,7 @@ import struct
 from PIL import Image, ImageDraw
 from collections import defaultdict
 import os
+import json
 from common import crc24_func, font, sanitize_filename
 
 def get_color(fname):
@@ -31,7 +32,6 @@ def extract_def(infile,outdir,shred=True):
         palette.extend((r,g,b))
 
     offsets = defaultdict(list)
-    k = 0 # for naming bogus filename entries
     for i in range(blocks):
         # bid - block id
         # entries - number of images in this block
@@ -41,23 +41,18 @@ def extract_def(infile,outdir,shred=True):
         # a list of 13 character long filenames
         for j in range(entries):
             name, = struct.unpack("13s", f.read(13))
-            name = sanitize_filename(name)
-            # if nothing remains, create bogus name
-            if len(name) == 0:
-                num = "%02d"%k
-                if len(bn)+len(num) > 9: # truncate name
-                    name = bn[:9-len(num)]+num
-                else:
-                    name = bn+num
-                k+=1
-            names.append(name)
         # a list of offsets
-        for n in names:
+        for j in range(entries):
             offs, = struct.unpack("<I", f.read(4))
-            offsets[bid].append((n,offs))
+            offsets[bid].append(offs)
 
+    os.mkdir(os.path.join(outdir,"%s.dir"%bn))
+
+    out_json = {"sequences":[],"type":t,"format":-1}
+
     for bid,l in offsets.items():
-        for j,(n,offs) in enumerate(l):
+        frames=[]
+        for j,offs in enumerate(l):
             f.seek(offs)
             pixeldata = ""
             # first entry is the size which is unused
@@ -66,10 +61,17 @@ def extract_def(infile,outdir,shred=True):
             # w,h - width and height, w must be a multiple of 16
             # lm,tm - left and top margin
             _,fmt,fw,fh,w,h,lm,tm = struct.unpack("<IIIIIIii", f.read(32))
-            n = os.path.splitext(n)[0]
-            outname = "%s"%outdir+os.sep+"%02d_%s_%02d_%02d_%s_%d.png"%(t,bn,bid,j,n,fmt)
+            outname = os.path.join(outdir,"%s.dir"%bn,"%02d_%02d.png"%(bid,j))
             print "writing to %s"%outname
 
+            if out_json["format"] == -1:
+                out_json["format"] = fmt
+            elif out_json["format"] != fmt:
+                print "format %d of this frame does not match of last frame %d"%(fmt,global_fmt)
+                return False
+
+            frames.append(os.path.join("%s.dir"%bn,"%02d_%02d.png"%(bid,j)))
+
             if w != 0 and h != 0:
                 if fmt == 0:
                     pixeldata = f.read(w*h)
@@ -135,13 +137,8 @@ def extract_def(infile,outdir,shred=True):
                     return False
                 im = Image.fromstring('P', (w,h),pixeldata)
             else: # either width or height is zero
-                if w == 0:
-                    w = 1
-                if h == 0:
-                    h = 1
-                # TODO: encode this information correctly and dont create a fake 1px image
-                im = Image.new('P', (w,h))
-            if shred:
+                im = None
+            if im and shred:
                 #im = Image.new("P", (w*3,h*3), get_color(bn))
                 #draw = ImageDraw.Draw(im)
                 #tw,th = draw.textsize("%d%s"%(j,bn),font=font)
@@ -162,11 +159,15 @@ def extract_def(infile,outdir,shred=True):
                 im = Image.fromarray(pixels)
             imo = Image.new('P', (fw,fh))
             imo.putpalette(palette)
-            imo.paste(im,(lm,tm))
+            if im:
+                imo.paste(im,(lm,tm))
             #draw = ImageDraw.Draw(imo)
             #tw,th = draw.textsize(bn,font=font)
             #draw.text(((fw-tw)/2,(fh-th)/2),bn,255,font=font)
             imo.save(outname)
+        out_json["sequences"].append({"group":bid,"frames":frames})
+    with open(os.path.join(outdir,"%s.json"%bn),"w+") as o:
+        json.dump(out_json,o,indent=4)
     return True
 
 if __name__ == '__main__':
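With this change the extractor no longer packs type, block id, frame index and format into each PNG filename; it writes the frames into a "<name>.dir" directory and dumps a "<name>.json" manifest next to it, which makedef.py consumes below. A sketch of such a manifest (the name "example", the group ids, frame names and the type/format values are illustrative, not taken from a real file):

{
    "type": 2,
    "format": 1,
    "sequences": [
        {"group": 0, "frames": ["example.dir/00_00.png", "example.dir/00_01.png"]},
        {"group": 1, "frames": ["example.dir/01_00.png"]}
    ]
}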
makedef.py (114 changed lines)
@@ -1,14 +1,16 @@
 #!/usr/bin/env python
 
 import os
-import re
 import struct
+import json
 from collections import defaultdict
 from PIL import Image
 ushrtmax = (1<<16)-1
 
 def encode0(im):
-    return ''.join([chr(i) for i in list(im.getdata())])
+    data = ''.join([chr(i) for i in list(im.getdata())])
+    size = len(data)
+    return data,size
 
 # greedy RLE
 # for each pixel, test which encoding manages to encode most data, then apply
@@ -16,7 +18,7 @@ def encode0(im):
 def encode1(im):
     pixels = im.load()
     w,h = im.size
-    result = []
+    data = []
     # these function return a tuple of the compressed string and the amount of
     # pixels compressed
     def rle_comp(x,y):
@@ -56,8 +58,10 @@ def encode1(im):
         else:
             r += rawc
             x += rawl
-        result.append(r)
-    return result
+        data.append(r)
+    # 4*height bytes for lineoffsets
+    size = 4*h+sum([len(d) for d in data])
+    return data,size
 
 def encode23chunk(s,e,pixels,y):
     r = ''
@@ -115,38 +119,52 @@ def encode23chunk(s,e,pixels,y):
 def encode2(im):
     pixels = im.load()
     w,h = im.size
-    result = []
+    data = []
     for y in range(h):
-        result.append(encode23chunk(0,w,pixels,y))
-    return result
+        data.append(encode23chunk(0,w,pixels,y))
+    # 2*height bytes for lineoffsets plus two unknown bytes
+    size = 2*h+2+sum(len(d) for d in data)
+    return data,size
 
 # this is like encode2 but limited to only encoding blocks of 32 pixels at a time
 def encode3(im):
     pixels = im.load()
     w,h = im.size
-    result = []
+    data = []
     for y in range(h):
         res = []
         # encode each row in 32 pixel blocks
         for i in range(w/32):
             res.append(encode23chunk(i*32, (i+1)*32, pixels, y))
-        result.append(res)
-    return result
+        data.append(res)
+    # width/16 bytes per line as offset header
+    size = (w/16)*h+sum(sum([len(e) for e in d]) for d in data)
+    return data,size
 
 fmtencoders = [encode0,encode1,encode2,encode3]
 
-def makedef(indir, outdir):
+def makedef(infile, outdir):
     infiles = defaultdict(list)
     sig = None
+
+    with open(infile) as f:
+        in_json = json.load(f)
+
+    t = in_json["type"]
+    fmt = in_json["format"]
+    p = os.path.basename(infile)
+    p = os.path.splitext(p)[0].lower()
+    d = os.path.dirname(infile)
+
     # sanity checks and fill infiles dict
-    for f in os.listdir(indir):
-        m = re.match('(\d+)_([a-z0-9_]+)_(\d+)_(\d+)_([A-Za-z0-9_]+)_([0-3]).png', f)
-        if not m:
-            continue
-        t,p,bid,j,fn,fmt = m.groups()
-        t,bid,j,fmt = int(t),int(bid),int(j),int(fmt)
-        im = Image.open(os.sep.join([indir,f]))
+    for seq in in_json["sequences"]:
+        bid = seq["group"]
+        for f in seq["frames"]:
+            im = Image.open(os.path.join(d,f))
             fw,fh = im.size
+            if fmt == 2 and (fw != 32 or fh != 32):
+                print "format 2 must have width and height 32"
+                return False
             lm,tm,rm,bm = im.getbbox() or (0,0,0,0)
             # format 3 has to have width and lm divisible by 32
             if fmt == 3 and lm%32 != 0:
@@ -161,7 +179,7 @@ def makedef(indir, outdir):
             if im.mode != 'P':
                 print "input images must have a palette"
                 return False
-            cursig =(t,p,fw,fh,im.getpalette(),fmt)
+            cursig =(fw,fh,im.getpalette())
             if not sig:
                 sig = cursig
             else:
@@ -170,24 +188,14 @@ def makedef(indir, outdir):
                 print sig
                 print cursig
                 return False
-            if len(fn) > 9:
-                print "filename can't be longer than 9 bytes"
-                return False
-            data = fmtencoders[fmt](im)
-            infiles[bid].append((im,t,p,j,fn,lm,tm,fmt,data))
+            data,size = fmtencoders[fmt](im)
+            infiles[bid].append((w,h,lm,tm,data,size))
 
     if len(infiles) == 0:
         print "no input files detected"
         return False
 
-    # check if j values for all bids are correct and sort them in j order in the process
-    for bid in infiles:
-        infiles[bid].sort(key=lambda t: t[3])
-        for k,(_,_,_,j,_,_,_,_,_) in enumerate(infiles[bid]):
-            if k != j:
-                print "incorrect j value %d for bid %d should be %d"%(j,bid,k)
-
-    t,p,fw,fh,pal,fmt = cursig
+    fw,fh,pal = cursig
     outname = os.path.join(outdir,p)+".def"
     print "writing to %s"%outname
     outf = open(outname, "w+")
@@ -209,41 +217,27 @@ def makedef(indir, outdir):
         # the last two values have unknown meaning
         outf.write(struct.pack("<IIII",bid,len(l),0,0))
         # write filenames
-        for _,_,_,_,fn,_,_,_,_ in l:
-            outf.write(struct.pack("13s", fn+".pcx"))
+        for i,_ in enumerate(l):
+            fn = "%02d_%03d.pcx"%(bid,i)
+            outf.write(struct.pack("13s", fn))
         # write data offsets
-        for im,_,_,_,_,_,_,fmt,data in l:
+        for w,h,_,_,data,size in l:
             outf.write(struct.pack("<I",curoffset))
-            w,h = im.size
             # every image occupies size depending on its format plus 32 byte header
-            if fmt == 0:
-                curoffset += 32+len(data)
-            elif fmt == 1:
-                # 4*height bytes for lineoffsets
-                curoffset += 32+4*h+sum(len(d) for d in data)
-            elif fmt == 2:
-                # 2*height bytes for lineoffsets plus two unknown bytes
-                curoffset += 32+2*h+2+sum(len(d) for d in data)
-            elif fmt == 3:
-                # width/16 bytes per line as offset header
-                curoffset += 32+(w/16)*h+sum(sum([len(e) for e in d]) for d in data)
+            curoffset += 32+size
 
     for bid,l in infiles.items():
-        for im,_,p,j,_,lm,tm,fmt,data in l:
-            w,h = im.size
+        for w,h,lm,tm,data,size in l:
            # size
            # format
            # full width and full height
            # width and height
            # left and top margin
            if fmt == 0:
-                s = len(data)
-                outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
-                buf = ''.join([chr(i) for i in list(im.getdata())])
-                outf.write(buf)
+                outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
+                outf.write(data)
            elif fmt == 1:
-                s = 4*h+sum(len(d) for d in data)
-                outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
+                outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
                lineoffs = []
                acc = 4*h
                for d in data:
@@ -253,8 +247,7 @@ def makedef(indir, outdir):
                 for i in data:
                     outf.write(i)
             elif fmt == 2:
-                s = 2*h+2+sum(len(d) for d in data)
-                outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
+                outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
                 lineoffs = []
                 acc = 0
                 for d in data:
@@ -269,8 +262,7 @@ def makedef(indir, outdir):
                 for i in data:
                     outf.write(i)
             elif fmt == 3:
-                s = (w/16)*h+sum(sum([len(e) for e in d]) for d in data)
-                outf.write(struct.pack("<IIIIIIii",s,fmt,fw,fh,w,h,lm,tm))
+                outf.write(struct.pack("<IIIIIIii",size,fmt,fw,fh,w,h,lm,tm))
                 # store the offsets for all 32 pixel blocks
                 acc = 0
                 lineoffs = []
@@ -291,7 +283,7 @@ def makedef(indir, outdir):
 if __name__ == '__main__':
     import sys
     if len(sys.argv) != 3:
-        print "usage: %s indir outdir"%sys.argv[0]
+        print "usage: %s infile.json outdir"%sys.argv[0]
         exit(1)
     ret = makedef(sys.argv[1], sys.argv[2])
     exit(0 if ret else 1)
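makedef.py now takes the JSON manifest instead of a directory of specially named PNGs, and resolves the frame images relative to the manifest's own directory (os.path.dirname(infile)). A hypothetical invocation, assuming the extractor previously produced out/example.json and out/example.dir/ (the names are illustrative):

    python makedef.py out/example.json rebuilt/

This would write rebuilt/example.def, since the output name is derived from the manifest's basename.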