Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import bpy
- import bpy.props
- import bpy_extras
- import mathutils
- from mathutils import Vector, Matrix
- from bpy_extras.io_utils import ImportHelper, orientation_helper_factory, axis_conversion, _check_axis_conversion
- from bpy.props import *
- import bmesh
- import re
- import glob
- import os
- import hashlib
- from struct import *
# Add-on registration metadata read by Blender's add-on manager.
bl_info = {
    "name": "Ninja Ripper mesh data (.rip)",
    "author": "Alexander Gavrilov",
    "version": (0, 2),
    "blender": (2, 77, 0),
    "location": "File > Import-Export > Ninja Ripper (.rip) ",
    "description": "Import Ninja Ripper mesh data",
    "warning": "",
    "category": "Import-Export",
}
def read_uint(fh):
    """Read one unsigned 32-bit integer (struct format 'I') from stream *fh*."""
    (value,) = unpack('I', fh.read(4))
    return value
def read_string(fh):
    """Read a NUL-terminated byte string from *fh* and decode it as cp437.

    Reading stops at the first NUL byte or at end of stream; the
    terminator is not included in the result.
    """
    # Accumulate in a bytearray: repeated bytes concatenation is quadratic,
    # and the original also shadowed the builtin name 'str'.
    buf = bytearray()
    while True:
        c = fh.read(1)
        if c == b'\0' or c == b'':
            return buf.decode('cp437')
        buf.extend(c)
def concat_attrs(datalists):
    """Concatenate per-vertex tuples from several parallel attribute lists.

    Given parallel lists of per-vertex sequences, returns one list whose
    entry i is the flat concatenation of datalists[0][i], datalists[1][i],
    and so on.  The input lists are expected to have equal length.
    """
    # zip(*datalists) walks the i-th element of every list in lockstep,
    # replacing the index-based loop of the original.
    return [[v for part in row for v in part] for row in zip(*datalists)]
class RipLogInfo(object):
    """Parses Ninja Ripper *.exe.log.txt files to recover the mapping
    between ripped mesh files and the texture stages that were bound
    when each mesh was saved."""

    def __init__(self):
        # Cache of parsed logs, keyed by lower-cased log directory path.
        self.log_file_cache = {}

    def verify_texture_match(self, basename, stages, texlist):
        """Check that the stage table from the log agrees with the texture
        names stored in the .rip file itself (case-insensitive).

        Prints a diagnostic and returns False on any mismatch."""
        if len(stages.keys()) != len(texlist):
            print('Texture count mismatch vs log for %s: %d vs %d' %
                  (basename, len(stages.keys()), len(texlist)))
            return False
        for i, key in enumerate(stages.keys()):
            if texlist[i].lower() != stages[key].lower():
                print('Texture name mismatch vs log for %s[%d]: %s vs %s' %
                      (basename, i, stages[key], texlist[i]))
                return False
        return True

    def get_texture_stages(self, filename, texlist):
        """Return the {stage_id: texture_name} table for *filename*, or None
        when no log is available or it disagrees with *texlist*."""
        dirname, basename = os.path.split(os.path.realpath(filename))
        if dirname == '' or basename == '':
            return None
        # The log file lives one directory above the per-capture directory.
        logdir, subdir = os.path.split(dirname)
        if logdir == '' or subdir == '':
            return None
        logkey = logdir.lower()
        if logkey not in self.log_file_cache:
            self.log_file_cache[logkey] = self.parse_log(logdir)
        logtable = self.log_file_cache[logkey]
        filetable = logtable.get(subdir.lower(), {})
        stages = filetable.get(basename.lower(), None)
        if stages and self.verify_texture_match(filename, stages, texlist):
            return stages
        else:
            return None

    def find_log(self, logdir):
        """Locate the first *.exe.log.txt file in *logdir*, or None."""
        if not os.path.isdir(logdir):
            return None
        for file in os.listdir(logdir):
            if file.lower().endswith(".exe.log.txt"):
                return os.path.join(logdir, file)
        return None

    def parse_log(self, logdir):
        """Parse the log in *logdir* into {subdir: {mesh_file: {stage: tex}}}.

        Texture-stage lines accumulate until a 'Mesh saved as' line flushes
        them into the table for that mesh file.  Returns {} when no log
        file exists."""
        logpath = self.find_log(logdir)
        if not logpath:
            return {}
        stage_pattern = re.compile(r'^\S+\s+\S+\s+Texture stage #(\d+)\s.*\\([^\\]+)\\(Tex_\d+_\d+\.dds)\s*$')
        mesh_pattern = re.compile(r'^\S+\s+\S+\s+Mesh saved as:.*\\([^\\]+)\\(Mesh_\d+\.rip)\s*$')
        logtable = {}
        stage_accum = {}
        # The log is written in cp437 (OEM) encoding; 'with' replaces the
        # original open/try/finally for guaranteed closing.
        with open(logpath, "rt", encoding='cp437') as fh:
            for line in fh:
                match = mesh_pattern.fullmatch(line)
                if match:
                    subdir = match.group(1).lower()
                    if subdir not in logtable:
                        logtable[subdir] = {}
                    logtable[subdir][match.group(2).lower()] = stage_accum
                    stage_accum = {}
                else:
                    match = stage_pattern.fullmatch(line)
                    if match:
                        stage_accum[int(match.group(1))] = match.group(3)
        return logtable
class HLSLShaderInfo(object):
    """Parses a disassembled D3D shader listing (as dumped by Ninja Ripper)
    to find which input registers and samplers the shader declares."""

    def __init__(self, fname):
        self.filename = fname
        self.lines = []            # parsed instructions: [opcode, operand, ...]
        self.version = None        # version token, e.g. 'vs_3_0' or 'ps_2_0'
        self.used_attrs = {}       # semantic name -> set of semantic indices
        self.used_samplers = {}    # sampler register number -> sampler type

    def parse_file(self):
        """Tokenize the shader file and collect dcl_* declarations.

        Returns True when the file starts with a valid vs_*/ps_* version
        token, False otherwise."""
        # Regex patterns are raw strings; the originals relied on '\s' etc.
        # surviving in plain strings, which is deprecated.
        with open(self.filename, "rt", encoding='cp437') as fh:
            comment_pattern = re.compile('//|#')
            split_pattern = re.compile(r'^\s*(\S+)(?:\s+(\S|\S.*\S))?\s*$')
            for line in fh:
                # Strip trailing comments before tokenizing.
                m = comment_pattern.search(line)
                if m:
                    line = line[0:m.start()]
                m = split_pattern.fullmatch(line.lower())
                if not m:
                    continue
                cmd = [m.group(1)]
                if m.group(2):
                    cmd.extend(map(lambda s: s.strip(), m.group(2).split(',')))
                self.lines.append(cmd)
        # Check valid version string:
        if len(self.lines) == 0 or not re.fullmatch(r'[pv]s_\d+_\d+', self.lines[0][0]):
            return False
        self.version = self.lines[0][0]
        # Scan for use declarations
        declname_pattern = re.compile(r'dcl_([a-z]+)(?:([0-9]+).*|[^a-z0-9].*)?')
        for cmd in self.lines:
            if len(cmd) < 2 or not cmd[0].startswith('dcl_'):
                continue
            if cmd[1].startswith('v'):
                # Input register declaration, e.g. 'dcl_texcoord1 v1'.
                m = declname_pattern.fullmatch(cmd[0])
                if m:
                    attr = m.group(1).upper()
                    id = int(m.group(2) or 0)
                    if attr not in self.used_attrs:
                        self.used_attrs[attr] = set([id])
                    else:
                        self.used_attrs[attr].add(id)
            elif cmd[1].startswith('s'):
                # Sampler declaration, e.g. 'dcl_2d s0'.
                m = re.match(r'^s(\d+)', cmd[1])
                if m:
                    self.used_samplers[int(m.group(1))] = cmd[0][4:]
        return True
class RipFileAttribute(object):
    """One vertex attribute of a .rip file: semantic, layout and data."""

    def __init__(self, fh):
        # Attribute header: NUL-terminated semantic plus four uint32 fields.
        self.semantic = read_string(fh)
        self.semantic_index = read_uint(fh)
        self.offset = read_uint(fh)
        self.size = read_uint(fh)
        self.end = self.offset + self.size
        self.items = read_uint(fh)
        # Per-item type codes: 0 -> float, 1 -> uint, 2 -> int;
        # anything else is read as uint.
        codes = ['f', 'I', 'i']
        fmt = ''
        for _ in range(self.items):
            type_id = read_uint(fh)
            fmt += codes[type_id] if type_id <= 2 else 'I'
        self.format = fmt
        self.data = []

    def get_hashtag(self):
        """Stable string describing this attribute's layout, for hashing."""
        return "[%s:%d:%d:%d:%s]" % (self.semantic, self.semantic_index,
                                     self.offset, self.size, self.format)

    def parse_vertex(self, buffer):
        """Decode this attribute's slice of one raw vertex block."""
        self.data.append(unpack(self.format, buffer[self.offset:self.end]))

    def as_floats(self, arity=4, divisor=1.0):
        """Return the data as float tuples of at most *arity* components,
        dividing non-float values by *divisor*."""
        if self.format == 'f' * min(arity, self.items):
            # Already all-float and short enough: return as stored.
            return self.data
        if self.format[0:arity] == 'f' * arity:
            # All-float prefix: just truncate each tuple.
            return [item[0:arity] for item in self.data]
        # Integer (or mixed) data: convert and normalize.
        return [tuple(float(v) / divisor for v in item[0:arity])
                for item in self.data]
class RipFile(object):
    """One Ninja Ripper .rip mesh dump: faces, vertex attributes, texture
    and shader file names, plus a content hash for duplicate detection."""

    def __init__(self, filename, riplog=None):
        self.filename = filename
        self.riplog = riplog              # optional RipLogInfo for stage lookup
        self.dirname = os.path.dirname(filename)
        self.basename = os.path.basename(filename)
        self.faces = []                   # (i0, i1, i2) index tuples
        self.attributes = []              # RipFileAttribute objects
        self.shaders = []                 # shader dump file names
        self.textures = []                # texture file names
        self.texture_stages = None        # {stage: texture} from log, if any
        self.num_verts = 0
        self.shader_vert = None           # parsed vertex HLSLShaderInfo
        self.shader_frag = None           # parsed pixel HLSLShaderInfo
        self.data_hash = ""               # sha1 over layout + faces + vertices

    def parse_file(self):
        """Read and decode the .rip file.

        Raises RuntimeError on bad magic or unsupported version."""
        with open(self.filename, "rb") as fh:
            magic = read_uint(fh)
            if magic != 0xDEADC0DE:
                raise RuntimeError("Invalid file magic: %08d" % (magic))
            version = read_uint(fh)
            if version != 4:
                raise RuntimeError("Invalid file version: %d" % (version))
            num_faces = read_uint(fh)
            self.num_verts = read_uint(fh)
            block_size = read_uint(fh)
            num_tex = read_uint(fh)
            num_shaders = read_uint(fh)
            num_attrs = read_uint(fh)
            # Hash layout, face and vertex data so identical meshes ripped
            # twice can be detected as duplicates.
            datahash = hashlib.sha1()
            for i in range(num_attrs):
                attr = RipFileAttribute(fh)
                self.attributes.append(attr)
                datahash.update(attr.get_hashtag().encode('utf-8'))
            for i in range(num_tex):
                self.textures.append(read_string(fh))
            if self.riplog:
                self.texture_stages = self.riplog.get_texture_stages(self.filename, self.textures)
            for i in range(num_shaders):
                self.shaders.append(read_string(fh))
            for i in range(num_faces):
                data = fh.read(4*3)
                face = unpack('III', data)
                # Omit degenerate triangles - they are sometimes used to merge strips
                if face[0] != face[1] and face[1] != face[2] and face[0] != face[2]:
                    self.faces.append(face)
                datahash.update(data)
            datahash.update(b"|")
            for i in range(self.num_verts):
                data = fh.read(block_size)
                datahash.update(data)
                for attr in self.attributes:
                    attr.parse_vertex(data)
            self.data_hash = datahash.hexdigest()

    def parse_shaders(self):
        """Locate and parse the shader dumps listed in the file."""
        search_dirs = [
            self.dirname,
            os.path.join(self.dirname, "..", "Shaders")
        ]
        for fname in self.shaders:
            for sdir in search_dirs:
                path = os.path.join(sdir, fname)
                if os.path.isfile(path):
                    shader = HLSLShaderInfo(path)
                    if shader.parse_file():
                        # 'vs_*' is a vertex shader, 'ps_*' a pixel shader.
                        if shader.version.startswith('v'):
                            self.shader_vert = shader
                        else:
                            self.shader_frag = shader
                    break

    def find_attrs(self, semantic):
        """Return all attributes with the given semantic name."""
        return [attr for attr in self.attributes if attr.semantic == semantic]

    def is_used_attr(self, attr):
        """True if the vertex shader (when known) reads this attribute."""
        if not self.shader_vert:
            return True
        used = self.shader_vert.used_attrs
        return attr.semantic in used and attr.semantic_index in used[attr.semantic]

    def get_textures(self, filter=True):
        """Return a {stage_or_index: texture_name} dict.

        When *filter* is true and the pixel shader is known, only textures
        bound to samplers the shader declares are returned."""
        samplers = None
        if filter and self.shader_frag:
            samplers = self.shader_frag.used_samplers
            # Shader declares no samplers at all -> no textures are used.
            # (The original 'if samplers and len(samplers) == 0' was dead
            # code, since an empty dict is falsy.)
            if len(samplers) == 0:
                return {}
        stages = self.texture_stages
        if not stages:
            return dict(enumerate(self.textures))
        if samplers is not None:
            # Keep only stages whose sampler index the shader uses.
            return dict([(id, stages[id]) for id in stages.keys() if id in samplers])
        # No shader info to filter by: report all logged stages.  (The
        # original crashed with a TypeError here when filter was true but
        # no fragment shader had been parsed.)
        return stages

    def has_textures(self, filter=True):
        """True if get_textures(filter) would return anything."""
        return len(self.get_textures(filter)) > 0
- class BaseDuplicateTracker(object):
- def __init__(self):
- self.file_hashes = {}
- self.hash_missing_textures = True
- def hash_file(self, fname):
- if not os.path.isfile(fname):
- return None
- try:
- hash = hashlib.sha1()
- with open(fname, "rb") as f:
- for chunk in iter(lambda: f.read(4096), b""):
- hash.update(chunk)
- return hash.hexdigest()
- except IOError as e:
- print("I/O error(%d): %s" % (e.errno, e.strerror))
- return None
- def get_file_hash(self, filename):
- fullpath = os.path.realpath(filename)
- if fullpath in self.file_hashes:
- return self.file_hashes[fullpath]
- hash = self.hash_file(fullpath)
- self.file_hashes[fullpath] = hash
- return hash
- def is_sharing_mesh(self):
- return False
- def create_texture(self, fullpath):
- try:
- teximage = bpy.data.images.load(fullpath, True)
- if teximage.users > 0:
- for tex in bpy.data.textures:
- if tex.type == 'IMAGE' and tex.image == teximage:
- return tex
- name,ext = os.path.splitext(os.path.basename(fullpath))
- texobj = bpy.data.textures.new(name, type='IM
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement