git.blender.org/blender.git
Diffstat (limited to 'release')
-rw-r--r--  release/scripts/3ds_import.py           | 832
-rw-r--r--  release/scripts/bpymodules/meshtools.py |  31
-rw-r--r--  release/scripts/bvh2arm.py              |   6
-rw-r--r--  release/scripts/lightwave_export.py     |  21
-rw-r--r--  release/scripts/lightwave_import.py     | 801
-rw-r--r--  release/scripts/nendo_export.py         |  21
-rw-r--r--  release/scripts/nendo_import.py         |  21
-rw-r--r--  release/scripts/off_export.py           |  23
-rw-r--r--  release/scripts/off_import.py           |  24
-rw-r--r--  release/scripts/radiosity_export.py     |  21
-rw-r--r--  release/scripts/radiosity_import.py     |  21
-rw-r--r--  release/scripts/raw_export.py           |  22
-rw-r--r--  release/scripts/raw_import.py           |  21
-rw-r--r--  release/scripts/save_theme.py           |  27
-rw-r--r--  release/scripts/slp_import.py           |  21
-rw-r--r--  release/scripts/truespace_export.py     |  22
-rw-r--r--  release/scripts/truespace_import.py     |  21
-rw-r--r--  release/scripts/videoscape_export.py    |  22
-rw-r--r--  release/scripts/wings_export.py         |  21
-rw-r--r--  release/scripts/wings_import.py         | 191
20 files changed, 1444 insertions, 746 deletions
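
For orientation before the 3ds_import.py diff: the rewritten importer treats a .3ds file as a tree of chunks, each starting with a 6-byte header (a little-endian unsigned short ID followed by an unsigned int length that includes the header itself, the "<HI" layout used by the chunk class in the patch). Below is a minimal standalone sketch of that walk, independent of the Blender API; the chunk IDs come from the constants in the patch, while the function name and printed labels are illustrative only.

import struct

def walk_chunks(path):
	# Each chunk header is 6 bytes: an unsigned short ID plus an unsigned
	# int length (little-endian); the length counts the header itself --
	# the same "<HI" layout the chunk class in the patch uses.
	file = open(path, "rb")
	header = file.read(6)
	while len(header) == 6:
		chunk_id, length = struct.unpack("<HI", header)
		print "chunk 0x%04X, %d bytes" % (chunk_id, length)
		if chunk_id in (0x4D4D, 0x3D3D, 0xAFFF):
			pass                        # container chunk: descend into its sub-chunks
		elif chunk_id == 0x4000:
			name = ""                   # an object block carries a null-terminated
			char = file.read(1)         # name before its sub-chunks
			while char != "\x00":
				name = name + char
				char = file.read(1)
			print "object: %s" % name
		else:
			file.seek(length - 6, 1)    # leaf or unknown chunk: skip its payload
		header = file.read(6)
	file.close()
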
diff --git a/release/scripts/3ds_import.py b/release/scripts/3ds_import.py
index 6bd81c2606d..017ec74123f 100644
--- a/release/scripts/3ds_import.py
+++ b/release/scripts/3ds_import.py
@@ -4,21 +4,30 @@
Name: '3D Studio (.3ds)...'
Blender: 237
Group: 'Import'
-Tooltip: 'Import from 3DS file format (.3ds).'
+Tooltip: 'Import from 3DS file format. (.3ds)'
"""
__author__ = ["Bob Holcomb", "Richard Lärkäng", "Damien McGinnes", "Campbell Barton"]
__url__ = ("blender", "elysiun", "http://www.gametutorials.com")
-__version__ = "0.82"
+__version__ = "0.92"
__bpydoc__ = """\
3ds Importer
-This script imports a 3ds file and the materials into blender for editing.
+This script imports a 3ds file and the materials into Blender for editing.
-Loader is based on 3ds loader from www.gametutorials.com(Thanks DigiBen).
+Loader is based on 3ds loader from www.gametutorials.com (Thanks DigiBen).
+
+Changes:
+
+0.92<br>
+- Added support for diffuse, alpha, spec, bump maps in a single material
+
+0.9<br>
+- Reorganized code into object/material block functions<br>
+- Use of Matrix() to copy matrix data<br>
+- added support for material transparency<br>
-Changes:<br>
0.81a (fork- not 0.9) Campbell Barton 2005-06-08<br>
- Simplified import code<br>
- Never overwrite data<br>
@@ -27,7 +36,7 @@ Changes:<br>
0.81 Damien McGinnes 2005-01-09<br>
- handle missing images better<br>
-
+
0.8 Damien McGinnes 2005-01-08<br>
- copies sticky UV coords to face ones<br>
- handles images better<br>
@@ -35,9 +44,11 @@ Changes:<br>
"""
+# $Id$
+#
# ***** BEGIN GPL LICENSE BLOCK *****
#
-# Script copyright (C) Bob Holcomb
+# Script copyright (C) Bob Holcomb
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -56,88 +67,85 @@ Changes:<br>
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
+
# Importing modules
import Blender
-from Blender import NMesh, Scene, Object, Material, Image
+from Blender import NMesh, Scene, Object, Material, Image, Texture
import sys, struct, string
import os
-#this script imports uvcoords as sticky vertex coords
-#this parameter enables copying these to face uv coords
-#which shold be more useful.
-
+######################################################
+# Data Structures
+######################################################
+#----- Primary Chunk,
+PRIMARY = long("0x4D4D",16) # should be aat the beginning of each file
+VERSION = long("0x0002",16) #This gives the version of the .3ds file
+EDITOR_BLOCK = long("0x3D3D",16) #this is the Editor Data block, contains objects, materials
+KEYFRAME_BLOCK = long("0xB000",16) #This is the header for all of the key frame info
+
+#------ sub defines of EDITOR_BLOCK
+MATERIAL_BLOCK = long("0xAFFF",16) #This stores the Material info
+OBJECT_BLOCK = long("0x4000",16) #This stores the Object,Camera,Light
+
+#------ sub defines of OBJECT_BLOCK
+OBJECT_MESH = long("0x4100",16) # This lets us know that we are reading a new object
+OBJECT_LIGHT = long("0x4600",16) # This lets us know we are reading a light object
+OBJECT_CAMERA = long("0x4700",16) # This lets us know we are reading a camera object
+
+#------ sub defines of OBJECT_MESH
+MESH_VERTICES = long("0x4110",16) # The objects vertices
+MESH_FACES = long("0x4120",16) # The objects faces
+MESH_MATERIAL = long("0x4130",16) # This is found if the object has a material, either texture map or color
+MESH_UV = long("0x4140",16) # The UV texture coordinates
+MESH_TRANS_MATRIX = long("0x4160",16) # The Object Matrix
+MESH_COLOR = long("0x4165",16) # The color of the object
+MESH_TEXTURE_INFO = long("0x470",16) # Info about the Object Texture
+
+#------ sub defines of OBJECT_CAMERA
+CAMERA_CONE = long("0x4710",16) # The camera see cone
+CAMERA_RANGES = long("0x4720",16) # The camera range values
+
+#------ sub defines of OBJECT_LIGHT
+LIGHT_SPOTLIGHT = long("0x4610",16) # A spotlight
+LIGHT_ATTENUATE = long("0x4625",16) # Light attenuation values
+
+
+#------ sub defines of MATERIAL_BLOCK
+MAT_NAME = long("0xA000",16) # This holds the material name
+MAT_AMBIENT = long("0xA010",16) # Ambient color of the object/material
+MAT_DIFFUSE = long("0xA020",16) # This holds the color of the object/material
+MAT_SPECULAR = long("0xA030",16) # Specular color of the object/material
+MAT_SHINESS = long("0xA040",16) # ??
+MAT_TRANSPARENCY= long("0xA050",16) # Transparency value of material
+MAT_SELF_ILLUM = long("0xA080",16) # Self Illumination value of material
+MAT_WIRE = long("0xA085",16) # Only renders wireframe
+
+MAT_TEXTURE_MAP = long("0xA200",16) # This is a header for a new texture map
+MAT_SPECULAR_MAP= long("0xA204",16) # This is a header for a new specular map
+MAT_OPACITY_MAP = long("0xA210",16) # This is a header for a new opacity map
+MAT_REFLECTION_MAP= long("0xA220",16) # This is a header for a new reflection map
+MAT_BUMP_MAP = long("0xA230",16) # This is a header for a new bump map
+MAT_MAP_FILENAME= long("0xA300",16) # This holds the file name of the texture
+#lots more to add here for maps
-#===========================================================================#
-# Returns unique name of object/mesh (stops overwriting existing meshes) #
-#===========================================================================#
-def getUniqueName(name):
- newName = name
- uniqueInt = 0
- while 1:
- try:
- ob = Object.Get(newName)
- # Okay, this is working, so lets make a new name
- newName = '%s.%d' % (name, uniqueInt)
- uniqueInt +=1
- except AttributeError:
- if newName not in NMesh.GetNames():
- return newName
- else:
- newName = '%s.%d' % (name, uniqueInt)
- uniqueInt +=1
+######################################################
+# Globals
+######################################################
+TEXTURE_DICT={}
+MATERIAL_DICT={}
######################################################
-# Data Structures
+# Chunk Class
######################################################
-
-#Some of the chunks that we will see
-#----- Primary Chunk, at the beginning of each file
-PRIMARY= long("0x4D4D",16)
-
-#------ Main Chunks
-OBJECTINFO = long("0x3D3D",16); #This gives the version of the mesh and is found right before the material and object information
-VERSION = long("0x0002",16); #This gives the version of the .3ds file
-EDITKEYFRAME= long("0xB000",16); #This is the header for all of the key frame info
-
-#------ sub defines of OBJECTINFO
-MATERIAL=45055 #0xAFFF // This stored the texture info
-OBJECT=16384 #0x4000 // This stores the faces, vertices, etc...
-
-#>------ sub defines of MATERIAL
-MATNAME = long("0xA000",16); # This holds the material name
-MATAMBIENT = long("0xA010",16); # Ambient color of the object/material
-MATDIFFUSE = long("0xA020",16); # This holds the color of the object/material
-MATSPECULAR = long("0xA030",16); # SPecular color of the object/material
-MATSHINESS = long("0xA040",16); # ??
-MATMAP = long("0xA200",16); # This is a header for a new material
-MATMAPFILE = long("0xA300",16); # This holds the file name of the texture
-
-#>------ sub defines of OBJECT
-OBJECT_MESH = long("0x4100",16); # This lets us know that we are reading a new object
-OBJECT_LIGHT = long("0x4600",16); # This lets un know we are reading a light object
-OBJECT_CAMERA= long("0x4700",16); # This lets un know we are reading a camera object
-
-#>------ sub defines of CAMERA
-OBJECT_CAM_RANGES= long("0x4720",16); # The camera range values
-
-#>------ sub defines of OBJECT_MESH
-OBJECT_VERTICES = long("0x4110",16); # The objects vertices
-OBJECT_FACES = long("0x4120",16); # The objects faces
-OBJECT_MATERIAL = long("0x4130",16); # This is found if the object has a material, either texture map or color
-OBJECT_UV = long("0x4140",16); # The UV texture coordinates
-OBJECT_TRANS_MATRIX = long("0x4160",16); # The Object Matrix
-
-#the chunk class
class chunk:
ID=0
length=0
bytes_read=0
- #we don't read in the bytes_read, we compute that
binary_format="<HI"
def __init__(self):
@@ -146,378 +154,484 @@ class chunk:
self.bytes_read=0
def dump(self):
- print "ID: ", self.ID
print "ID in hex: ", hex(self.ID)
print "length: ", self.length
print "bytes_read: ", self.bytes_read
+######################################################
+# Helper functions
+######################################################
def read_chunk(file, chunk):
- temp_data=file.read(struct.calcsize(chunk.binary_format))
- data=struct.unpack(chunk.binary_format, temp_data)
- chunk.ID=data[0]
- chunk.length=data[1]
- #update the bytes read function
- chunk.bytes_read=6
-
- #if debugging
- #chunk.dump()
+ temp_data=file.read(struct.calcsize(chunk.binary_format))
+ data=struct.unpack(chunk.binary_format, temp_data)
+ chunk.ID=data[0]
+ chunk.length=data[1]
+ chunk.bytes_read=6
+
+def skip_to_end(file, skip_chunk):
+ buffer_size=skip_chunk.length-skip_chunk.bytes_read
+ binary_format=str(buffer_size)+"c"
+ temp_data=file.read(struct.calcsize(binary_format))
+ skip_chunk.bytes_read+=buffer_size
def read_string(file):
s=""
index=0
- #print "reading a string"
- #read in the characters till we get a null character
+ #read the first character
temp_data=file.read(1)
data=struct.unpack("c", temp_data)
s=s+(data[0])
- #print "string: ",s
+ #read in the characters till we get a null character
while(ord(s[index])!=0):
index+=1
temp_data=file.read(1)
data=struct.unpack("c", temp_data)
s=s+(data[0])
- #print "string: ",s
-
- #remove the null character from the string
- the_string=s[:-1]
+ the_string=s[:-1] #remove the null character from the string
return str(the_string)
-######################################################
-# IMPORT
-######################################################
-def process_next_object_chunk(file, previous_chunk):
- new_chunk=chunk()
- temp_chunk=chunk()
-
- while (previous_chunk.bytes_read<previous_chunk.length):
- #read the next chunk
- read_chunk(file, new_chunk)
-
+def getUniqueName(name):
+ newName = name
+ uniqueInt = 0
+ while 1:
+ try:
+ ob = Object.Get(newName)
+ # Okay, this is working, so lets make a new name
+ newName = '%s.%d' % (name, uniqueInt)
+ uniqueInt +=1
+ except AttributeError:
+ if newName not in NMesh.GetNames():
+ return newName
+ else:
+ newName = '%s.%d' % (name, uniqueInt)
+ uniqueInt +=1
-def process_next_chunk(file, previous_chunk, new_object_list):
- contextObName = None
- #contextLamp = None
- contextMaterial = None
- contextMatrix = Blender.Mathutils.Matrix(); contextMatrix.identity()
- contextMesh = None
-
- TEXDICT={}
- MATDICT={}
+def add_texture_to_material(image, texture, material, mapto):
+ if mapto=="DIFFUSE":
+ map=Texture.MapTo.COL
+ elif mapto=="SPECULAR":
+ map=Texture.MapTo.SPEC
+ elif mapto=="OPACITY":
+ map=Texture.MapTo.ALPHA
+ elif mapto=="BUMP":
+ map=Texture.MapTo.NOR
+ else:
+ print "/tError: Cannot map to ", mapto
+ return
- objectList = [] # Keep a list of imported objects.
+ texture.setImage(image)
+ texture_list=material.getTextures()
+ index=0
+ for tex in texture_list:
+ if tex==None:
+ material.setTexture(index,texture,Texture.TexCo.OBJECT,map)
+ return
+ else:
+ index+=1
+ if index>10:
+ print "/tError: Cannot add diffuse map. Too many textures"
+######################################################
+# Process an object (tri-mesh, Camera, or Light)
+######################################################
+def process_object_block(file, previous_chunk, object_list):
# Localspace variable names, faster.
- STRUCT_SIZE_1CHAR = struct.calcsize("c")
STRUCT_SIZE_2FLOAT = struct.calcsize("2f")
STRUCT_SIZE_3FLOAT = struct.calcsize("3f")
STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize("H")
STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize("4H")
STRUCT_SIZE_4x3MAT = struct.calcsize("ffffffffffff")
+ #spare chunks
+ new_chunk=chunk()
+ temp_chunk=chunk()
- def putContextMesh(myContextMesh):
- INV_MAT = Blender.Mathutils.CopyMat(contextMatrix)
- INV_MAT.invert()
- contextMesh.transform(INV_MAT)
- objectList.append(NMesh.PutRaw(contextMesh))
- objectList[-1].name = contextObName
- objectList[-1].setMatrix(contextMatrix)
+ global TEXTURE_DICT
+ global MATERIAL_DICT
+ #don't know which one we're making, so let's have a place for one of each
+ new_mesh=None
+ new_light=None
+ new_camera=None
+
+ #all objects have a name (first thing)
+ tempName = str(read_string(file))
+ obj_name = getUniqueName( tempName )
+ previous_chunk.bytes_read += (len(tempName)+1)
- #a spare chunk
- new_chunk=chunk()
- temp_chunk=chunk()
-
- #loop through all the data for this chunk (previous chunk) and see what it is
while (previous_chunk.bytes_read<previous_chunk.length):
- #read the next chunk
- #print "reading a chunk"
read_chunk(file, new_chunk)
+
+ if (new_chunk.ID==OBJECT_MESH):
+ new_mesh=Blender.NMesh.New(obj_name)
+ while (new_chunk.bytes_read<new_chunk.length):
+ read_chunk(file, temp_chunk)
+
+ if (temp_chunk.ID==MESH_VERTICES):
+ temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ data=struct.unpack("H", temp_data)
+ temp_chunk.bytes_read+=2
+ num_verts=data[0]
+ for counter in range (num_verts):
+ temp_data=file.read(STRUCT_SIZE_3FLOAT)
+ temp_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+ data=struct.unpack("3f", temp_data)
+ v=NMesh.Vert(data[0],data[1],data[2])
+ new_mesh.verts.append(v)
+
+ elif (temp_chunk.ID==MESH_FACES):
+ temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ data=struct.unpack("H", temp_data)
+ temp_chunk.bytes_read+=2
+ num_faces=data[0]
+ for counter in range(num_faces):
+ temp_data=file.read(STRUCT_SIZE_4UNSIGNED_SHORT)
+ temp_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT #4 short ints x 2 bytes each
+ data=struct.unpack("4H", temp_data)
+ #insert the mesh info into the faces, don't worry about data[3] it is a 3D studio thing
+ f = NMesh.Face( [new_mesh.verts[data[i]] for i in xrange(3)] )
+ f.uv = [ tuple(new_mesh.verts[data[i]].uvco[:2]) for i in xrange(3) ]
+ new_mesh.faces.append(f)
+
+ elif (temp_chunk.ID==MESH_MATERIAL):
+ material_name=""
+ material_name=str(read_string(file))
+ temp_chunk.bytes_read += len(material_name)+1 # plus one for the null character that ended the string
+ material_found=0
+ for mat in Material.Get():
+ if(mat.name==material_name):
+ if len(new_mesh.materials) >= 15:
+ print "\tCant assign more than 16 materials per mesh, keep going..."
+ break
+ else:
+ meshHasMat = 0
+ for myMat in new_mesh.materials:
+ if myMat.name == mat.name:
+ meshHasMat = 1
+ if meshHasMat == 0:
+ new_mesh.addMaterial(mat)
+ material_found=1
+ #figure out what material index this is for the mesh
+ for mat_counter in range(len(new_mesh.materials)):
+ if new_mesh.materials[mat_counter].name == material_name:
+ mat_index=mat_counter
+ break # get out of this for loop so we don't accidentally set material_found back to 0
+ else:
+ material_found=0
+
+ if material_found == 1:
+ #read the number of faces using this material
+ temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ data=struct.unpack("H", temp_data)
+ temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+ num_faces_using_mat=data[0]
+ #list of faces using mat
+ for face_counter in range(num_faces_using_mat):
+ temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+ data=struct.unpack("H", temp_data)
+ new_mesh.faces[data[0]].materialIndex = mat_index
+ try:
+ mname = MATERIAL_DICT[mat.name]
+ new_mesh.faces[data[0]].image = TEXTURE_DICT[mname]
+ except:
+ continue
+ else:
+ #read past the information about the material you couldn't find
+ skip_to_end(file,temp_chunk)
+
+ elif (new_chunk.ID == MESH_UV):
+ temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ data=struct.unpack("H", temp_data)
+ temp_chunk.bytes_read+=2
+ num_uv=data[0]
+
+ for counter in range(num_uv):
+ temp_data=file.read(STRUCT_SIZE_2FLOAT)
+ temp_chunk.bytes_read += STRUCT_SIZE_2FLOAT #2 float x 4 bytes each
+ data=struct.unpack("2f", temp_data)
+ #insert the UV coords in the vertex data
+ new_mesh.verts[counter].uvco = data
+
+ elif (new_chunk.ID == MESH_TRANS_MATRIX):
+ temp_data=file.read(STRUCT_SIZE_4x3MAT)
+ data = list( struct.unpack("ffffffffffff", temp_data) )
+ temp_chunk.bytes_read += STRUCT_SIZE_4x3MAT
+ new_matrix = Blender.Mathutils.Matrix(\
+ data[:3] + [0],\
+ data[3:6] + [0],\
+ data[6:9] + [0],\
+ data[9:] + [1])
+ new_mesh.setMatrix(new_matrix)
+ else:
+ skip_to_end(file, temp_chunk)
+
+ new_chunk.bytes_read+=temp_chunk.bytes_read
+
+ elif (new_chunk.ID==OBJECT_LIGHT):
+ skip_to_end(file,new_chunk)
- #is it a Version chunk?
- if (new_chunk.ID==VERSION):
- #print "found a VERSION chunk"
- #read in the version of the file
- #it's an unsigned short (H)
- temp_data=file.read(struct.calcsize("I"))
- data=struct.unpack("I", temp_data)
- version=data[0]
- new_chunk.bytes_read+=4 #read the 4 bytes for the version number
- #this loader works with version 3 and below, but may not with 4 and above
- if (version>3):
- print "\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version
-
- #is it an object info chunk?
- elif (new_chunk.ID==OBJECTINFO):
- # print "found an OBJECTINFO chunk"
- process_next_chunk(file, new_chunk, new_object_list)
+ elif (new_chunk.ID==OBJECT_CAMERA):
+ skip_to_end(file,new_chunk)
- #keep track of how much we read in the main chunk
- new_chunk.bytes_read+=temp_chunk.bytes_read
-
- #is it an object chunk?
- elif (new_chunk.ID==OBJECT):
- # print "found an OBJECT chunk"
- tempName = str(read_string(file))
- contextObName = getUniqueName( tempName )
- new_chunk.bytes_read += (len(tempName)+1)
+ else: #don't know what kind of object it is
+ skip_to_end(file,new_chunk)
- #is it a material chunk?
- elif (new_chunk.ID==MATERIAL):
- # print "found a MATERIAL chunk"
- contextMaterial = Material.New()
+ if new_mesh!=None:
+ object_list.append(NMesh.PutRaw(new_mesh))
+ if new_light!=None:
+ object_list.append(new_light)
+ if new_camera!=None:
+ object_list.append(new_camera)
- elif (new_chunk.ID==MATNAME):
- # print "Found a MATNAME chunk"
+ previous_chunk.bytes_read+=new_chunk.bytes_read
+
+######################################################
+# Process a Material
+######################################################
+def process_material_block(file, previous_chunk):
+ # Localspace variable names, faster.
+ STRUCT_SIZE_3BYTE = struct.calcsize("3B")
+ STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize("H")
+
+ #spare chunks
+ new_chunk=chunk()
+ temp_chunk=chunk()
+
+ global TEXTURE_DICT
+ global MATERIAL_DICT
+
+ new_material=Blender.Material.New()
+
+ while (previous_chunk.bytes_read<previous_chunk.length):
+ #read the next chunk
+ read_chunk(file, new_chunk)
+
+ if (new_chunk.ID==MAT_NAME):
material_name=""
material_name=str(read_string(file))
-
- #plus one for the null character that ended the string
- new_chunk.bytes_read+=(len(material_name)+1)
-
- contextMaterial.setName(material_name)
- MATDICT[material_name] = contextMaterial.name
+ new_chunk.bytes_read+=(len(material_name)+1) #plus one for the null character that ended the string
+ new_material.setName(material_name)
+ MATERIAL_DICT[material_name] = new_material.name
- elif (new_chunk.ID==MATAMBIENT):
- # print "Found a MATAMBIENT chunk"
-
+ elif (new_chunk.ID==MAT_AMBIENT):
read_chunk(file, temp_chunk)
- temp_data=file.read(struct.calcsize("3B"))
+ temp_data=file.read(STRUCT_SIZE_3BYTE)
data=struct.unpack("3B", temp_data)
temp_chunk.bytes_read+=3
- contextMaterial.mirCol = [float(col)/255 for col in data] # data [0,1,2] == rgb
+ new_material.mirCol = [float(col)/255 for col in data] # data [0,1,2] == rgb
new_chunk.bytes_read+=temp_chunk.bytes_read
- elif (new_chunk.ID==MATDIFFUSE):
- # print "Found a MATDIFFUSE chunk"
-
+ elif (new_chunk.ID==MAT_DIFFUSE):
read_chunk(file, temp_chunk)
- temp_data=file.read(struct.calcsize("3B"))
+ temp_data=file.read(STRUCT_SIZE_3BYTE)
data=struct.unpack("3B", temp_data)
temp_chunk.bytes_read+=3
- contextMaterial.rgbCol = [float(col)/255 for col in data] # data [0,1,2] == rgb
+ new_material.rgbCol = [float(col)/255 for col in data] # data [0,1,2] == rgb
new_chunk.bytes_read+=temp_chunk.bytes_read
- elif (new_chunk.ID==MATSPECULAR):
- # print "Found a MATSPECULAR chunk"
-
+ elif (new_chunk.ID==MAT_SPECULAR):
read_chunk(file, temp_chunk)
- temp_data=file.read(struct.calcsize("3B"))
+ temp_data=file.read(STRUCT_SIZE_3BYTE)
data=struct.unpack("3B", temp_data)
temp_chunk.bytes_read+=3
-
- contextMaterial.specCol = [float(col)/255 for col in data] # data [0,1,2] == rgb
+ new_material.specCol = [float(col)/255 for col in data] # data [0,1,2] == rgb
new_chunk.bytes_read+=temp_chunk.bytes_read
- elif (new_chunk.ID==MATMAP):
- # print "Found a MATMAP chunk"
- pass # This chunk has no data
-
- elif (new_chunk.ID==MATMAPFILE):
- # print "Found a MATMAPFILE chunk"
- texture_name=""
- texture_name=str(read_string(file))
- try:
- img = Image.Load(texture_name)
- TEXDICT[contextMaterial.name]=img
- except IOError:
- fname = os.path.join( os.path.dirname(FILENAME), texture_name)
- try:
- img = Image.Load(fname)
- TEXDICT[contextMaterial.name]=img
- except IOError:
- print "\tERROR: failed to load image ",texture_name
- TEXDICT[contextMaterial.name] = None # Dummy
+ elif (new_chunk.ID==MAT_TEXTURE_MAP):
+ new_texture=Blender.Texture.New('Diffuse')
+ new_texture.setType('Image')
+ while (new_chunk.bytes_read<new_chunk.length):
+ read_chunk(file, temp_chunk)
+
+ if (temp_chunk.ID==MAT_MAP_FILENAME):
+ texture_name=""
+ texture_name=str(read_string(file))
+ try:
+ img = Image.Load(texture_name)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ fname = os.path.join( os.path.dirname(FILENAME), texture_name)
+ try:
+ img = Image.Load(fname)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ print "\tERROR: failed to load image ",texture_name
+ TEXTURE_DICT[new_material.name] = None # Dummy
+ img=Blender.Image.New(fname,1,1,24) #blank image
+ new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
- #plus one for the null character that gets removed
- new_chunk.bytes_read += (len(texture_name)+1)
-
-
- elif (new_chunk.ID==OBJECT_MESH):
- # print "Found an OBJECT_MESH chunk"
- if contextMesh != None: # Write context mesh if we have one.
- putContextMesh(contextMesh)
-
- contextMesh = NMesh.New()
-
- # Reset matrix
- contextMatrix = Blender.Mathutils.Matrix(); contextMatrix.identity()
+ else:
+ skip_to_end(file, temp_chunk)
+
+ new_chunk.bytes_read+=temp_chunk.bytes_read
- elif (new_chunk.ID==OBJECT_VERTICES):
- # print "Found an OBJECT_VERTICES chunk"
- #print "object_verts: length: ", new_chunk.length
- temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
- data=struct.unpack("H", temp_data)
- new_chunk.bytes_read+=2
- num_verts=data[0]
- # print "number of verts: ", num_verts
- for counter in range (num_verts):
- temp_data=file.read(STRUCT_SIZE_3FLOAT)
- new_chunk.bytes_read += STRUCT_SIZE_3FLOAT #12: 3 floats x 4 bytes each
- data=struct.unpack("3f", temp_data)
- v=NMesh.Vert(data[0],data[1],data[2])
- contextMesh.verts.append(v)
- #print "object verts: bytes read: ", new_chunk.bytes_read
-
- elif (new_chunk.ID==OBJECT_FACES):
- # print "Found an OBJECT_FACES chunk"
- #print "object faces: length: ", new_chunk.length
- temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
- data=struct.unpack("H", temp_data)
- new_chunk.bytes_read+=2
- num_faces=data[0]
- #print "number of faces: ", num_faces
-
- for counter in range(num_faces):
- temp_data=file.read(STRUCT_SIZE_4UNSIGNED_SHORT)
- new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT #4 short ints x 2 bytes each
- data=struct.unpack("4H", temp_data)
+ #add the map to the material in the right channel
+ add_texture_to_material(img, new_texture, new_material, "DIFFUSE")
- #insert the mesh info into the faces, don't worry about data[3] it is a 3D studio thing
- f = NMesh.Face( [contextMesh.verts[data[i]] for i in xrange(3) ] )
- f.uv = [ tuple(contextMesh.verts[data[i]].uvco[:2]) for i in xrange(3) ]
- contextMesh.faces.append(f)
- #print "object faces: bytes read: ", new_chunk.bytes_read
-
- elif (new_chunk.ID==OBJECT_MATERIAL):
- # print "Found an OBJECT_MATERIAL chunk"
- material_name=""
- material_name=str(read_string(file))
- new_chunk.bytes_read += len(material_name)+1 # remove 1 null character.
-
- #look up the material in all the materials
- material_found=0
- for mat in Material.Get():
+ elif (new_chunk.ID==MAT_SPECULAR_MAP):
+ new_texture=Blender.Texture.New('Specular')
+ new_texture.setType('Image')
+ while (new_chunk.bytes_read<new_chunk.length):
+ read_chunk(file, temp_chunk)
- #found it, add it to the mesh
- if(mat.name==material_name):
- if len(contextMesh.materials) >= 15:
- print "\tCant assign more than 16 materials per mesh, keep going..."
- break
- else:
- meshHasMat = 0
- for myMat in contextMesh.materials:
- if myMat.name == mat.name:
- meshHasMat = 1
-
- if meshHasMat == 0:
- contextMesh.addMaterial(mat)
- material_found=1
-
- #figure out what material index this is for the mesh
- for mat_counter in range(len(contextMesh.materials)):
- if contextMesh.materials[mat_counter].name == material_name:
- mat_index=mat_counter
- #print "material index: ",mat_index
-
-
- break # get out of this for loop so we don't accidentally set material_found back to 0
+ if (temp_chunk.ID==MAT_MAP_FILENAME):
+ texture_name=""
+ texture_name=str(read_string(file))
+ try:
+ img = Image.Load(texture_name)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ fname = os.path.join( os.path.dirname(FILENAME), texture_name)
+ try:
+ img = Image.Load(fname)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ print "\tERROR: failed to load image ",texture_name
+ TEXTURE_DICT[new_material.name] = None # Dummy
+ img=Blender.Image.New(fname,1,1,24) #blank image
+ new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
else:
- material_found=0
- # print "Not matching: ", mat.name, " and ", material_name
-
- if material_found == 1:
- contextMaterial = mat
- #read the number of faces using this material
- temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
- data=struct.unpack("H", temp_data)
- new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
- num_faces_using_mat=data[0]
-
- #list of faces using mat
- for face_counter in range(num_faces_using_mat):
- temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
- new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
- data=struct.unpack("H", temp_data)
- contextMesh.faces[data[0]].materialIndex = mat_index
-
+ skip_to_end(file, temp_chunk)
+
+ new_chunk.bytes_read+=temp_chunk.bytes_read
+
+ #add the map to the material in the right channel
+ add_texture_to_material(img, new_texture, new_material, "SPECULAR")
+
+ elif (new_chunk.ID==MAT_OPACITY_MAP):
+ new_texture=Blender.Texture.New('Opacity')
+ new_texture.setType('Image')
+ while (new_chunk.bytes_read<new_chunk.length):
+ read_chunk(file, temp_chunk)
+
+ if (temp_chunk.ID==MAT_MAP_FILENAME):
+ texture_name=""
+ texture_name=str(read_string(file))
try:
- mname = MATDICT[contextMaterial.name]
- contextMesh.faces[data[0]].image = TEXDICT[mname]
- except:
- continue
- else:
- #read past the information about the material you couldn't find
- #print "Couldn't find material. Reading past face material info"
- buffer_size=new_chunk.length-new_chunk.bytes_read
- binary_format=str(buffer_size)+"c"
- temp_data=file.read(struct.calcsize(binary_format))
- new_chunk.bytes_read+=buffer_size
-
- #print "object mat: bytes read: ", new_chunk.bytes_read
+ img = Image.Load(texture_name)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ fname = os.path.join( os.path.dirname(FILENAME), texture_name)
+ try:
+ img = Image.Load(fname)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ print "\tERROR: failed to load image ",texture_name
+ TEXTURE_DICT[new_material.name] = None # Dummy
+ img=Blender.Image.New(fname,1,1,24) #blank image
+ new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
+ else:
+ skip_to_end(file, temp_chunk)
+
+ new_chunk.bytes_read+=temp_chunk.bytes_read
- elif (new_chunk.ID == OBJECT_UV):
- # print "Found an OBJECT_UV chunk"
- temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
- data=struct.unpack("H", temp_data)
- new_chunk.bytes_read+=2
- num_uv=data[0]
+ #add the map to the material in the right channel
+ add_texture_to_material(img, new_texture, new_material, "OPACITY")
- for counter in range(num_uv):
- temp_data=file.read(STRUCT_SIZE_2FLOAT)
- new_chunk.bytes_read += STRUCT_SIZE_2FLOAT #2 float x 4 bytes each
- data=struct.unpack("2f", temp_data)
+ elif (new_chunk.ID==MAT_BUMP_MAP):
+ new_texture=Blender.Texture.New('Bump')
+ new_texture.setType('Image')
+ while (new_chunk.bytes_read<new_chunk.length):
+ read_chunk(file, temp_chunk)
- #insert the insert the UV coords in the vertex data
- contextMesh.verts[counter].uvco = data
-
- elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
- # print "Found an OBJECT_TRANS_MATRIX chunk"
+ if (temp_chunk.ID==MAT_MAP_FILENAME):
+ texture_name=""
+ texture_name=str(read_string(file))
+ try:
+ img = Image.Load(texture_name)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ fname = os.path.join( os.path.dirname(FILENAME), texture_name)
+ try:
+ img = Image.Load(fname)
+ TEXTURE_DICT[new_material.name]=img
+ except IOError:
+ print "\tERROR: failed to load image ",texture_name
+ TEXTURE_DICT[new_material.name] = None # Dummy
+ img=Blender.Image.New(fname,1,1,24) #blank image
+ new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
+ else:
+ skip_to_end(file, temp_chunk)
+
+ new_chunk.bytes_read+=temp_chunk.bytes_read
+
+ #add the map to the material in the right channel
+ add_texture_to_material(img, new_texture, new_material, "BUMP")
- temp_data=file.read(STRUCT_SIZE_4x3MAT)
- data = list( struct.unpack("ffffffffffff", temp_data) )
- new_chunk.bytes_read += STRUCT_SIZE_4x3MAT
+ elif (new_chunk.ID==MAT_TRANSPARENCY):
+ read_chunk(file, temp_chunk)
+ temp_data=file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ data=struct.unpack("H", temp_data)
+ temp_chunk.bytes_read+=2
+ new_material.setAlpha(1-(float(data[0])/100))
+ new_chunk.bytes_read+=temp_chunk.bytes_read
- contextMatrix = Blender.Mathutils.Matrix(\
- data[:3] + [0],\
- data[3:6] + [0],\
- data[6:9] + [0],\
- data[9:] + [1])
+ else:
+ skip_to_end(file,new_chunk)
-
-
- else: #(new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
- # print "skipping to end of this chunk"
- buffer_size=new_chunk.length-new_chunk.bytes_read
- binary_format=str(buffer_size)+"c"
- temp_data=file.read(struct.calcsize(binary_format))
- new_chunk.bytes_read+=buffer_size
-
+ previous_chunk.bytes_read+=new_chunk.bytes_read
- #update the previous chunk bytes read
- previous_chunk.bytes_read += new_chunk.bytes_read
- #print "Bytes left in this chunk: ", previous_chunk.length-previous_chunk.bytes_read
+######################################################
+# process a main chunk
+######################################################
+def process_main_chunk(file,previous_chunk,new_object_list):
- # FINISHED LOOP
- # There will be a number of objects still not added
- if contextMesh != None:
- putContextMesh(contextMesh)
+ #spare chunks
+ new_chunk=chunk()
+ temp_chunk=chunk()
+
+ #Go through the main chunk
+ while (previous_chunk.bytes_read<previous_chunk.length):
+ read_chunk(file, new_chunk)
+
+ if (new_chunk.ID==VERSION):
+ temp_data=file.read(struct.calcsize("I"))
+ data=struct.unpack("I", temp_data)
+ version=data[0]
+ new_chunk.bytes_read+=4 #read the 4 bytes for the version number
+ if (version>3): #this loader works with version 3 and below, but may not with 4 and above
+ print "\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version
+
+ elif (new_chunk.ID==EDITOR_BLOCK):
+ while(new_chunk.bytes_read<new_chunk.length):
+ read_chunk(file, temp_chunk)
+ if (temp_chunk.ID==MATERIAL_BLOCK):
+ process_material_block(file, temp_chunk)
+ elif (temp_chunk.ID==OBJECT_BLOCK):
+ process_object_block(file, temp_chunk,new_object_list)
+ else:
+ skip_to_end(file,temp_chunk)
+
+ new_chunk.bytes_read+=temp_chunk.bytes_read
+ else:
+ skip_to_end(file,new_chunk)
- for ob in objectList:
- ob.sel = 1
+ previous_chunk.bytes_read+=new_chunk.bytes_read
+#***********************************************
+# main entry point for loading 3ds files
+#***********************************************
def load_3ds (filename):
+ current_chunk=chunk()
+ print "--------------------------------"
print 'Importing "%s"' % filename
-
- time1 = Blender.sys.time()
+ time1 = Blender.sys.time() #for timing purposes
+ file=open(filename,"rb")
+ new_object_list = []
global FILENAME
FILENAME=filename
- current_chunk=chunk()
- file=open(filename,"rb")
-
- #here we go!
- # print "reading the first chunk"
- new_object_list = []
read_chunk(file, current_chunk)
+
if (current_chunk.ID!=PRIMARY):
print "\tFatal Error: Not a valid 3ds file: ", filename
file.close()
return
-
- process_next_chunk(file, current_chunk, new_object_list)
+
+ process_main_chunk(file, current_chunk, new_object_list)
# Select all new objects.
for ob in new_object_list: ob.sel = 1
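
The diff above does not show the script's GUI entry point; assuming it still ends with the usual file-selector callback used by the release scripts (not part of this hunk), driving the rewritten loader from Blender's text editor could look like the sketch below, where the callback name and the commented path are illustrative.

import Blender
from Blender import Window

def my_callback(filename):
	load_3ds(filename)       # defined above: walks the chunks and builds the objects
	Window.RedrawAll()       # refresh the views once the new objects are selected

Window.FileSelector(my_callback, "Import 3DS")
# or, non-interactively (path is illustrative):
# my_callback("/tmp/example.3ds")
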
diff --git a/release/scripts/bpymodules/meshtools.py b/release/scripts/bpymodules/meshtools.py
index 4ddf6035e59..bf875abf43c 100644
--- a/release/scripts/bpymodules/meshtools.py
+++ b/release/scripts/bpymodules/meshtools.py
@@ -5,12 +5,28 @@
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | September 28, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
# +---------------------------------------------------------+
# | Common Functions & Global Variables For All IO Modules |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender
import sys
@@ -35,7 +51,16 @@ def append_faces(mesh, faces, facesuv, uvcoords):
for i in range(len(faces)):
if not i%100 and show_progress: Blender.Window.DrawProgressBar(float(i)/len(faces), "Generating Faces")
numfaceverts=len(faces[i])
- if numfaceverts <= 4: # This face is a triangle or quad
+ if numfaceverts == 2: #This is not a face, it is an edge
+ if mesh.edges == None: #first run
+ mesh.addEdgeData()
+ #rev_face = revert(cur_face)
+ i1 = faces[i][0]
+ i2 = faces[i][1]
+ ee = mesh.addEdge(mesh.verts[i1],mesh.verts[i2])
+ ee.flag |= Blender.NMesh.EdgeFlags.EDGEDRAW
+ ee.flag |= Blender.NMesh.EdgeFlags.EDGERENDER
+ elif numfaceverts in [3,4]: # This face is a triangle or quad
face = Blender.NMesh.Face()
for j in range(numfaceverts):
index = faces[i][j]
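
The meshtools change above routes 2-vertex entries to real mesh edges instead of trying to build degenerate faces. A minimal sketch of just those NMesh edge calls in isolation, outside of append_faces; the mesh name and vertex coordinates are illustrative.

import Blender
from Blender import NMesh

me = NMesh.New("edge_demo")
for co in ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)):
	me.verts.append(NMesh.Vert(co[0], co[1], co[2]))

if me.edges == None:          # same guard as the patch: allocate edge data on first use
	me.addEdgeData()
ee = me.addEdge(me.verts[0], me.verts[1])
ee.flag |= NMesh.EdgeFlags.EDGEDRAW
ee.flag |= NMesh.EdgeFlags.EDGERENDER

NMesh.PutRaw(me, "edge_demo") # link the mesh into the scene as a new object
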
diff --git a/release/scripts/bvh2arm.py b/release/scripts/bvh2arm.py
index b34be8a8222..8093eb6d10d 100644
--- a/release/scripts/bvh2arm.py
+++ b/release/scripts/bvh2arm.py
@@ -5,9 +5,11 @@ Blender: 239
Group: 'Animation'
Tooltip: 'Create Armature from a parented-empties chain'
"""
-__author__ = " Jean-Baptiste PERIN (jb_perin(at)yahoo.fr)"
+__author__ = "Jean-Baptiste PERIN (jb_perin(at)yahoo.fr)"
__url__ = ("blender", "elysiun",
-"BVH 2 ARMATURE, http://perso.wanadoo.fr/jb.perin/",
+"Author's homepage, http://perso.wanadoo.fr/jb.perin/",
+"Documentation, http://perso.wanadoo.fr/jb.perin/BVH2ARM/doc/bvh2arm.html",
+"Mocap tutorial, http://perso.wanadoo.fr/jb.perin/Mocap/MocapAnimation.html",
"Communicate problems and errors, http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender")
__version__ = "2.4"
diff --git a/release/scripts/lightwave_export.py b/release/scripts/lightwave_export.py
index a109acc02c1..d3d7099594f 100644
--- a/release/scripts/lightwave_export.py
+++ b/release/scripts/lightwave_export.py
@@ -46,12 +46,27 @@ v5.5 format.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | April 21, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write LightWave Object File Format (*.lwo) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import struct, chunk, os, cStringIO, time, operator
diff --git a/release/scripts/lightwave_import.py b/release/scripts/lightwave_import.py
index 8dadbf3c01c..d3e67a0508c 100644
--- a/release/scripts/lightwave_import.py
+++ b/release/scripts/lightwave_import.py
@@ -1,16 +1,19 @@
#!BPY
"""
Name: 'LightWave + Materials (.lwo)...'
-Blender: 237
+Blender: 239
Group: 'Import'
Tooltip: 'Import LightWave Object File Format (.lwo)'
"""
__author__ = "Alessandro Pirovano, Anthony D'Agostino (Scorpius)"
__url__ = ("blender", "elysiun",
-"Author's homepage, http://www.redrival.com/scorpius", "Author's homepage, http://uaraus.altervista.org")
+"Anthony's homepage, http://www.redrival.com/scorpius", "Alessandro's homepage, http://uaraus.altervista.org")
-importername = "lwo_import 0.1.16"
+importername = "lwo_import 0.2.2b"
+
+# $Id$
+#
# +---------------------------------------------------------+
# | Save your work before and after use. |
# | Please report any useful comment to: |
@@ -22,7 +25,6 @@ importername = "lwo_import 0.1.16"
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | April 21, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
# | Import Export Suite v0.5 |
# +---------------------------------------------------------+
# | Read and write LightWave Object File Format (*.lwo) |
@@ -31,8 +33,36 @@ importername = "lwo_import 0.1.16"
# | Alessandro Pirovano tweaked starting on March 2005 |
# | http://uaraus.altervista.org |
# +---------------------------------------------------------+
+# +----------------------------------------------------------
+# | GPL license block
+# |
+# | This program is free software; you can redistribute it and/or modify
+# | it under the terms of the GNU General Public License as published by
+# | the Free Software Foundation; either version 2 of the License, or
+# | (at your option) any later version.
+# |
+# | This program is distributed in the hope that it will be useful,
+# | but WITHOUT ANY WARRANTY; without even the implied warranty of
+# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# | GNU General Public License for more details.
+# |
+# | You should have received a copy of the GNU General Public License
+# | along with this program; if not, write to the Free Software
+# | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# +----------------------------------------------------------
# +---------------------------------------------------------+
# | Release log: |
+# | 0.2.1 : This code works with Blender 2.40 RC1 |
+# | modified material mode assignment to deal with |
+# | Python API modification |
+# | Changed script license to GNU GPL |
+# | 0.2.0: This code works with Blender 2.40a2 or up |
+# | Major rewrite to deal with large meshes |
+# | - 2 pass file parsing |
+# | - lower memory footprint |
+# | (as long as python gc allows) |
+# | 2.40a2 - Removed subsurf settings patches=poly |
+# | 2.40a2 - Edge generation instead of 2vert faces |
# | 0.1.16: fixed (try 2) texture offset calculations |
# | added hint on axis mapping |
# | added hint on texture blending mode |
@@ -70,10 +100,23 @@ import struct, chunk, os, cStringIO, time, operator, copy
textname = "lwo_log"
#uncomment the following line to disable logging facility
-#textname = None 1
+#textname = None
# ===========================================================
+
+# ===========================================================
+# === Make sure it is a string ... deal with strange chars ==
+# ===========================================================
+def safestring(st):
+ myst = ""
+ for ll in xrange(len(st)):
+ if st[ll] < " ":
+ myst += "#"
+ else:
+ myst += st[ll]
+ return myst
+
class dotext:
_NO = 0 #use internal to class only
@@ -87,7 +130,7 @@ class dotext:
self.txtobj = None
return
tlist = Blender.Text.get()
- for i in range(len(tlist)):
+ for i in xrange(len(tlist)):
if (tlist[i].getName()==tname):
tlist[i].clear()
#print tname, " text object found and cleared!"
@@ -120,7 +163,7 @@ class dotext:
def plist(self, pplist, where = _NO):
self.pprint ("list:[")
- for pp in range(len(pplist)):
+ for pp in xrange(len(pplist)):
self.pprint ("[%d] -> %s" % (pp, pplist[pp]), where)
self.pprint ("]")
# end def plist
@@ -140,7 +183,7 @@ class dotext:
elif type(parg) == type ({}):
self.pdict(parg, where)
else:
- self.pstring(parg, where)
+ self.pstring(safestring(str(parg)), where)
# end def pprint
def logcon(self, parg):
@@ -152,6 +195,13 @@ tobj=dotext(textname)
#uncomment the following line to log all messages on both console and logfile
#tobj=dotext(textname,dotext.CON)
+def rlcopy(ll):
+ if type(ll) != type ([]):
+ return ll
+ if ll == []:
+ return []
+ cpy = [rlcopy(ii) for ii in ll]
+ return cpy
# ===========================================================
# === Main read functions ===================================
@@ -172,7 +222,9 @@ def read(filename):
start = time.clock()
file = open(filename, "rb")
- # === LWO header ===
+ editmode = Blender.Window.EditMode() # are we in edit mode? If so ...
+ if editmode: Blender.Window.EditMode(0) # leave edit mode before getting the mesh # === LWO header ===
+
form_id, form_size, form_type = struct.unpack(">4s1L4s", file.read(12))
if (form_type == "LWOB"):
read_lwob(file, filename)
@@ -193,6 +245,8 @@ def read(filename):
tobj.pprint ("#####################################################################")
tobj.logcon (message)
tobj.logcon ("#####################################################################")
+ if editmode: Blender.Window.EditMode(1) # optional, just being nice
+
# enddef read
@@ -234,6 +288,7 @@ def read_lwo2(file, filename, typ="LWO2"):
dir_part = Blender.sys.dirname(filename)
fname_part = Blender.sys.basename(filename)
+ ask_weird = 1
#first initialization of data structures
defaultname = os.path.splitext(fname_part)[0]
@@ -242,17 +297,63 @@ def read_lwo2(file, filename, typ="LWO2"):
clip_list = [] #clip list: global for the whole file?
object_index = 0
object_list = None
+ objspec_list = None
# init value is: object_list = [[None, {}, [], [], {}, {}, 0, {}, {}]]
#0 - objname #original name
#1 - obj_dict = {TAG} #objects created
- #2 - verts = [] #object vertices
- #3 - faces = [] #object faces (associations poly -> vertices)
+ #2 - verts = [] #object vertexes
+ #3 - faces = [] #object faces (associations poly -> vertexes)
#4 - obj_dim_dict = {TAG} #tuples size and pos in local object coords - used for NON-UV mappings
- #5 - polytag_dict = {TAG} #tag to polygon mapping
+ #5 - polytag_dict = {TAG} #tag to polygons mapping
#6 - patch_flag #0 = surf; 1 = patch (subdivision surface) - it was the image list
- #7 - uvcoords_dict = {name} #uvmap coordinates (mixed mode per face/per vertex)
- #8 - facesuv_dict = {name} #uvmap coordinates associations poly -> uv tuples
+ #7 - uvcoords_dict = {name} #uvmap coordinates (mixed mode per vertex/per face)
+ #8 - facesuv_dict = {name} #vmad only coordinates associations poly & vertex -> uv tuples
+ #pass 1: look in advance for materials
+ tobj.logcon ("#####################################################################")
+ tobj.logcon ("Starting Pass 1: hold on tight")
+ tobj.logcon ("#####################################################################")
+ while 1:
+ try:
+ lwochunk = chunk.Chunk(file)
+ except EOFError:
+ break
+ tobj.pprint(" ")
+ if lwochunk.chunkname == "TAGS": # Tags
+ tobj.pprint("---- TAGS")
+ tag_list.extend(read_tags(lwochunk))
+ elif lwochunk.chunkname == "SURF": # surfaces
+ tobj.pprint("---- SURF")
+ surf_list.append(read_surfs(lwochunk, surf_list, tag_list))
+ elif lwochunk.chunkname == "CLIP": # texture images
+ tobj.pprint("---- CLIP")
+ clip_list.append(read_clip(lwochunk, dir_part))
+ tobj.pprint("read total %s clips up to now" % len(clip_list))
+ else: # Misc Chunks
+ if ask_weird:
+ ckname = safestring(lwochunk.chunkname)
+ if "#" in ckname:
+ choice = Blender.Draw.PupMenu("WARNING: file could be corrupted.%t|Import anyway|Give up")
+ if choice != 1:
+ tobj.logcon("---- %s: Maybe file corrupted. Terminated by user" % lwochunk.chunkname)
+ return
+ ask_weird = 0
+ tobj.pprint("---- %s: skipping (maybe later)" % lwochunk.chunkname)
+ lwochunk.skip()
+
+ #add default material for orphaned faces, if any
+ surf_list.append({'NAME': "_Orphans", 'g_MAT': Blender.Material.New("_Orphans")})
+
+ #pass 2: effectively generate objects
+ tobj.logcon ("#####################################################################")
+ tobj.logcon ("Pass 2: now for the hard part")
+ tobj.logcon ("#####################################################################")
+ file.seek(0)
+ # === LWO header ===
+ form_id, form_size, form_type = struct.unpack(">4s1L4s", file.read(12))
+ if (form_type != "LWO2"):
+ tobj.logcon ("??? Inconsistent file type: %s" %form_type)
+ return
while 1:
try:
lwochunk = chunk.Chunk(file)
@@ -263,81 +364,69 @@ def read_lwo2(file, filename, typ="LWO2"):
tobj.pprint("---- LAYR")
objname = read_layr(lwochunk)
tobj.pprint(objname)
- if object_list == None:
- object_list = [[objname, {}, [], [], {}, {}, 0, {}, {}]]
- else:
- object_list.append([objname, {}, [], [], {}, {}, 0, {}, {}])
- object_index += 1
+ if objspec_list != None: #create the object
+ create_objects(clip_list, objspec_list, surf_list)
+ update_material(clip_list, objspec_list, surf_list) #give it all the object
+ objspec_list = [objname, {}, [], [], {}, {}, 0, {}, {}]
+ object_index += 1
elif lwochunk.chunkname == "PNTS": # Verts
tobj.pprint("---- PNTS")
verts = read_verts(lwochunk)
- object_list[object_index][2] = verts
+ objspec_list[2] = verts
elif lwochunk.chunkname == "VMAP": # MAPS (UV)
tobj.pprint("---- VMAP")
- object_list[object_index][7], object_list[object_index][8] = read_vmap(object_list[object_index][7], object_list[object_index][8], object_list[object_index][3], len(object_list[object_index][2]), lwochunk)
+ #objspec_list[7] = read_vmap(objspec_list[7], len(objspec_list[2]), lwochunk)
+ read_vmap(objspec_list[7], len(objspec_list[2]), lwochunk)
elif lwochunk.chunkname == "VMAD": # MAPS (UV) per-face
tobj.pprint("---- VMAD")
- object_list[object_index][7], object_list[object_index][8] = read_vmad(object_list[object_index][7], object_list[object_index][8], object_list[object_index][3], len(object_list[object_index][2]), lwochunk)
+ #objspec_list[7], objspec_list[8] = read_vmad(objspec_list[7], objspec_list[8], len(objspec_list[3]), len(objspec_list[2]), lwochunk)
+ read_vmad(objspec_list[7], objspec_list[8], len(objspec_list[3]), len(objspec_list[2]), lwochunk)
elif lwochunk.chunkname == "POLS": # Faces v6.0
tobj.pprint("-------- POLS(6)")
faces, flag = read_faces_6(lwochunk)
#flag is 0 for regular polygon, 1 for patches (= subsurf), 2 for anything else to be ignored
if flag<2:
- if object_list[object_index][3] != []:
- object_list.append([object_list[object_index][0], #update name
- {}, #init
- copy.deepcopy(object_list[object_index][2]), #same vertices
- [], #no faces
- {}, #no need to copy - filled at runtime
- {}, #polygon tagging will follow
- flag, #patch flag
- copy.deepcopy(object_list[object_index][7]), #same uvcoords
- {}]) #no uv mapping
+ if objspec_list[3] != []:
+ #create immediately the object
+ create_objects(clip_list, objspec_list, surf_list)
+ update_material(clip_list, objspec_list, surf_list) #give it all the object
+ #update with new data
+ objspec_list = [objspec_list[0], #update name
+ {}, #init
+ objspec_list[2], #same vertexes
+ faces, #give it the new faces
+ {}, #no need to copy - filled at runtime
+ {}, #polygon tagging will follow
+ flag, #patch flag
+ objspec_list[7], #same uvcoords
+ {}] #no vmad mapping
object_index += 1
#end if already has a face list
- #update uv coords mapping if VMAP already encountered
- for uvname in object_list[object_index][7]:
- tobj.pprint("updating uv to face mapping for %s" % uvname)
- object_list[object_index][8][uvname] = copy.deepcopy(faces)
- object_list[object_index][3] = faces
- objname = object_list[object_index][0]
+ objspec_list[3] = faces
+ objname = objspec_list[0]
if objname == None:
objname = defaultname
#end if processing a valid poly type
- elif lwochunk.chunkname == "TAGS": # Tags
- tobj.pprint("---- TAGS")
- tag_list.extend(read_tags(lwochunk))
elif lwochunk.chunkname == "PTAG": # PTags
tobj.pprint("---- PTAG")
polytag_dict = read_ptags(lwochunk, tag_list)
- for kk in polytag_dict.keys(): object_list[object_index][5][kk] = polytag_dict[kk]
- elif lwochunk.chunkname == "SURF": # surfaces
- tobj.pprint("---- SURF")
- surf_list.append(read_surfs(lwochunk, surf_list, tag_list))
- elif lwochunk.chunkname == "CLIP": # texture images
- tobj.pprint("---- CLIP")
- clip_list.append(read_clip(lwochunk))
- tobj.pprint("read total %s clips" % len(clip_list))
+ for kk in polytag_dict.keys(): objspec_list[5][kk] = polytag_dict[kk]
else: # Misc Chunks
- tobj.pprint("---- %s: skipping" % lwochunk.chunkname)
+ tobj.pprint("---- %s: skipping (definitely!)" % lwochunk.chunkname)
lwochunk.skip()
#uncomment here to log data structure as it is built
#tobj.pprint(object_list)
+ #last object read
+ create_objects(clip_list, objspec_list, surf_list)
+ update_material(clip_list, objspec_list, surf_list) #give it all the object
+ objspec_list = None
+ surf_list = None
+ clip_list = None
+
tobj.pprint ("\n#####################################################################")
- tobj.pprint("Found %d objects:" % len(object_list))
+ tobj.pprint("Found %d objects:" % object_index)
tobj.pprint ("#####################################################################")
- for objspec_list in object_list:
- tobj.pprint ("\n#===================================================================#")
- tobj.pprint("Processing Object: %s" % objspec_list[0])
- tobj.pprint ("#===================================================================#")
- objspec_list[3], objspec_list[5], objspec_list[8] = recalc_faces(objspec_list[2], objspec_list[3], objspec_list[5], objspec_list[8]) #recalculate faces, polytag_dict and uv_mapping get rid of faces fanning
-
- create_objects(objspec_list)
-
- if surf_list != []:
- create_material(clip_list, surf_list, objspec_list, dir_part) #give it all the object
- return
# enddef read_lwo2
@@ -358,12 +447,12 @@ def read_verts(lwochunk):
numverts = lwochunk.chunksize/12
#$verts = []
verts = [None] * numverts
- for i in range(numverts):
- if not i%100 and my_meshtools.show_progress:
+ for i in xrange(numverts):
+ if not i%1000 and my_meshtools.show_progress:
Blender.Window.DrawProgressBar(float(i)/numverts, "Reading Verts")
x, y, z = struct.unpack(">fff", data.read(12))
verts[i] = (x, z, y)
- tobj.pprint("read %d vertices" % (i+1))
+ tobj.pprint("read %d vertexes" % (i+1))
return verts
# enddef read_verts
@@ -406,11 +495,11 @@ def read_faces_5(lwochunk):
faces = []
i = 0
while i < lwochunk.chunksize:
- if not i%100 and my_meshtools.show_progress:
+ if not i%1000 and my_meshtools.show_progress:
Blender.Window.DrawProgressBar(float(i)/lwochunk.chunksize, "Reading Faces")
facev = []
numfaceverts, = struct.unpack(">H", data.read(2))
- for j in range(numfaceverts):
+ for j in xrange(numfaceverts):
index, = struct.unpack(">H", data.read(2))
facev.append(index)
facev.reverse()
@@ -442,48 +531,44 @@ def read_vx(data):
# ======================
# === Read uvmapping ===
# ======================
-def read_vmap(uvcoords_dict, facesuv_dict, faces, maxvertnum, lwochunk):
+def read_vmap(uvcoords_dict, maxvertnum, lwochunk):
if maxvertnum == 0:
- tobj.pprint ("Found VMAP but no vertices to map!")
- return uvcoords_dict, facesuv_dict
+ tobj.pprint ("Found VMAP but no vertexes to map!")
+ return uvcoords_dict
data = cStringIO.StringIO(lwochunk.read())
map_type = data.read(4)
if map_type != "TXUV":
tobj.pprint ("Reading VMAP: No Texture UV map Were Found. Map Type: %s" % map_type)
- return uvcoords_dict, facesuv_dict
+ return uvcoords_dict
dimension, = struct.unpack(">H", data.read(2))
name, i = read_name(data) #i initialized with string lenght + zeros
tobj.pprint ("TXUV %d %s" % (dimension, name))
- #my_uv_list = [None] * maxvertnum
- my_uv_list = [(0.0, 0.0)] * maxvertnum #more safe to have some default coordinates to associate in any case?
- while (i < lwochunk.chunksize - 6): #4+2 header bytes already read
+ #note if there is already a VMAD it will be lost
+ #it is assumed that VMAD will follow the corresponding VMAP
+ if uvcoords_dict.has_key(name):
+ my_uv_dict = uvcoords_dict[name] #update existing
+ else:
+ my_uv_dict = {} #start a brand new: this could be made more smart
+ while (i < lwochunk.chunksize - 6): #4+2 header bytes already read
vertnum, vnum_size = read_vx(data)
u, v = struct.unpack(">ff", data.read(8))
if vertnum >= maxvertnum:
- tobj.pprint ("Hem: more uvmap than vertices? ignoring uv data for vertex %d" % vertnum)
+ tobj.pprint ("Hem: more uvmap than vertexes? ignoring uv data for vertex %d" % vertnum)
else:
- my_uv_list[vertnum] = (u, v)
+ my_uv_dict[vertnum] = (u, v)
i += 8 + vnum_size
#end loop on uv pairs
- uvcoords_dict[name] = my_uv_list
+ uvcoords_dict[name] = my_uv_dict
#this is a per-vertex mapping AND the uv tuple is vertex-ordered, so faces_uv is the same as faces
- if faces == []:
- tobj.pprint ("no faces read yet! delaying uv to face assignments")
- facesuv_dict[name] = []
- else:
- #deepcopy so we could modify it without actually modify faces
- tobj.pprint ("faces already present: proceeding with assignments")
- facesuv_dict[name] = copy.deepcopy(faces)
- return uvcoords_dict, facesuv_dict
-
+ #return uvcoords_dict
+ return
# ========================
# === Read uvmapping 2 ===
# ========================
-def read_vmad(uvcoords_dict, facesuv_dict, faces, maxvertnum, lwochunk):
- maxfacenum = len(faces)
+def read_vmad(uvcoords_dict, facesuv_dict, maxfacenum, maxvertnum, lwochunk):
if maxvertnum == 0 or maxfacenum == 0:
- tobj.pprint ("Found VMAD but no vertices to map!")
+ tobj.pprint ("Found VMAD but no vertexes to map!")
return uvcoords_dict, facesuv_dict
data = cStringIO.StringIO(lwochunk.read())
map_type = data.read(4)
@@ -494,13 +579,12 @@ def read_vmad(uvcoords_dict, facesuv_dict, faces, maxvertnum, lwochunk):
name, i = read_name(data) #i initialized with string lenght + zeros
tobj.pprint ("TXUV %d %s" % (dimension, name))
if uvcoords_dict.has_key(name):
- my_uv_list = uvcoords_dict[name] #update existing
- my_facesuv_list = facesuv_dict[name]
+ my_uv_dict = uvcoords_dict[name] #update existing
else:
- my_uv_list = [(0.0, 0.0)] * maxvertnum #start a brand new: this could be made more smart
- my_facesuv_list = copy.deepcopy(faces)
+ my_uv_dict = {} #start a brand new: this could be made more smart
+ my_facesuv_list = []
+ newindex = maxvertnum + 10 #why +10? Why not?
#end variable initialization
- lastindex = len(my_uv_list) - 1
while (i < lwochunk.chunksize - 6): #4+2 header bytes already read
vertnum, vnum_size = read_vx(data)
i += vnum_size
@@ -508,20 +592,17 @@ def read_vmad(uvcoords_dict, facesuv_dict, faces, maxvertnum, lwochunk):
i += vnum_size
u, v = struct.unpack(">ff", data.read(8))
if polynum >= maxfacenum or vertnum >= maxvertnum:
- tobj.pprint ("Hem: more uvmap than vertices? ignorig uv data for vertex %d" % vertnum)
+ tobj.pprint ("Hem: more uvmap than vertexes? ignorig uv data for vertex %d" % vertnum)
else:
- my_uv_list.append( (u,v) )
- newindex = len(my_uv_list) - 1
- for vi in range(len(my_facesuv_list[polynum])): #polynum starting from 1 or from 0?
- if my_facesuv_list[polynum][vi] == vertnum:
- my_facesuv_list[polynum][vi] = newindex
- #end loop on current face vertices
+ my_uv_dict[newindex] = (u, v)
+ my_facesuv_list.append([polynum, vertnum, newindex])
+ newindex += 1
i += 8
#end loop on uv pairs
- uvcoords_dict[name] = my_uv_list
+ uvcoords_dict[name] = my_uv_dict
facesuv_dict[name] = my_facesuv_list
- tobj.pprint ("updated %d vertices data" % (newindex-lastindex))
- return uvcoords_dict, facesuv_dict
+ tobj.pprint ("updated %d vertexes data" % (newindex-maxvertnum-10))
+ return
# =================
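The rewritten read_vmap/read_vmad pair above stores per-vertex UVs in a dictionary keyed by vertex index (VMAP) and per-face overrides as [polynum, vertnum, newindex] triples (VMAD), instead of the old flat lists. A minimal sketch of how a face/vertex pair would be resolved against that layout; the helper name is illustrative and not part of the patch:

def lookup_uv(uv_dict, vmad_entries, facenum, vertnum):
    # a VMAD (per-face) entry wins over the per-vertex VMAP value
    for polynum, vnum, newindex in vmad_entries:
        if polynum == facenum and vnum == vertnum:
            return uv_dict.get(newindex, (0.0, 0.0))
    return uv_dict.get(vertnum, (0.0, 0.0))

This is essentially what get_newindex and the UV assignment in my_create_mesh do further down in the patch.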
@@ -558,7 +639,7 @@ def read_ptags(lwochunk, tag_list):
ptag_dict = {}
i = 0
while(i < lwochunk.chunksize-4): #4 bytes polygon type already read
- if not i%100 and my_meshtools.show_progress:
+ if not i%1000 and my_meshtools.show_progress:
Blender.Window.DrawProgressBar(float(i)/lwochunk.chunksize, "Reading PTAGS")
poln, poln_size = read_vx(data)
i += poln_size
@@ -581,7 +662,9 @@ def read_ptags(lwochunk, tag_list):
# ==================
# === Read Clips ===
# ==================
-def read_clip(lwochunk):
+def read_clip(lwochunk, dir_part):
+# img, IMG, g_IMG refers to blender image objects
+# ima, IMAG, g_IMAG refers to clip dictionary 'ID' entries: refer to blok and surf
clip_dict = {}
data = cStringIO.StringIO(lwochunk.read())
image_index, = struct.unpack(">L", data.read(4))
@@ -616,11 +699,30 @@ def read_clip(lwochunk):
n, = struct.unpack(">H", data.read(2))
clip_dict['NEGA'] = n
else: # Misc Chunks
- tobj.pprint("-------- SURF:%s: skipping" % subchunkname)
+ tobj.pprint("-------- CLIP:%s: skipping" % subchunkname)
discard = data.read(subchunklen)
i = i + 6 + subchunklen
#end loop on surf chunks
tobj.pprint("read image:%s" % clip_dict)
+ if clip_dict.has_key('XREF'):
+ tobj.pprint("Cross-reference: no image pre-allocated.")
+ return clip_dict
+ #look for images
+ img = load_image("",clip_dict['NAME'])
+ if img == None:
+ tobj.pprint ( "***No image %s found: trying LWO file subdir" % clip_dict['NAME'])
+ img = load_image(dir_part,clip_dict['BASENAME'])
+ if img == None:
+ tobj.pprint ( "***No image %s found in directory %s: trying Images subdir" % (clip_dict['BASENAME'], dir_part))
+ img = load_image(dir_part+Blender.sys.sep+"Images",clip_dict['BASENAME'])
+ if img == None:
+ tobj.pprint ( "***No image %s found: trying alternate Images subdir" % clip_dict['BASENAME'])
+ img = load_image(dir_part+Blender.sys.sep+".."+Blender.sys.sep+"Images",clip_dict['BASENAME'])
+ if img == None:
+ tobj.pprint ( "***No image %s found: giving up" % clip_dict['BASENAME'])
+ #lucky we are: we have an image
+ tobj.pprint ("Image pre-allocated.")
+ clip_dict['g_IMG'] = img
return clip_dict
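The image lookup added to read_clip tries a fixed chain of locations before giving up. The same fallback expressed compactly, assuming the importer's load_image(dir, name) helper (returns None on failure) and Blender.sys.sep:

def find_clip_image(dir_part, name, basename):
    sep = Blender.sys.sep
    candidates = [("", name),                                           # path as written in the file
                  (dir_part, basename),                                 # next to the LWO file
                  (dir_part + sep + "Images", basename),                # Images subdir
                  (dir_part + sep + ".." + sep + "Images", basename)]   # sibling Images dir
    for d, n in candidates:
        img = load_image(d, n)
        if img != None:
            return img
    return None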
@@ -644,7 +746,7 @@ def read_surfblok(subchunkdata):
tobj.pprint ("---------- IMAP")
ordinal, i = read_name(data)
my_dict['ORD'] = ordinal
- my_dict['g_ORD'] = -1
+ #my_dict['g_ORD'] = -1
my_dict['ENAB'] = True
while(i < subchunklen): # ---------left 6------------------------- loop on header parameters
sub2chunkname, = struct.unpack("4s", data.read(4))
@@ -886,6 +988,9 @@ def read_surfs(lwochunk, surf_list, tag_list):
my_dict['BLOK'].append(rr)
if uvname != "":
my_dict['UVNAME'] = uvname #theoretically there could be a number of them: only one used per surf
+ if not(my_dict.has_key('g_IMAG')) and (rr.has_key('CHAN')) and (rr.has_key('OPAC')) and (rr.has_key('IMAG')):
+ if (rr['CHAN'] == 'COLR') and (rr['OPAC'] == 0):
+ my_dict['g_IMAG'] = rr['IMAG'] #do not set anything, just save image object for later assignment
subchunklen = 0 #force ending
else: # Misc Chunks
tobj.pprint("-------- SURF:%s: skipping" % subchunkname)
@@ -893,14 +998,13 @@ def read_surfs(lwochunk, surf_list, tag_list):
discard = data.read(subchunklen)
#end loop on surf chunks
if my_dict.has_key('BLOK'):
- my_dict['BLOK'].reverse()
+ my_dict['BLOK'].reverse() #texture applied in reverse order with respect to reading from lwo
+ #pre-allocate the material here; update_material below only fetches and fills it
+ my_dict['g_MAT'] = Blender.Material.New(my_dict['NAME'])
+ tobj.pprint("-> Material pre-allocated.")
return my_dict
-
-
-
-
# ===========================================================
# === Generation Routines ===================================
# ===========================================================
@@ -916,13 +1020,13 @@ def dist_vector (head, tail): #vector from head to tail
# ================
def find_ear(normal, list_dict, verts, face):
nv = len(list_dict['MF'])
- #looping through vertices trying to find an ear
+ #looping through vertexes trying to find an ear
#most likely in case of panic
mlc = 0
mla = 1
mlb = 2
- for c in range(nv):
+ for c in xrange(nv):
a = (c+1) % nv; b = (a+1) % nv
if list_dict['P'][a] > 0.0: #we have to start from a convex vertex
@@ -934,7 +1038,7 @@ def find_ear(normal, list_dict, verts, face):
#tobj.pprint (" ok, this one passed")
concave = 0
concave_inside = 0
- for xx in range(nv): #looking for concave vertex
+ for xx in xrange(nv): #looking for concave vertex
if (list_dict['P'][xx] <= 0.0) and (xx != b) and (xx != c): #cannot be a: it's convex
#ok, found concave vertex
concave = 1
@@ -974,10 +1078,10 @@ def find_ear(normal, list_dict, verts, face):
concave_inside = 1
break
#endif found a concave vertex
- #end loop looking for concave vertices
+ #end loop looking for concave vertexes
if (concave == 0) or (concave_inside == 0):
- #no concave vertices in polygon (should not be): return immediately
- #looped all concave vertices and no one inside found
+ #no concave vertexes in polygon (should not be): return immediately
+ #looped all concave vertexes and no one inside found
return [c, a, b]
#no convex vertex, try another one
#end loop to find a suitable base vertex for ear
@@ -1003,18 +1107,18 @@ def reduce_face(verts, face):
list_dict['P'] = [None] * nv
#list of distances
for mvi in list_dict['MF']:
- #vector between two vertices
+ #vector between two vertexes
mvi_hiend = (mvi+1) % nv #last-to-first
vi_hiend = face[mvi_hiend] #vertex
vi = face[mvi]
list_dict['D'][mvi] = dist_vector(verts[vi_hiend], verts[vi])
- #list of cross products - normals evaluated into vertices
- for vi in range(nv):
+ #list of cross products - normals evaluated into vertexes
+ for vi in xrange(nv):
list_dict['X'][vi] = Blender.Mathutils.CrossVecs(list_dict['D'][vi], list_dict['D'][vi-1])
my_face_normal = Blender.Mathutils.Vector([list_dict['X'][0][0], list_dict['X'][0][1], list_dict['X'][0][2]])
#list of dot products
list_dict['P'][0] = 1.0
- for vi in range(1, nv):
+ for vi in xrange(1, nv):
list_dict['P'][vi] = Blender.Mathutils.DotVecs(my_face_normal, list_dict['X'][vi])
#is there at least one concave vertex?
#one_concave = reduce(lambda x, y: (x) or (y<=0.0), list_dict['P'], 0)
@@ -1050,8 +1154,8 @@ def reduce_face(verts, face):
list_dict['P'].pop(ct[1])
one_concave = reduce(lambda x, y: (x) or (y<0.0), list_dict['P'], 0)
nv -=1
- else: #here if no more concave vertices
- if nv == 4: break #quads only if no concave vertices
+ else: #here if no more concave vertexes
+ if nv == 4: break #quads only if no concave vertexes
decomposition_list.append([list_dict['MF'][0], list_dict['MF'][1], list_dict['MF'][2]])
#physical removal
list_dict['MF'].pop(1)
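reduce_face (context above and below) classifies corners by taking the cross product of the two edges that meet at each vertex and comparing it with the face normal: a non-positive dot product marks a concave corner, which is what drives the ear-clipping loop. A stand-alone sketch of that test, assuming the Blender.Mathutils vectors used by the importer:

def corner_is_concave(outgoing_edge, incoming_edge, face_normal):
    # same construction as list_dict['X'] / list_dict['P'] above
    x = Blender.Mathutils.CrossVecs(outgoing_edge, incoming_edge)
    return Blender.Mathutils.DotVecs(face_normal, x) <= 0.0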
@@ -1064,63 +1168,37 @@ def reduce_face(verts, face):
# =========================
# === Recalculate Faces ===
# =========================
-# --------- this do the standard face + ptag_dict + uv-map recalc
-def recalc_faces(verts, faces, polytag_dict, facesuv_dict):
- # init local face list
- my_faces = []
- # init local uvface dict
- my_facesuv = {}
- for uvname in facesuv_dict:
- my_facesuv[uvname] = []
- replaced_faces_dict = {}
- j = 0
- if len(faces)==0:
- return faces, polytag_dict, facesuv_dict
- for i in range(len(faces)):
- # i = index that spans on original faces
- # j = index that spans on new faces
- if not i%100 and my_meshtools.show_progress: Blender.Window.DrawProgressBar(float(i)/len(faces), "Recalculating faces")
- numfaceverts=len(faces[i])
- if numfaceverts < 4: #This face is a triangle or quad: more strict - it has to be a triangle
- my_faces.append(faces[i]) #ok, leave it alone ....
- for uvname in facesuv_dict:
- my_facesuv[uvname].append(facesuv_dict[uvname][i])
- replaced_faces_dict[i] = [j] #.... but change the nuber order of the face
- j += 1
- else: # Reduce n-sided convex polygon.
- meta_faces = reduce_face(verts, faces[i]) # Indices of triangles.
- this_faces = [] # list of triangles poly replacing original face
- this_faces_index = []
- for mf in meta_faces:
- ll = len(mf)
- if ll == 3: #triangle
- this_faces.append([faces[i][mf[0]], faces[i][mf[1]], faces[i][mf[2]]])
- else: #quads
- this_faces.append([faces[i][mf[0]], faces[i][mf[1]], faces[i][mf[2]], faces[i][mf[3]]])
- for uvname in facesuv_dict:
- if ll == 3: #triangle
- my_facesuv[uvname].append([facesuv_dict[uvname][i][mf[0]], facesuv_dict[uvname][i][mf[1]], facesuv_dict[uvname][i][mf[2]]])
- else: #quads
- my_facesuv[uvname].append([facesuv_dict[uvname][i][mf[0]], facesuv_dict[uvname][i][mf[1]], facesuv_dict[uvname][i][mf[2]], facesuv_dict[uvname][i][mf[3]]])
- this_faces_index.append(j)
- j +=1
- my_faces.extend(this_faces)
- replaced_faces_dict[i] = this_faces_index #face i substituted by this list of faces
- #endif on face vertex number
- #end loop on every face
- #now we have the new faces list and a dictionary replacement.
- #going for polygon tagging
- my_ptag_dict = {}
- for tag in polytag_dict: #for every tag group
- my_ptag_dict[tag] = [] #rebuild a new entry
- for poly in polytag_dict[tag]: #take every element of old face list
- my_ptag_dict[tag].extend(replaced_faces_dict[poly]) #substitutes the element of new face list
- return my_faces, my_ptag_dict, my_facesuv
-
-# ========================================
-# === Revert list keeping first vertex ===
-# ========================================
+def get_uvface(complete_list, facenum):
+ # extract from the complete list only the vertexes of the desired polygon
+ my_facelist = []
+ for elem in complete_list:
+ if elem[0] == facenum:
+ my_facelist.append(elem)
+ return my_facelist
+
+def get_newindex(polygon_list, vertnum):
+ # extract from the polygon list the new index associated to a vertex
+ if polygon_list == []:
+ return -1
+ for elem in polygon_list:
+ if elem[1] == vertnum:
+ return elem[2]
+ #tobj.pprint("WARNING: expected vertex %s for polygon %s. Polygon_list dump follows" % (vertnum, polygon_list[0][0]))
+ #tobj.pprint(polygon_list)
+ return -1
+
+def get_surf(surf_list, cur_tag):
+ for elem in surf_list:
+ if elem['NAME'] == cur_tag:
+ return elem
+ return {}
+
+
+
+# ==========================================
+# === Revert list (keeping first vertex) ===
+# ==========================================
def revert (llist):
#different flavors: the reverse one is the one that works better
#rhead = [llist[0]]
@@ -1128,59 +1206,201 @@ def revert (llist):
#rhead.extend(rtail)
#return rhead
#--------------
- rhead=copy.deepcopy(llist)
+ rhead=rlcopy(llist)
rhead.reverse()
return rhead
#--------------
#return llist
+
# ====================================
# === Modified Create Blender Mesh ===
# ====================================
-def my_create_mesh(complete_vertlist, complete_facelist, current_facelist, objname, not_used_faces):
+def my_create_mesh(clip_list, surf, objspec_list, current_facelist, objname, not_used_faces):
#take the needed faces and update the not-used face list
- vertex_map = [-1] * len(complete_vertlist)
+ complete_vertlist = objspec_list[2]
+ complete_facelist = objspec_list[3]
+ uvcoords_dict = objspec_list[7]
+ facesuv_dict = objspec_list[8]
+ vertex_map = {} #implementation as dict
cur_ptag_faces = []
+ cur_ptag_faces_indexes = []
+ maxface = len(complete_facelist)
for ff in current_facelist:
+ if ff >= maxface:
+ tobj.logcon("Non existent face addressed: Giving up with this object")
+ return None, not_used_faces #return the created object
cur_face = complete_facelist[ff]
- cur_ptag_faces.append(cur_face)
+ cur_ptag_faces_indexes.append(ff)
if not_used_faces != []: not_used_faces[ff] = -1
- for vv in cur_face:
- vertex_map[vv] = 1
- #end loop on vertex on this face
+ for vv in cur_face: vertex_map[vv] = 1
#end loop on faces
+ store_edge = 0
- mesh = Blender.NMesh.GetRaw()
+ msh = Blender.NMesh.GetRaw()
+ # Name the Object
+ if not my_meshtools.overwrite_mesh_name:
+ objname = my_meshtools.versioned_name(objname)
+ Blender.NMesh.PutRaw(msh, objname) # Name the Mesh
+ obj = Blender.Object.GetSelected()[0]
+ obj.name=objname
+ # Associate material and mesh properties => from create materials
+ msh = obj.getData()
+ mat_index = len(msh.getMaterials(1))
+ mat = None
+ if surf.has_key('g_MAT'):
+ mat = surf['g_MAT']
+ msh.addMaterial(mat)
+ msh.mode |= Blender.NMesh.Modes.AUTOSMOOTH #smooth it anyway
+ if surf.has_key('SMAN'):
+ #mixed-mode meshes are not allowed (the whole mesh is smoothed, all with the same angle)
+ #only one smoothing angle will be active! => take the max one
+ s = int(surf['SMAN']/3.1415926535897932384626433832795*180.0) #lwo in radians - blender in degrees
+ if msh.getMaxSmoothAngle() < s: msh.setMaxSmoothAngle(s)
+
+ img = None
+ if surf.has_key('g_IMAG'):
+ ima = lookup_imag(clip_list, surf['g_IMAG'])
+ if ima != None:
+ img = ima['g_IMG']
+
+ #uv_flag = ((surf.has_key('UVNAME')) and (uvcoords_dict.has_key(surf['UVNAME'])) and (img != None))
+ uv_flag = ((surf.has_key('UVNAME')) and (uvcoords_dict.has_key(surf['UVNAME'])))
+
+ if uv_flag: #assign uv-data; settings at mesh level
+ msh.hasFaceUV(1)
+ msh.update(1)
+
+ tobj.pprint ("\n#===================================================================#")
+ tobj.pprint("Processing Object: %s" % objname)
+ tobj.pprint ("#===================================================================#")
- #append vertices
jj = 0
- for i in range(len(complete_vertlist)):
- if vertex_map[i] == 1:
- if not i%100 and my_meshtools.show_progress: Blender.Window.DrawProgressBar(float(i)/len(complete_vertlist), "Generating Verts")
- x, y, z = complete_vertlist[i]
- mesh.verts.append(Blender.NMesh.Vert(x, y, z))
- vertex_map[i] = jj
- jj += 1
- #end sweep over vertices
+ vertlen = len(vertex_map.keys())
+ maxvert = len(complete_vertlist)
+ for i in vertex_map.keys():
+ if not jj%1000 and my_meshtools.show_progress: Blender.Window.DrawProgressBar(float(i)/vertlen, "Generating Verts")
+ if i >= maxvert:
+ tobj.logcon("Non existent vertex addressed: Giving up with this object")
+ return obj, not_used_faces #return the created object
+ x, y, z = complete_vertlist[i]
+ msh.verts.append(Blender.NMesh.Vert(x, y, z))
+ vertex_map[i] = jj
+ jj += 1
+ #end sweep over vertexes
#append faces
- for i in range(len(cur_ptag_faces)):
- if not i%100 and my_meshtools.show_progress: Blender.Window.DrawProgressBar(float(i)/len(cur_ptag_faces), "Generating Faces")
- face = Blender.NMesh.Face()
- rev_face = revert(cur_ptag_faces[i])
- for vi in rev_face:
- #for vi in cur_ptag_faces[i]:
- index = vertex_map[vi]
- face.v.append(mesh.verts[index])
- #end sweep over vertices
- mesh.faces.append(face)
- #end sweep over faces
+ jj = 0
+ for i in cur_ptag_faces_indexes:
+ if not jj%1000 and my_meshtools.show_progress: Blender.Window.DrawProgressBar(float(jj)/len(cur_ptag_faces_indexes), "Generating Faces")
+ cur_face = complete_facelist[i]
+ numfaceverts = len(cur_face)
+ vmad_list = [] #empty VMAD in any case
+ if uv_flag: #settings at original face level
+ if facesuv_dict.has_key(surf['UVNAME']): #yes = has VMAD; no = has VMAP only
+ vmad_list = get_uvface(facesuv_dict[surf['UVNAME']],i) #this for VMAD
+
+ if numfaceverts == 2:
+ #This is not a face, it is an edge
+ store_edge = 1
+ if msh.edges == None: #first run
+ msh.addEdgeData()
+ #rev_face = revert(cur_face)
+ i1 = vertex_map[cur_face[1]]
+ i2 = vertex_map[cur_face[0]]
+ ee = msh.addEdge(msh.verts[i1],msh.verts[i2])
+ ee.flag |= Blender.NMesh.EdgeFlags.EDGEDRAW
+ ee.flag |= Blender.NMesh.EdgeFlags.EDGERENDER
+
+ elif numfaceverts == 3:
+ #This face is a triangle, skip face reduction
+ face = Blender.NMesh.Face()
+ msh.faces.append(face)
+ # Associate face properties => from create materials
+ if mat != None: face.materialIndex = mat_index
+ face.smooth = 1 #smooth it anyway
+
+ #rev_face = revert(cur_face)
+ rev_face = [cur_face[2], cur_face[1], cur_face[0]]
+
+ for vi in rev_face:
+ index = vertex_map[vi]
+ face.v.append(msh.verts[index])
+
+ if uv_flag:
+ ni = get_newindex(vmad_list, vi)
+ if ni > -1:
+ uv_index = ni
+ else: #VMAP - uses the same criteria as face
+ uv_index = vi
+ if uvcoords_dict[surf['UVNAME']].has_key(uv_index):
+ uv_tuple = uvcoords_dict[surf['UVNAME']][uv_index]
+ else:
+ uv_tuple = (0,0)
+ face.uv.append(uv_tuple)
+
+ if uv_flag and img != None:
+ face.mode |= Blender.NMesh.FaceModes['TEX']
+ face.image = img
+ face.mode |= Blender.NMesh.FaceModes.TWOSIDE #set it anyway
+ face.transp = Blender.NMesh.FaceTranspModes['SOLID']
+ face.flag = Blender.NMesh.FaceTranspModes['SOLID']
+ #if surf.has_key('SIDE'):
+ # msh.faces[f].mode |= Blender.NMesh.FaceModes.TWOSIDE #set it anyway
+ if surf.has_key('TRAN') and mat.getAlpha()<1.0:
+ face.transp = Blender.NMesh.FaceTranspModes['ALPHA']
- if not my_meshtools.overwrite_mesh_name:
- objname = my_meshtools.versioned_name(objname)
- Blender.NMesh.PutRaw(mesh, objname) # Name the Mesh
- obj = Blender.Object.GetSelected()[0]
- obj.name=objname # Name the Object
+ elif numfaceverts > 3:
+ #Reduce all the faces with more than 3 vertexes (and test whether the quad is concave)
+
+ meta_faces = reduce_face(complete_vertlist, cur_face) # Indices of triangles.
+ for mf in meta_faces:
+ face = Blender.NMesh.Face()
+ msh.faces.append(face)
+
+ if len(mf) == 3: #triangle
+ #rev_face = revert([cur_face[mf[0]], cur_face[mf[1]], cur_face[mf[2]]])
+ rev_face = [cur_face[mf[2]], cur_face[mf[1]], cur_face[mf[0]]]
+ else: #quads
+ #rev_face = revert([cur_face[mf[0]], cur_face[mf[1]], cur_face[mf[2]], cur_face[mf[3]]])
+ rev_face = [cur_face[mf[3]], cur_face[mf[2]], cur_face[mf[1]], cur_face[mf[0]]]
+
+ # Associate face properties => from create materials
+ if mat != None: face.materialIndex = mat_index
+ face.smooth = 1 #smooth it anyway
+
+ for vi in rev_face:
+ index = vertex_map[vi]
+ face.v.append(msh.verts[index])
+
+ if uv_flag:
+ ni = get_newindex(vmad_list, vi)
+ if ni > -1:
+ uv_index = ni
+ else: #VMAP - uses the same criteria as face
+ uv_index = vi
+ if uvcoords_dict[surf['UVNAME']].has_key(uv_index):
+ uv_tuple = uvcoords_dict[surf['UVNAME']][uv_index]
+ else:
+ uv_tuple = (0,0)
+ face.uv.append(uv_tuple)
+
+ if uv_flag and img != None:
+ face.mode |= Blender.NMesh.FaceModes['TEX']
+ face.image = img
+ face.mode |= Blender.NMesh.FaceModes.TWOSIDE #set it anyway
+ face.transp = Blender.NMesh.FaceTranspModes['SOLID']
+ face.flag = Blender.NMesh.FaceTranspModes['SOLID']
+ #if surf.has_key('SIDE'):
+ # msh.faces[f].mode |= Blender.NMesh.FaceModes.TWOSIDE #set it anyway
+ if surf.has_key('TRAN') and mat.getAlpha()<1.0:
+ face.transp = Blender.NMesh.FaceTranspModes['ALPHA']
+
+ jj += 1
+
+ if not(uv_flag): #clear eventual UV data
+ msh.hasFaceUV(0)
+ msh.update(1,store_edge)
Blender.Redraw()
return obj, not_used_faces #return the created object
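my_create_mesh now keeps vertex_map as a dictionary: it first marks every vertex referenced by the current face group, then assigns each marked vertex its index in the new, compacted mesh. A small, self-contained illustration of that remapping with made-up data (not taken from the patch):

complete_vertlist = [(0,0,0), (1,0,0), (1,1,0), (0,1,0), (9,9,9)]
current_faces = [[0, 1, 2], [0, 2, 3]]      # vertex 4 is never referenced

vertex_map = {}
for f in current_faces:
    for v in f:
        vertex_map[v] = 1                   # pass 1: mark used vertexes
new_verts = []
for old_index in vertex_map.keys():
    vertex_map[old_index] = len(new_verts)  # pass 2: old index -> new index
    new_verts.append(complete_vertlist[old_index])
new_faces = [[vertex_map[v] for v in f] for f in current_faces]

Only the referenced vertexes end up in new_verts, which is why the importer can drop unused points without renumbering the whole file.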
@@ -1191,8 +1411,12 @@ def my_create_mesh(complete_vertlist, complete_facelist, current_facelist, objna
def set_subsurf(obj):
msh = obj.getData()
msh.setSubDivLevels([2, 2])
- msh.mode |= Blender.NMesh.Modes.SUBSURF
- msh.update(1)
+ #does not work any more in 2.40 alpha 2
+ #msh.mode |= Blender.NMesh.Modes.SUBSURF
+ if msh.edges != None:
+ msh.update(1,1)
+ else:
+ msh.update(1)
obj.makeDisplayList()
return
@@ -1212,7 +1436,7 @@ def obj_size_pos(obj):
# =========================
# === Create the object ===
# =========================
-def create_objects(objspec_list):
+def create_objects(clip_list, objspec_list, surf_list):
nf = len(objspec_list[3])
not_used_faces = range(nf)
ptag_dict = objspec_list[5]
@@ -1225,28 +1449,31 @@ def create_objects(objspec_list):
middlechar = endchar = "#"
for cur_tag in ptag_dict.keys():
if ptag_dict[cur_tag] != []:
- cur_obj, not_used_faces= my_create_mesh(objspec_list[2], objspec_list[3], ptag_dict[cur_tag], objspec_list[0][:9]+middlechar+cur_tag[:9], not_used_faces)
- if objspec_list[6] == 1:
- set_subsurf(cur_obj)
- obj_dict[cur_tag] = cur_obj
- obj_dim_dict[cur_tag] = obj_size_pos(cur_obj)
- obj_list.append(cur_obj)
+ cur_surf = get_surf(surf_list, cur_tag)
+ cur_obj, not_used_faces= my_create_mesh(clip_list, cur_surf, objspec_list, ptag_dict[cur_tag], objspec_list[0][:9]+middlechar+cur_tag[:9], not_used_faces)
+ #does not work any more in 2.40 alpha 2
+ #if objspec_list[6] == 1:
+ # set_subsurf(cur_obj)
+ if cur_obj != None:
+ obj_dict[cur_tag] = cur_obj
+ obj_dim_dict[cur_tag] = obj_size_pos(cur_obj)
+ obj_list.append(cur_obj)
#end loop on current group
#and what if some faces not used in any named PTAG? get rid of unused faces
- for ff in range(nf):
- tt = nf-1-ff #reverse order
- if not_used_faces[tt] == -1:
- not_used_faces.pop(tt)
+ orphans = []
+ for tt in not_used_faces:
+ if tt > -1: orphans.append(tt)
#end sweep on unused face list
- if not_used_faces != []:
- cur_obj, not_used_faces = my_create_mesh(objspec_list[2], objspec_list[3], not_used_faces, objspec_list[0][:9]+middlechar+"lone", [])
- #my_meshtools.create_mesh(objspec_list[2], not_used_faces, "_unk") #vert, faces, name
- #cur_obj = Blender.Object.GetSelected()[0]
- if objspec_list[6] == 1:
- set_subsurf(cur_obj)
- obj_dict["lone"] = cur_obj
- obj_dim_dict["lone"] = obj_size_pos(cur_obj)
- obj_list.append(cur_obj)
+ not_used_faces = None
+ if orphans != []:
+ cur_surf = get_surf(surf_list, "_Orphans")
+ cur_obj, not_used_faces = my_create_mesh(clip_list, cur_surf, objspec_list, orphans, objspec_list[0][:9]+middlechar+"Orphans", [])
+ if cur_obj != None:
+ if objspec_list[6] == 1:
+ set_subsurf(cur_obj)
+ obj_dict["_Orphans"] = cur_obj
+ obj_dim_dict["_Orphans"] = obj_size_pos(cur_obj)
+ obj_list.append(cur_obj)
objspec_list[1] = obj_dict
objspec_list[4] = obj_dim_dict
scene = Blender.Scene.getCurrent () # get the current scene
@@ -1304,7 +1531,7 @@ def lookup_imag(clip_list,ima_id):
# ===================================================
# === Create and assign image mapping to material ===
# ===================================================
-def create_blok(surf, mat, clip_list, dir_part, obj_size, obj_pos):
+def create_blok(surf, mat, clip_list, obj_size, obj_pos):
def output_size_ofs(size, pos, blok):
#just automate repetitive task
@@ -1351,21 +1578,10 @@ def create_blok(surf, mat, clip_list, dir_part, obj_size, obj_pos):
if ima == None:
tobj.pprint ( "***Block index image not within CLIP list? Quitting Block")
break #safety check (paranoia setting)
- #look for images
- img = load_image("",ima['NAME'])
- if img == None:
- tobj.pprint ( "***No image %s found: trying LWO file subdir" % ima['NAME'])
- img = load_image(dir_part,ima['BASENAME'])
- if img == None:
- tobj.pprint ( "***No image %s found in directory %s: trying Images subdir" % (ima['BASENAME'], dir_part))
- img = load_image(dir_part+Blender.sys.sep+"Images",ima['BASENAME'])
- if img == None:
- tobj.pprint ( "***No image %s found: trying alternate Images subdir" % ima['BASENAME'])
- img = load_image(dir_part+Blender.sys.sep+".."+Blender.sys.sep+"Images",ima['BASENAME'])
+ img = ima['g_IMG']
if img == None:
- tobj.pprint ( "***No image %s found: giving up" % ima['BASENAME'])
+ tobj.pprint ("***Failed to pre-allocate image %s found: giving up" % ima['BASENAME'])
break
- #lucky we are: we have an image
tname = str(ima['ID'])
if blok.has_key('CHAN'):
tname = tname + "+" + blok['CHAN']
@@ -1406,8 +1622,6 @@ def create_blok(surf, mat, clip_list, dir_part, obj_size, obj_pos):
if blok.has_key('CHAN'):
if blok['CHAN'] == 'COLR':
tobj.pprint ("!!!Set Texture -> MapTo -> Col = %.3f" % set_dvar)
- if set_blendmode == 0:
- surf['g_IM'] = img #do not set anything, just save image object for later assignment
if blok['CHAN'] == 'BUMP':
mapflag = Blender.Texture.MapTo.NOR
tobj.pprint ("!!!Set Texture -> MapTo -> Nor = %.3f" % set_dvar)
@@ -1462,15 +1676,16 @@ def create_blok(surf, mat, clip_list, dir_part, obj_size, obj_pos):
# ========================================
# === Create and assign a new material ===
# ========================================
-#def create_material(surf_list, ptag_dict, obj, clip_list, uv_dict, dir_part):
-def create_material(clip_list, surf_list, objspec, dir_part):
+#def update_material(surf_list, ptag_dict, obj, clip_list, uv_dict, dir_part):
+def update_material(clip_list, objspec, surf_list):
if (surf_list == []) or (objspec[5] == {}) or (objspec[1] == {}):
+ tobj.pprint( "something getting wrong in update_material: dump follows ...")
tobj.pprint( surf_list)
tobj.pprint( objspec[5])
tobj.pprint( objspec[1])
- tobj.pprint( "something getting wrong in create_material ...")
return
obj_dict = objspec[1]
+ all_faces = objspec[3]
obj_dim_dict = objspec[4]
ptag_dict = objspec[5]
uvcoords_dict = objspec[7]
@@ -1487,7 +1702,13 @@ def create_material(clip_list, surf_list, objspec, dir_part):
obj_size = obj_dim_dict[surf['NAME']][0]
obj_pos = obj_dim_dict[surf['NAME']][1]
tobj.pprint(surf)
- mat = Blender.Material.New(surf['NAME'])
+ #material was pre-allocated by read_surfs: just fetch it here
+ mat = surf['g_MAT']
+ if mat == None:
+ tobj.pprint ("Sorry, no pre-allocated material to update. Giving up for %s." % surf['NAME'])
+ break
+ #mat = Blender.Material.New(surf['NAME'])
+ #surf['g_MAT'] = mat
if surf.has_key('COLR'):
mat.rgbCol = surf['COLR']
if surf.has_key('LUMI'):
@@ -1500,8 +1721,12 @@ def create_material(clip_list, surf_list, objspec, dir_part):
mat.setRef(surf['DIFF']) #lwo [0.0, 1.0] - blender [0.0, 1.0]
if surf.has_key('REFL'):
mat.setRayMirr(surf['REFL']) #lwo [0.0, 1.0] - blender [0.0, 1.0]
- #mat.setMode('RAYMIRROR')
- mat.mode |= Blender.Material.Modes.RAYMIRROR
+ #mat.setMode('RAYMIRROR') NO! this will reset all the other modes
+ #mat.mode |= Blender.Material.Modes.RAYMIRROR No more usable?
+ mm = mat.getMode()
+ mm |= Blender.Material.Modes.RAYMIRROR
+ mm &= 327679 #4FFFF this is implementation dependent
+ mat.setMode(mm)
#WARNING translucency not implemented yet check 2.36 API
#if surf.has_key('TRNL'):
#
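Because Material.setMode('RAYMIRROR') would wipe every other mode flag, the patch reads the current bit field, ORs in the wanted flag, and masks the result down to the bits setMode() accepts (0x4FFFF, flagged in the comments as implementation dependent). A hypothetical helper capturing the pattern that is repeated below for RAYTRANSP as well:

def add_material_mode(mat, flag):
    mm = mat.getMode()
    mm |= flag        # add the new flag without clearing the others
    mm &= 0x4FFFF     # clip to the bits setMode() accepts (implementation dependent)
    mat.setMode(mm)

add_material_mode(mat, Blender.Material.Modes.RAYMIRROR)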
@@ -1512,55 +1737,27 @@ def create_material(clip_list, surf_list, objspec, dir_part):
mat.setHardness(glo)
if surf.has_key('TRAN'):
mat.setAlpha(1.0-surf['TRAN']) #lwo [0.0, 1.0] - blender [1.0, 0.0]
- mat.mode |= Blender.Material.Modes.RAYTRANSP
+ #mat.mode |= Blender.Material.Modes.RAYTRANSP
+ mm = mat.getMode()
+ mm |= Blender.Material.Modes.RAYTRANSP
+ mm &= 327679 #4FFFF this is implementation dependent
+ mat.setMode(mm)
if surf.has_key('RIND'):
s = surf['RIND']
if s < 1.0: s = 1.0
if s > 3.0: s = 3.0
mat.setIOR(s) #clipped to blender [1.0, 3.0]
- mat.mode |= Blender.Material.Modes.RAYTRANSP
+ #mat.mode |= Blender.Material.Modes.RAYTRANSP
+ mm = mat.getMode()
+ mm |= Blender.Material.Modes.RAYTRANSP
+ mm &= 327679 #4FFFF this is implementation dependent
+ mat.setMode(mm)
if surf.has_key('BLOK') and surf['BLOK'] != []:
#update the material according to texture.
- create_blok(surf, mat, clip_list, dir_part, obj_size, obj_pos)
+ create_blok(surf, mat, clip_list, obj_size, obj_pos)
#finished setting up the material
- #associate material to mesh
- msh = cur_obj.getData()
- mat_index = len(msh.getMaterials(1))
- msh.addMaterial(mat)
- msh.mode |= Blender.NMesh.Modes.AUTOSMOOTH #smooth it anyway
- msh.update(1)
- for f in range(len(msh.faces)):
- msh.faces[f].materialIndex = mat_index
- msh.faces[f].smooth = 1 #smooth it anyway
- msh.faces[f].mode |= Blender.NMesh.FaceModes.TWOSIDE #set it anyway
- msh.faces[f].transp = Blender.NMesh.FaceTranspModes['SOLID']
- msh.faces[f].flag = Blender.NMesh.FaceTranspModes['SOLID']
- if surf.has_key('SMAN'):
- #not allowed mixed mode mesh (all the mesh is smoothed and all with the same angle)
- #only one smoothing angle will be active! => take the max one
- s = int(surf['SMAN']/3.1415926535897932384626433832795*180.0) #lwo in radians - blender in degrees
- if msh.getMaxSmoothAngle() < s: msh.setMaxSmoothAngle(s)
- #if surf.has_key('SIDE'):
- # msh.faces[f].mode |= Blender.NMesh.FaceModes.TWOSIDE #set it anyway
- if surf.has_key('TRAN') and mat.getAlpha()<1.0:
- msh.faces[f].transp = Blender.NMesh.FaceTranspModes['ALPHA']
- if surf.has_key('UVNAME') and facesuv_dict.has_key(surf['UVNAME']):
- #assign uv-data
- msh.hasFaceUV(1)
- #WARNING every block could have its own uvmap set of coordinate. take only the first one
- facesuv_list = facesuv_dict[surf['UVNAME']]
- #print "facesuv_list: ",f , facelist[f]
- rev_face = revert(facesuv_list[facelist[f]])
- for vi in rev_face:
- msh.faces[f].uv.append(uvcoords_dict[surf['UVNAME']][vi])
- if surf.has_key('g_IM'):
- msh.faces[f].mode |= Blender.NMesh.FaceModes['TEX']
- msh.faces[f].image = surf['g_IM']
- #end loop over faces
- msh.update(1)
- mat_index += 1
- #end if exist faces ib this object belonging to surf
- #end loop on surfaces
+ #end if exist SURF
+ #end loop on materials (SURFs)
return
@@ -1578,13 +1775,13 @@ def read_faces_6(lwochunk):
if polygon_type == 'PTCH': subsurf = 1
i = 0
while(i < lwochunk.chunksize-4):
- if not i%100 and my_meshtools.show_progress:
+ if not i%1000 and my_meshtools.show_progress:
Blender.Window.DrawProgressBar(float(i)/lwochunk.chunksize, "Reading Faces")
facev = []
numfaceverts, = struct.unpack(">H", data.read(2))
i += 2
- for j in range(numfaceverts):
+ for j in xrange(numfaceverts):
index, index_size = read_vx(data)
i += index_size
facev.append(index)
@@ -1602,3 +1799,5 @@ def fs_callback(filename):
read(filename)
Blender.Window.FileSelector(fs_callback, "Import LWO")
+
+
diff --git a/release/scripts/nendo_export.py b/release/scripts/nendo_export.py
index 830c4303f56..ff9600769d0 100644
--- a/release/scripts/nendo_export.py
+++ b/release/scripts/nendo_export.py
@@ -53,12 +53,27 @@ field.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | September 25, 2001 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Nendo File Format (*.nendo) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import struct, time, sys, os
diff --git a/release/scripts/nendo_import.py b/release/scripts/nendo_import.py
index e7f3071bd99..e34101ad1e1 100644
--- a/release/scripts/nendo_import.py
+++ b/release/scripts/nendo_import.py
@@ -48,12 +48,27 @@ edges during the course of modeling.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | September 25, 2001 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Nendo File Format (*.nendo) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import struct, time, sys, os
diff --git a/release/scripts/off_export.py b/release/scripts/off_export.py
index 8465720478f..53bd9ef6a6f 100644
--- a/release/scripts/off_export.py
+++ b/release/scripts/off_export.py
@@ -28,17 +28,34 @@ Notes:<br>
Only exports a single selected mesh.
"""
+# $Id:
+#
# +---------------------------------------------------------+
# | Copyright (c) 2002 Anthony D'Agostino |
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | February 3, 2001 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Object File Format (*.off) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
#import time
diff --git a/release/scripts/off_import.py b/release/scripts/off_import.py
index cbaab483154..452a91ffc5f 100644
--- a/release/scripts/off_import.py
+++ b/release/scripts/off_import.py
@@ -29,18 +29,34 @@ Notes:<br>
UV Coordinate support has been added.
"""
-
+# $Id:
+#
# +---------------------------------------------------------+
# | Copyright (c) 2002 Anthony D'Agostino |
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | February 3, 2001 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Object File Format (*.off) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
#import time
diff --git a/release/scripts/radiosity_export.py b/release/scripts/radiosity_export.py
index 4f7766c3dd5..43ec7d9838c 100644
--- a/release/scripts/radiosity_export.py
+++ b/release/scripts/radiosity_export.py
@@ -46,12 +46,27 @@ specular highlights to the vertex colors.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | April 11, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Radiosity File Format (*.radio) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
#import time
diff --git a/release/scripts/radiosity_import.py b/release/scripts/radiosity_import.py
index 704a87becf5..d2d45568b99 100644
--- a/release/scripts/radiosity_import.py
+++ b/release/scripts/radiosity_import.py
@@ -31,12 +31,27 @@ file to open.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | April 11, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Radiosity File Format (*.radio) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
#import time
diff --git a/release/scripts/raw_export.py b/release/scripts/raw_export.py
index f52f0404e19..b8469adc5f4 100644
--- a/release/scripts/raw_export.py
+++ b/release/scripts/raw_export.py
@@ -25,7 +25,6 @@ Usage:<br>
Select meshes to be exported and run this script from "File->Export" menu.
"""
-
# $Id$
#
# +---------------------------------------------------------+
@@ -33,12 +32,27 @@ Usage:<br>
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | April 28, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write RAW Triangle File Format (*.raw) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import sys
#import time
diff --git a/release/scripts/raw_import.py b/release/scripts/raw_import.py
index c793791664f..a452bb4b40e 100644
--- a/release/scripts/raw_import.py
+++ b/release/scripts/raw_import.py
@@ -38,12 +38,27 @@ tolerance.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | April 28, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write RAW Triangle File Format (*.raw) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
#import time
diff --git a/release/scripts/save_theme.py b/release/scripts/save_theme.py
index 79205c0d478..db3a7226cf1 100644
--- a/release/scripts/save_theme.py
+++ b/release/scripts/save_theme.py
@@ -39,15 +39,34 @@ some information on it before sharing it with others.
# $Id$
#
# --------------------------------------------------------------------------
-# Copyright (C) 2004: Willian P. Germano, wgermano _at_ ig.com.br
+# Copyright (C) 2004: Willian P. Germano, wgermano _at_ ig com br
# --------------------------------------------------------------------------
-# Released under the Blender Artistic License (BAL):
-# http://download.blender.org/documentation/html/x21254.html
-#
# The scripts generated by this script are put under Public Domain by
# default, but you are free to edit the ones you generate with this script
# and change their license to another one of your choice.
# --------------------------------------------------------------------------
+#
+# --------------------------------------------------------------------------
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# Copyright (C) 2005: Willian P. Germano, wgermano _at_ ig.com.br
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+# --------------------------------------------------------------------------
import Blender
from Blender.Window import Theme, FileSelector
diff --git a/release/scripts/slp_import.py b/release/scripts/slp_import.py
index 246f84ec02b..4944c34045d 100644
--- a/release/scripts/slp_import.py
+++ b/release/scripts/slp_import.py
@@ -36,12 +36,27 @@ tolerance.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | May 3, 2004 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write SLP Triangle File Format (*.slp) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
#import time
diff --git a/release/scripts/truespace_export.py b/release/scripts/truespace_export.py
index af9aa2a5a23..a9f1688ae46 100644
--- a/release/scripts/truespace_export.py
+++ b/release/scripts/truespace_export.py
@@ -49,7 +49,6 @@ For Cameras: The matrix here gets a little confusing, and I'm not sure of
how to handle it.
"""
-
# $Id$
#
# +---------------------------------------------------------+
@@ -57,12 +56,27 @@ how to handle it.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | June 12, 2001 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Caligari trueSpace File Format (*.cob) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import struct, os, cStringIO, time
diff --git a/release/scripts/truespace_import.py b/release/scripts/truespace_import.py
index 40326612f3f..ff655e1614c 100644
--- a/release/scripts/truespace_import.py
+++ b/release/scripts/truespace_import.py
@@ -62,12 +62,27 @@ how to handle it.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | June 12, 2001 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Caligari trueSpace File Format (*.cob) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import struct, chunk, os, cStringIO, time
diff --git a/release/scripts/videoscape_export.py b/release/scripts/videoscape_export.py
index 37905239496..49160e8b48d 100644
--- a/release/scripts/videoscape_export.py
+++ b/release/scripts/videoscape_export.py
@@ -43,7 +43,6 @@ specular highlights to the vertex colors.
5. The Videoscape format also allows vertex colors to be specified.
"""
-
# $Id$
#
# +---------------------------------------------------------+
@@ -51,12 +50,27 @@ specular highlights to the vertex colors.
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | June 5, 2001 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Write Videoscape File Format (*.obj NOT WAVEFRONT OBJ) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
#import time
diff --git a/release/scripts/wings_export.py b/release/scripts/wings_export.py
index c262f3faabd..8477f9e076a 100644
--- a/release/scripts/wings_export.py
+++ b/release/scripts/wings_export.py
@@ -50,12 +50,27 @@ Notes:<br>
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | Feb 19, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
# | Read and write Wings3D File Format (*.wings) |
# +---------------------------------------------------------+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import struct, time, sys, os, zlib, cStringIO
diff --git a/release/scripts/wings_import.py b/release/scripts/wings_import.py
index 70f124b3bac..8bba6f52de7 100644
--- a/release/scripts/wings_import.py
+++ b/release/scripts/wings_import.py
@@ -11,7 +11,7 @@ __author__ = "Anthony D'Agostino (Scorpius)"
__url__ = ("blender", "elysiun",
"Author's homepage, http://www.redrival.com/scorpius",
"Wings 3D, http://www.wings3d.com")
-__version__ = "Part of IOSuite 0.5"
+__version__ = "Update on version from IOSuite 0.5"
__bpydoc__ = """\
This script imports Wings3D files to Blender.
@@ -37,7 +37,8 @@ fanning algorithm. Convex polygons (i.e., shaped like the letter "U")
require a different algorithm, and will be triagulated incorrectly.
Notes:<br>
- Last tested with Wings 3D 0.98.25 & Blender 2.35a.
+ Last tested with Wings 3D 0.98.25 & Blender 2.35a.<br>
+ This version has improvements made by Adam Saltsman (AdamAtomic) and Toastie.
"""
# $Id$
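The docstring above notes that faces are triangulated with a simple fanning algorithm, which is only correct for convex polygons. A two-line sketch of what fanning means, as an illustration rather than the script's exact code:

def fan_triangulate(face):
    # split [v0, v1, v2, v3, ...] into triangles that all share v0
    return [[face[0], face[i], face[i + 1]] for i in range(1, len(face) - 1)]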
@@ -47,15 +48,29 @@ Notes:<br>
# | http://www.redrival.com/scorpius |
# | scorpius@netzero.com |
# | Feb 19, 2002 |
-# | Released under the Blender Artistic Licence (BAL) |
-# | Import Export Suite v0.5 |
-# +---------------------------------------------------------+
-# | Read and write Wings3D File Format (*.wings) |
-# +---------------------------------------------------------+
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
import Blender, meshtools
import struct, time, sys, os, zlib, cStringIO
-
+
# ==============================================
# === Read The 'Header' Common To All Chunks ===
# ==============================================
@@ -79,26 +94,28 @@ def read_mode(data):
# === Read Hard Edges ===
# =======================
def read_hardedges(data):
+ hardedge_table = {} # hard edges table
tag = data.read(1)
if tag == '\x6A':
- return # There are no hard edges
+ return hardedge_table # There are no hard edges
elif tag == '\x6B':
numhardedges, = struct.unpack(">H", data.read(2))
- print "numhardedges:", numhardedges
+ #print "numhardedges:", numhardedges
for i in range(numhardedges):
- data.read(1)
+ hardedge_table[i] = struct.unpack(">B", data.read(1))[0]
elif tag == '\x6C':
numhardedges, = struct.unpack(">L", data.read(4))
- print "numhardedges:", numhardedges
+ #print "numhardedges:", numhardedges
for i in range(numhardedges):
misc = data.read(1)
if misc == '\x61': # next value is stored as a byte
- data.read(1)
+ hardedge_table[i] = struct.unpack(">B", data.read(1))[0]
elif misc == '\x62': # next value is stored as a long
- data.read(4)
+ hardedge_table[i] = struct.unpack(">L", data.read(4))[0]
data.read(1) # 6A
else:
print tag
+ return hardedge_table
# ==================
# === Read Edges ===
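read_hardedges now returns the table instead of discarding it; the one-byte tags it switches on (6A = empty list, 6B = short byte list, 6C = long list whose elements carry their own 61/62 size tag) appear to be the tags of the serialized term format Wings 3D writes. A small sketch of reading one such tagged integer, assuming the struct module already imported by the script:

def read_tagged_int(data):
    # values are either a single byte (tag 0x61) or a big-endian 32-bit
    # integer (tag 0x62), mirroring the branches in read_hardedges above
    tag = data.read(1)
    if tag == '\x61':
        return struct.unpack(">B", data.read(1))[0]
    elif tag == '\x62':
        return struct.unpack(">L", data.read(4))[0]
    raise ValueError("unexpected tag %r" % tag)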
@@ -134,6 +151,7 @@ def read_edges(data):
# === Read Faces ===
# ==================
def read_faces(data):
+ mat_table = {} #list of faces and material names
misc, numfaces = struct.unpack(">BL", data.read(5))
for i in range(numfaces):
if not i%100 and meshtools.show_progress: Blender.Window.DrawProgressBar(float(i)/numfaces, "Reading Faces")
@@ -141,10 +159,10 @@ def read_faces(data):
data.read(4)
read_chunkheader(data)
misc, namelen = struct.unpack(">BH", data.read(3))
- materialname = data.read(namelen)
- data.read(1)
+ mat_table[i] = data.read(namelen)
+ data.read(1) # 6A?
data.read(1) # 6A
- return numfaces
+ return mat_table
# ==================
# === Read Verts ===
@@ -169,8 +187,10 @@ def make_face_table(edge_table): # For Wings
for i in range(len(edge_table)):
Lf = edge_table[i][2]
Rf = edge_table[i][3]
- face_table[Lf] = i
- face_table[Rf] = i
+ if Lf >= 0:
+ face_table[Lf] = i
+ if Rf >= 0:
+ face_table[Rf] = i
return face_table
# =======================
@@ -198,14 +218,17 @@ def make_faces(edge_table): # For Wings
if i == edge_table[current_edge][3]:
next_edge = edge_table[current_edge][7] # Right successor edge
next_vert = edge_table[current_edge][0]
- else:
+ elif i == edge_table[current_edge][2]:
next_edge = edge_table[current_edge][5] # Left successor edge
next_vert = edge_table[current_edge][1]
+ else:
+ break
face_verts.append(next_vert)
current_edge = next_edge
if current_edge == face_table[i]: break
- face_verts.reverse()
- faces.append(face_verts)
+ if len(face_verts) > 0:
+ face_verts.reverse()
+ faces.append(face_verts)
return faces
# =======================
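
[Editor's note] For reference, the two hunks above amount to the following traversal: make_face_table records one incident edge per face (now skipping the -1 sides left behind by the hard-edge split further down), and make_faces walks left or right successor edges from that edge until it returns to the start. Below is a self-contained sketch on a hypothetical one-quad table; each entry is [Sv, Ev, Lf, Rf, Lp, Ls, Rp, Rs] as in the debug comment later in this diff.

	def make_face_table(edge_table):
		face_table = {}
		for i in range(len(edge_table)):
			Lf = edge_table[i][2]
			Rf = edge_table[i][3]
			if Lf >= 0:                    # -1 marks a detached (open) side
				face_table[Lf] = i
			if Rf >= 0:
				face_table[Rf] = i
		return face_table

	def make_faces(edge_table):
		face_table = make_face_table(edge_table)
		faces = []
		for i in range(len(face_table)):
			face_verts = []
			current_edge = face_table[i]
			while 1:
				if i == edge_table[current_edge][3]:        # i is the right face
					next_edge = edge_table[current_edge][7] # right successor
					next_vert = edge_table[current_edge][0]
				elif i == edge_table[current_edge][2]:      # i is the left face
					next_edge = edge_table[current_edge][5] # left successor
					next_vert = edge_table[current_edge][1]
				else:                                       # neither side owns it
					break
				face_verts.append(next_vert)
				current_edge = next_edge
				if current_edge == face_table[i]: break
			if len(face_verts) > 0:
				face_verts.reverse()
				faces.append(face_verts)
		return faces

	# Hypothetical quad (face 0) with verts 0..3: the quad is the left face of
	# every edge, the right side is open (-1), each left successor is the next edge.
	quad_edges = {
		0: [0, 1, 0, -1, 3, 1, -1, -1],
		1: [1, 2, 0, -1, 0, 2, -1, -1],
		2: [2, 3, 0, -1, 1, 3, -1, -1],
		3: [3, 0, 0, -1, 2, 0, -1, -1],
	}
	print make_faces(quad_edges)            # [[3, 2, 1, 0]]
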
@@ -223,7 +246,7 @@ def dump_wings(filename):
file.close()
data = zlib.decompress(data)
if dsize != len(data): print "ERROR: uncompressed size does not match."
- data = cStringIO.StringIO(data)
+ data = cStringIO.StringIO(data)
print "header:", header
print read_chunkheader(data) # === wings chunk ===
data.read(4) # misc bytes
@@ -236,9 +259,10 @@ def dump_wings(filename):
objname = data.read(namelen)
print read_chunkheader(data) # === winged chunk ===
edge_table = read_edges(data)
- numfaces = read_faces(data)
+ mat_table = read_faces(data)
+ numfaces = len(mat_table)
verts = read_verts(data)
- read_hardedges(data)
+ hardedge_table = read_hardedges(data)
face_table = {} # contains an incident edge
vert_table = {} # contains an incident edge
@@ -255,25 +279,26 @@ def dump_wings(filename):
print
print "Ä"*79
print "edge_table:"
- pprint.pprint(edge_table)
+ #pprint.pprint(edge_table)
#for i in range(len(edge_table)): print "%2d" % (i), edge_table[i]
print
print "face_table:"
- pprint.pprint(face_table)
+ #pprint.pprint(face_table)
#for i in range(len(face_table)): print "%2d %2d" % (i, face_table[i])
print
print "vert_table:"
- pprint.pprint(vert_table)
+ #pprint.pprint(vert_table)
#for i in range(len(vert_table)): print "%2d %2d" % (i, vert_table[i])
file.close()
end = time.clock()
print '\a\r',
- sys.stderr.write("\nDone in %.2f %s" % (end-start, "seconds"))
+ sys.stderr.write("\nDone in %.2f %s\a\r" % (end-start, "seconds"))
# =========================
# === Read Wings Format ===
# =========================
def read(filename):
+
start = time.clock()
file = open(filename, "rb")
header = file.read(15)
@@ -299,9 +324,113 @@ def read(filename):
objname = data.read(namelen)
read_chunkheader(data) # winged chunk
edge_table = read_edges(data)
- numfaces = read_faces(data)
+ mat_table = read_faces(data)
+ numfaces = len(mat_table)
verts = read_verts(data)
- read_hardedges(data)
+ hardedge_table = read_hardedges(data)
+
+ # Manually split hard edges
+	# TODO: Handle the case where a face has two or more hard edges
+ duped = {}
+ processed = []
+ cleanup = []
+ oldedgecount = len(edge_table)
+ for i in range(len(verts)):
+ duped[i] = -1
+ for j in range(len(hardedge_table)):
+ hardedge = hardedge_table[j]
+ oldedge = edge_table[hardedge]
+ newedge = [] # Copy old edge into a new list
+ for k in range(len(oldedge)):
+ newedge.append(oldedge[k])
+
+ # Duplicate start vert if not duped already
+ sv = newedge[0]
+ if duped[sv] == -1:
+ verts.append(verts[sv])
+ duped[sv] = len(verts)-1
+ newedge[0] = duped[sv]
+
+ # Duplicate end vert if not duped already
+ ev = newedge[1]
+ if duped[ev] == -1:
+ verts.append(verts[ev])
+ duped[ev] = len(verts)-1
+ newedge[1] = duped[ev]
+
+ # Decide which way to cut the edge
+ flip = 0
+ for v in range(len(processed)):
+ if processed[v][0] == oldedge[0]:
+ flip = 1
+ elif processed[v][1] == oldedge[1]:
+ flip = 1
+ if flip == 0:
+ of = 3
+ oe1 = 6
+ oe2 = 7
+ nf = 2
+ ne1 = 4
+ ne2 = 5
+ else:
+ of = 2
+ oe1 = 4
+ oe2 = 5
+ nf = 3
+ ne1 = 6
+ ne2 = 7
+
+ # Fix up side-specific edge fields
+ oldedge[of] = -1
+ oldedge[oe1] = -1
+ oldedge[oe2] = -1
+ newedge[nf] = -1
+ newedge[ne1] = -1
+ newedge[ne2] = -1
+
+ # Store new edge's neighbors for cleanup later
+ cleanup.append(edge_table[newedge[oe1]])
+ cleanup.append(edge_table[newedge[oe2]])
+
+ #DEBUG
+ # Sv Ev | Lf Rf | Lp Ls | Rp Rs
+ #print "Old Edge:",hardedge,oldedge
+ #print "New Edge:",len(edge_table),newedge
+
+ # Add this new edge to the edge table
+ edge_table[len(edge_table)] = newedge
+ if flip == 0:
+ processed.append(oldedge) # mark it off as processed
+
+ # Cycle through cleanup list and fix it up
+ for c in range(len(cleanup)):
+ cleanupedge = cleanup[c]
+
+ # Fix up their verts in case they were duped
+ sv = cleanupedge[0]
+ if sv < len(duped):
+ if duped[sv] >= 0:
+ cleanupedge[0] = duped[sv]
+ ev = cleanupedge[1]
+ if ev < len(duped):
+ if duped[ev] >= 0:
+ cleanupedge[1] = duped[ev]
+
+ # Fix up edge info (in case a hard edge was replaced with a new one)
+ edgecount = c/2
+ hardedge = hardedge_table[edgecount] # look up what edge we were replacing
+ newedgenum = oldedgecount+edgecount # calculate new edge's index
+ if cleanupedge[4] == hardedge:
+ cleanupedge[4] = newedgenum
+ if cleanupedge[5] == hardedge:
+ cleanupedge[5] = newedgenum
+ if cleanupedge[6] == hardedge:
+ cleanupedge[6] = newedgenum
+ if cleanupedge[7] == hardedge:
+ cleanupedge[7] = newedgenum
+
+ #for i in range(len(edge_table)): print "%2d" % (i), edge_table[i]
+
read_mode(data)
faces = make_faces(edge_table)
message += "%s %8s %8s %8s\n" % (objname.ljust(15), len(faces), len(edge_table), len(verts))
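
[Editor's note] In short, the new block above splits each hard edge by hand: it clones the edge's two vertices (at most once per vertex, tracked in duped), copies the edge, marks one side of the old/new pair as detached with -1, and finally patches the neighbouring edges collected in cleanup so they reference the cloned vertices and the new edge index. A minimal sketch of just the duplicate-once vertex bookkeeping follows, on made-up data (the coordinates and edge pairs are illustrative only):

	# Sketch: duped[v] remembers the clone of vertex v, so a vertex shared by
	# several hard edges is duplicated exactly once.
	verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)]
	hard_edges = [(0, 1), (1, 2)]          # hypothetical (start, end) vert pairs

	duped = {}
	for i in range(len(verts)):
		duped[i] = -1

	split_edges = []
	for sv, ev in hard_edges:
		if duped[sv] == -1:                # clone the start vert only once
			verts.append(verts[sv])
			duped[sv] = len(verts) - 1
		if duped[ev] == -1:                # clone the end vert only once
			verts.append(verts[ev])
			duped[ev] = len(verts) - 1
		split_edges.append((duped[sv], duped[ev]))

	print len(verts)                       # 6: vertex 1 is shared but cloned once
	print split_edges                      # [(3, 4), (4, 5)]
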