git.blender.org/blender.git

Diffstat (limited to 'release/scripts/op/io_scene_x3d')
-rw-r--r--  release/scripts/op/io_scene_x3d/__init__.py   |   84 -
-rw-r--r--  release/scripts/op/io_scene_x3d/export_x3d.py |  847 -
-rw-r--r--  release/scripts/op/io_scene_x3d/import_x3d.py | 2658 -
3 files changed, 0 insertions(+), 3589 deletions(-)
diff --git a/release/scripts/op/io_scene_x3d/__init__.py b/release/scripts/op/io_scene_x3d/__init__.py
deleted file mode 100644
index 8df45e3cae3..00000000000
--- a/release/scripts/op/io_scene_x3d/__init__.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# ##### BEGIN GPL LICENSE BLOCK #####
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-# ##### END GPL LICENSE BLOCK #####
-
-# <pep8 compliant>
-
-# To support reload properly, try to access a package var, if it's there, reload everything
-if "bpy" in locals():
- import imp
- if "export_x3d" in locals():
- imp.reload(export_x3d)
-
-
-import bpy
-from bpy.props import *
-from io_utils import ImportHelper, ExportHelper
-
-
-class ImportX3D(bpy.types.Operator, ImportHelper):
- '''Import an X3D or VRML file'''
- bl_idname = "import_scene.x3d"
- bl_label = "Import X3D/VRML"
-
- filename_ext = ".x3d"
- filter_glob = StringProperty(default="*.x3d;*.wrl", options={'HIDDEN'})
-
- def execute(self, context):
- from . import import_x3d
- return import_x3d.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
-
-
-class ExportX3D(bpy.types.Operator, ExportHelper):
- '''Export selection to Extensible 3D file (.x3d)'''
- bl_idname = "export_scene.x3d"
- bl_label = 'Export X3D'
-
- filename_ext = ".x3d"
- filter_glob = StringProperty(default="*.x3d", options={'HIDDEN'})
-
- use_apply_modifiers = BoolProperty(name="Apply Modifiers", description="Use transformed mesh data from each object", default=True)
- use_triangulate = BoolProperty(name="Triangulate", description="Triangulate quads.", default=False)
- use_compress = BoolProperty(name="Compress", description="GZip the resulting file, requires a full python install", default=False)
-
- def execute(self, context):
- from . import export_x3d
- return export_x3d.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
-
-
-def menu_func_import(self, context):
- self.layout.operator(ImportX3D.bl_idname, text="X3D Extensible 3D (.x3d/.wrl)")
-
-
-def menu_func_export(self, context):
- self.layout.operator(ExportX3D.bl_idname, text="X3D Extensible 3D (.x3d)")
-
-
-def register():
- bpy.types.INFO_MT_file_import.append(menu_func_import)
- bpy.types.INFO_MT_file_export.append(menu_func_export)
-
-
-def unregister():
- bpy.types.INFO_MT_file_import.remove(menu_func_import)
- bpy.types.INFO_MT_file_export.remove(menu_func_export)
-
-# NOTES
-# - blender version is hardcoded
-
-if __name__ == "__main__":
- register()
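
A minimal usage sketch for the two operators registered above, assuming the add-on has been registered (i.e. register() has run); the file paths are placeholders and not taken from the original code:

    import bpy

    # Both operators are reachable through bpy.ops under their bl_idname values;
    # the keyword arguments map to the properties declared on the operator classes.
    bpy.ops.import_scene.x3d(filepath="/tmp/example.wrl")
    bpy.ops.export_scene.x3d(filepath="/tmp/example.x3d",
                             use_apply_modifiers=True,
                             use_triangulate=False,
                             use_compress=False)
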
diff --git a/release/scripts/op/io_scene_x3d/export_x3d.py b/release/scripts/op/io_scene_x3d/export_x3d.py
deleted file mode 100644
index c420b0cddd8..00000000000
--- a/release/scripts/op/io_scene_x3d/export_x3d.py
+++ /dev/null
@@ -1,847 +0,0 @@
-# ##### BEGIN GPL LICENSE BLOCK #####
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-# ##### END GPL LICENSE BLOCK #####
-
-# <pep8 compliant>
-
-# Contributors: bart:neeneenee*de, http://www.neeneenee.de/vrml, Campbell Barton
-
-"""
-This script exports to X3D format.
-
-Usage:
-Run this script from "File->Export" menu. A pop-up will ask whether you
-want to export only selected or all relevant objects.
-
-Known issues:
- Doesn't handle multiple materials (don't use material indices);<br>
- Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);<br>
- Can't get the texture array associated with a material (not the UV ones);
-"""
-
-import math
-import os
-
-import bpy
-import mathutils
-
-from io_utils import create_derived_objects, free_derived_objects
-
-
-def round_color(col, cp):
- return tuple([round(max(min(c, 1.0), 0.0), cp) for c in col])
-
-
-def matrix_direction(mtx):
- return (mathutils.Vector((0.0, 0.0, -1.0)) * mtx.rotation_part()).normalize()[:]
-
-
-##########################################################
-# Functions for writing output file
-##########################################################
-
-
-class x3d_class:
-
- def __init__(self, filepath):
- #--- public you can change these ---
- self.proto = 1
- self.billnode = 0
- self.halonode = 0
- self.collnode = 0
- self.verbose = 2 # level of verbosity in console 0-none, 1-some, 2-most
- self.cp = 3 # decimals for material color values 0.000 - 1.000
- self.vp = 3 # decimals for vertex coordinate values 0.000 - n.000
- self.tp = 3 # decimals for texture coordinate values 0.000 - 1.000
- self.it = 3
-
- self.global_matrix = mathutils.Matrix.Rotation(-(math.pi / 2.0), 4, 'X')
-
- #--- class private don't touch ---
- self.indentLevel = 0 # keeps track of current indenting
- self.filepath = filepath
- self.file = None
- if filepath.lower().endswith('.x3dz'):
- try:
- import gzip
- self.file = gzip.open(filepath, "w")
- except:
- print("failed to import compression modules, exporting uncompressed")
- self.filepath = filepath[:-1] # remove trailing z
-
- if self.file is None:
- self.file = open(self.filepath, "w", encoding='utf8')
-
- self.bNav = 0
- self.nodeID = 0
- self.namesReserved = ("Anchor", "Appearance", "Arc2D", "ArcClose2D", "AudioClip", "Background", "Billboard",
- "BooleanFilter", "BooleanSequencer", "BooleanToggle", "BooleanTrigger", "Box", "Circle2D",
- "Collision", "Color", "ColorInterpolator", "ColorRGBA", "component", "Cone", "connect",
- "Contour2D", "ContourPolyline2D", "Coordinate", "CoordinateDouble", "CoordinateInterpolator",
- "CoordinateInterpolator2D", "Cylinder", "CylinderSensor", "DirectionalLight", "Disk2D",
- "ElevationGrid", "EspduTransform", "EXPORT", "ExternProtoDeclare", "Extrusion", "field",
- "fieldValue", "FillProperties", "Fog", "FontStyle", "GeoCoordinate", "GeoElevationGrid",
- "GeoLocationLocation", "GeoLOD", "GeoMetadata", "GeoOrigin", "GeoPositionInterpolator",
- "GeoTouchSensor", "GeoViewpoint", "Group", "HAnimDisplacer", "HAnimHumanoid", "HAnimJoint",
- "HAnimSegment", "HAnimSite", "head", "ImageTexture", "IMPORT", "IndexedFaceSet",
- "IndexedLineSet", "IndexedTriangleFanSet", "IndexedTriangleSet", "IndexedTriangleStripSet",
- "Inline", "IntegerSequencer", "IntegerTrigger", "IS", "KeySensor", "LineProperties", "LineSet",
- "LoadSensor", "LOD", "Material", "meta", "MetadataDouble", "MetadataFloat", "MetadataInteger",
- "MetadataSet", "MetadataString", "MovieTexture", "MultiTexture", "MultiTextureCoordinate",
- "MultiTextureTransform", "NavigationInfo", "Normal", "NormalInterpolator", "NurbsCurve",
- "NurbsCurve2D", "NurbsOrientationInterpolator", "NurbsPatchSurface",
- "NurbsPositionInterpolator", "NurbsSet", "NurbsSurfaceInterpolator", "NurbsSweptSurface",
- "NurbsSwungSurface", "NurbsTextureCoordinate", "NurbsTrimmedSurface", "OrientationInterpolator",
- "PixelTexture", "PlaneSensor", "PointLight", "PointSet", "Polyline2D", "Polypoint2D",
- "PositionInterpolator", "PositionInterpolator2D", "ProtoBody", "ProtoDeclare", "ProtoInstance",
- "ProtoInterface", "ProximitySensor", "ReceiverPdu", "Rectangle2D", "ROUTE", "ScalarInterpolator",
- "Scene", "Script", "Shape", "SignalPdu", "Sound", "Sphere", "SphereSensor", "SpotLight", "StaticGroup",
- "StringSensor", "Switch", "Text", "TextureBackground", "TextureCoordinate", "TextureCoordinateGenerator",
- "TextureTransform", "TimeSensor", "TimeTrigger", "TouchSensor", "Transform", "TransmitterPdu",
- "TriangleFanSet", "TriangleSet", "TriangleSet2D", "TriangleStripSet", "Viewpoint", "VisibilitySensor",
- "WorldInfo", "X3D", "XvlShell", "VertexShader", "FragmentShader", "MultiShaderAppearance", "ShaderAppearance")
-
- self.namesFog = ("", "LINEAR", "EXPONENTIAL", "")
-
-##########################################################
-# Writing nodes routines
-##########################################################
-
- def writeHeader(self):
- #bfile = sys.expandpath( Blender.Get('filepath') ).replace('<', '&lt').replace('>', '&gt')
- bfile = repr(os.path.basename(self.filepath).replace('<', '&lt').replace('>', '&gt'))[1:-1] # use outfile name
- self.file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
- self.file.write("<!DOCTYPE X3D PUBLIC \"ISO//Web3D//DTD X3D 3.0//EN\" \"http://www.web3d.org/specifications/x3d-3.0.dtd\">\n")
- self.file.write("<X3D version=\"3.0\" profile=\"Immersive\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema-instance\" xsd:noNamespaceSchemaLocation=\"http://www.web3d.org/specifications/x3d-3.0.xsd\">\n")
- self.file.write("<head>\n")
- self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % bfile)
- # self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % sys.basename(bfile))
- self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % bpy.app.version_string)
- # self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % Blender.Get('version'))
- self.file.write("\t<meta name=\"translator\" content=\"X3D exporter v1.55 (2006/01/17)\" />\n")
- self.file.write("</head>\n")
- self.file.write("<Scene>\n")
-
- # This functionality is poorly defined, disabling for now - campbell
- '''
- def writeScript(self):
- textEditor = Blender.Text.Get()
- alltext = len(textEditor)
- for i in xrange(alltext):
- nametext = textEditor[i].name
- nlines = textEditor[i].getNLines()
- if (self.proto == 1):
- if (nametext == "proto" or nametext == "proto.js" or nametext == "proto.txt") and (nlines != None):
- nalllines = len(textEditor[i].asLines())
- alllines = textEditor[i].asLines()
- for j in xrange(nalllines):
- self.write_indented(alllines[j] + "\n")
- elif (self.proto == 0):
- if (nametext == "route" or nametext == "route.js" or nametext == "route.txt") and (nlines != None):
- nalllines = len(textEditor[i].asLines())
- alllines = textEditor[i].asLines()
- for j in xrange(nalllines):
- self.write_indented(alllines[j] + "\n")
- self.write_indented("\n")
- '''
-
- def writeViewpoint(self, ob, mat, scene):
- loc, quat, scale = mat.decompose()
- self.file.write("<Viewpoint DEF=\"%s\" " % (self.cleanStr(ob.name)))
- self.file.write("description=\"%s\" " % (ob.name))
- self.file.write("centerOfRotation=\"0 0 0\" ")
- self.file.write("position=\"%3.2f %3.2f %3.2f\" " % loc[:])
- self.file.write("orientation=\"%3.2f %3.2f %3.2f %3.2f\" " % (quat.axis[:] + (quat.angle, )))
- self.file.write("fieldOfView=\"%.3f\" " % ob.data.angle)
- self.file.write(" />\n\n")
-
- def writeFog(self, world):
- if world:
- mtype = world.mist_settings.falloff
- mparam = world.mist_settings
- else:
- return
- if (mtype == 'LINEAR' or mtype == 'INVERSE_QUADRATIC'):
- mtype = 1 if mtype == 'LINEAR' else 2
- # if (mtype == 1 or mtype == 2):
- self.file.write("<Fog fogType=\"%s\" " % self.namesFog[mtype])
- self.file.write("color=\"%s %s %s\" " % round_color(world.horizon_color, self.cp))
- self.file.write("visibilityRange=\"%s\" />\n\n" % round(mparam[2], self.cp))
- else:
- return
-
- def writeNavigationInfo(self, scene):
- self.file.write('<NavigationInfo headlight="false" visibilityLimit="0.0" type=\'"EXAMINE","ANY"\' avatarSize="0.25, 1.75, 0.75" />\n')
-
- def writeSpotLight(self, ob, mtx, lamp, world):
- safeName = self.cleanStr(ob.name)
- if world:
- ambi = world.ambient_color
- ambientIntensity = ((ambi[0] + ambi[1] + ambi[2]) / 3.0) / 2.5
- del ambi
- else:
- ambientIntensity = 0.0
-
- # compute cutoff and beamwidth
- intensity = min(lamp.energy / 1.75, 1.0)
- beamWidth = lamp.spot_size * 0.37
- # beamWidth=((lamp.spotSize*math.pi)/180.0)*.37
- cutOffAngle = beamWidth * 1.3
-
- dx, dy, dz = matrix_direction(mtx)
-
- location = mtx.translation_part()
-
- radius = lamp.distance * math.cos(beamWidth)
- # radius = lamp.dist*math.cos(beamWidth)
- self.file.write("<SpotLight DEF=\"%s\" " % safeName)
- self.file.write("radius=\"%s\" " % (round(radius, self.cp)))
- self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
- self.file.write("intensity=\"%s\" " % (round(intensity, self.cp)))
- self.file.write("color=\"%s %s %s\" " % round_color(lamp.color, self.cp))
- self.file.write("beamWidth=\"%s\" " % (round(beamWidth, self.cp)))
- self.file.write("cutOffAngle=\"%s\" " % (round(cutOffAngle, self.cp)))
- self.file.write("direction=\"%s %s %s\" " % (round(dx, 3), round(dy, 3), round(dz, 3)))
- self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0], 3), round(location[1], 3), round(location[2], 3)))
-
- def writeDirectionalLight(self, ob, mtx, lamp, world):
- safeName = self.cleanStr(ob.name)
- if world:
- ambi = world.ambient_color
- # ambi = world.amb
- ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3.0) / 2.5
- else:
- ambi = 0
- ambientIntensity = 0
-
- intensity = min(lamp.energy / 1.75, 1.0)
- dx, dy, dz = matrix_direction(mtx)
- self.file.write("<DirectionalLight DEF=\"%s\" " % safeName)
- self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
- self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0], self.cp), round(lamp.color[1], self.cp), round(lamp.color[2], self.cp)))
- self.file.write("intensity=\"%s\" " % (round(intensity, self.cp)))
- self.file.write("direction=\"%s %s %s\" />\n\n" % (round(dx, 4), round(dy, 4), round(dz, 4)))
-
- def writePointLight(self, ob, mtx, lamp, world):
- safeName = self.cleanStr(ob.name)
- if world:
- ambi = world.ambient_color
- # ambi = world.amb
- ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3) / 2.5
- else:
- ambi = 0
- ambientIntensity = 0
-
- location = mtx.translation_part()
-
- self.file.write("<PointLight DEF=\"%s\" " % safeName)
- self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
- self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0], self.cp), round(lamp.color[1], self.cp), round(lamp.color[2], self.cp)))
-
- self.file.write("intensity=\"%s\" " % (round(min(lamp.energy / 1.75, 1.0), self.cp)))
- self.file.write("radius=\"%s\" " % lamp.distance)
- self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0], 3), round(location[1], 3), round(location[2], 3)))
-
- def secureName(self, name):
- name = name + str(self.nodeID)
- self.nodeID = self.nodeID + 1
- if len(name) <= 3:
- newname = "_" + str(self.nodeID)
- return "%s" % (newname)
- else:
- for bad in ('"', '#', "'", ', ', '.', '[', '\\', ']', '{', '}'):
- name = name.replace(bad, "_")
- if name in self.namesReserved:
- newname = name[0:3] + "_" + str(self.nodeID)
- return "%s" % (newname)
- elif name[0].isdigit():
- newname = "_" + name + str(self.nodeID)
- return "%s" % (newname)
- else:
- newname = name
- return "%s" % (newname)
-
- def writeIndexedFaceSet(self, ob, mesh, mtx, world, EXPORT_TRI=False):
- fw = self.file.write
- mesh_name_x3d = self.cleanStr(ob.name)
-
- if not mesh.faces:
- return
-
- mode = []
- # mode = 0
- if mesh.uv_textures.active:
- # if mesh.faceUV:
- for face in mesh.uv_textures.active.data:
- # for face in mesh.faces:
- if face.use_halo and 'HALO' not in mode:
- mode += ['HALO']
- if face.use_billboard and 'BILLBOARD' not in mode:
- mode += ['BILLBOARD']
- if face.use_object_color and 'OBJECT_COLOR' not in mode:
- mode += ['OBJECT_COLOR']
- if face.use_collision and 'COLLISION' not in mode:
- mode += ['COLLISION']
- # mode |= face.mode
-
- if 'HALO' in mode and self.halonode == 0:
- # if mode & Mesh.FaceModes.HALO and self.halonode == 0:
- self.write_indented("<Billboard axisOfRotation=\"0 0 0\">\n", 1)
- self.halonode = 1
- elif 'BILLBOARD' in mode and self.billnode == 0:
- # elif mode & Mesh.FaceModes.BILLBOARD and self.billnode == 0:
- self.write_indented("<Billboard axisOfRotation=\"0 1 0\">\n", 1)
- self.billnode = 1
- elif 'COLLISION' not in mode and self.collnode == 0:
- # elif not mode & Mesh.FaceModes.DYNAMIC and self.collnode == 0:
- self.write_indented("<Collision enabled=\"false\">\n", 1)
- self.collnode = 1
-
- loc, quat, sca = mtx.decompose()
-
- self.write_indented("<Transform DEF=\"%s\" " % mesh_name_x3d, 1)
- fw("translation=\"%.6f %.6f %.6f\" " % loc[:])
- fw("scale=\"%.6f %.6f %.6f\" " % sca[:])
- fw("rotation=\"%.6f %.6f %.6f %.6f\" " % (quat.axis[:] + (quat.angle, )))
- fw(">\n")
-
- if mesh.tag:
- self.write_indented("<Group USE=\"G_%s\" />\n" % mesh_name_x3d, 1)
- else:
- mesh.tag = True
-
- self.write_indented("<Group DEF=\"G_%s\">\n" % mesh_name_x3d, 1)
-
- is_uv = bool(mesh.uv_textures.active)
- # is_col, defined for each material
-
- is_coords_written = False
-
- mesh_materials = mesh.materials[:]
- if not mesh_materials:
- mesh_materials = [None]
-
- mesh_material_tex = [None] * len(mesh_materials)
- mesh_material_mtex = [None] * len(mesh_materials)
- mesh_material_images = [None] * len(mesh_materials)
-
- for i, material in enumerate(mesh_materials):
- if material:
- for mtex in material.texture_slots:
- if mtex:
- tex = mtex.texture
- if tex and tex.type == 'IMAGE':
- image = tex.image
- if image:
- mesh_material_tex[i] = tex
- mesh_material_mtex[i] = mtex
- mesh_material_images[i] = image
- break
-
- mesh_materials_use_face_texture = [getattr(material, "use_face_texture", True) for material in mesh_materials]
-
- mesh_faces = mesh.faces[:]
- mesh_faces_materials = [f.material_index for f in mesh_faces]
-
- if is_uv and True in mesh_materials_use_face_texture:
- mesh_faces_image = [(fuv.image if (mesh_materials_use_face_texture[mesh_faces_materials[i]] and fuv.use_image) else mesh_material_images[mesh_faces_materials[i]]) for i, fuv in enumerate(mesh.uv_textures.active.data)]
- mesh_faces_image_unique = set(mesh_faces_image)
- elif len(set(mesh_material_images) | {None}) > 1: # make sure there is at least one image
- mesh_faces_image = [mesh_material_images[material_index] for material_index in mesh_faces_materials]
- mesh_faces_image_unique = set(mesh_faces_image)
- else:
- mesh_faces_image = [None] * len(mesh_faces)
- mesh_faces_image_unique = {None}
-
- # group faces
- face_groups = {}
- for material_index in range(len(mesh_materials)):
- for image in mesh_faces_image_unique:
- face_groups[material_index, image] = []
- del mesh_faces_image_unique
-
- for i, (material_index, image) in enumerate(zip(mesh_faces_materials, mesh_faces_image)):
- face_groups[material_index, image].append(i)
-
- for (material_index, image), face_group in face_groups.items():
- if face_group:
- material = mesh_materials[material_index]
-
- self.write_indented("<Shape>\n", 1)
- is_smooth = False
- is_col = (mesh.vertex_colors.active and (material is None or material.use_vertex_color_paint))
-
- # kludge but as good as it gets!
- for i in face_group:
- if mesh_faces[i].use_smooth:
- is_smooth = True
- break
-
- if image:
- self.write_indented("<Appearance>\n", 1)
- self.writeImageTexture(image)
-
- if mesh_materials_use_face_texture[material_index]:
- if image.use_tiles:
- self.write_indented("<TextureTransform scale=\"%s %s\" />\n" % (image.tiles_x, image.tiles_y))
- else:
- # transform by mtex
- loc = mesh_material_mtex[material_index].offset[:2]
-
- # mtex_scale * tex_repeat
- sca_x, sca_y = mesh_material_mtex[material_index].scale[:2]
-
- sca_x *= mesh_material_tex[material_index].repeat_x
- sca_y *= mesh_material_tex[material_index].repeat_y
-
- # flip x/y is a sampling feature, convert to transform
- if mesh_material_tex[material_index].use_flip_axis:
- rot = math.pi / -2.0
- sca_x, sca_y = sca_y, -sca_x
- else:
- rot = 0.0
-
- self.write_indented("<TextureTransform ", 1)
- # fw("center=\"%.6f %.6f\" " % (0.0, 0.0))
- fw("translation=\"%.6f %.6f\" " % loc)
- fw("scale=\"%.6f %.6f\" " % (sca_x, sca_y))
- fw("rotation=\"%.6f\" " % rot)
- fw("/>\n")
-
- self.write_indented("</Appearance>\n", -1)
-
- elif material:
- self.write_indented("<Appearance>\n", 1)
- self.writeMaterial(material, self.cleanStr(material.name, ""), world)
- self.write_indented("</Appearance>\n", -1)
-
- #-- IndexedFaceSet or IndexedLineSet
-
- self.write_indented("<IndexedFaceSet ", 1)
-
- # --- Write IndexedFaceSet Attributes
- if mesh.show_double_sided:
- fw("solid=\"true\" ")
- else:
- fw("solid=\"false\" ")
-
- if is_smooth:
- fw("creaseAngle=\"%.4f\" " % mesh.auto_smooth_angle)
-
- if is_uv:
- # "texCoordIndex"
- fw("\n\t\t\ttexCoordIndex=\"")
- j = 0
- for i in face_group:
- if len(mesh_faces[i].vertices) == 4:
- fw("%d %d %d %d -1, " % (j, j + 1, j + 2, j + 3))
- j += 4
- else:
- fw("%d %d %d -1, " % (j, j + 1, j + 2))
- j += 3
- fw("\" ")
- # --- end texCoordIndex
-
- if is_col:
- fw("colorPerVertex=\"false\" ")
-
- if True:
- # "coordIndex"
- fw('coordIndex="')
- if EXPORT_TRI:
- for i in face_group:
- fv = mesh_faces[i].vertices[:]
- if len(fv) == 3:
- fw("%i %i %i -1, " % fv)
- else:
- fw("%i %i %i -1, " % (fv[0], fv[1], fv[2]))
- fw("%i %i %i -1, " % (fv[0], fv[2], fv[3]))
- else:
- for i in face_group:
- fv = mesh_faces[i].vertices[:]
- if len(fv) == 3:
- fw("%i %i %i -1, " % fv)
- else:
- fw("%i %i %i %i -1, " % fv)
-
- fw("\" ")
- # --- end coordIndex
-
- # close IndexedFaceSet
- fw(">\n")
-
- # --- Write IndexedFaceSet Elements
- if True:
- if is_coords_written:
- self.write_indented("<Coordinate USE=\"%s%s\" />\n" % ("coord_", mesh_name_x3d))
- else:
- self.write_indented("<Coordinate DEF=\"%s%s\" \n" % ("coord_", mesh_name_x3d), 1)
- fw("\t\t\t\tpoint=\"")
- for v in mesh.vertices:
- fw("%.6f %.6f %.6f, " % v.co[:])
- fw("\" />")
- self.write_indented("\n", -1)
- is_coords_written = True
-
- if is_uv:
- self.write_indented("<TextureCoordinate point=\"", 1)
- fw = fw
- mesh_faces_uv = mesh.uv_textures.active.data
- for i in face_group:
- for uv in mesh_faces_uv[i].uv:
- fw("%.4f %.4f, " % uv[:])
- del mesh_faces_uv
- fw("\" />")
- self.write_indented("\n", -1)
-
- if is_col:
- self.write_indented("<Color color=\"", 1)
- # XXX, 1 color per face, only
- mesh_faces_col = mesh.vertex_colors.active.data
- for i in face_group:
- fw("%.3f %.3f %.3f, " % mesh_faces_col[i].color1[:])
- del mesh_faces_col
- fw("\" />")
- self.write_indented("\n", -1)
-
- #--- output vertexColors
-
- #--- output closing braces
- self.write_indented("</IndexedFaceSet>\n", -1)
- self.write_indented("</Shape>\n", -1)
-
- self.write_indented("</Group>\n", -1)
-
- self.write_indented("</Transform>\n", -1)
-
- if self.halonode == 1:
- self.write_indented("</Billboard>\n", -1)
- self.halonode = 0
-
- if self.billnode == 1:
- self.write_indented("</Billboard>\n", -1)
- self.billnode = 0
-
- if self.collnode == 1:
- self.write_indented("</Collision>\n", -1)
- self.collnode = 0
-
- fw("\n")
-
- def writeMaterial(self, mat, matName, world):
- # look up material name, use it if available
- if mat.tag:
- self.write_indented("<Material USE=\"MA_%s\" />\n" % matName)
- else:
- mat.tag = True
-
- emit = mat.emit
- ambient = mat.ambient / 3.0
- diffuseColor = tuple(mat.diffuse_color)
- if world:
- ambiColor = tuple(((c * mat.ambient) * 2.0) for c in world.ambient_color)
- else:
- ambiColor = 0.0, 0.0, 0.0
-
- emitColor = tuple(((c * emit) + ambiColor[i]) / 2.0 for i, c in enumerate(diffuseColor))
- shininess = mat.specular_hardness / 512.0
- specColor = tuple((c + 0.001) / (1.25 / (mat.specular_intensity + 0.001)) for c in mat.specular_color)
- transp = 1.0 - mat.alpha
-
- if mat.use_shadeless:
- ambient = 1.0
- shininess = 0.0
- specColor = emitColor = diffuseColor
-
- self.write_indented("<Material DEF=\"MA_%s\" " % matName, 1)
- self.file.write("diffuseColor=\"%s %s %s\" " % round_color(diffuseColor, self.cp))
- self.file.write("specularColor=\"%s %s %s\" " % round_color(specColor, self.cp))
- self.file.write("emissiveColor=\"%s %s %s\" \n" % round_color(emitColor, self.cp))
- self.write_indented("ambientIntensity=\"%s\" " % (round(ambient, self.cp)))
- self.file.write("shininess=\"%s\" " % (round(shininess, self.cp)))
- self.file.write("transparency=\"%s\" />" % (round(transp, self.cp)))
- self.write_indented("\n", -1)
-
- def writeImageTexture(self, image):
- name = image.name
- filepath = os.path.basename(image.filepath)
- if image.tag:
- self.write_indented("<ImageTexture USE=\"%s\" />\n" % self.cleanStr(name))
- else:
- image.tag = True
-
- self.write_indented("<ImageTexture DEF=\"%s\" " % self.cleanStr(name), 1)
- self.file.write("url=\"%s\" />" % filepath)
- self.write_indented("\n", -1)
-
- def writeBackground(self, world, alltextures):
- if world:
- worldname = world.name
- else:
- return
-
- blending = world.use_sky_blend, world.use_sky_paper, world.use_sky_real
-
- grd_triple = round_color(world.horizon_color, self.cp)
- sky_triple = round_color(world.zenith_color, self.cp)
- mix_triple = round_color(((grd_triple[i] + sky_triple[i]) / 2.0 for i in range(3)), self.cp)
-
- self.file.write("<Background DEF=\"%s\" " % self.secureName(worldname))
- # No Skytype - just Hor color
- if blending == (False, False, False):
- self.file.write("groundColor=\"%s %s %s\" " % grd_triple)
- self.file.write("skyColor=\"%s %s %s\" " % grd_triple)
- # Blend Gradient
- elif blending == (True, False, False):
- self.file.write("groundColor=\"%s %s %s, " % grd_triple)
- self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " % mix_triple)
- self.file.write("skyColor=\"%s %s %s, " % sky_triple)
- self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " % mix_triple)
- # Blend+Real Gradient Inverse
- elif blending == (True, False, True):
- self.file.write("groundColor=\"%s %s %s, " % sky_triple)
- self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " % mix_triple)
- self.file.write("skyColor=\"%s %s %s, " % grd_triple)
- self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " % mix_triple)
- # Paper - just Zen Color
- elif blending == (False, False, True):
- self.file.write("groundColor=\"%s %s %s\" " % sky_triple)
- self.file.write("skyColor=\"%s %s %s\" " % sky_triple)
- # Blend+Real+Paper - complex gradient
- elif blending == (True, True, True):
- self.write_indented("groundColor=\"%s %s %s, " % sky_triple)
- self.write_indented("%s %s %s\" groundAngle=\"1.57, 1.57\" " % grd_triple)
- self.write_indented("skyColor=\"%s %s %s, " % sky_triple)
- self.write_indented("%s %s %s\" skyAngle=\"1.57, 1.57\" " % grd_triple)
- # Any Other two colors
- else:
- self.file.write("groundColor=\"%s %s %s\" " % grd_triple)
- self.file.write("skyColor=\"%s %s %s\" " % sky_triple)
-
- alltexture = len(alltextures)
-
- for i in range(alltexture):
- tex = alltextures[i]
-
- if tex.type != 'IMAGE' or tex.image is None:
- continue
-
- namemat = tex.name
- # namemat = alltextures[i].name
-
- pic = tex.image
-
- # using .expandpath just in case, os.path may not expect //
- basename = os.path.basename(bpy.path.abspath(pic.filepath))
-
- pic = alltextures[i].image
- if (namemat == "back") and (pic != None):
- self.file.write("\n\tbackUrl=\"%s\" " % basename)
- elif (namemat == "bottom") and (pic != None):
- self.write_indented("bottomUrl=\"%s\" " % basename)
- elif (namemat == "front") and (pic != None):
- self.write_indented("frontUrl=\"%s\" " % basename)
- elif (namemat == "left") and (pic != None):
- self.write_indented("leftUrl=\"%s\" " % basename)
- elif (namemat == "right") and (pic != None):
- self.write_indented("rightUrl=\"%s\" " % basename)
- elif (namemat == "top") and (pic != None):
- self.write_indented("topUrl=\"%s\" " % basename)
- self.write_indented("/>\n\n")
-
-##########################################################
-# export routine
-##########################################################
-
- def export(self, scene, world, alltextures,
- EXPORT_APPLY_MODIFIERS=False,
- EXPORT_TRI=False,
- ):
-
- # tag un-exported IDs
- bpy.data.meshes.tag(False)
- bpy.data.materials.tag(False)
- bpy.data.images.tag(False)
-
- print("Info: starting X3D export to %r..." % self.filepath)
- self.writeHeader()
- # self.writeScript()
- self.writeNavigationInfo(scene)
- self.writeBackground(world, alltextures)
- self.writeFog(world)
- self.proto = 0
-
- for ob_main in [o for o in scene.objects if o.is_visible(scene)]:
-
- free, derived = create_derived_objects(scene, ob_main)
-
- if derived is None:
- continue
-
- for ob, ob_mat in derived:
- objType = ob.type
- objName = ob.name
- ob_mat = self.global_matrix * ob_mat
-
- if objType == 'CAMERA':
- self.writeViewpoint(ob, ob_mat, scene)
- elif objType in ('MESH', 'CURVE', 'SURF', 'FONT'):
- if EXPORT_APPLY_MODIFIERS or objType != 'MESH':
- me = ob.create_mesh(scene, EXPORT_APPLY_MODIFIERS, 'PREVIEW')
- else:
- me = ob.data
-
- self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI=EXPORT_TRI)
-
- # free mesh created with create_mesh()
- if me != ob.data:
- bpy.data.meshes.remove(me)
-
- elif objType == 'LAMP':
- data = ob.data
- datatype = data.type
- if datatype == 'POINT':
- self.writePointLight(ob, ob_mat, data, world)
- elif datatype == 'SPOT':
- self.writeSpotLight(ob, ob_mat, data, world)
- elif datatype == 'SUN':
- self.writeDirectionalLight(ob, ob_mat, data, world)
- else:
- self.writeDirectionalLight(ob, ob_mat, data, world)
- else:
- #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType)
- pass
-
- if free:
- free_derived_objects(ob_main)
-
- self.file.write("\n</Scene>\n</X3D>")
-
- # if EXPORT_APPLY_MODIFIERS:
- # if containerMesh:
- # containerMesh.vertices = None
-
- self.cleanup()
-
-##########################################################
-# Utility methods
-##########################################################
-
- def cleanup(self):
- self.file.close()
- self.indentLevel = 0
- print("Info: finished X3D export to %r" % self.filepath)
-
- def cleanStr(self, name, prefix='rsvd_'):
- """cleanStr(name,prefix) - try to create a valid VRML DEF name from object name"""
-
- newName = name
- if len(newName) == 0:
- self.nodeID += 1
- return "%s%d" % (prefix, self.nodeID)
-
- if newName in self.namesReserved:
- newName = '%s%s' % (prefix, newName)
-
- if newName[0].isdigit():
- newName = "%s%s" % ('_', newName)
-
- for bad in [' ', '"', '#', "'", ', ', '.', '[', '\\', ']', '{', '}']:
- newName = newName.replace(bad, '_')
- return newName
-
- def faceToString(self, face):
-
- print("Debug: face.flag=0x%x (bitflags)" % face.flag)
- if face.sel:
- print("Debug: face.sel=true")
-
- print("Debug: face.mode=0x%x (bitflags)" % face.mode)
- if face.mode & Mesh.FaceModes.TWOSIDE:
- print("Debug: face.mode twosided")
-
- print("Debug: face.transp=0x%x (enum)" % face.blend_type)
- if face.blend_type == Mesh.FaceTranspModes.SOLID:
- print("Debug: face.transp.SOLID")
-
- if face.image:
- print("Debug: face.image=%s" % face.image.name)
- print("Debug: face.materialIndex=%d" % face.materialIndex)
-
- def meshToString(self, mesh):
- # print("Debug: mesh.hasVertexUV=%d" % mesh.vertexColors)
- print("Debug: mesh.faceUV=%d" % (len(mesh.uv_textures) > 0))
- # print("Debug: mesh.faceUV=%d" % mesh.faceUV)
- print("Debug: mesh.hasVertexColours=%d" % (len(mesh.vertex_colors) > 0))
- # print("Debug: mesh.hasVertexColours=%d" % mesh.hasVertexColours())
- print("Debug: mesh.vertices=%d" % len(mesh.vertices))
- print("Debug: mesh.faces=%d" % len(mesh.faces))
- print("Debug: mesh.materials=%d" % len(mesh.materials))
-
- # s="%s %s %s" % (
- # round(c.r/255.0,self.cp),
- # round(c.g/255.0,self.cp),
- # round(c.b/255.0,self.cp))
- # return s
-
- # For writing well formed VRML code
- #------------------------------------------------------------------------
- def write_indented(self, s, inc=0):
- if inc < 1:
- self.indentLevel = self.indentLevel + inc
-
- self.file.write((self.indentLevel * "\t") + s)
-
- if inc > 0:
- self.indentLevel = self.indentLevel + inc
-
-##########################################################
-# Callbacks, needed before Main
-##########################################################
-
-
-def save(operator, context, filepath="",
- use_apply_modifiers=False,
- use_triangulate=False,
- use_compress=False):
-
- if use_compress:
- if not filepath.lower().endswith('.x3dz'):
- filepath = '.'.join(filepath.split('.')[:-1]) + '.x3dz'
- else:
- if not filepath.lower().endswith('.x3d'):
- filepath = '.'.join(filepath.split('.')[:-1]) + '.x3d'
-
- scene = context.scene
- world = scene.world
-
- if bpy.ops.object.mode_set.poll():
- bpy.ops.object.mode_set(mode='OBJECT')
-
- # XXX these are global textures while .Get() returned only scene's?
- alltextures = bpy.data.textures
- # alltextures = Blender.Texture.Get()
-
- wrlexport = x3d_class(filepath)
- wrlexport.export(scene,
- world,
- alltextures,
- EXPORT_APPLY_MODIFIERS=use_apply_modifiers,
- EXPORT_TRI=use_triangulate,
- )
-
- return {'FINISHED'}
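
The save() entry point above normalizes the output extension by splitting the path on '.', which drops the text after the final dot (and drops the whole name when there is no dot at all). A standalone sketch of the same suffix handling using os.path.splitext, offered as an equivalent illustration rather than the code that was removed:

    import os

    def ensure_extension(filepath, use_compress=False):
        # The exporter writes gzip output to .x3dz and plain XML to .x3d;
        # swap the suffix only when it does not already match.
        ext = '.x3dz' if use_compress else '.x3d'
        if not filepath.lower().endswith(ext):
            filepath = os.path.splitext(filepath)[0] + ext
        return filepath
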
diff --git a/release/scripts/op/io_scene_x3d/import_x3d.py b/release/scripts/op/io_scene_x3d/import_x3d.py
deleted file mode 100644
index f2885943866..00000000000
--- a/release/scripts/op/io_scene_x3d/import_x3d.py
+++ /dev/null
@@ -1,2658 +0,0 @@
-# ##### BEGIN GPL LICENSE BLOCK #####
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-# ##### END GPL LICENSE BLOCK #####
-
-# <pep8 compliant>
-
-DEBUG = False
-
-# This should work without a blender at all
-from os.path import exists
-
-
-def baseName(path):
- return path.split('/')[-1].split('\\')[-1]
-
-
-def dirName(path):
- return path[:-len(baseName(path))]
-
-
-def imageConvertCompat(path):
-
- try:
- import os
- except:
- return path
- if os.sep == '\\':
- return path # assume win32 has quicktime, don't convert
-
- if path.lower().endswith('.gif'):
- path_to = path[:-3] + 'png'
-
- '''
- if exists(path_to):
- return path_to
- '''
- # print('\n'+path+'\n'+path_to+'\n')
- os.system('convert "%s" "%s"' % (path, path_to)) # for now just hope we have image magick
-
- if exists(path_to):
- return path_to
-
- return path
-
-# notes
-# transforms are relative
-# order doesn't matter for loc/size/rot
-# right handed rotation
-# angles are in radians
-# rotation first defines the axis, then the amount in radians
-
-
-# =============================== VRML Specific
-
-def vrmlFormat(data):
- '''
- Keep this as a valid vrml file, but format in a way we can predict.
- '''
- # Strip all comments - # not in strings - warning: multiline strings are ignored.
- def strip_comment(l):
- #l = ' '.join(l.split())
- l = l.strip()
-
- if l.startswith('#'):
- return ''
-
- i = l.find('#')
-
- if i == -1:
- return l
-
- # Most cases accounted for! if we have a comment at the end of the line do this...
- #j = l.find('url "')
- j = l.find('"')
-
- if j == -1: # simple no strings
- return l[:i].strip()
-
- q = False
- for i, c in enumerate(l):
- if c == '"':
- q = not q # invert
-
- elif c == '#':
- if q == False:
- return l[:i - 1]
-
- return l
-
- data = '\n'.join([strip_comment(l) for l in data.split('\n')]) # remove all whitespace
-
- EXTRACT_STRINGS = True # only needed when strings or filenames contain ,[]{} chars :/
-
- if EXTRACT_STRINGS:
-
- # We need this so we can detect URL's
- data = '\n'.join([' '.join(l.split()) for l in data.split('\n')]) # remove all whitespace
-
- string_ls = []
-
- #search = 'url "'
- search = '"'
-
- ok = True
- last_i = 0
- while ok:
- ok = False
- i = data.find(search, last_i)
- if i != -1:
-
- start = i + len(search) # first char after end of search
- end = data.find('"', start)
- if end != -1:
- item = data[start:end]
- string_ls.append(item)
- data = data[:start] + data[end:]
- ok = True # keep looking
-
- last_i = (end - len(item)) + 1
- # print(last_i, item, '|' + data[last_i] + '|')
-
- # done with messy extracting strings part
-
- # Bad, dont take strings into account
- '''
- data = data.replace('#', '\n#')
- data = '\n'.join([ll for l in data.split('\n') for ll in (l.strip(),) if not ll.startswith('#')]) # remove all whitespace
- '''
- data = data.replace('{', '\n{\n')
- data = data.replace('}', '\n}\n')
- data = data.replace('[', '\n[\n')
- data = data.replace(']', '\n]\n')
- data = data.replace(',', ' , ') # make sure commas separate
-
- if EXTRACT_STRINGS:
- # add strings back in
-
- search = '"' # fill in these empty strings
-
- ok = True
- last_i = 0
- while ok:
- ok = False
- i = data.find(search + '"', last_i)
- # print(i)
- if i != -1:
- start = i + len(search) # first char after end of search
- item = string_ls.pop(0)
- # print(item)
- data = data[:start] + item + data[start:]
-
- last_i = start + len(item) + 1
-
- ok = True
-
- # More annoying obscure cases where USE or DEF are placed on a newline
- # data = data.replace('\nDEF ', ' DEF ')
- # data = data.replace('\nUSE ', ' USE ')
-
- data = '\n'.join([' '.join(l.split()) for l in data.split('\n')]) # remove all whitespace
-
- # Better to parse the file accounting for multiline arrays
- '''
- data = data.replace(',\n', ' , ') # remove line endings with commas
- data = data.replace(']', '\n]\n') # very very annoying - but some comma's are at the end of the list, must run this again.
- '''
-
- return [l for l in data.split('\n') if l]
-
-NODE_NORMAL = 1 # {}
-NODE_ARRAY = 2 # []
-NODE_REFERENCE = 3 # USE foobar
-# NODE_PROTO = 4 #
-
-lines = []
-
-
-def getNodePreText(i, words):
- # print(lines[i])
- use_node = False
- while len(words) < 5:
-
- if i >= len(lines):
- break
- '''
- elif lines[i].startswith('PROTO'):
- return NODE_PROTO, i+1
- '''
- elif lines[i] == '{':
- # words.append(lines[i]) # no need
- # print("OK")
- return NODE_NORMAL, i + 1
- elif lines[i].count('"') % 2 != 0: # odd number of quotes? - part of a string.
- # print('ISSTRING')
- break
- else:
- new_words = lines[i].split()
- if 'USE' in new_words:
- use_node = True
-
- words.extend(new_words)
- i += 1
-
- # Check for USE node - no {
- # USE #id - should always be on the same line.
- if use_node:
- # print('LINE', i, words[:words.index('USE')+2])
- words[:] = words[:words.index('USE') + 2]
- if lines[i] == '{' and lines[i + 1] == '}':
- # USE sometimes has {} after it anyway
- i += 2
- return NODE_REFERENCE, i
-
- # print("error value!!!", words)
- return 0, -1
-
-
-def is_nodeline(i, words):
-
- if not lines[i][0].isalpha():
- return 0, 0
-
- #if lines[i].startswith('field'):
- # return 0, 0
-
- # Is this a prototype??
- if lines[i].startswith('PROTO'):
- words[:] = lines[i].split()
- return NODE_NORMAL, i + 1 # TODO - assumes the next line is a '[\n', skip that
- if lines[i].startswith('EXTERNPROTO'):
- words[:] = lines[i].split()
- return NODE_ARRAY, i + 1 # TODO - assumes the next line is a '[\n', skip that
-
- '''
- proto_type, new_i = is_protoline(i, words, proto_field_defs)
- if new_i != -1:
- return proto_type, new_i
- '''
-
- # Simple "var [" type
- if lines[i + 1] == '[':
- if lines[i].count('"') % 2 == 0:
- words[:] = lines[i].split()
- return NODE_ARRAY, i + 2
-
- node_type, new_i = getNodePreText(i, words)
-
- if not node_type:
- if DEBUG:
- print("not node_type", lines[i])
- return 0, 0
-
- # Ok, we have a { after some values
- # Check the values are not fields
- for i, val in enumerate(words):
- if i != 0 and words[i - 1] in ('DEF', 'USE'):
- # ignore anything after DEF, it is a ID and can contain any chars.
- pass
- elif val[0].isalpha() and val not in ('TRUE', 'FALSE'):
- pass
- else:
- # There is a number in one of the values, therefor we are not a node.
- return 0, 0
-
- #if node_type==NODE_REFERENCE:
- # print(words, "REF_!!!!!!!")
- return node_type, new_i
-
-
-def is_numline(i):
- '''
- Does this line start with a number?
- '''
-
- # Works but too slow.
- '''
- l = lines[i]
- for w in l.split():
- if w==',':
- pass
- else:
- try:
- float(w)
- return True
-
- except:
- return False
-
- return False
- '''
-
- l = lines[i]
-
- line_start = 0
-
- if l.startswith(', '):
- line_start += 2
-
- line_end = len(l) - 1
- line_end_new = l.find(' ', line_start) # comma's always have a space before them
-
- if line_end_new != -1:
- line_end = line_end_new
-
- try:
- float(l[line_start:line_end]) # works for a float or int
- return True
- except:
- return False
-
-
-class vrmlNode(object):
- __slots__ = ('id',
- 'fields',
- 'proto_node',
- 'proto_field_defs',
- 'proto_fields',
- 'node_type',
- 'parent',
- 'children',
- 'parent',
- 'array_data',
- 'reference',
- 'lineno',
- 'filename',
- 'blendObject',
- 'DEF_NAMESPACE',
- 'ROUTE_IPO_NAMESPACE',
- 'PROTO_NAMESPACE',
- 'x3dNode')
-
- def __init__(self, parent, node_type, lineno):
- self.id = None
- self.node_type = node_type
- self.parent = parent
- self.blendObject = None
- self.x3dNode = None # for x3d import only
- if parent:
- parent.children.append(self)
-
- self.lineno = lineno
-
- # This is only set from the root nodes.
- # Having a filename also denotes a root node
- self.filename = None
- self.proto_node = None # proto field definition eg: "field SFColor seatColor .6 .6 .1"
-
- # Store in the root node because each inline file needs its own root node and its own namespace
- self.DEF_NAMESPACE = None
- self.ROUTE_IPO_NAMESPACE = None
- '''
- self.FIELD_NAMESPACE = None
- '''
-
- self.PROTO_NAMESPACE = None
-
- self.reference = None
-
- if node_type == NODE_REFERENCE:
- # For references, only the parent and ID are needed
- # the reference itself is assigned on parsing
- return
-
- self.fields = [] # fields have no order; in some cases root-level values are not unique so don't use a dict
-
- self.proto_field_defs = [] # proto field definition eg: "field SFColor seatColor .6 .6 .1"
- self.proto_fields = [] # proto field usage "diffuseColor IS seatColor"
- self.children = []
- self.array_data = [] # use for arrays of data - should only be for NODE_ARRAY types
-
- # Only available from the root node
- '''
- def getFieldDict(self):
- if self.FIELD_NAMESPACE != None:
- return self.FIELD_NAMESPACE
- else:
- return self.parent.getFieldDict()
- '''
- def getProtoDict(self):
- if self.PROTO_NAMESPACE != None:
- return self.PROTO_NAMESPACE
- else:
- return self.parent.getProtoDict()
-
- def getDefDict(self):
- if self.DEF_NAMESPACE != None:
- return self.DEF_NAMESPACE
- else:
- return self.parent.getDefDict()
-
- def getRouteIpoDict(self):
- if self.ROUTE_IPO_NAMESPACE != None:
- return self.ROUTE_IPO_NAMESPACE
- else:
- return self.parent.getRouteIpoDict()
-
- def setRoot(self, filename):
- self.filename = filename
- # self.FIELD_NAMESPACE = {}
- self.DEF_NAMESPACE = {}
- self.ROUTE_IPO_NAMESPACE = {}
- self.PROTO_NAMESPACE = {}
-
- def isRoot(self):
- if self.filename == None:
- return False
- else:
- return True
-
- def getFilename(self):
- if self.filename:
- return self.filename
- elif self.parent:
- return self.parent.getFilename()
- else:
- return None
-
- def getRealNode(self):
- if self.reference:
- return self.reference
- else:
- return self
-
- def getSpec(self):
- self_real = self.getRealNode()
- try:
- return self_real.id[-1] # its possible this node has no spec
- except:
- return None
-
- def findSpecRecursive(self, spec):
- self_real = self.getRealNode()
- if spec == self_real.getSpec():
- return self
-
- for child in self_real.children:
- if child.findSpecRecursive(spec):
- return child
-
- return None
-
- def getPrefix(self):
- if self.id:
- return self.id[0]
- return None
-
- def getSpecialTypeName(self, typename):
- self_real = self.getRealNode()
- try:
- return self_real.id[list(self_real.id).index(typename) + 1]
- except:
- return None
-
- def getDefName(self):
- return self.getSpecialTypeName('DEF')
-
- def getProtoName(self):
- return self.getSpecialTypeName('PROTO')
-
- def getExternprotoName(self):
- return self.getSpecialTypeName('EXTERNPROTO')
-
- def getChildrenBySpec(self, node_spec): # spec could be Transform, Shape, Appearance
- self_real = self.getRealNode()
- # using getSpec functions allows us to use the spec of USE children that dont have their spec in their ID
- if type(node_spec) == str:
- return [child for child in self_real.children if child.getSpec() == node_spec]
- else:
- # Check inside a list of optional types
- return [child for child in self_real.children if child.getSpec() in node_spec]
-
- def getChildBySpec(self, node_spec): # spec could be Transform, Shape, Appearance
- # Use in cases where there is only ever 1 child of this type
- ls = self.getChildrenBySpec(node_spec)
- if ls:
- return ls[0]
- else:
- return None
-
- def getChildrenByName(self, node_name): # type could be geometry, children, appearance
- self_real = self.getRealNode()
- return [child for child in self_real.children if child.id if child.id[0] == node_name]
-
- def getChildByName(self, node_name):
- self_real = self.getRealNode()
- for child in self_real.children:
- if child.id and child.id[0] == node_name: # and child.id[-1]==node_spec:
- return child
-
- def getSerialized(self, results, ancestry):
- ''' Return this node and all its children in a flat list '''
- ancestry = ancestry[:] # always use a copy
-
- # self_real = self.getRealNode()
-
- results.append((self, tuple(ancestry)))
- ancestry.append(self)
- for child in self.getRealNode().children:
- if child not in ancestry:
- # We dont want to load proto's, they are only references
- # We could enforce this elsewhere
-
- # Only add this in a very special case
- # where the parent of this object is not the real parent
- # - In this case we have added the proto as a child to a node instancing it.
- # This is a bit arbitrary, but it's how PROTOs are done with this importer.
- if child.getProtoName() == None and child.getExternprotoName() == None:
- child.getSerialized(results, ancestry)
- else:
-
- if DEBUG:
- print('getSerialized() is proto:', child.getProtoName(), child.getExternprotoName(), self.getSpec())
-
- self_spec = self.getSpec()
-
- if child.getProtoName() == self_spec or child.getExternprotoName() == self_spec:
- #if DEBUG:
- # "FoundProto!"
- child.getSerialized(results, ancestry)
-
- return results
-
- def searchNodeTypeID(self, node_spec, results):
- self_real = self.getRealNode()
- # print(self.lineno, self.id)
- if self_real.id and self_real.id[-1] == node_spec: # use last element, could also be only element
- results.append(self_real)
- for child in self_real.children:
- child.searchNodeTypeID(node_spec, results)
- return results
-
- def getFieldName(self, field, ancestry, AS_CHILD=False):
- self_real = self.getRealNode() # incase we're an instance
-
- for f in self_real.fields:
- # print(f)
- if f and f[0] == field:
- # print('\tfound field', f)
-
- if len(f) >= 3 and f[1] == 'IS': # eg: 'diffuseColor IS legColor'
- field_id = f[2]
-
- # print("\n\n\n\n\n\nFOND IS!!!")
- f_proto_lookup = None
- f_proto_child_lookup = None
- i = len(ancestry)
- while i:
- i -= 1
- node = ancestry[i]
- node = node.getRealNode()
-
- # proto settings are stored in "self.proto_node"
- if node.proto_node:
- # Get the default value from the proto; this can be overridden by the proto instance
- # 'field SFColor legColor .8 .4 .7'
- if AS_CHILD:
- for child in node.proto_node.children:
- #if child.id and len(child.id) >= 3 and child.id[2]==field_id:
- if child.id and ('point' in child.id or 'points' in child.id):
- f_proto_child_lookup = child
-
- else:
- for f_def in node.proto_node.proto_field_defs:
- if len(f_def) >= 4:
- if f_def[0] == 'field' and f_def[2] == field_id:
- f_proto_lookup = f_def[3:]
-
- # Node instance. Will be 1 up from the proto-node in the ancestry list, but NOT its parent.
- # This is the setting as defined by the instance, including this setting is optional,
- # and will override the default PROTO value
- # eg: 'legColor 1 0 0'
- if AS_CHILD:
- for child in node.children:
- if child.id and child.id[0] == field_id:
- f_proto_child_lookup = child
- else:
- for f_def in node.fields:
- if len(f_def) >= 2:
- if f_def[0] == field_id:
- if DEBUG:
- print("getFieldName(), found proto", f_def)
- f_proto_lookup = f_def[1:]
-
- if AS_CHILD:
- if f_proto_child_lookup:
- if DEBUG:
- print("getFieldName() - AS_CHILD=True, child found")
- print(f_proto_child_lookup)
- return f_proto_child_lookup
- else:
- return f_proto_lookup
- else:
- if AS_CHILD:
- return None
- else:
- # Not using a proto
- return f[1:]
- # print('\tfield not found', field)
-
- # See if this is a proto name
- if AS_CHILD:
- child_array = None
- for child in self_real.children:
- if child.id and len(child.id) == 1 and child.id[0] == field:
- return child
-
- return None
-
- def getFieldAsInt(self, field, default, ancestry):
- self_real = self.getRealNode() # incase we're an instance
-
- f = self_real.getFieldName(field, ancestry)
- if f == None:
- return default
- if ',' in f:
- f = f[:f.index(',')] # strip after the comma
-
- if len(f) != 1:
- print('\t"%s" wrong length for int conversion for field "%s"' % (f, field))
- return default
-
- try:
- return int(f[0])
- except:
- print('\tvalue "%s" could not be used as an int for field "%s"' % (f[0], field))
- return default
-
- def getFieldAsFloat(self, field, default, ancestry):
- self_real = self.getRealNode() # incase we're an instance
-
- f = self_real.getFieldName(field, ancestry)
- if f == None:
- return default
- if ',' in f:
- f = f[:f.index(',')] # strip after the comma
-
- if len(f) != 1:
- print('\t"%s" wrong length for float conversion for field "%s"' % (f, field))
- return default
-
- try:
- return float(f[0])
- except:
- print('\tvalue "%s" could not be used as a float for field "%s"' % (f[0], field))
- return default
-
- def getFieldAsFloatTuple(self, field, default, ancestry):
- self_real = self.getRealNode() # incase we're an instance
-
- f = self_real.getFieldName(field, ancestry)
- if f == None:
- return default
- # if ',' in f: f = f[:f.index(',')] # strip after the comma
-
- if len(f) < 1:
- print('"%s" wrong length for float tuple conversion for field "%s"' % (f, field))
- return default
-
- ret = []
- for v in f:
- if v != ',':
- try:
- ret.append(float(v))
- except:
- break # quit on first non-float; perhaps it's a new field name on the same line? - if so we are going to ignore it :/ TODO
- # print(ret)
-
- if ret:
- return ret
- if not ret:
- print('\tvalue "%s" could not be used as a float tuple for field "%s"' % (f, field))
- return default
-
- def getFieldAsBool(self, field, default, ancestry):
- self_real = self.getRealNode() # incase we're an instance
-
- f = self_real.getFieldName(field, ancestry)
- if f == None:
- return default
- if ',' in f:
- f = f[:f.index(',')] # strip after the comma
-
- if len(f) != 1:
- print('\t"%s" wrong length for bool conversion for field "%s"' % (f, field))
- return default
-
- if f[0].upper() == '"TRUE"' or f[0].upper() == 'TRUE':
- return True
- elif f[0].upper() == '"FALSE"' or f[0].upper() == 'FALSE':
- return False
- else:
- print('\t"%s" could not be used as a bool for field "%s"' % (f[1], field))
- return default
-
- def getFieldAsString(self, field, default, ancestry):
- self_real = self.getRealNode() # incase we're an instance
-
- f = self_real.getFieldName(field, ancestry)
- if f == None:
- return default
- if len(f) < 1:
- print('\t"%s" wrong length for string conversion for field "%s"' % (f, field))
- return default
-
- if len(f) > 1:
- # String may contain spaces
- st = ' '.join(f)
- else:
- st = f[0]
-
- # X3D HACK
- if self.x3dNode:
- return st
-
- if st[0] == '"' and st[-1] == '"':
- return st[1:-1]
- else:
- print('\tvalue "%s" could not be used as a string for field "%s"' % (f[0], field))
- return default
-
- def getFieldAsArray(self, field, group, ancestry):
- '''
- For this parser arrays are children
- '''
-
- def array_as_number(array_string):
- array_data = []
- try:
- array_data = [int(val) for val in array_string]
- except:
- try:
- array_data = [float(val) for val in array_string]
- except:
- print('\tWarning, could not parse array data from field')
-
- return array_data
-
- self_real = self.getRealNode() # incase we're an instance
-
- child_array = self_real.getFieldName(field, ancestry, True)
-
- #if type(child_array)==list: # happens occasionaly
- # array_data = child_array
-
- if child_array is None:
- # For x3d, should work ok with vrml too
- # for x3d arrays are fields, in vrml they are nodes; annoying but not too bad.
- data_split = self.getFieldName(field, ancestry)
- if not data_split:
- return []
- array_data = ' '.join(data_split)
- if array_data == None:
- return []
-
- array_data = array_data.replace(',', ' ')
- data_split = array_data.split()
-
- array_data = array_as_number(data_split)
-
- elif type(child_array) == list:
- # x3d creates these
- data_split = [w.strip(",") for w in child_array]
-
- array_data = array_as_number(data_split)
- else:
- # print(child_array)
- # Normal vrml
- array_data = child_array.array_data
-
- # print('array_data', array_data)
- if group == -1 or len(array_data) == 0:
- return array_data
-
- # We want a flat list
- flat = True
- for item in array_data:
- if type(item) == list:
- flat = False
- break
-
- # make a flat array
- if flat:
- flat_array = array_data # we are already flat.
- else:
- flat_array = []
-
- def extend_flat(ls):
- for item in ls:
- if type(item) == list:
- extend_flat(item)
- else:
- flat_array.append(item)
-
- extend_flat(array_data)
-
- # We requested a flat array
- if group == 0:
- return flat_array
-
- new_array = []
- sub_array = []
-
- for item in flat_array:
- sub_array.append(item)
- if len(sub_array) == group:
- new_array.append(sub_array)
- sub_array = []
-
- if sub_array:
- print('\twarning, array was not aligned to requested grouping', group, 'remaining value', sub_array)
-
- return new_array
-
- def getFieldAsStringArray(self, field, ancestry):
- '''
- Get a list of strings
- '''
- self_real = self.getRealNode() # incase we're an instance
-
- child_array = None
- for child in self_real.children:
- if child.id and len(child.id) == 1 and child.id[0] == field:
- child_array = child
- break
- if not child_array:
- return []
-
- # each string gets its own list, remove ""'s
- try:
- new_array = [f[0][1:-1] for f in child_array.fields]
- except:
- print('\twarning, string array could not be made')
- new_array = []
-
- return new_array
-
- def getLevel(self):
- # Ignore self_real
- level = 0
- p = self.parent
- while p:
- level += 1
- p = p.parent
- if not p:
- break
-
- return level
-
- def __repr__(self):
- level = self.getLevel()
- ind = ' ' * level
- if self.node_type == NODE_REFERENCE:
- brackets = ''
- elif self.node_type == NODE_NORMAL:
- brackets = '{}'
- else:
- brackets = '[]'
-
- if brackets:
- text = ind + brackets[0] + '\n'
- else:
- text = ''
-
- text += ind + 'ID: ' + str(self.id) + ' ' + str(level) + (' lineno %d\n' % self.lineno)
-
- if self.node_type == NODE_REFERENCE:
- text += ind + "(reference node)\n"
- return text
-
- if self.proto_node:
- text += ind + 'PROTO NODE...\n'
- text += str(self.proto_node)
- text += ind + 'PROTO NODE_DONE\n'
-
- text += ind + 'FIELDS:' + str(len(self.fields)) + '\n'
-
- for i, item in enumerate(self.fields):
- text += ind + 'FIELD:\n'
- text += ind + str(item) + '\n'
-
- text += ind + 'PROTO_FIELD_DEFS:' + str(len(self.proto_field_defs)) + '\n'
-
- for i, item in enumerate(self.proto_field_defs):
- text += ind + 'PROTO_FIELD:\n'
- text += ind + str(item) + '\n'
-
- text += ind + 'ARRAY: ' + str(len(self.array_data)) + ' ' + str(self.array_data) + '\n'
- #text += ind + 'ARRAY: ' + str(len(self.array_data)) + '[...] \n'
-
- text += ind + 'CHILDREN: ' + str(len(self.children)) + '\n'
- for i, child in enumerate(self.children):
- text += ind + ('CHILD%d:\n' % i)
- text += str(child)
-
- text += '\n' + ind + brackets[1]
-
- return text
-
- def parse(self, i, IS_PROTO_DATA=False):
- new_i = self.__parse(i, IS_PROTO_DATA)
-
- # print(self.id, self.getFilename())
-
- # Check if this node was an inline or externproto
-
- url_ls = []
-
- if self.node_type == NODE_NORMAL and self.getSpec() == 'Inline':
- ancestry = [] # Warning! - PROTOs using this won't work at all.
- url = self.getFieldAsString('url', None, ancestry)
- if url:
- url_ls = [(url, None)]
- del ancestry
-
- elif self.getExternprotoName():
- # externproto
- url_ls = []
- for f in self.fields:
-
- if type(f) == str:
- f = [f]
-
- for ff in f:
- for f_split in ff.split('"'):
- # print(f_split)
- # "someextern.vrml#SomeID"
- if '#' in f_split:
-
- f_split, f_split_id = f_split.split('#') # there should only be 1 # anyway
-
- url_ls.append((f_split, f_split_id))
- else:
- url_ls.append((f_split, None))
-
- # Was either an Inline or an EXTERNPROTO
- if url_ls:
-
- # print(url_ls)
-
- for url, extern_key in url_ls:
- print(url)
- urls = []
- urls.append(url)
- urls.append(bpy.path.resolve_ncase(urls[-1]))
-
- urls.append(dirName(self.getFilename()) + url)
- urls.append(bpy.path.resolve_ncase(urls[-1]))
-
- urls.append(dirName(self.getFilename()) + baseName(url))
- urls.append(bpy.path.resolve_ncase(urls[-1]))
-
- try:
- url = [url for url in urls if exists(url)][0]
- url_found = True
- except:
- url_found = False
-
- if not url_found:
- print('\tWarning: Inline URL could not be found:', url)
- else:
- if url == self.getFilename():
- print('\tWarning: can\'t Inline yourself recursively:', url)
- else:
-
- try:
- data = gzipOpen(url)
- except:
- print('\tWarning: can\'t open the file:', url)
- data = None
-
- if data:
- # Tricky - inline another VRML
- print('\tLoading Inline:"%s"...' % url)
-
- # Watch it! - backup lines
- lines_old = lines[:]
-
- lines[:] = vrmlFormat(data)
-
- lines.insert(0, '{')
- lines.insert(0, 'root_node____')
- lines.append('}')
- '''
- ff = open('/tmp/test.txt', 'w')
- ff.writelines([l+'\n' for l in lines])
- '''
-
- child = vrmlNode(self, NODE_NORMAL, -1)
- child.setRoot(url) # initialized dicts
- child.parse(0)
-
- # if self.getExternprotoName():
- if self.getExternprotoName():
- if not extern_key: # if none is specified - use the name
- extern_key = self.getSpec()
-
- if extern_key:
-
- self.children.remove(child)
- child.parent = None
-
- extern_child = child.findSpecRecursive(extern_key)
-
- if extern_child:
- self.children.append(extern_child)
- extern_child.parent = self
-
- if DEBUG:
- print("\tEXTERNPROTO ID found!:", extern_key)
- else:
- print("\tEXTERNPROTO ID not found!:", extern_key)
-
- # Watch it! - restore lines
- lines[:] = lines_old
-
- return new_i
-
- def __parse(self, i, IS_PROTO_DATA=False):
- '''
- print('parsing at', i, end="")
- print(i, self.id, self.lineno)
- '''
- l = lines[i]
-
- if l == '[':
- # An anonymous list
- self.id = None
- i += 1
- else:
- words = []
-
- node_type, new_i = is_nodeline(i, words)
- if not node_type: # fail for parsing new node.
- print("Failed to parse new node")
- raise ValueError
-
- if self.node_type == NODE_REFERENCE:
- # Only assign the reference and quit
- key = words[words.index('USE') + 1]
- self.id = (words[0],)
-
- self.reference = self.getDefDict()[key]
- return new_i
-
- self.id = tuple(words)
-
- # fill in DEF/USE
- key = self.getDefName()
- if key != None:
- self.getDefDict()[key] = self
-
- key = self.getProtoName()
- if not key:
- key = self.getExternprotoName()
-
- proto_dict = self.getProtoDict()
- if key != None:
- proto_dict[key] = self
-
- # Parse the proto nodes fields
- self.proto_node = vrmlNode(self, NODE_ARRAY, new_i)
- new_i = self.proto_node.parse(new_i)
-
- self.children.remove(self.proto_node)
-
- # print(self.proto_node)
-
- new_i += 1 # skip past the {
-
- else: # If we're a proto instance, add the proto node as our child.
- spec = self.getSpec()
- try:
- self.children.append(proto_dict[spec])
- #pass
- except:
- pass
-
- del spec
-
- del proto_dict, key
-
- i = new_i
-
- # print(self.id)
- ok = True
- while ok:
- if i >= len(lines):
- return len(lines) - 1
-
- l = lines[i]
- # print('\tDEBUG:', i, self.node_type, l)
- if l == '':
- i += 1
- continue
-
- if l == '}':
- if self.node_type != NODE_NORMAL: # also ends proto nodes, we may want a type for these too.
- print('wrong node ending, expected an } ' + str(i) + ' ' + str(self.node_type))
- if DEBUG:
- raise ValueError
- ### print("returning", i)
- return i + 1
- if l == ']':
- if self.node_type != NODE_ARRAY:
- print('wrong node ending, expected a ] ' + str(i) + ' ' + str(self.node_type))
- if DEBUG:
- raise ValueError
- ### print("returning", i)
- return i + 1
-
- node_type, new_i = is_nodeline(i, [])
- if node_type: # check text\n{
- child = vrmlNode(self, node_type, i)
- i = child.parse(i)
-
- elif l == '[': # some files have these anonymous lists
- child = vrmlNode(self, NODE_ARRAY, i)
- i = child.parse(i)
-
- elif is_numline(i):
- l_split = l.split(',')
-
- values = None
- # See if each item is a float?
-
- for num_type in (int, float):
- try:
- values = [num_type(v) for v in l_split]
- break
- except:
- pass
-
- try:
- values = [[num_type(v) for v in segment.split()] for segment in l_split]
- break
- except:
- pass
-
- if values is None: # don't parse
- values = l_split
-
- # This should not extend over multiple lines, however it is possible
- # print(self.array_data)
- if values:
- self.array_data.extend(values)
- i += 1
- else:
- words = l.split()
- if len(words) > 2 and words[1] == 'USE':
- vrmlNode(self, NODE_REFERENCE, i)
- else:
-
- # print("FIELD", i, l)
- #
- #words = l.split()
- ### print('\t\ttag', i)
- # this is a tag/field line
- # print(words, i, l)
- value = l
- # print(i)
- # javascript strings can exist as values.
- quote_count = l.count('"')
- if quote_count % 2: # odd number?
- # print('MULTILINE')
- while 1:
- i += 1
- l = lines[i]
- quote_count = l.count('"')
- if quote_count % 2: # odd number?
- value += '\n' + l[:l.rfind('"')]
- break # assume the closing quote ends the value here
- else:
- value += '\n' + l
-
- value_all = value.split()
-
- def iskey(k):
- if k[0] != '"' and k[0].isalpha() and k.upper() not in ('TRUE', 'FALSE'):
- return True
- return False
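-
- # For example: iskey('diffuseColor') is True, while iskey('0.8'),
- # iskey('"image.png"') and iskey('TRUE') are all False.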
-
- def split_fields(value):
- '''
- key 0.0 otherkey 1,2,3 opt1 opt1 0.0
- -> [key 0.0], [otherkey 1,2,3], [opt1 opt1 0.0]
- '''
- field_list = []
- field_context = []
-
- for j in range(len(value)):
- if iskey(value[j]):
- if field_context:
- # this IS a key but the previous value was not a key, or it was a defined field.
- if (not iskey(field_context[-1])) or ((len(field_context) == 3 and field_context[1] == 'IS')):
- field_list.append(field_context)
-
- field_context = [value[j]]
- else:
- # The last item was not a value, multiple keys are needed in some cases.
- field_context.append(value[j])
- else:
- # Is empty, just add this on
- field_context.append(value[j])
- else:
- # Add a value to the list
- field_context.append(value[j])
-
- if field_context:
- field_list.append(field_context)
-
- return field_list
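-
- # For example, a field line such as
- # 'diffuseColor 0.8 0.8 0.8 specularColor 0 0 0'
- # is split by split_fields() into
- # [['diffuseColor', '0.8', '0.8', '0.8'], ['specularColor', '0', '0', '0']]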
-
- for value in split_fields(value_all):
- # Split
-
- if value[0] == 'field':
- # field SFFloat creaseAngle 4
- self.proto_field_defs.append(value)
- else:
- self.fields.append(value)
- i += 1
-
-
-def gzipOpen(path):
- try:
- import gzip
- except:
- gzip = None
-
- data = None
- if gzip:
- try:
- data = gzip.open(path, 'r').read()
- except:
- pass
- else:
- print('\tNote, gzip module could not be imported, compressed files will fail to load')
-
- if data == None:
- try:
- data = open(path, 'rU').read()
- except:
- pass
-
- return data
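-
-# Usage sketch (illustrative, the path is made up): gzipOpen() returns the raw
-# file contents whether or not the file is gzip compressed, or None on failure.
-#
-# data = gzipOpen('/tmp/scene.wrl')
-# if data is None:
-#     print('could not read the file')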
-
-
-def vrml_parse(path):
- '''
- Sets up the root node and returns it so load_web3d() can deal with the blender side of things.
- Return root (vrmlNode, '') or (None, 'Error String')
- '''
- data = gzipOpen(path)
-
- if data == None:
- return None, 'Failed to open file: ' + path
-
- # Stripped above
- lines[:] = vrmlFormat(data)
-
- lines.insert(0, '{')
- lines.insert(0, 'dummy_node')
- lines.append('}')
- # Use for testing our parsed output, so we can check on line numbers.
-
- '''
- ff = open('/tmp/test.txt', 'w')
- ff.writelines([l+'\n' for l in lines])
- ff.close()
- '''
-
- # Now evaluate it
- node_type, new_i = is_nodeline(0, [])
- if not node_type:
- return None, 'Error: VRML file has no starting Node'
-
- # Trick to make sure we get all root nodes.
- lines.insert(0, '{')
- lines.insert(0, 'root_node____') # important the name starts with an ascii char
- lines.append('}')
-
- root = vrmlNode(None, NODE_NORMAL, -1)
- root.setRoot(path) # we need to set the root so we have a namespace and know the path in case of inlining
-
- # Parse recursively
- root.parse(0)
-
- # This prints a load of text
- if DEBUG:
- print(root)
-
- return root, ''
-
-
-# ====================== END VRML
-
-# ====================== X3d Support
-
-# Same as vrml but replace the parser
-class x3dNode(vrmlNode):
- def __init__(self, parent, node_type, x3dNode):
- vrmlNode.__init__(self, parent, node_type, -1)
- self.x3dNode = x3dNode
-
- def parse(self, IS_PROTO_DATA=False):
- # print(self.x3dNode.tagName)
-
- define = self.x3dNode.getAttributeNode('DEF')
- if define:
- self.getDefDict()[define.value] = self
- else:
- use = self.x3dNode.getAttributeNode('USE')
- if use:
- try:
- self.reference = self.getDefDict()[use.value]
- self.node_type = NODE_REFERENCE
- except:
- print('\tWarning: reference', use.value, 'not found')
- self.parent.children.remove(self)
-
- return
-
- for x3dChildNode in self.x3dNode.childNodes:
- if x3dChildNode.nodeType in (x3dChildNode.TEXT_NODE, x3dChildNode.COMMENT_NODE, x3dChildNode.CDATA_SECTION_NODE):
- continue
-
- node_type = NODE_NORMAL
- # print(x3dChildNode, dir(x3dChildNode))
- if x3dChildNode.getAttributeNode('USE'):
- node_type = NODE_REFERENCE
-
- child = x3dNode(self, node_type, x3dChildNode)
- child.parse()
-
- # TODO - x3d Inline
-
- def getSpec(self):
- return self.x3dNode.tagName # should match vrml spec
-
- def getDefName(self):
- data = self.x3dNode.getAttributeNode('DEF')
- if data:
- return data.value
- return None
-
- # Other funcs operate from vrml, but this means we can wrap XML fields, still use nice utility funcs
- # getFieldAsArray getFieldAsBool etc
- def getFieldName(self, field, ancestry, AS_CHILD=False):
- # ancestry and AS_CHILD are ignored, only used for VRML now
-
- self_real = self.getRealNode() # in case we're an instance
- field_xml = self.x3dNode.getAttributeNode(field)
- if field_xml:
- value = field_xml.value
-
- # We may want to edit this for x3d specific stuff
- # Sucks a bit to return the field name in the list but vrml expects this :/
- return value.split()
- else:
- return None
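-
- # For example, an X3D attribute written as diffuseColor='0.8 0.2 0.2' is
- # returned by getFieldName() as ['0.8', '0.2', '0.2'], the same word-list
- # form the VRML parser produces, so the getFieldAs*() helpers work unchanged.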
-
-
-def x3d_parse(path):
- '''
- Sets up the root node and returns it so load_web3d() can deal with the blender side of things.
- Return root (x3dNode, '') or (None, 'Error String')
- '''
-
- try:
- import xml.dom.minidom
- except:
- return None, 'Error: could not import the XML parsing module (xml.dom.minidom), install a full python'
-
- '''
- try: doc = xml.dom.minidom.parse(path)
- except: return None, 'Could not parse this X3D file, XML error'
- '''
-
- # Could add a try/except here, but a console error is more useful.
- data = gzipOpen(path)
-
- if data == None:
- return None, 'Failed to open file: ' + path
-
- doc = xml.dom.minidom.parseString(data)
-
- try:
- x3dnode = doc.getElementsByTagName('X3D')[0]
- except:
- return None, 'Not a valid x3d document, cannot import'
-
- root = x3dNode(None, NODE_NORMAL, x3dnode)
- root.setRoot(path) # so images and Inline's we load have a relative path
- root.parse()
-
- return root, ''
-
-## f = open('/_Cylinder.wrl', 'r')
-# f = open('/fe/wrl/Vrml/EGS/TOUCHSN.WRL', 'r')
-# vrml_parse('/fe/wrl/Vrml/EGS/TOUCHSN.WRL')
-#vrml_parse('/fe/wrl/Vrml/EGS/SCRIPT.WRL')
-'''
-import os
-files = os.popen('find /fe/wrl -iname "*.wrl"').readlines()
-files.sort()
-tot = len(files)
-for i, f in enumerate(files):
- #if i < 801:
- # continue
-
- f = f.strip()
- print(f, i, tot)
- vrml_parse(f)
-'''
-
-# NO BLENDER CODE ABOVE THIS LINE.
-# -----------------------------------------------------------------------------------
-import bpy
-import image_utils
-# import BPyImage
-# import BPySys
-# reload(BPySys)
-# reload(BPyImage)
-# import Blender
-# from Blender import Texture, Material, Mathutils, Mesh, Types, Window
-from mathutils import Vector, Matrix
-
-RAD_TO_DEG = 57.29578
-
-GLOBALS = {'CIRCLE_DETAIL': 16}
-
-
-def translateRotation(rot):
- ''' axis, angle '''
- return Matrix.Rotation(rot[3], 4, Vector(rot[:3]))
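-
-# For example, the VRML axis/angle rotation '0 1 0 1.5708' (90 degrees about Y)
-# becomes Matrix.Rotation(1.5708, 4, Vector((0.0, 1.0, 0.0))).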
-
-
-def translateScale(sca):
- mat = Matrix() # 4x4 default
- mat[0][0] = sca[0]
- mat[1][1] = sca[1]
- mat[2][2] = sca[2]
- return mat
-
-
-def translateTransform(node, ancestry):
- cent = node.getFieldAsFloatTuple('center', None, ancestry) # (0.0, 0.0, 0.0)
- rot = node.getFieldAsFloatTuple('rotation', None, ancestry) # (0.0, 0.0, 1.0, 0.0)
- sca = node.getFieldAsFloatTuple('scale', None, ancestry) # (1.0, 1.0, 1.0)
- scaori = node.getFieldAsFloatTuple('scaleOrientation', None, ancestry) # (0.0, 0.0, 1.0, 0.0)
- tx = node.getFieldAsFloatTuple('translation', None, ancestry) # (0.0, 0.0, 0.0)
-
- if cent:
- cent_mat = Matrix.Translation(Vector(cent)).resize4x4()
- cent_imat = cent_mat.copy().invert()
- else:
- cent_mat = cent_imat = None
-
- if rot:
- rot_mat = translateRotation(rot)
- else:
- rot_mat = None
-
- if sca:
- sca_mat = translateScale(sca)
- else:
- sca_mat = None
-
- if scaori:
- scaori_mat = translateRotation(scaori)
- scaori_imat = scaori_mat.copy().invert()
- else:
- scaori_mat = scaori_imat = None
-
- if tx:
- tx_mat = Matrix.Translation(Vector(tx)).resize4x4()
- else:
- tx_mat = None
-
- new_mat = Matrix()
-
- mats = [tx_mat, cent_mat, rot_mat, scaori_mat, sca_mat, scaori_imat, cent_imat]
- for mtx in mats:
- if mtx:
- new_mat = new_mat * mtx
-
- return new_mat
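-
-# The order of 'mats' above follows the VRML97 Transform definition, roughly:
-#
-# mat = T * C * R * SR * S * -SR * -C
-#
-# where T = translation, C = center, R = rotation, SR = scaleOrientation,
-# S = scale and -X is the inverse of X; missing fields are simply skipped.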
-
-
-def translateTexTransform(node, ancestry):
- cent = node.getFieldAsFloatTuple('center', None, ancestry) # (0.0, 0.0)
- rot = node.getFieldAsFloat('rotation', None, ancestry) # 0.0
- sca = node.getFieldAsFloatTuple('scale', None, ancestry) # (1.0, 1.0)
- tx = node.getFieldAsFloatTuple('translation', None, ancestry) # (0.0, 0.0)
-
- if cent:
- # cent is at a corner by default
- cent_mat = Matrix.Translation(Vector(cent).resize3D()).resize4x4()
- cent_imat = cent_mat.copy().invert()
- else:
- cent_mat = cent_imat = None
-
- if rot:
- rot_mat = Matrix.Rotation(rot, 4, 'Z') # translateRotation(rot)
- else:
- rot_mat = None
-
- if sca:
- sca_mat = translateScale((sca[0], sca[1], 0.0))
- else:
- sca_mat = None
-
- if tx:
- tx_mat = Matrix.Translation(Vector(tx).resize3D()).resize4x4()
- else:
- tx_mat = None
-
- new_mat = Matrix()
-
- # as specified in VRML97 docs
- mats = [cent_imat, sca_mat, rot_mat, cent_mat, tx_mat]
-
- for mtx in mats:
- if mtx:
- new_mat = new_mat * mtx
-
- return new_mat
-
-
-# 90d X rotation
-import math
-MATRIX_Z_TO_Y = Matrix.Rotation(math.pi / 2.0, 4, 'X')
-
-
-def getFinalMatrix(node, mtx, ancestry):
-
- transform_nodes = [node_tx for node_tx in ancestry if node_tx.getSpec() == 'Transform']
- if node.getSpec() == 'Transform':
- transform_nodes.append(node)
- transform_nodes.reverse()
-
- if mtx is None:
- mtx = Matrix()
-
- for node_tx in transform_nodes:
- mat = translateTransform(node_tx, ancestry)
- mtx = mat * mtx
-
- # worldspace matrix
- mtx = MATRIX_Z_TO_Y * mtx
-
- return mtx
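-
-# Illustrative example: for a node with two Transform ancestors T_outer and
-# T_inner, the loop above builds
-#
-# mtx = MATRIX_Z_TO_Y * (T_outer * T_inner * mtx)
-#
-# so parent transforms are applied to the left of child transforms, and the
-# result is rotated 90 degrees about X to go from VRML's y-up to Blender's z-up.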
-
-
-def importMesh_IndexedFaceSet(geom, bpyima, ancestry):
- # print(geom.lineno, geom.id, vrmlNode.DEF_NAMESPACE.keys())
-
- ccw = geom.getFieldAsBool('ccw', True, ancestry)
- ifs_colorPerVertex = geom.getFieldAsBool('colorPerVertex', True, ancestry) # per vertex or per face
- ifs_normalPerVertex = geom.getFieldAsBool('normalPerVertex', True, ancestry)
-
- # It is a bit odd that 'point' is inside the Coordinate node
-
- # VRML not x3d
- #coord = geom.getChildByName('coord') # 'Coordinate'
-
- coord = geom.getChildBySpec('Coordinate') # works for x3d and vrml
-
- if coord:
- ifs_points = coord.getFieldAsArray('point', 3, ancestry)
- else:
- coord = []
-
- if not coord:
- print('\tWarning: IndexedFaceSet has no points')
- return None, ccw
-
- ifs_faces = geom.getFieldAsArray('coordIndex', 0, ancestry)
-
- coords_tex = None
- if ifs_faces: # In rare cases this causes problems - no faces but UVs???
-
- # WORKS - VRML ONLY
- # coords_tex = geom.getChildByName('texCoord')
- coords_tex = geom.getChildBySpec('TextureCoordinate')
-
- if coords_tex:
- ifs_texpoints = coords_tex.getFieldAsArray('point', 2, ancestry)
- ifs_texfaces = geom.getFieldAsArray('texCoordIndex', 0, ancestry)
-
- if not ifs_texpoints:
- # If we have no coords, then don't bother
- coords_tex = None
-
- # WORKS - VRML ONLY
- # vcolor = geom.getChildByName('color')
- vcolor = geom.getChildBySpec('Color')
- vcolor_spot = None # spot color when we don't have an array of colors
- if vcolor:
- # float to char
- ifs_vcol = [(0, 0, 0)] # EEKADOODLE - vertex start at 1
- ifs_vcol.extend([col for col in vcolor.getFieldAsArray('color', 3, ancestry)])
- ifs_color_index = geom.getFieldAsArray('colorIndex', 0, ancestry)
-
- if not ifs_vcol:
- vcolor_spot = vcolor.getFieldAsFloatTuple('color', [], ancestry)
-
- # Convert faces into something blender can use
- edges = []
-
- # All lists are aligned!
- faces = []
- faces_uv = [] # if ifs_texfaces is empty then the faces_uv will match faces exactly.
- faces_orig_index = [] # for ngons, we need to know our original index
-
- if coords_tex and ifs_texfaces:
- do_uvmap = True
- else:
- do_uvmap = False
-
- # current_face = [0] # pointer anyone
-
- def add_face(face, fuvs, orig_index):
- l = len(face)
- if l == 3 or l == 4:
- faces.append(face)
- # faces_orig_index.append(current_face[0])
- if do_uvmap:
- faces_uv.append(fuvs)
-
- faces_orig_index.append(orig_index)
- elif l == 2:
- edges.append(face)
- elif l > 4:
- for i in range(2, len(face)):
- faces.append([face[0], face[i - 1], face[i]])
- if do_uvmap:
- faces_uv.append([fuvs[0], fuvs[i - 1], fuvs[i]])
- faces_orig_index.append(orig_index)
- else:
- # faces with 1 vert? pfft!
- # still will affect index ordering
- pass
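-
- # For example, a 5 sided face [v0, v1, v2, v3, v4] is fanned out above into
- # the triangles [v0, v1, v2], [v0, v2, v3], [v0, v3, v4], each keeping the
- # original face's orig_index so per-face colors still line up later.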
-
- face = []
- fuvs = []
- orig_index = 0
- for i, fi in enumerate(ifs_faces):
- # ifs_texfaces and ifs_faces should be aligned
- if fi != -1:
- # face.append(int(fi)) # in rare cases this is a float
- # EEKADOODLE!!!
- # Annoyance where faces that have a zero index vert get rotated. This will then mess up UVs and VColors
- face.append(int(fi) + 1) # in rare cases this is a float, +1 because of stupid EEKADOODLE :/
-
- if do_uvmap:
- if i >= len(ifs_texfaces):
- print('\tWarning: UV Texface index out of range')
- fuvs.append(ifs_texfaces[0])
- else:
- fuvs.append(ifs_texfaces[i])
- else:
- add_face(face, fuvs, orig_index)
- face = []
- if do_uvmap:
- fuvs = []
- orig_index += 1
-
- add_face(face, fuvs, orig_index)
- del add_face # don't need this func anymore
-
- bpymesh = bpy.data.meshes.new(name="XXX")
-
- # EEKADOODLE
- bpymesh.vertices.add(1 + (len(ifs_points)))
- bpymesh.vertices.foreach_set("co", [0, 0, 0] + [a for v in ifs_points for a in v]) # XXX25 speed
-
- # print(len(ifs_points), faces, edges, ngons)
-
- try:
- bpymesh.faces.add(len(faces))
- bpymesh.faces.foreach_set("vertices_raw", [a for f in faces for a in (f + [0] if len(f) == 3 else f)]) # XXX25 speed
- except KeyError:
- print("one or more vert indices out of range, corrupt file?")
- #for f in faces:
- # bpymesh.faces.extend(faces, smooth=True)
-
- # bpymesh.calcNormals()
- bpymesh.update()
-
- if len(bpymesh.faces) != len(faces):
- print('\tWarning: adding faces did not work! file is invalid, not adding UVs or vcolors')
- return bpymesh, ccw
-
- # Apply UVs if we have them
- if not do_uvmap:
- faces_uv = faces # fallback, we didn't need a uvmap in the first place, fall back to the face/vert mapping.
- if coords_tex:
- #print(ifs_texpoints)
- # print(geom)
- uvlay = bpymesh.uv_textures.new()
-
- for i, f in enumerate(uvlay.data):
- f.image = bpyima
- fuv = faces_uv[i] # uv indices
- for j, uv in enumerate(f.uv):
- # print(fuv, j, len(ifs_texpoints))
- try:
- f.uv[j] = ifs_texpoints[fuv[j]] # XXX25, speedup
- except:
- print('\tWarning: UV Index out of range')
- f.uv[j] = ifs_texpoints[0] # XXX25, speedup
-
- elif bpyima and len(bpymesh.faces):
- # Oh Bugger! - we can't really use blender's ORCO for texture space since texspace doesn't rotate.
- # we have to create VRML's coords as UVs instead.
-
- # VRML docs
- '''
- If the texCoord field is NULL, a default texture coordinate mapping is calculated using the local
- coordinate system bounding box of the shape. The longest dimension of the bounding box defines the S coordinates,
- and the next longest defines the T coordinates. If two or all three dimensions of the bounding box are equal,
- ties shall be broken by choosing the X, Y, or Z dimension in that order of preference.
- The value of the S coordinate ranges from 0 to 1, from one end of the bounding box to the other.
- The T coordinate ranges between 0 and the ratio of the second greatest dimension of the bounding box to the greatest dimension.
- '''
-
- # Note, S,T == U,V
- # U gets longest, V gets second longest
- xmin, ymin, zmin = ifs_points[0]
- xmax, ymax, zmax = ifs_points[0]
- for co in ifs_points:
- x, y, z = co
- if x < xmin:
- xmin = x
- if y < ymin:
- ymin = y
- if z < zmin:
- zmin = z
-
- if x > xmax:
- xmax = x
- if y > ymax:
- ymax = y
- if z > zmax:
- zmax = z
-
- xlen = xmax - xmin
- ylen = ymax - ymin
- zlen = zmax - zmin
-
- depth_min = xmin, ymin, zmin
- depth_list = [xlen, ylen, zlen]
- depth_sort = depth_list[:]
- depth_sort.sort()
-
- depth_idx = [depth_list.index(val) for val in depth_sort]
-
- axis_u = depth_idx[-1]
- axis_v = depth_idx[-2] # second longest
-
- # Hack, swap these !!! TODO - Why swap??? - it seems to work correctly but should not.
- # axis_u,axis_v = axis_v,axis_u
-
- min_u = depth_min[axis_u]
- min_v = depth_min[axis_v]
- depth_u = depth_list[axis_u]
- depth_v = depth_list[axis_v]
-
-
- if axis_u == axis_v:
- # This should be safe because when 2 axes have the same length, the lower index will be used.
- axis_v += 1
-
- uvlay = bpymesh.uv_textures.new()
-
- # HACK !!! - seems to be compatible with Cosmo though.
- depth_v = depth_u = max(depth_v, depth_u)
-
- bpymesh_vertices = bpymesh.vertices[:]
- bpymesh_faces = bpymesh.faces[:]
-
- for j, f in enumerate(uvlay.data):
- f.image = bpyima
- fuv = f.uv
- f_v = bpymesh_faces[j].vertices[:] # XXX25 speed
-
- for i, v in enumerate(f_v):
- co = bpymesh_vertices[v].co
- fuv[i] = (co[axis_u] - min_u) / depth_u, (co[axis_v] - min_v) / depth_v
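-
- # Worked example (illustrative): for a shape spanning 4.0 along X, 2.0 along Y
- # and 1.0 along Z, U runs along X and V along Y; with the shared divisor above
- # U covers 0.0..1.0 while V only covers 0.0..0.5, matching the VRML rule that
- # T ranges from 0 to the ratio of the second longest to the longest dimension.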
-
- # Add vertex colors
- if vcolor:
- # print(ifs_vcol)
- collay = bpymesh.vertex_colors.new()
-
- for f_idx, f in enumerate(collay.data):
- fv = bpymesh.faces[f_idx].vertices[:]
- if len(fv) == 3: # XXX speed
- fcol = f.color1, f.color2, f.color3
- else:
- fcol = f.color1, f.color2, f.color3, f.color4
- if ifs_colorPerVertex:
- for i, c in enumerate(fcol):
- color_index = fv[i] # color index is vert index
- if ifs_color_index:
- try:
- color_index = ifs_color_index[color_index]
- except:
- print('\tWarning: per vertex color index out of range')
- continue
-
- if color_index < len(ifs_vcol):
- c.r, c.g, c.b = ifs_vcol[color_index]
- else:
- #print('\tWarning: per face color index out of range')
- pass
- else:
- if vcolor_spot: # use 1 color, when ifs_vcol is []
- for c in fcol:
- c.r, c.g, c.b = vcolor_spot
- else:
- color_index = faces_orig_index[f_idx] # color index is face index
- #print(color_index, ifs_color_index)
- if ifs_color_index:
- if color_index >= len(ifs_color_index):
- print('\tWarning: per face color index out of range')
- color_index = 0
- else:
- color_index = ifs_color_index[color_index]
- try:
- col = ifs_vcol[color_index]
- except IndexError:
- # TODO, look
- col = (1.0, 1.0, 1.0)
- for i, c in enumerate(fcol):
- c.r, c.g, c.b = col
-
- # XXX25
- # bpymesh.vertices.delete([0, ]) # EEKADOODLE
-
- return bpymesh, ccw
-
-
-def importMesh_IndexedLineSet(geom, ancestry):
- # VRML not x3d
- #coord = geom.getChildByName('coord') # 'Coordinate'
- coord = geom.getChildBySpec('Coordinate') # works for x3d and vrml
- if coord:
- points = coord.getFieldAsArray('point', 3, ancestry)
- else:
- points = []
-
- if not points:
- print('\tWarning: IndexedLineSet had no points')
- return None
-
- ils_lines = geom.getFieldAsArray('coordIndex', 0, ancestry)
-
- lines = []
- line = []
-
- for il in ils_lines:
- if il == -1:
- lines.append(line)
- line = []
- else:
- line.append(int(il))
- lines.append(line)
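-
- # For example, a coordIndex of [0, 1, 2, -1, 3, 4, -1] becomes the point index
- # lists [0, 1, 2] and [3, 4] (plus a trailing empty list, skipped below).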
-
- # vcolor = geom.getChildByName('color') # blender doesn't have per vertex color
-
- bpycurve = bpy.data.curves.new('IndexedCurve', 'CURVE')
- bpycurve.dimensions = '3D'
-
- for line in lines:
- if not line:
- continue
- co = points[line[0]]
- nu = bpycurve.splines.new('POLY')
- nu.points.add(len(line))
-
- for il, pt in zip(line, nu.points):
- pt.co[0:3] = points[il]
-
- return bpycurve
-
-
-def importMesh_PointSet(geom, ancestry):
- # VRML not x3d
- #coord = geom.getChildByName('coord') # 'Coordinate'
- coord = geom.getChildBySpec('Coordinate') # works for x3d and vrml
- if coord:
- points = coord.getFieldAsArray('point', 3, ancestry)
- else:
- points = []
-
- # vcolor = geom.getChildByName('color') # blender doesn't have per vertex color
-
- bpymesh = bpy.data.meshes.new("XXX")
- bpymesh.vertices.add(len(points))
- bpymesh.vertices.foreach_set("co", [a for v in points for a in v])
-
- # bpymesh.calcNormals() # will just be dummy normals
- bpymesh.update()
- return bpymesh
-
-GLOBALS['CIRCLE_DETAIL'] = 12
-
-
-def bpy_ops_add_object_hack(): # XXX25, evil
- scene = bpy.context.scene
- obj = scene.objects[0]
- scene.objects.unlink(obj)
- bpymesh = obj.data
- bpy.data.objects.remove(obj)
- return bpymesh
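-
-# Usage sketch (illustrative): the primitive operators used below link a new
-# object into the scene; bpy_ops_add_object_hack() assumes it shows up as
-# scene.objects[0], unlinks and removes it, and returns only the mesh datablock.
-#
-# bpy.ops.mesh.primitive_cube_add(view_align=False, enter_editmode=False)
-# bpymesh = bpy_ops_add_object_hack()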
-
-
-def importMesh_Sphere(geom, ancestry):
- diameter = geom.getFieldAsFloat('radius', 0.5, ancestry)
- # bpymesh = Mesh.Primitives.UVsphere(GLOBALS['CIRCLE_DETAIL'], GLOBALS['CIRCLE_DETAIL'], diameter)
-
- bpy.ops.mesh.primitive_uv_sphere_add(segments=GLOBALS['CIRCLE_DETAIL'],
- ring_count=GLOBALS['CIRCLE_DETAIL'],
- size=diameter,
- view_align=False,
- enter_editmode=False,
- )
-
- bpymesh = bpy_ops_add_object_hack()
-
- bpymesh.transform(MATRIX_Z_TO_Y)
- return bpymesh
-
-
-def importMesh_Cylinder(geom, ancestry):
- # bpymesh = bpy.data.meshes.new()
- diameter = geom.getFieldAsFloat('radius', 1.0, ancestry)
- height = geom.getFieldAsFloat('height', 2, ancestry)
-
- # bpymesh = Mesh.Primitives.Cylinder(GLOBALS['CIRCLE_DETAIL'], diameter, height)
-
- bpy.ops.mesh.primitive_cylinder_add(vertices=GLOBALS['CIRCLE_DETAIL'],
- radius=diameter,
- depth=height,
- cap_ends=True,
- view_align=False,
- enter_editmode=False,
- )
-
- bpymesh = bpy_ops_add_object_hack()
-
- bpymesh.transform(MATRIX_Z_TO_Y)
-
- # Warning - Relies on the order Blender adds verts,
- # not nice design but won't change soon.
-
- bottom = geom.getFieldAsBool('bottom', True, ancestry)
- side = geom.getFieldAsBool('side', True, ancestry)
- top = geom.getFieldAsBool('top', True, ancestry)
-
- if not top: # last vert is top center of tri fan.
- # bpymesh.vertices.delete([(GLOBALS['CIRCLE_DETAIL'] + GLOBALS['CIRCLE_DETAIL']) + 1]) # XXX25
- pass
-
- if not bottom: # second last vert is bottom of triangle fan
- # XXX25
- # bpymesh.vertices.delete([GLOBALS['CIRCLE_DETAIL'] + GLOBALS['CIRCLE_DETAIL']])
- pass
-
- if not side:
- # remove all quads
- # XXX25
- # bpymesh.faces.delete(1, [f for f in bpymesh.faces if len(f) == 4])
- pass
-
- return bpymesh
-
-
-def importMesh_Cone(geom, ancestry):
- # bpymesh = bpy.data.meshes.new()
- diameter = geom.getFieldAsFloat('bottomRadius', 1.0, ancestry)
- height = geom.getFieldAsFloat('height', 2, ancestry)
-
- # bpymesh = Mesh.Primitives.Cone(GLOBALS['CIRCLE_DETAIL'], diameter, height)
-
- bpy.ops.mesh.primitive_cone_add(vertices=GLOBALS['CIRCLE_DETAIL'],
- radius=diameter,
- depth=height,
- cap_end=True,
- view_align=False,
- enter_editmode=False,
- )
-
- bpymesh = bpy_ops_add_object_hack()
-
- bpymesh.transform(MATRIX_Z_TO_Y)
-
- # Warning - Relies on the order Blender adds verts,
- # not nice design but won't change soon.
-
- bottom = geom.getFieldAsBool('bottom', True, ancestry)
- side = geom.getFieldAsBool('side', True, ancestry)
-
- if not bottom: # last vert is on the bottom
- # bpymesh.vertices.delete([GLOBALS['CIRCLE_DETAIL'] + 1]) # XXX25
- pass
- if not side: # second last vert is on the pointy bit of the cone
- # bpymesh.vertices.delete([GLOBALS['CIRCLE_DETAIL']]) # XXX25
- pass
-
- return bpymesh
-
-
-def importMesh_Box(geom, ancestry):
- # bpymesh = bpy.data.meshes.new()
-
- size = geom.getFieldAsFloatTuple('size', (2.0, 2.0, 2.0), ancestry)
-
- # bpymesh = Mesh.Primitives.Cube(1.0)
- bpy.ops.mesh.primitive_cube_add(view_align=False,
- enter_editmode=False,
- )
-
- bpymesh = bpy_ops_add_object_hack()
-
- # Scale the box to the size set
- scale_mat = Matrix(((size[0], 0, 0), (0, size[1], 0), (0, 0, size[2]))) * 0.5
- bpymesh.transform(scale_mat.resize4x4())
-
- return bpymesh
-
-
-def importShape(node, ancestry):
- vrmlname = node.getDefName()
- if not vrmlname:
- vrmlname = 'Shape'
-
- # works 100% in vrml, but not x3d
- #appr = node.getChildByName('appearance') # , 'Appearance'
- #geom = node.getChildByName('geometry') # , 'IndexedFaceSet'
-
- # Works in vrml and x3d
- appr = node.getChildBySpec('Appearance')
- geom = node.getChildBySpec(['IndexedFaceSet', 'IndexedLineSet', 'PointSet', 'Sphere', 'Box', 'Cylinder', 'Cone'])
-
- # For now only import IndexedFaceSet's
- if geom:
- bpymat = None
- bpyima = None
- texmtx = None
-
- depth = 0 # so we can set alpha face flag later
-
- if appr:
-
- #mat = appr.getChildByName('material') # 'Material'
- #ima = appr.getChildByName('texture') # , 'ImageTexture'
- #if ima and ima.getSpec() != 'ImageTexture':
- # print('\tWarning: texture type "%s" is not supported' % ima.getSpec())
- # ima = None
- # textx = appr.getChildByName('textureTransform')
-
- mat = appr.getChildBySpec('Material')
- ima = appr.getChildBySpec('ImageTexture')
-
- textx = appr.getChildBySpec('TextureTransform')
-
- if textx:
- texmtx = translateTexTransform(textx, ancestry)
-
- # print(mat, ima)
- if mat or ima:
-
- if not mat:
- mat = ima # This is a bit dumb, but just means we use default values for all
-
- # all values between 0.0 and 1.0, defaults from VRML docs
- bpymat = bpy.data.materials.new("XXX")
- bpymat.ambient = mat.getFieldAsFloat('ambientIntensity', 0.2, ancestry)
- bpymat.diffuse_color = mat.getFieldAsFloatTuple('diffuseColor', [0.8, 0.8, 0.8], ancestry)
-
- # NOTE - blender doesn't support emissive color
- # Store in mirror color and approximate with emit.
- emit = mat.getFieldAsFloatTuple('emissiveColor', [0.0, 0.0, 0.0], ancestry)
- bpymat.mirror_color = emit
- bpymat.emit = (emit[0] + emit[1] + emit[2]) / 3.0
-
- bpymat.specular_hardness = int(1 + (510 * mat.getFieldAsFloat('shininess', 0.2, ancestry))) # 0-1 -> 1-511
- bpymat.specular_color = mat.getFieldAsFloatTuple('specularColor', [0.0, 0.0, 0.0], ancestry)
- bpymat.alpha = 1.0 - mat.getFieldAsFloat('transparency', 0.0, ancestry)
- if bpymat.alpha < 0.999:
- bpymat.use_transparency = True
-
- if ima:
- ima_url = ima.getFieldAsString('url', None, ancestry)
-
- if ima_url == None:
- try:
- ima_url = ima.getFieldAsStringArray('url', ancestry)[0] # in some cases we get a list of images.
- except:
- ima_url = None
-
- if ima_url == None:
- print("\tWarning: image with no URL, this is odd")
- else:
- bpyima = image_utils.image_load(ima_url, dirName(node.getFilename()), place_holder=False, recursive=False, convert_callback=imageConvertCompat)
- if bpyima:
- texture = bpy.data.textures.new("XXX", 'IMAGE')
- texture.image = bpyima
-
- # Adds textures for materials (rendering)
- try:
- depth = bpyima.depth
- except:
- depth = -1
-
- mtex = bpymat.texture_slots.add()
- mtex.texture = texture
- mtex.texture_coords = 'UV'
- mtex.use_map_diffuse = True
-
- if depth == 32:
- # Image has alpha, map it to alpha as well and enable transparency
- texture.use_alpha = True
- mtex.use_map_alpha = True
- bpymat.use_transparency = True
- bpymat.alpha = 0.0
-
- ima_repS = ima.getFieldAsBool('repeatS', True, ancestry)
- ima_repT = ima.getFieldAsBool('repeatT', True, ancestry)
-
- # To make this work properly we'd need to scale the UVs too, better to ignore this
- # texture.repeat = max(1, ima_repS * 512), max(1, ima_repT * 512)
-
- if not ima_repS:
- bpyima.use_clamp_x = True
- if not ima_repT:
- bpyima.use_clamp_y = True
-
- bpydata = None
- geom_spec = geom.getSpec()
- ccw = True
- if geom_spec == 'IndexedFaceSet':
- bpydata, ccw = importMesh_IndexedFaceSet(geom, bpyima, ancestry)
- elif geom_spec == 'IndexedLineSet':
- bpydata = importMesh_IndexedLineSet(geom, ancestry)
- elif geom_spec == 'PointSet':
- bpydata = importMesh_PointSet(geom, ancestry)
- elif geom_spec == 'Sphere':
- bpydata = importMesh_Sphere(geom, ancestry)
- elif geom_spec == 'Box':
- bpydata = importMesh_Box(geom, ancestry)
- elif geom_spec == 'Cylinder':
- bpydata = importMesh_Cylinder(geom, ancestry)
- elif geom_spec == 'Cone':
- bpydata = importMesh_Cone(geom, ancestry)
- else:
- print('\tWarning: unsupported type "%s"' % geom_spec)
- return
-
- if bpydata:
- vrmlname = vrmlname + geom_spec
-
- bpydata.name = vrmlname
-
- bpyob = node.blendObject = bpy.data.objects.new(vrmlname, bpydata)
- bpy.context.scene.objects.link(bpyob)
-
- if type(bpydata) == bpy.types.Mesh:
- is_solid = geom.getFieldAsBool('solid', True, ancestry)
- creaseAngle = geom.getFieldAsFloat('creaseAngle', None, ancestry)
-
- if creaseAngle != None:
- bpydata.auto_smooth_angle = 1 + int(min(79, creaseAngle * RAD_TO_DEG))
- bpydata.use_auto_smooth = True
-
- # Only ever 1 material per shape
- if bpymat:
- bpydata.materials.append(bpymat)
-
- if bpydata.uv_textures:
-
- if depth == 32: # set the faces alpha flag?
- for f in bpydata.uv_textures.active.data:
- f.blend_type = 'ALPHA'
-
- if texmtx:
- # Apply texture transform?
- uv_copy = Vector()
- for f in bpydata.uv_textures.active.data:
- fuv = f.uv
- for i, uv in enumerate(fuv):
- uv_copy.x = uv[0]
- uv_copy.y = uv[1]
-
- fuv[i] = (uv_copy * texmtx)[0:2]
- # Done transforming the texture
-
- # Must be here and not in IndexedFaceSet because it needs an object for the flip func. Messy :/
- if not ccw:
- # bpydata.flipNormals()
- # XXX25
- pass
-
- # else could be a curve for example
-
- # Can transform data or object, better the object so we can instance the data
- #bpymesh.transform(getFinalMatrix(node))
- bpyob.matrix_world = getFinalMatrix(node, None, ancestry)
-
-
-def importLamp_PointLight(node, ancestry):
- vrmlname = node.getDefName()
- if not vrmlname:
- vrmlname = 'PointLight'
-
- # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0, ancestry) # TODO
- # attenuation = node.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry) # TODO
- color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
- intensity = node.getFieldAsFloat('intensity', 1.0, ancestry) # max is documented to be 1.0 but some files have higher.
- location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry)
- # is_on = node.getFieldAsBool('on', True, ancestry) # TODO
- radius = node.getFieldAsFloat('radius', 100.0, ancestry)
-
- bpylamp = bpy.data.lamps.new("ToDo", 'POINT')
- bpylamp.energy = intensity
- bpylamp.distance = radius
- bpylamp.color = color
-
- mtx = Matrix.Translation(Vector(location))
-
- return bpylamp, mtx
-
-
-def importLamp_DirectionalLight(node, ancestry):
- vrmlname = node.getDefName()
- if not vrmlname:
- vrmlname = 'DirectLight'
-
- # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0) # TODO
- color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
- direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry)
- intensity = node.getFieldAsFloat('intensity', 1.0, ancestry) # max is documented to be 1.0 but some files have higher.
- # is_on = node.getFieldAsBool('on', True, ancestry) # TODO
-
- bpylamp = bpy.data.lamps.new(vrmlname, 'SUN')
- bpylamp.energy = intensity
- bpylamp.color = color
-
- # lamps have their direction as -z, y == up
- mtx = Vector(direction).to_track_quat('-Z', 'Y').to_matrix().resize4x4()
-
- return bpylamp, mtx
-
-# looks like default values for beamWidth and cutOffAngle were swapped in VRML docs.
-
-
-def importLamp_SpotLight(node, ancestry):
- vrmlname = node.getDefName()
- if not vrmlname:
- vrmlname = 'SpotLight'
-
- # ambientIntensity = geom.getFieldAsFloat('ambientIntensity', 0.0, ancestry) # TODO
- # attenuation = geom.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry) # TODO
- beamWidth = node.getFieldAsFloat('beamWidth', 1.570796, ancestry) # max is documented to be 1.0 but some files have higher.
- color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
- cutOffAngle = node.getFieldAsFloat('cutOffAngle', 0.785398, ancestry) * 2.0 # max is documented to be 1.0 but some files have higher.
- direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry)
- intensity = node.getFieldAsFloat('intensity', 1.0, ancestry) # max is documented to be 1.0 but some files have higher.
- location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry)
- # is_on = node.getFieldAsBool('on', True, ancestry) # TODO
- radius = node.getFieldAsFloat('radius', 100.0, ancestry)
-
- bpylamp = bpy.data.lamps.new(vrmlname, 'SPOT')
- bpylamp.energy = intensity
- bpylamp.distance = radius
- bpylamp.color = color
- bpylamp.spot_size = cutOffAngle
- if beamWidth > cutOffAngle:
- bpylamp.spot_blend = 0.0
- else:
- if cutOffAngle == 0.0: # this should never happen!
- bpylamp.spot_blend = 0.5
- else:
- bpylamp.spot_blend = beamWidth / cutOffAngle
-
- # Convert
-
- # lamps have their direction as -z, y==up
- mtx = Matrix.Translation(Vector(location)) * Vector(direction).to_track_quat('-Z', 'Y').to_matrix().resize4x4()
-
- return bpylamp, mtx
-
-
-def importLamp(node, spec, ancestry):
- if spec == 'PointLight':
- bpylamp, mtx = importLamp_PointLight(node, ancestry)
- elif spec == 'DirectionalLight':
- bpylamp, mtx = importLamp_DirectionalLight(node, ancestry)
- elif spec == 'SpotLight':
- bpylamp, mtx = importLamp_SpotLight(node, ancestry)
- else:
- print("Error, not a lamp")
- raise ValueError
-
- bpyob = node.blendObject = bpy.data.objects.new("TODO", bpylamp)
- bpy.context.scene.objects.link(bpyob)
-
- bpyob.matrix_world = getFinalMatrix(node, mtx, ancestry)
-
-
-def importViewpoint(node, ancestry):
- name = node.getDefName()
- if not name:
- name = 'Viewpoint'
-
- fieldOfView = node.getFieldAsFloat('fieldOfView', 0.785398, ancestry) # max is documented to be 1.0 but some files have higher.
- # jump = node.getFieldAsBool('jump', True, ancestry)
- orientation = node.getFieldAsFloatTuple('orientation', (0.0, 0.0, 1.0, 0.0), ancestry)
- position = node.getFieldAsFloatTuple('position', (0.0, 0.0, 0.0), ancestry)
- description = node.getFieldAsString('description', '', ancestry)
-
- bpycam = bpy.data.cameras.new(name)
-
- bpycam.angle = fieldOfView
-
- mtx = Matrix.Translation(Vector(position)) * translateRotation(orientation)
-
- bpyob = node.blendObject = bpy.data.objects.new("TODO", bpycam)
- bpy.context.scene.objects.link(bpyob)
- bpyob.matrix_world = getFinalMatrix(node, mtx, ancestry)
-
-
-def importTransform(node, ancestry):
- name = node.getDefName()
- if not name:
- name = 'Transform'
-
- bpyob = node.blendObject = bpy.data.objects.new(name, None)
- bpy.context.scene.objects.link(bpyob)
-
- bpyob.matrix_world = getFinalMatrix(node, None, ancestry)
-
- # so they are not too annoying
- bpyob.empty_draw_type = 'PLAIN_AXES'
- bpyob.empty_draw_size = 0.2
-
-
-#def importTimeSensor(node):
-def action_fcurve_ensure(action, data_path, array_index):
- for fcu in action.fcurves:
- if fcu.data_path == data_path and fcu.array_index == array_index:
- return fcu
-
- return action.fcurves.new(data_path=data_path, array_index=array_index)
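-
-# Usage sketch (illustrative): fetch-or-create the X location F-Curve of an
-# action and add a keyframe at time 1.0, the same pattern the interpolator
-# translators below use.
-#
-# action = bpy.data.actions.new('web3d_ipo')
-# fcu = action_fcurve_ensure(action, "location", 0)
-# fcu.keyframe_points.add(1.0, 0.0)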
-
-
-def translatePositionInterpolator(node, action, ancestry):
- key = node.getFieldAsArray('key', 0, ancestry)
- keyValue = node.getFieldAsArray('keyValue', 3, ancestry)
-
- loc_x = action_fcurve_ensure(action, "location", 0)
- loc_y = action_fcurve_ensure(action, "location", 1)
- loc_z = action_fcurve_ensure(action, "location", 2)
-
- for i, time in enumerate(key):
- try:
- x, y, z = keyValue[i]
- except:
- continue
-
- loc_x.keyframe_points.add(time, x)
- loc_y.keyframe_points.add(time, y)
- loc_z.keyframe_points.add(time, z)
-
- for fcu in (loc_x, loc_y, loc_z):
- for kf in fcu.keyframe_points:
- kf.interpolation = 'LINEAR'
-
-
-def translateOrientationInterpolator(node, action, ancestry):
- key = node.getFieldAsArray('key', 0, ancestry)
- keyValue = node.getFieldAsArray('keyValue', 4, ancestry)
-
- rot_x = action_fcurve_ensure(action, "rotation_euler", 0)
- rot_y = action_fcurve_ensure(action, "rotation_euler", 1)
- rot_z = action_fcurve_ensure(action, "rotation_euler", 2)
-
- for i, time in enumerate(key):
- try:
- x, y, z, w = keyValue[i]
- except:
- continue
-
- mtx = translateRotation((x, y, z, w))
- eul = mtx.to_euler()
- rot_x.keyframe_points.add(time, eul.x)
- rot_y.keyframe_points.add(time, eul.y)
- rot_z.keyframe_points.add(time, eul.z)
-
- for fcu in (rot_x, rot_y, rot_z):
- for kf in fcu.keyframe_points:
- kf.interpolation = 'LINEAR'
-
-
-# Untested!
-def translateScalarInterpolator(node, action, ancestry):
- key = node.getFieldAsArray('key', 0, ancestry)
- keyValue = node.getFieldAsArray('keyValue', 3, ancestry)
-
- sca_x = action_fcurve_ensure(action, "scale", 0)
- sca_y = action_fcurve_ensure(action, "scale", 1)
- sca_z = action_fcurve_ensure(action, "scale", 2)
-
- for i, time in enumerate(key):
- try:
- x, y, z = keyValue[i]
- except:
- continue
-
- sca_x.keyframe_points.add(time, x)
- sca_y.keyframe_points.add(time, y)
- sca_z.keyframe_points.add(time, z)
-
-
-def translateTimeSensor(node, action, ancestry):
- '''
- Apply a time sensor to an action, VRML has many combinations of loop/start/stop/cycle times
- to give different results, for now just do the basics
- '''
-
- # XXX25 TODO
- if 1:
- return
-
- time_cu = action.addCurve('Time')
- time_cu.interpolation = Blender.IpoCurve.InterpTypes.LINEAR
-
- cycleInterval = node.getFieldAsFloat('cycleInterval', None, ancestry)
-
- startTime = node.getFieldAsFloat('startTime', 0.0, ancestry)
- stopTime = node.getFieldAsFloat('stopTime', 250.0, ancestry)
-
- if cycleInterval != None:
- stopTime = startTime + cycleInterval
-
- loop = node.getFieldAsBool('loop', False, ancestry)
-
- time_cu.append((1 + startTime, 0.0))
- time_cu.append((1 + stopTime, 1.0 / 10.0)) # annoying, the UI uses /10
-
- if loop:
- time_cu.extend = Blender.IpoCurve.ExtendTypes.CYCLIC # or - EXTRAP, CYCLIC_EXTRAP, CONST,
-
-
-def importRoute(node, ancestry):
- '''
- Animation route only at the moment
- '''
-
- if not hasattr(node, 'fields'):
- return
-
- routeIpoDict = node.getRouteIpoDict()
-
- def getIpo(id):
- try:
- action = routeIpoDict[id]
- except:
- action = routeIpoDict[id] = bpy.data.actions.new('web3d_ipo')
- return action
-
- # for getting definitions
- defDict = node.getDefDict()
- '''
- Handles routing nodes to each other
-
-ROUTE vpPI.value_changed TO champFly001.set_position
-ROUTE vpOI.value_changed TO champFly001.set_orientation
-ROUTE vpTs.fraction_changed TO vpPI.set_fraction
-ROUTE vpTs.fraction_changed TO vpOI.set_fraction
-ROUTE champFly001.bindTime TO vpTs.set_startTime
- '''
-
- #from_id, from_type = node.id[1].split('.')
- #to_id, to_type = node.id[3].split('.')
-
- #value_changed
- set_position_node = None
- set_orientation_node = None
- time_node = None
-
- for field in node.fields:
- if field and field[0] == 'ROUTE':
- try:
- from_id, from_type = field[1].split('.')
- to_id, to_type = field[3].split('.')
- except:
- print("Warning, invalid ROUTE", field)
- continue
-
- if from_type == 'value_changed':
- if to_type == 'set_position':
- action = getIpo(to_id)
- set_data_from_node = defDict[from_id]
- translatePositionInterpolator(set_data_from_node, action, ancestry)
-
- if to_type in ('set_orientation', 'rotation'):
- action = getIpo(to_id)
- set_data_from_node = defDict[from_id]
- translateOrientationInterpolator(set_data_from_node, action, ancestry)
-
- if to_type == 'set_scale':
- action = getIpo(to_id)
- set_data_from_node = defDict[from_id]
- translateScalarInterpolator(set_data_from_node, action, ancestry)
-
- elif from_type == 'bindTime':
- action = getIpo(from_id)
- time_node = defDict[to_id]
- translateTimeSensor(time_node, action, ancestry)
-
-
-def load_web3d(path, PREF_FLAT=False, PREF_CIRCLE_DIV=16, HELPER_FUNC=None):
-
- # Used when adding blender primitives
- GLOBALS['CIRCLE_DETAIL'] = PREF_CIRCLE_DIV
-
- #root_node = vrml_parse('/_Cylinder.wrl')
- if path.lower().endswith('.x3d'):
- root_node, msg = x3d_parse(path)
- else:
- root_node, msg = vrml_parse(path)
-
- if not root_node:
- print(msg)
- return
-
- # fill with tuples - (node, [parents-parent, parent])
- all_nodes = root_node.getSerialized([], [])
-
- for node, ancestry in all_nodes:
- #if 'castle.wrl' not in node.getFilename():
- # continue
-
- spec = node.getSpec()
- '''
- prefix = node.getPrefix()
- if prefix=='PROTO':
- pass
- else
- '''
- if HELPER_FUNC and HELPER_FUNC(node, ancestry):
- # Note, include this function so the VRML/X3D importer can be extended
- # by an external script. - gets first pick
- pass
- if spec == 'Shape':
- importShape(node, ancestry)
- elif spec in ('PointLight', 'DirectionalLight', 'SpotLight'):
- importLamp(node, spec, ancestry)
- elif spec == 'Viewpoint':
- importViewpoint(node, ancestry)
- elif spec == 'Transform':
- # Only use transform nodes when we are not importing a flat object hierarchy
- if PREF_FLAT == False:
- importTransform(node, ancestry)
- '''
- # These are dealt with later within importRoute
- elif spec=='PositionInterpolator':
- action = bpy.data.ipos.new('web3d_ipo', 'Object')
- translatePositionInterpolator(node, action)
- '''
-
- # After we import all nodes, route events - anim paths
- for node, ancestry in all_nodes:
- importRoute(node, ancestry)
-
- for node, ancestry in all_nodes:
- if node.isRoot():
- # we know that all nodes referenced from ROUTEs will be in
- # routeIpoDict, so no need to run node.getDefDict() for every node.
- routeIpoDict = node.getRouteIpoDict()
- defDict = node.getDefDict()
-
- for key, action in routeIpoDict.items():
-
- # Assign anim curves
- node = defDict[key]
- if node.blendObject == None: # Add an object if we need one for animation
- node.blendObject = bpy.data.objects.new('AnimOb', None) # , name)
- bpy.context.scene.objects.link(node.blendObject)
-
- if node.blendObject.animation_data is None:
- node.blendObject.animation_data_create()
-
- node.blendObject.animation_data.action = action
-
- # Add in hierarchy
- if PREF_FLAT == False:
- child_dict = {}
- for node, ancestry in all_nodes:
- if node.blendObject:
- blendObject = None
-
- # Get the last parent
- i = len(ancestry)
- while i:
- i -= 1
- blendObject = ancestry[i].blendObject
- if blendObject:
- break
-
- if blendObject:
- # Parent Slow, - 1 liner but works
- # blendObject.makeParent([node.blendObject], 0, 1)
-
- # Parent FAST
- try:
- child_dict[blendObject].append(node.blendObject)
- except:
- child_dict[blendObject] = [node.blendObject]
-
- # Parent
- for parent, children in child_dict.items():
- for c in children:
- c.parent = parent
-
- # update deps
- bpy.context.scene.update()
- del child_dict
-
-
-def load(operator, context, filepath=""):
-
- load_web3d(filepath,
- PREF_FLAT=True,
- PREF_CIRCLE_DIV=16,
- )
-
- return {'FINISHED'}