diff options
author | Campbell Barton <ideasman42@gmail.com> | 2007-01-31 04:18:51 +0300 |
---|---|---|
committer | Campbell Barton <ideasman42@gmail.com> | 2007-01-31 04:18:51 +0300 |
commit | 9cf602b9494857d98aec2c045492377005cdcf6b (patch) | |
tree | 90a5a6cdf4d7822ca8d9eb703558bf1c920d8dcd /release/scripts/3ds_import.py | |
parent | 9ce5dd4cc4361382f30def0dede7e3397b4e7dd4 (diff) |
3ds_export - enabled textures as an option since it works with some applications (only way to get textured models from Blender to Google SketchUp)
3ds_import - added option to disable recursive image searching (could be slow sometimes)
export_obj - when making group/object names only use both object and mesh name when they differ.
weightpaint_clean, weightpaint_grow_shrink - minor updates.
Render.py - own error in epydocs.
Diffstat (limited to 'release/scripts/3ds_import.py')
-rw-r--r-- | release/scripts/3ds_import.py | 30 |
1 files changed, 20 insertions, 10 deletions
diff --git a/release/scripts/3ds_import.py b/release/scripts/3ds_import.py index f3580868e20..56525d56611 100644 --- a/release/scripts/3ds_import.py +++ b/release/scripts/3ds_import.py @@ -308,7 +308,7 @@ def add_texture_to_material(image, texture, material, mapto): material.setTexture(free_tex_slots[0],texture,Texture.TexCo.UV,map) -def process_next_chunk(file, previous_chunk, importedObjects): +def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): #print previous_chunk.bytes_read, 'BYTES READ' contextObName= None contextLamp= [None, None] # object, Data @@ -441,7 +441,7 @@ def process_next_chunk(file, previous_chunk, importedObjects): elif (new_chunk.ID==OBJECTINFO): #print 'elif (new_chunk.ID==OBJECTINFO):' # print 'found an OBJECTINFO chunk' - process_next_chunk(file, new_chunk, importedObjects) + process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH) #keep track of how much we read in the main chunk new_chunk.bytes_read+=temp_chunk.bytes_read @@ -523,7 +523,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name=read_string(file) - img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed else: @@ -545,7 +546,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name= read_string(file) - img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) 
new_chunk.bytes_read+= (len(texture_name)+1) #plus one for the null character that gets removed else: skip_to_end(file, temp_chunk) @@ -566,7 +568,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name= read_string(file) - img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed else: skip_to_end(file, temp_chunk) @@ -586,7 +589,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name= read_string(file) - img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed else: skip_to_end(file, temp_chunk) @@ -729,7 +733,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): try: TEXTURE_DICT[contextMaterial.name] except: - img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read+= len(texture_name)+1 #plus one for the null character that gets removed @@ -776,12 +781,14 @@ def load_3ds(filename, PREF_UI= True): return - IMPORT_AS_INSTANCE= Blender.Draw.Create(0) + # IMPORT_AS_INSTANCE= Blender.Draw.Create(0) IMPORT_CONSTRAIN_BOUNDS= Blender.Draw.Create(10.0) + IMAGE_SEARCH= Blender.Draw.Create(1) # 
Get USER Options pup_block= [\ ('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reacehs the size constraint. Zero Disables.'),\ + ('Image Search', IMAGE_SEARCH, 'Search subdirs for any assosiated images (Warning, may be slow)'),\ #('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\ ] @@ -792,19 +799,22 @@ def load_3ds(filename, PREF_UI= True): Blender.Window.WaitCursor(1) IMPORT_CONSTRAIN_BOUNDS= IMPORT_CONSTRAIN_BOUNDS.val - IMPORT_AS_INSTANCE= IMPORT_AS_INSTANCE.val + # IMPORT_AS_INSTANCE= IMPORT_AS_INSTANCE.val + IMAGE_SEARCH = IMAGE_SEARCH.val if IMPORT_CONSTRAIN_BOUNDS: BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30] else: BOUNDS_3DS[:]= [] + ##IMAGE_SEARCH + scn= Scene.GetCurrent() SCN_OBJECTS = scn.objects SCN_OBJECTS.selected = [] # de select all importedObjects= [] # Fill this list with objects - process_next_chunk(file, current_chunk, importedObjects) + process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH) # Link the objects into this scene. |