Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender-addons.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Martin Poirier <theeth@yahoo.com> 2011-12-31 22:25:00 +0400
committer: Martin Poirier <theeth@yahoo.com> 2011-12-31 22:25:00 +0400
commit: 7b00374ef198ea1801ac1e04ff98b949d80dd992 (patch)
tree: 4edfc1ac422afc669ea73255949cadf4678cf875 /netrender
parent: 14138fd7df450aaa7813cebc63ca891542d60c4e (diff)
netrender
wip feature: distributed point cache baking. Distributed baking works, but results are not sent back to master/client yet. Feature is disabled in the UI for this reason. new feature: job and slave tags, enables filtering slaves for specific jobs. Jobs are dispatched to a slave only if it has no tags or has all the job's tags. Render jobs have the tag "render" by default while baking jobs have the tag "baking". UI: Web interface additions to reflect tags and job subtypes (render/baking). bug fix: resetting a completed job correctly resets the status to queued
Diffstat (limited to 'netrender')
-rw-r--r--netrender/__init__.py6
-rw-r--r--netrender/baking.py87
-rw-r--r--netrender/client.py97
-rw-r--r--netrender/master.py31
-rw-r--r--netrender/master_html.py33
-rw-r--r--netrender/model.py90
-rw-r--r--netrender/operators.py64
-rw-r--r--netrender/repath.py2
-rw-r--r--netrender/slave.py57
-rw-r--r--netrender/ui.py15
-rw-r--r--netrender/utils.py44
11 files changed, 382 insertions, 144 deletions
diff --git a/netrender/__init__.py b/netrender/__init__.py
index 3e7c6e7a..1b4812d8 100644
--- a/netrender/__init__.py
+++ b/netrender/__init__.py
@@ -21,8 +21,8 @@
bl_info = {
"name": "Network Renderer",
"author": "Martin Poirier",
- "version": (1, 4),
- "blender": (2, 5, 6),
+ "version": (1, 7),
+ "blender": (2, 6, 0),
"api": 35011,
"location": "Render > Engine > Network Render",
"description": "Distributed rendering for Blender",
@@ -45,6 +45,7 @@ if "init_data" in locals():
imp.reload(ui)
imp.reload(repath)
imp.reload(versioning)
+ imp.reload(baking)
else:
from netrender import model
from netrender import operators
@@ -57,6 +58,7 @@ else:
from netrender import ui
from netrender import repath
from netrender import versioning
+ from netrender import baking
jobs = []
slaves = []
diff --git a/netrender/baking.py b/netrender/baking.py
new file mode 100644
index 00000000..b01d44d7
--- /dev/null
+++ b/netrender/baking.py
@@ -0,0 +1,87 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+import sys, subprocess
+
+BLENDER_PATH = sys.argv[0]
+
+def bake(job, tasks):
+ main_file = job.files[0]
+ job_full_path = main_file.filepath
+
+ task_commands = []
+ for task in tasks:
+ task_commands.extend(task)
+
+ process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-P", __file__, "--"] + task_commands, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ return process
+
+def process_cache(obj, point_cache):
+ if point_cache.is_baked:
+ bpy.ops.ptcache.free_bake({"point_cache": point_cache})
+
+ point_cache.use_disk_cache = True
+
+ bpy.ops.ptcache.bake({"point_cache": point_cache}, bake=True)
+
+def process_generic(obj, index):
+ modifier = obj.modifiers[index]
+ point_cache = modifier.point_cache
+ process_cache(obj, point_cache)
+
+def process_smoke(obj, index):
+ modifier = obj.modifiers[index]
+ point_cache = modifier.domain_settings.point_cache
+ process_cache(obj, point_cache)
+
+def process_particle(obj, index):
+ psys = obj.particle_systems[index]
+ point_cache = psys.point_cache
+ process_cache(obj, point_cache)
+
+def process_paint(obj, index):
+ modifier = obj.modifiers[index]
+ for surface in modifier.canvas_settings.canvas_surfaces:
+ process_cache(obj, surface.point_cache)
+
+def process_null(obj, index):
+ raise ValueException("No baking possible with arguments: " + " ".join(sys.argv))
+
+bake_funcs = {}
+bake_funcs["CLOTH"] = process_generic
+bake_funcs["SOFT_BODY"] = process_generic
+bake_funcs["PARTICLE_SYSTEM"] = process_particle
+bake_funcs["SMOKE"] = process_smoke
+bake_funcs["DYNAMIC_PAINT"] = process_paint
+
+if __name__ == "__main__":
+ try:
+ i = sys.argv.index("--")
+ except:
+ i = 0
+
+ if i:
+ task_args = sys.argv[i+1:]
+ for i in range(0, len(task_args), 3):
+ bake_type = task_args[i]
+ obj = bpy.data.objects[task_args[i+1]]
+ index = int(task_args[i+2])
+
+ bake_funcs.get(bake_type, process_null)(obj, index)
diff --git a/netrender/client.py b/netrender/client.py
index 057e801e..62eb4855 100644
--- a/netrender/client.py
+++ b/netrender/client.py
@@ -112,14 +112,14 @@ def fillCommonJobSettings(job, job_name, netsettings):
elif netsettings.job_type == "JOB_VCS":
job.type = netrender.model.JOB_VCS
-def clientSendJob(conn, scene, anim = False):
+def sendJob(conn, scene, anim = False):
netsettings = scene.network_render
if netsettings.job_type == "JOB_BLENDER":
- return clientSendJobBlender(conn, scene, anim)
+ return sendJobBlender(conn, scene, anim)
elif netsettings.job_type == "JOB_VCS":
- return clientSendJobVCS(conn, scene, anim)
+ return sendJobVCS(conn, scene, anim)
-def clientSendJobVCS(conn, scene, anim = False):
+def sendJobVCS(conn, scene, anim = False):
netsettings = scene.network_render
job = netrender.model.RenderJob()
@@ -140,8 +140,6 @@ def clientSendJobVCS(conn, scene, anim = False):
if filename[0] in (os.sep, os.altsep):
filename = filename[1:]
- print("CREATING VCS JOB", filename)
-
job.addFile(filename, signed=False)
job_name = netsettings.job_name
@@ -158,6 +156,8 @@ def clientSendJobVCS(conn, scene, anim = False):
job.version_info.wpath = netsettings.vcs_wpath
job.version_info.rpath = netsettings.vcs_rpath
job.version_info.revision = netsettings.vcs_revision
+
+ job.tags.add(netrender.model.TAG_RENDER)
# try to send path first
with ConnectionContext():
@@ -171,7 +171,84 @@ def clientSendJobVCS(conn, scene, anim = False):
return job_id
-def clientSendJobBlender(conn, scene, anim = False):
+def sendJobBaking(conn, scene):
+ netsettings = scene.network_render
+ job = netrender.model.RenderJob()
+
+ filename = bpy.data.filepath
+
+ if not os.path.exists(filename):
+ raise RuntimeError("Current file path not defined\nSave your file before sending a job")
+
+ job.addFile(filename)
+
+ job_name = netsettings.job_name
+ path, name = os.path.split(filename)
+ if job_name == "[default]":
+ job_name = name
+
+ ###############################
+ # LIBRARIES (needed for baking)
+ ###############################
+ for lib in bpy.data.libraries:
+ file_path = bpy.path.abspath(lib.filepath)
+ if os.path.exists(file_path):
+ job.addFile(file_path)
+
+ tasks = set()
+
+ ####################################
+ # FLUID + POINT CACHE (what we bake)
+ ####################################
+ def pointCacheFunc(object, owner, point_cache):
+ if type(owner) == bpy.types.ParticleSystem:
+ index = [index for index, data in enumerate(object.particle_systems) if data == owner][0]
+ tasks.add(("PARTICLE_SYSTEM", object.name, str(index)))
+ else: # owner is modifier
+ index = [index for index, data in enumerate(object.modifiers) if data == owner][0]
+ tasks.add((owner.type, object.name, str(index)))
+
+ def fluidFunc(object, modifier, cache_path):
+ pass
+
+ def multiresFunc(object, modifier, cache_path):
+ pass
+
+ processObjectDependencies(pointCacheFunc, fluidFunc, multiresFunc)
+
+ fillCommonJobSettings(job, job_name, netsettings)
+
+ job.tags.add(netrender.model.TAG_BAKING)
+ job.subtype = netrender.model.JOB_SUB_BAKING
+ job.chunks = 1 # No chunking for baking
+
+ for i, task in enumerate(tasks):
+ job.addFrame(i + 1)
+ job.frames[-1].command = "|".join(task)
+
+ # try to send path first
+ with ConnectionContext():
+ conn.request("POST", "/job", json.dumps(job.serialize()))
+ response = conn.getresponse()
+ response.read()
+
+ job_id = response.getheader("job-id")
+
+ # if not ACCEPTED (but not processed), send files
+ if response.status == http.client.ACCEPTED:
+ for rfile in job.files:
+ f = open(rfile.filepath, "rb")
+ with ConnectionContext():
+ conn.request("PUT", fileURL(job_id, rfile.index), f)
+ f.close()
+ response = conn.getresponse()
+ response.read()
+
+ # server will reply with ACCEPTED until all files are found
+
+ return job_id
+
+def sendJobBlender(conn, scene, anim = False):
netsettings = scene.network_render
job = netrender.model.RenderJob()
@@ -219,7 +296,7 @@ def clientSendJobBlender(conn, scene, anim = False):
###########################
default_path = cachePath(filename)
- def pointCacheFunc(object, point_cache):
+ def pointCacheFunc(object, owner, point_cache):
addPointCache(job, object, point_cache, default_path)
def fluidFunc(object, modifier, cache_path):
@@ -233,6 +310,8 @@ def clientSendJobBlender(conn, scene, anim = False):
#print(job.files)
fillCommonJobSettings(job, job_name, netsettings)
+
+ job.tags.add(netrender.model.TAG_RENDER)
# try to send path first
with ConnectionContext():
@@ -322,7 +401,7 @@ class NetworkRenderEngine(bpy.types.RenderEngine):
if response.status == http.client.NO_CONTENT:
new_job = True
- netsettings.job_id = clientSendJob(conn, scene)
+ netsettings.job_id = sendJob(conn, scene)
job_id = netsettings.job_id
requestResult(conn, job_id, scene.frame_current)
diff --git a/netrender/master.py b/netrender/master.py
index 92181ec1..599277dc 100644
--- a/netrender/master.py
+++ b/netrender/master.py
@@ -54,12 +54,9 @@ class MRenderFile(netrender.model.RenderFile):
class MRenderSlave(netrender.model.RenderSlave):
- def __init__(self, name, address, stats):
- super().__init__()
- self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest()
- self.name = name
- self.address = address
- self.stats = stats
+ def __init__(self, slave_info):
+ super().__init__(slave_info)
+ self.id = hashlib.md5(bytes(repr(slave_info.name) + repr(slave_info.address), encoding='utf8')).hexdigest()
self.last_seen = time.time()
self.job = None
@@ -171,6 +168,9 @@ class MRenderJob(netrender.model.RenderJob):
def reset(self, all):
for f in self.frames:
f.reset(all)
+
+ if all:
+ self.status = JOB_QUEUED
def getFrames(self):
frames = []
@@ -435,7 +435,7 @@ class RenderHandler(http.server.BaseHTTPRequestHandler):
slave = self.server.getSeenSlave(slave_id)
if slave: # only if slave id is valid
- job, frames = self.server.newDispatch(slave_id)
+ job, frames = self.server.newDispatch(slave)
if job and frames:
for f in frames:
@@ -680,8 +680,10 @@ class RenderHandler(http.server.BaseHTTPRequestHandler):
self.server.stats("", "New slave connected")
slave_info = netrender.model.RenderSlave.materialize(json.loads(str(self.rfile.read(length), encoding='utf8')), cache = False)
+
+ slave_info.address = self.client_address
- slave_id = self.server.addSlave(slave_info.name, self.client_address, slave_info.stats)
+ slave_id = self.server.addSlave(slave_info)
self.send_head(headers = {"slave-id": slave_id}, content = None)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
@@ -919,8 +921,8 @@ class RenderMasterServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
self.job_id += 1
return str(self.job_id)
- def addSlave(self, name, address, stats):
- slave = MRenderSlave(name, address, stats)
+ def addSlave(self, slave_info):
+ slave = MRenderSlave(slave_info)
self.slaves.append(slave)
self.slaves_map[slave.id] = slave
@@ -1022,10 +1024,15 @@ class RenderMasterServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
for job in self.jobs:
yield job
- def newDispatch(self, slave_id):
+ def newDispatch(self, slave):
if self.jobs:
for job in self.jobs:
- if not self.balancer.applyExceptions(job) and slave_id not in job.blacklist:
+ if (
+ not self.balancer.applyExceptions(job) # No exceptions
+ and slave.id not in job.blacklist # slave is not blacklisted
+ and (not slave.tags or job.tags.issubset(slave.tags)) # slave doesn't use tags or slave has all job tags
+ ):
+
return job, job.getFrames()
return None, None
diff --git a/netrender/master_html.py b/netrender/master_html.py
index d737a51b..a8e6eaa6 100644
--- a/netrender/master_html.py
+++ b/netrender/master_html.py
@@ -115,6 +115,7 @@ def get(handler):
"id",
"name",
"category",
+ "tags",
"type",
"chunks",
"priority",
@@ -140,7 +141,8 @@ def get(handler):
job.id,
link(job.name, "/html/job" + job.id),
job.category if job.category else "<i>None</i>",
- netrender.model.JOB_TYPES[job.type],
+ ";".join(sorted(job.tags)) if job.tags else "<i>None</i>",
+ "%s [%s]" % (netrender.model.JOB_TYPES[job.type], netrender.model.JOB_SUBTYPES[job.subtype]),
str(job.chunks) +
"""<button title="increase chunks size" onclick="request('/edit_%s', &quot;{'chunks': %i}&quot;);">+</button>""" % (job.id, job.chunks + 1) +
"""<button title="decrease chunks size" onclick="request('/edit_%s', &quot;{'chunks': %i}&quot;);" %s>-</button>""" % (job.id, job.chunks - 1, "disabled=True" if job.chunks == 1 else ""),
@@ -164,11 +166,10 @@ def get(handler):
output("<h2>Slaves</h2>")
startTable()
- headerTable("name", "address", "last seen", "stats", "job")
+ headerTable("name", "address", "tags", "last seen", "stats", "job")
for slave in handler.server.slaves:
- rowTable(slave.name, slave.address[0], time.ctime(slave.last_seen), slave.stats, link(slave.job.name, "/html/job" + slave.job.id) if slave.job else "None")
-
+ rowTable(slave.name, slave.address[0], ";".join(sorted(slave.tags)) if slave.tags else "<i>All</i>", time.ctime(slave.last_seen), slave.stats, link(slave.job.name, "/html/job" + slave.job.id) if slave.job else "None")
endTable()
output("<h2>Configuration</h2>")
@@ -221,7 +222,7 @@ def get(handler):
job = handler.server.getJobID(job_id)
if job:
- output("<h2>Render Information</h2>")
+ output("<h2>Job Information</h2>")
job.initInfo()
@@ -229,6 +230,8 @@ def get(handler):
rowTable("resolution", "%ix%i at %i%%" % job.resolution)
+ rowTable("tags", ";".join(sorted(job.tags)) if job.tags else "<i>None</i>")
+
endTable()
@@ -240,18 +243,17 @@ def get(handler):
tot_cache = 0
tot_fluid = 0
+ tot_other = 0
- rowTable(job.files[0].filepath)
- rowTable("Other Files", class_style = "toggle", extra = "onclick='toggleDisplay(&quot;.other&quot;, &quot;none&quot;, &quot;table-row&quot;)'")
+ rowTable(job.files[0].original_path)
for file in job.files:
if file.filepath.endswith(".bphys"):
tot_cache += 1
elif file.filepath.endswith(".bobj.gz") or file.filepath.endswith(".bvel.gz"):
tot_fluid += 1
- else:
- if file != job.files[0]:
- rowTable(file.filepath, class_style = "other")
+ elif not file == job.files[0]:
+ tot_other += 1
if tot_cache > 0:
rowTable("%i physic cache files" % tot_cache, class_style = "toggle", extra = "onclick='toggleDisplay(&quot;.cache&quot;, &quot;none&quot;, &quot;table-row&quot;)'")
@@ -265,6 +267,17 @@ def get(handler):
if file.filepath.endswith(".bobj.gz") or file.filepath.endswith(".bvel.gz"):
rowTable(os.path.split(file.filepath)[1], class_style = "fluid")
+ if tot_other > 0:
+ rowTable("%i other files" % tot_other, class_style = "toggle", extra = "onclick='toggleDisplay(&quot;.other&quot;, &quot;none&quot;, &quot;table-row&quot;)'")
+ for file in job.files:
+ if (
+ not file.filepath.endswith(".bphys")
+ and not file.filepath.endswith(".bobj.gz") or file.filepath.endswith(".bvel.gz")
+ and not file == job.files[0]
+ ):
+
+ rowTable(file.filepath, class_style = "other")
+
endTable()
elif job.type == netrender.model.JOB_VCS:
output("<h2>Versioning</h2>")
diff --git a/netrender/model.py b/netrender/model.py
index c2331315..c5ee54f0 100644
--- a/netrender/model.py
+++ b/netrender/model.py
@@ -19,6 +19,11 @@
import netrender.versioning as versioning
from netrender.utils import *
+TAG_BAKING = "baking"
+TAG_RENDER = "render"
+
+TAG_ALL = set((TAG_BAKING, TAG_RENDER))
+
class LogFile:
def __init__(self, job_id = 0, slave_id = 0, frames = []):
self.job_id = job_id
@@ -47,14 +52,22 @@ class LogFile:
class RenderSlave:
_slave_map = {}
- def __init__(self):
+ def __init__(self, info = None):
self.id = ""
- self.name = ""
- self.address = ("",0)
- self.stats = ""
self.total_done = 0
self.total_error = 0
self.last_seen = 0.0
+
+ if info:
+ self.name = info.name
+ self.address = info.address
+ self.stats = info.stats
+ self.tags = info.tags
+ else:
+ self.name = ""
+ self.address = ("",0)
+ self.stats = ""
+ self.tags = set()
def serialize(self):
return {
@@ -64,7 +77,8 @@ class RenderSlave:
"stats": self.stats,
"total_done": self.total_done,
"total_error": self.total_error,
- "last_seen": self.last_seen
+ "last_seen": self.last_seen,
+ "tags": tuple(self.tags)
}
@staticmethod
@@ -85,6 +99,7 @@ class RenderSlave:
slave.total_done = data["total_done"]
slave.total_error = data["total_error"]
slave.last_seen = data["last_seen"]
+ slave.tags = set(data["tags"])
if cache:
RenderSlave._slave_map[slave_id] = slave
@@ -101,6 +116,14 @@ JOB_TYPES = {
JOB_VCS: "Versioned",
}
+JOB_SUB_RENDER = 1
+JOB_SUB_BAKING = 2
+
+JOB_SUBTYPES = {
+ JOB_SUB_RENDER: "Render",
+ JOB_SUB_BAKING: "Baking",
+ }
+
class VersioningInfo:
def __init__(self, info = None):
self._system = None
@@ -186,18 +209,8 @@ class RenderFile:
return rfile
class RenderJob:
- def __init__(self, job_info = None):
+ def __init__(self, info = None):
self.id = ""
- self.type = JOB_BLENDER
- self.name = ""
- self.category = "None"
- self.status = JOB_WAITING
- self.files = []
- self.chunks = 0
- self.priority = 0
- self.blacklist = []
- self.render = "BLENDER_RENDER"
- self.version_info = None
self.resolution = None
@@ -205,23 +218,38 @@ class RenderJob:
self.last_dispatched = 0.0
self.frames = []
- if job_info:
- self.type = job_info.type
- self.name = job_info.name
- self.category = job_info.category
- self.status = job_info.status
- self.files = job_info.files
- self.chunks = job_info.chunks
- self.priority = job_info.priority
- self.blacklist = job_info.blacklist
- self.version_info = job_info.version_info
- self.render = job_info.render
+ if info:
+ self.type = info.type
+ self.subtype = info.subtype
+ self.name = info.name
+ self.category = info.category
+ self.tags = info.tags
+ self.status = info.status
+ self.files = info.files
+ self.chunks = info.chunks
+ self.priority = info.priority
+ self.blacklist = info.blacklist
+ self.version_info = info.version_info
+ self.render = info.render
+ else:
+ self.type = JOB_BLENDER
+ self.subtype = JOB_SUB_RENDER
+ self.name = ""
+ self.category = "None"
+ self.tags = set()
+ self.status = JOB_WAITING
+ self.files = []
+ self.chunks = 0
+ self.priority = 0
+ self.blacklist = []
+ self.version_info = None
+ self.render = "BLENDER_RENDER"
def hasRenderResult(self):
- return self.type in (JOB_BLENDER, JOB_VCS)
+ return self.subtype == JOB_SUB_RENDER
def rendersWithBlender(self):
- return self.type in (JOB_BLENDER, JOB_VCS)
+ return self.subtype == JOB_SUB_RENDER
def addFile(self, file_path, start=-1, end=-1, signed=True):
def isFileInFrames():
@@ -297,8 +325,10 @@ class RenderJob:
return {
"id": self.id,
"type": self.type,
+ "subtype": self.subtype,
"name": self.name,
"category": self.category,
+ "tags": tuple(self.tags),
"status": self.status,
"files": [f.serialize() for f in self.files if f.start == -1 or not frames or (f.start <= max_frame and f.end >= min_frame)],
"frames": [f.serialize() for f in self.frames if not frames or f in frames],
@@ -320,8 +350,10 @@ class RenderJob:
job = RenderJob()
job.id = data["id"]
job.type = data["type"]
+ job.subtype = data["subtype"]
job.name = data["name"]
job.category = data["category"]
+ job.tags = set(data["tags"])
job.status = data["status"]
job.files = [RenderFile.materialize(f) for f in data["files"]]
job.frames = [RenderFrame.materialize(f) for f in data["frames"]]
diff --git a/netrender/operators.py b/netrender/operators.py
index 1115e2b4..d6d441ba 100644
--- a/netrender/operators.py
+++ b/netrender/operators.py
@@ -28,10 +28,10 @@ import netrender.client as client
import netrender.model
import netrender.versioning as versioning
-class RENDER_OT_netslave_bake(bpy.types.Operator):
- '''NEED DESCRIPTION'''
- bl_idname = "render.netslavebake"
- bl_label = "Bake all in file"
+class RENDER_OT_netclientsendbake(bpy.types.Operator):
+ '''Send a baking job to the Network'''
+ bl_idname = "render.netclientsendbake"
+ bl_label = "Bake on network"
@classmethod
def poll(cls, context):
@@ -39,45 +39,19 @@ class RENDER_OT_netslave_bake(bpy.types.Operator):
def execute(self, context):
scene = context.scene
- # netsettings = scene.network_render # UNUSED
-
- filename = bpy.data.filepath
- path, name = os.path.split(filename)
- root, ext = os.path.splitext(name)
- # default_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that, UNUSED
- relative_path = "//blendcache_" + root + os.sep
-
- # Force all point cache next to the blend file
- for object in bpy.data.objects:
- for modifier in object.modifiers:
- if modifier.type == 'FLUID_SIMULATION' and modifier.settings.type == "DOMAIN":
- modifier.settings.path = relative_path
- bpy.ops.fluid.bake({"active_object": object, "scene": scene})
- elif modifier.type == "CLOTH":
- modifier.point_cache.frame_step = 1
- modifier.point_cache.use_disk_cache = True
- modifier.point_cache.use_external = False
- elif modifier.type == "SOFT_BODY":
- modifier.point_cache.frame_step = 1
- modifier.point_cache.use_disk_cache = True
- modifier.point_cache.use_external = False
- elif modifier.type == "SMOKE" and modifier.smoke_type == "DOMAIN":
- modifier.domain_settings.point_cache.use_step = 1
- modifier.domain_settings.point_cache.use_disk_cache = True
- modifier.domain_settings.point_cache.use_external = False
-
- # particles modifier are stupid and don't contain data
- # we have to go through the object property
- for psys in object.particle_systems:
- psys.point_cache.use_step = 1
- psys.point_cache.use_disk_cache = True
- psys.point_cache.use_external = False
- psys.point_cache.filepath = relative_path
-
- bpy.ops.ptcache.bake_all()
-
- #bpy.ops.wm.save_mainfile(filepath = path + os.sep + root + "_baked.blend")
+ netsettings = scene.network_render
+
+ try:
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)
+ if conn:
+ # Sending file
+ client.sendJobBaking(conn, scene)
+ conn.close()
+ self.report({'INFO'}, "Job sent to master")
+ except Exception as err:
+ self.report({'ERROR'}, str(err))
+
return {'FINISHED'}
def invoke(self, context, event):
@@ -100,7 +74,7 @@ class RENDER_OT_netclientanim(bpy.types.Operator):
if conn:
# Sending file
- scene.network_render.job_id = client.clientSendJob(conn, scene, True)
+ scene.network_render.job_id = client.sendJob(conn, scene, True)
conn.close()
bpy.ops.render.render('INVOKE_AREA', animation=True)
@@ -145,7 +119,7 @@ class RENDER_OT_netclientsend(bpy.types.Operator):
if conn:
# Sending file
- scene.network_render.job_id = client.clientSendJob(conn, scene, True)
+ scene.network_render.job_id = client.sendJob(conn, scene, True)
conn.close()
self.report({'INFO'}, "Job sent to master")
except Exception as err:
@@ -175,7 +149,7 @@ class RENDER_OT_netclientsendframe(bpy.types.Operator):
if conn:
# Sending file
- scene.network_render.job_id = client.clientSendJob(conn, scene, False)
+ scene.network_render.job_id = client.sendJob(conn, scene, False)
conn.close()
self.report({'INFO'}, "Job sent to master")
except Exception as err:
diff --git a/netrender/repath.py b/netrender/repath.py
index 6b2a46ed..af8f79d8 100644
--- a/netrender/repath.py
+++ b/netrender/repath.py
@@ -103,7 +103,7 @@ def process(paths):
###########################
# FLUID + POINT CACHE
###########################
- def pointCacheFunc(object, point_cache):
+ def pointCacheFunc(object, owner, point_cache):
if not point_cache.use_disk_cache:
return
diff --git a/netrender/slave.py b/netrender/slave.py
index 7e681450..36768837 100644
--- a/netrender/slave.py
+++ b/netrender/slave.py
@@ -26,6 +26,7 @@ import bpy
from netrender.utils import *
import netrender.model
import netrender.repath
+import netrender.baking
import netrender.thumbnail as thumbnail
BLENDER_PATH = sys.argv[0]
@@ -34,36 +35,17 @@ CANCEL_POLL_SPEED = 2
MAX_TIMEOUT = 10
INCREMENT_TIMEOUT = 1
MAX_CONNECT_TRY = 10
-try:
- system = platform.system()
-except UnicodeDecodeError:
- import sys
- system = sys.platform
-
-if system in ('Windows', 'win32') and platform.version() >= '5': # Error mode is only available on Win2k or higher, that's version 5
- import ctypes
- def SetErrorMode():
- val = ctypes.windll.kernel32.SetErrorMode(0x0002)
- ctypes.windll.kernel32.SetErrorMode(val | 0x0002)
- return val
-
- def RestoreErrorMode(val):
- ctypes.windll.kernel32.SetErrorMode(val)
-else:
- def SetErrorMode():
- return 0
-
- def RestoreErrorMode(val):
- pass
def clearSlave(path):
shutil.rmtree(path)
-def slave_Info():
+def slave_Info(netsettings):
sysname, nodename, release, version, machine, processor = platform.uname()
slave = netrender.model.RenderSlave()
slave.name = nodename
slave.stats = sysname + " " + release + " " + machine + " " + processor
+ if netsettings.slave_tags:
+ slave.tags = set(netsettings.slave_tags.split(";"))
return slave
def testCancel(conn, job_id, frame_number):
@@ -155,7 +137,7 @@ def render_slave(engine, netsettings, threads):
if conn:
with ConnectionContext():
- conn.request("POST", "/slave", json.dumps(slave_Info().serialize()))
+ conn.request("POST", "/slave", json.dumps(slave_Info(netsettings).serialize()))
response = conn.getresponse()
response.read()
@@ -237,14 +219,23 @@ def render_slave(engine, netsettings, threads):
print("frame", frame.number)
frame_args += ["-f", str(frame.number)]
- val = SetErrorMode()
- process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-t", str(threads), "-o", os.path.join(JOB_PREFIX, "######"), "-E", job.render, "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- RestoreErrorMode(val)
+ with NoErrorDialogContext():
+ process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-t", str(threads), "-o", os.path.join(JOB_PREFIX, "######"), "-E", job.render, "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ elif job.subtype == netrender.model.JOB_SUB_BAKING:
+ tasks = []
+ for frame in job.frames:
+ i = frame.command.index("|")
+ ri = frame.command.rindex("|")
+ tasks.append((frame.command[:i], frame.command[i+1:ri], frame.command[ri+1:]))
+
+ with NoErrorDialogContext():
+ process = netrender.baking.bake(job, tasks)
+
elif job.type == netrender.model.JOB_PROCESS:
command = job.frames[0].command
- val = SetErrorMode()
- process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- RestoreErrorMode(val)
+ with NoErrorDialogContext():
+ process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
headers = {"slave-id":slave_id}
@@ -341,6 +332,14 @@ def render_slave(engine, netsettings, threads):
if responseStatus(conn) == http.client.NO_CONTENT:
continue
+ elif job.subtype == netrender.model.JOB_SUB_BAKING:
+ # For now just announce results
+ # TODO SEND ALL BAKING RESULTS
+ with ConnectionContext():
+ conn.request("PUT", "/render", headers=headers)
+ if responseStatus(conn) == http.client.NO_CONTENT:
+ continue
+
elif job.type == netrender.model.JOB_PROCESS:
with ConnectionContext():
conn.request("PUT", "/render", headers=headers)
diff --git a/netrender/ui.py b/netrender/ui.py
index 66939435..6193b7a6 100644
--- a/netrender/ui.py
+++ b/netrender/ui.py
@@ -159,6 +159,7 @@ class RENDER_PT_network_slave_settings(NetRenderButtonsPanel, bpy.types.Panel):
rd = context.scene.render
netsettings = context.scene.network_render
+ layout.prop(netsettings, "slave_tags", text="Tags")
layout.prop(netsettings, "use_slave_clear")
layout.prop(netsettings, "use_slave_thumb")
layout.prop(netsettings, "use_slave_output_log")
@@ -206,6 +207,7 @@ class RENDER_PT_network_job(NetRenderButtonsPanel, bpy.types.Panel):
if netsettings.server_address != "[default]":
layout.operator("render.netclientanim", icon='RENDER_ANIMATION')
layout.operator("render.netclientsend", icon='FILE_BLEND')
+ #layout.operator("render.netclientsendbake", icon='PHYSICS')
layout.operator("render.netclientsendframe", icon='RENDER_STILL')
if netsettings.job_id:
row = layout.row()
@@ -215,6 +217,7 @@ class RENDER_PT_network_job(NetRenderButtonsPanel, bpy.types.Panel):
layout.prop(netsettings, "job_type", text="Type")
layout.prop(netsettings, "job_name", text="Name")
layout.prop(netsettings, "job_category", text="Category")
+ layout.prop(netsettings, "job_tags", text="Tags")
layout.prop(netsettings, "job_render_engine", text="Engine")
if netsettings.job_render_engine == "OTHER":
@@ -411,6 +414,12 @@ class NetRenderSettings(bpy.types.PropertyGroup):
description="Generate thumbnails on slaves instead of master",
default = False)
+ NetRenderSettings.slave_tags = StringProperty(
+ name="Tags",
+ description="Tags to associate with the slave (semi-colon separated)",
+ maxlen = 256,
+ default = "")
+
NetRenderSettings.use_slave_output_log = BoolProperty(
name="Output render log on console",
description="Output render text log to console as well as sending it to the master",
@@ -465,6 +474,12 @@ class NetRenderSettings(bpy.types.PropertyGroup):
maxlen = 128,
default = "")
+ NetRenderSettings.job_tags = StringProperty(
+ name="Tags",
+ description="Tags to associate with the job (semi-colon separated)",
+ maxlen = 256,
+ default = "")
+
NetRenderSettings.job_render_engine = EnumProperty(
items = (
("BLENDER_RENDER", "BLENDER", "Standard Blender Render"),
diff --git a/netrender/utils.py b/netrender/utils.py
index 108d9c52..fa10386e 100644
--- a/netrender/utils.py
+++ b/netrender/utils.py
@@ -57,7 +57,14 @@ FRAME_STATUS_TEXT = {
ERROR: "Error"
}
-if platform.system() == "Darwin":
+try:
+ system = platform.system()
+except UnicodeDecodeError:
+ import sys
+ system = sys.platform
+
+
+if system == "Darwin":
class ConnectionContext:
def __init__(self, timeout = None):
self.old_timeout = socket.getdefaulttimeout()
@@ -81,6 +88,29 @@ else:
def __exit__(self, exc_type, exc_value, traceback):
pass
+if system in ('Windows', 'win32') and platform.version() >= '5': # Error mode is only available on Win2k or higher, that's version 5
+ import ctypes
+ class NoErrorDialogContext:
+ def __init__(self):
+ self.val = 0
+
+ def __enter__(self):
+ self.val = ctypes.windll.kernel32.SetErrorMode(0x0002)
+ ctypes.windll.kernel32.SetErrorMode(self.val | 0x0002)
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ ctypes.windll.kernel32.SetErrorMode(self.val)
+else:
+ class NoErrorDialogContext:
+ def __init__(self):
+ pass
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
class DirectoryContext:
def __init__(self, path):
self.path = path
@@ -244,7 +274,7 @@ def cachePath(file_path):
return path + os.sep + "blendcache_" + root # need an API call for that
# Process dependencies of all objects with user defined functions
-# pointCacheFunction(object, point_cache)
+# pointCacheFunction(object, owner, point_cache) (owner is modifier or psys)
# fluidFunction(object, modifier, cache_path)
# multiresFunction(object, modifier, cache_path)
def processObjectDependencies(pointCacheFunction, fluidFunction, multiresFunction):
@@ -253,21 +283,21 @@ def processObjectDependencies(pointCacheFunction, fluidFunction, multiresFunctio
if modifier.type == 'FLUID_SIMULATION' and modifier.settings.type == "DOMAIN":
fluidFunction(object, modifier, bpy.path.abspath(modifier.settings.filepath))
elif modifier.type == "CLOTH":
- pointCacheFunction(object, modifier.point_cache)
+ pointCacheFunction(object, modifier, modifier.point_cache)
elif modifier.type == "SOFT_BODY":
- pointCacheFunction(object, modifier.point_cache)
+ pointCacheFunction(object, modifier, modifier.point_cache)
elif modifier.type == "SMOKE" and modifier.smoke_type == "DOMAIN":
- pointCacheFunction(object, modifier.domain_settings.point_cache)
+ pointCacheFunction(object, modifier, modifier.domain_settings.point_cache)
elif modifier.type == "MULTIRES" and modifier.is_external:
multiresFunction(object, modifier, bpy.path.abspath(modifier.filepath))
elif modifier.type == "DYNAMIC_PAINT" and modifier.canvas_settings:
for surface in modifier.canvas_settings.canvas_surfaces:
- pointCacheFunction(object, surface.point_cache)
+ pointCacheFunction(object, modifier, surface.point_cache)
# particles modifier are stupid and don't contain data
# we have to go through the object property
for psys in object.particle_systems:
- pointCacheFunction(object, psys.point_cache)
+ pointCacheFunction(object, psys, psys.point_cache)
def prefixPath(prefix_directory, file_path, prefix_path, force = False):