Welcome to mirror list, hosted at ThFree Co, Russian Federation.

slave.py « netrender « io « release - git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
blob: c12c846231d44bbe17b60e5696dc4ba09e7d4b21 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
import sys, os
import http, http.client, http.server, urllib
import subprocess, time

from netrender.utils import *
import netrender.model

# Minimum seconds between HEAD polls asking the master whether the
# currently rendering job was cancelled (see the render loop).
CANCEL_POLL_SPEED = 2
# Upper bound, in seconds, on the idle sleep between "any job for me?" polls.
MAX_TIMEOUT = 10
# Seconds added to the idle sleep after each empty poll, until MAX_TIMEOUT.
INCREMENT_TIMEOUT = 1

def slave_Info():
	"""Build a RenderSlave descriptor for this machine.

	Uses os.uname() (POSIX only) to fill in the host name and a short
	"sysname release machine" stats string.
	"""
	uname_info = os.uname()
	slave = netrender.model.RenderSlave()
	slave.name = uname_info[1]
	slave.stats = "%s %s %s" % (uname_info[0], uname_info[2], uname_info[4])
	return slave

def testCancel(conn, job_id):
		conn.request("HEAD", "status", headers={"job-id":job_id})
		response = conn.getresponse()
		
		# cancelled if job isn't found anymore
		if response.status == http.client.NO_CONTENT:
			return True
		else:
			return False

def testFile(conn, JOB_PREFIX, file_path, main_path = None, job_id = None, slave_id = None):
	"""Return the local path for a job file, downloading it if missing.

	conn:       open connection to the master server.
	JOB_PREFIX: local job directory (including trailing separator).
	file_path:  path of the wanted file as known by the server.
	main_path:  optional base path handed to prefixPath() for relocation.
	job_id, slave_id: identifiers sent along with the download request;
	    optional so existing three/four-argument callers keep working.

	Returns the local path of the file, or None when the server did not
	return the file (the caller must report the error to the server).
	"""
	job_full_path = prefixPath(JOB_PREFIX, file_path, main_path)
	
	if not os.path.exists(job_full_path):
		temp_path = JOB_PREFIX + "slave.temp.blend"
		
		# BUG FIX: the original read the undefined globals ``job`` and
		# ``slave_id`` here, raising NameError on every cache miss; they are
		# now explicit optional parameters and simply omitted when absent.
		headers = {"job-file": file_path}
		if job_id is not None:
			headers["job-id"] = job_id
		if slave_id is not None:
			headers["slave-id"] = slave_id
		
		conn.request("GET", "file", headers=headers)
		response = conn.getresponse()
		
		if response.status != http.client.OK:
			return None # file for job not returned by server, need to return an error code to server
		
		# Stream into a temp file, then move it into place, so a partial
		# download never masquerades as the real file. ``with`` guarantees
		# the handle is closed even if a write fails.
		with open(temp_path, "wb") as f:
			buf = response.read(1024)
			while buf:
				f.write(buf)
				buf = response.read(1024)
		
		os.renames(temp_path, job_full_path)
		
	return job_full_path


def render_slave(engine, scene):
	"""Run this Blender instance as a network render slave.

	Registers with the master configured in scene.network_render, then
	loops until engine.test_break(): fetch a job, make sure its files are
	local, render the requested frames in a background child Blender, and
	upload the resulting EXR frames (or an error result) plus the render
	log back to the master. While idle, the poll interval backs off by
	INCREMENT_TIMEOUT up to MAX_TIMEOUT seconds.
	"""
	netsettings = scene.network_render
	timeout = 1
	
	engine.update_stats("", "Network render node initiation")
	
	conn = clientConnection(scene)
	
	if conn:
		# Register with the master; it assigns the slave id used in every
		# subsequent request.
		conn.request("POST", "slave", repr(slave_Info().serialize()))
		response = conn.getresponse()
		
		slave_id = response.getheader("slave-id")
		
		# Per-slave working directory under the configured network path.
		NODE_PREFIX = netsettings.path + "slave_" + slave_id + os.sep
		if not os.path.exists(NODE_PREFIX):
			os.mkdir(NODE_PREFIX)
	
		while not engine.test_break():
			
			conn.request("GET", "job", headers={"slave-id":slave_id})
			response = conn.getresponse()
			
			if response.status == http.client.OK:
				timeout = 1 # reset timeout on new job
				
				# SECURITY NOTE(review): eval() of bytes received from the
				# master executes arbitrary code if the master is hostile or
				# spoofed -- a safe deserializer should replace this.
				job = netrender.model.RenderJob.materialize(eval(str(response.read(), encoding='utf8')))
				
				JOB_PREFIX = NODE_PREFIX + "job_" + job.id + os.sep
				if not os.path.exists(JOB_PREFIX):
					os.mkdir(JOB_PREFIX)
				
				job_path = job.files[0][0] # data in files have format (path, start, end)
				main_path, main_file = os.path.split(job_path)
				
				# Ensure the main .blend and every dependency are local.
				job_full_path = testFile(conn, JOB_PREFIX, job_path)
				print("Fullpath", job_full_path)
				print("File:", main_file, "and %i other files" % (len(job.files) - 1,))
				engine.update_stats("", "Render File", main_file, "for job", job.id)
				
				for file_path, start, end in job.files[1:]:
					print("\t", file_path)
					testFile(conn, JOB_PREFIX, file_path, main_path)
				
				frame_args = []
				
				for frame in job.frames:
					print("frame", frame.number)
					frame_args += ["-f", str(frame.number)]
					
				start_t = time.time()
				
				# Render in a child Blender (sys.argv[0]) in background mode,
				# writing multilayer EXR frames into the job directory.
				process = subprocess.Popen([sys.argv[0], "-b", job_full_path, "-o", JOB_PREFIX + "######", "-E", "BLENDER_RENDER", "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)	
				
				cancelled = False
				stdout = bytes()
				run_t = time.time()
				# Drain the child's output while watching for a local break
				# and, at most every CANCEL_POLL_SPEED seconds, asking the
				# master whether the job was cancelled.
				while process.poll() is None and not cancelled:
					stdout += process.stdout.read(32)
					current_t = time.time()
					cancelled = engine.test_break()
					if current_t - run_t > CANCEL_POLL_SPEED:
						if testCancel(conn, job.id):
							cancelled = True
						else:
							run_t = current_t
				
				if cancelled:
					# kill the child renderer if it is still running
					if process.poll() is None:
						process.terminate()
					continue # back to polling for the next job
				
				total_t = time.time() - start_t
				
				avg_t = total_t / len(job.frames)
				
				status = process.returncode
				
				print("status", status)
				
				headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)}
				
				if status == 0: # non zero status is error
					headers["job-result"] = str(DONE)
					for frame in job.frames:
						headers["job-frame"] = str(frame.number)
						# send result back to server; the open file object is
						# streamed as the PUT body, and ``with`` closes it
						# even if the request raises
						with open(JOB_PREFIX + "%06d" % frame.number + ".exr", 'rb') as f:
							conn.request("PUT", "render", f, headers=headers)
						response = conn.getresponse()
				else:
					headers["job-result"] = str(ERROR)
					for frame in job.frames:
						headers["job-frame"] = str(frame.number)
						# send error result back to server
						conn.request("PUT", "render", headers=headers)
						response = conn.getresponse()
				
				for frame in job.frames:
					headers["job-frame"] = str(frame.number)
					# send log in any case
					conn.request("PUT", "log", stdout, headers=headers)
					response = conn.getresponse()
			else:
				# No job available: back off up to MAX_TIMEOUT seconds,
				# sleeping one second at a time so a user break is still
				# noticed promptly.
				if timeout < MAX_TIMEOUT:
					timeout += INCREMENT_TIMEOUT
				
				for i in range(timeout):
					time.sleep(1)
					if engine.test_break():
						conn.close()
						return
			
		conn.close()