git.blender.org/blender.git
author    Lukas Stockner <lukas.stockner@freenet.de>    2021-08-21 04:15:31 +0300
committer Lukas Stockner <lukas.stockner@freenet.de>    2021-08-21 22:39:06 +0300
commit    2ea66af742bca4b427f88de13254414730a33776 (patch)
tree      c540f16b81762e69c92834ee9da6e3a376a4e668 /source/blender/blenloader/intern/writefile.c
parent    2b170f16d6ded9b3bcb428121b27274ae8637555 (diff)
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown while loading and saving is a major annoyance. Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there are now several more modern algorithms that outperform it in every way.

In this patch, I decided on Zstandard (aka Zstd) for several reasons:
- It is widely supported, both in other programs and libraries as well as in general-purpose compression utilities on Unix.
- It is extremely flexible - spanning several orders of magnitude of compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations (meaning that no other algorithm is both faster and more efficient).

One downside of course is that older versions of Blender will not be able to read these files, but one can always just re-save them without compression or decompress the file manually with an external tool.

The implementation here saves additional metadata into the compressed file in order to allow for efficient seeking when loading. This is standard-compliant and will be ignored by other tools that support Zstd. If the metadata is not present (e.g. because you manually compressed a .blend file with another tool), Blender will fall back to sequential reading.

Saving is multithreaded to improve performance. Loading is currently not multithreaded since it's not easy to predict the access patterns of the loading code when seeking is supported. In the future, we might want to look into making this more predictable or disabling seeking for the main .blend file, which would then allow for multiple background threads that decompress data ahead of time.

The compression level was chosen to get sizes comparable to previous versions at much higher speeds. In the future, this could be exposed as an option.

Reviewed By: campbellbarton, brecht, mont29

Differential Revision: https://developer.blender.org/D5799
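
The seeking metadata mentioned above follows the Zstd seekable-format spec, so a reader can locate it from the end of the file. As a rough illustration only (this is not part of the patch; the loader-side code lives in readfile.c), here is a minimal sketch of parsing the 9-byte seek-table footer with plain stdio, using a hypothetical read_seek_table_footer() helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: the footer written by zstd_write_seekable_frames() is the last 9 bytes
 * of the file: number of frames (u32 LE), one flags byte, and the seekable-format
 * magic number 0x8F92EAB1 (u32 LE). */
static bool read_seek_table_footer(FILE *f, uint32_t *r_num_frames)
{
  uint8_t footer[9];
  if (fseek(f, -9L, SEEK_END) != 0 || fread(footer, 1, 9, f) != 9) {
    return false;
  }
  /* Little-endian decode, mirroring zstd_write_u32_le() in the patch. */
  const uint32_t magic = (uint32_t)footer[5] | ((uint32_t)footer[6] << 8) |
                         ((uint32_t)footer[7] << 16) | ((uint32_t)footer[8] << 24);
  if (magic != 0x8F92EAB1) {
    return false; /* No seek table present: fall back to sequential reading. */
  }
  *r_num_frames = (uint32_t)footer[0] | ((uint32_t)footer[1] << 8) |
                  ((uint32_t)footer[2] << 16) | ((uint32_t)footer[3] << 24);
  return true;
}

With the frame count known, a reader could seek back over the 8-byte-per-frame seek table entries and accumulate compressed/uncompressed sizes into per-frame offsets.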
Diffstat (limited to 'source/blender/blenloader/intern/writefile.c')
-rw-r--r--   source/blender/blenloader/intern/writefile.c   297
1 file changed, 231 insertions(+), 66 deletions(-)
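
Each ~1 MB chunk handed to ww_write_zstd() in the diff below is compressed into its own independent Zstd frame with the one-shot API, which is what makes per-frame seeking possible. A minimal single-threaded sketch of that per-block step (names hypothetical, error handling trimmed), assuming only the public zstd API:

#include <stdlib.h>
#include <zstd.h>

/* Compress one block into a standalone Zstd frame at the level used by the patch (3).
 * Returns a malloc'd buffer and stores the compressed size in *r_size, or NULL on error. */
static void *compress_block(const void *data, size_t size, size_t *r_size)
{
  const size_t bound = ZSTD_compressBound(size);
  void *out = malloc(bound);
  if (out == NULL) {
    return NULL;
  }
  const size_t out_size = ZSTD_compress(out, bound, data, size, 3);
  if (ZSTD_isError(out_size)) {
    free(out);
    return NULL;
  }
  *r_size = out_size;
  return out;
}

In the patch itself this work happens in zstd_write_task(), where worker threads compress blocks in parallel and a mutex/condition pair enforces that the resulting frames are written to disk in their original order.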
diff --git a/source/blender/blenloader/intern/writefile.c b/source/blender/blenloader/intern/writefile.c
index 4de8e35f402..6f43fbf1fa0 100644
--- a/source/blender/blenloader/intern/writefile.c
+++ b/source/blender/blenloader/intern/writefile.c
@@ -101,7 +101,12 @@
#include "BLI_bitmap.h"
#include "BLI_blenlib.h"
#include "BLI_endian_defines.h"
+#include "BLI_endian_switch.h"
+#include "BLI_link_utils.h"
+#include "BLI_linklist.h"
+#include "BLI_math_base.h"
#include "BLI_mempool.h"
+#include "BLI_threads.h"
#include "MEM_guardedalloc.h" /* MEM_freeN */
#include "BKE_blender_version.h"
@@ -129,14 +134,21 @@
#include <errno.h>
+#include <zstd.h>
+
/* Make preferences read-only. */
#define U (*((const UserDef *)&U))
/* ********* my write, buffered writing with minimum size chunks ************ */
/* Use optimal allocation since blocks of this size are kept in memory for undo. */
-#define MYWRITE_BUFFER_SIZE (MEM_SIZE_OPTIMAL(1 << 17)) /* 128kb */
-#define MYWRITE_MAX_CHUNK (MEM_SIZE_OPTIMAL(1 << 15)) /* ~32kb */
+#define MEM_BUFFER_SIZE (MEM_SIZE_OPTIMAL(1 << 17)) /* 128kb */
+#define MEM_CHUNK_SIZE (MEM_SIZE_OPTIMAL(1 << 15)) /* ~32kb */
+
+#define ZSTD_BUFFER_SIZE (1 << 21) /* 2mb */
+#define ZSTD_CHUNK_SIZE (1 << 20) /* 1mb */
+
+#define ZSTD_COMPRESSION_LEVEL 3
/** Use if we want to store how many bytes have been written to the file. */
// #define USE_WRITE_DATA_LEN
@@ -147,9 +159,16 @@
typedef enum {
WW_WRAP_NONE = 1,
- WW_WRAP_ZLIB,
+ WW_WRAP_ZSTD,
} eWriteWrapType;
+typedef struct ZstdFrame {
+ struct ZstdFrame *next, *prev;
+
+ uint32_t compressed_size;
+ uint32_t uncompressed_size;
+} ZstdFrame;
+
typedef struct WriteWrap WriteWrap;
struct WriteWrap {
/* callbacks */
@@ -161,15 +180,23 @@ struct WriteWrap {
bool use_buf;
/* internal */
- union {
- int file_handle;
- gzFile gz_handle;
- } _user_data;
+ int file_handle;
+ struct {
+ ListBase threadpool;
+ ListBase tasks;
+ ThreadMutex mutex;
+ ThreadCondition condition;
+ int next_frame;
+ int num_frames;
+
+ int level;
+ ListBase frames;
+
+ bool write_error;
+ } zstd;
};
/* none */
-#define FILE_HANDLE(ww) (ww)->_user_data.file_handle
-
static bool ww_open_none(WriteWrap *ww, const char *filepath)
{
int file;
@@ -177,7 +204,7 @@ static bool ww_open_none(WriteWrap *ww, const char *filepath)
file = BLI_open(filepath, O_BINARY + O_WRONLY + O_CREAT + O_TRUNC, 0666);
if (file != -1) {
- FILE_HANDLE(ww) = file;
+ ww->file_handle = file;
return true;
}
@@ -185,39 +212,170 @@ static bool ww_open_none(WriteWrap *ww, const char *filepath)
}
static bool ww_close_none(WriteWrap *ww)
{
- return (close(FILE_HANDLE(ww)) != -1);
+ return (close(ww->file_handle) != -1);
}
static size_t ww_write_none(WriteWrap *ww, const char *buf, size_t buf_len)
{
- return write(FILE_HANDLE(ww), buf, buf_len);
+ return write(ww->file_handle, buf, buf_len);
}
-#undef FILE_HANDLE
-/* zlib */
-#define FILE_HANDLE(ww) (ww)->_user_data.gz_handle
+/* zstd */
+
+typedef struct {
+ struct ZstdWriteBlockTask *next, *prev;
+ void *data;
+ size_t size;
+ int frame_number;
+ WriteWrap *ww;
+} ZstdWriteBlockTask;
-static bool ww_open_zlib(WriteWrap *ww, const char *filepath)
+static void *zstd_write_task(void *userdata)
{
- gzFile file;
+ ZstdWriteBlockTask *task = userdata;
+ WriteWrap *ww = task->ww;
- file = BLI_gzopen(filepath, "wb1");
+ size_t out_buf_len = ZSTD_compressBound(task->size);
+ void *out_buf = MEM_mallocN(out_buf_len, "Zstd out buffer");
+ size_t out_size = ZSTD_compress(
+ out_buf, out_buf_len, task->data, task->size, ZSTD_COMPRESSION_LEVEL);
- if (file != Z_NULL) {
- FILE_HANDLE(ww) = file;
- return true;
+ MEM_freeN(task->data);
+
+ BLI_mutex_lock(&ww->zstd.mutex);
+
+ while (ww->zstd.next_frame != task->frame_number) {
+ BLI_condition_wait(&ww->zstd.condition, &ww->zstd.mutex);
}
- return false;
+ if (ZSTD_isError(out_size)) {
+ ww->zstd.write_error = true;
+ }
+ else {
+ if (ww_write_none(ww, out_buf, out_size) == out_size) {
+ ZstdFrame *frameinfo = MEM_mallocN(sizeof(ZstdFrame), "zstd frameinfo");
+ frameinfo->uncompressed_size = task->size;
+ frameinfo->compressed_size = out_size;
+ BLI_addtail(&ww->zstd.frames, frameinfo);
+ }
+ else {
+ ww->zstd.write_error = true;
+ }
+ }
+
+ ww->zstd.next_frame++;
+
+ BLI_mutex_unlock(&ww->zstd.mutex);
+ BLI_condition_notify_all(&ww->zstd.condition);
+
+ MEM_freeN(out_buf);
+ return NULL;
+}
+
+static bool ww_open_zstd(WriteWrap *ww, const char *filepath)
+{
+ if (!ww_open_none(ww, filepath)) {
+ return false;
+ }
+
+ /* Leave one thread open for the main writing logic, unless we only have one HW thread. */
+ int num_threads = max_ii(1, BLI_system_thread_count() - 1);
+ BLI_threadpool_init(&ww->zstd.threadpool, zstd_write_task, num_threads);
+ BLI_mutex_init(&ww->zstd.mutex);
+ BLI_condition_init(&ww->zstd.condition);
+
+ return true;
}
-static bool ww_close_zlib(WriteWrap *ww)
+
+static void zstd_write_u32_le(WriteWrap *ww, uint32_t val)
+{
+#ifdef __BIG_ENDIAN__
+ BLI_endian_switch_uint32(&val);
+#endif
+ ww_write_none(ww, (char *)&val, sizeof(uint32_t));
+}
+
+/* In order to implement efficient seeking when reading the .blend, we append
+ * a skippable frame that encodes information about the other frames present
+ * in the file.
+ * The format here follows the upstream spec for seekable files:
+ * https://github.com/facebook/zstd/blob/master/contrib/seekable_format/zstd_seekable_compression_format.md
+ * If this information is not present in a file (e.g. if it was compressed
+ * with external tools), it can still be opened in Blender, but seeking will
+ * not be supported, so more memory might be needed. */
+static void zstd_write_seekable_frames(WriteWrap *ww)
+{
+ /* Write seek table header (magic number and frame size). */
+ zstd_write_u32_le(ww, 0x184D2A5E);
+
+ /* The actual frame number might not match ww->zstd.num_frames if there was a write error. */
+ const uint32_t num_frames = BLI_listbase_count(&ww->zstd.frames);
+ /* Each frame consists of two u32, so 8 bytes each.
+ * After the frames, a footer containing two u32 and one byte (9 bytes total) is written. */
+ const uint32_t frame_size = num_frames * 8 + 9;
+ zstd_write_u32_le(ww, frame_size);
+
+ /* Write seek table entries. */
+ LISTBASE_FOREACH (ZstdFrame *, frame, &ww->zstd.frames) {
+ zstd_write_u32_le(ww, frame->compressed_size);
+ zstd_write_u32_le(ww, frame->uncompressed_size);
+ }
+
+ /* Write seek table footer (number of frames, option flags and second magic number). */
+ zstd_write_u32_le(ww, num_frames);
+ const char flags = 0; /* We don't store checksums for each frame. */
+ ww_write_none(ww, &flags, 1);
+ zstd_write_u32_le(ww, 0x8F92EAB1);
+}
+
+static bool ww_close_zstd(WriteWrap *ww)
{
- return (gzclose(FILE_HANDLE(ww)) == Z_OK);
+ BLI_threadpool_end(&ww->zstd.threadpool);
+ BLI_freelistN(&ww->zstd.tasks);
+
+ BLI_mutex_end(&ww->zstd.mutex);
+ BLI_condition_end(&ww->zstd.condition);
+
+ zstd_write_seekable_frames(ww);
+ BLI_freelistN(&ww->zstd.frames);
+
+ return ww_close_none(ww) && !ww->zstd.write_error;
}
-static size_t ww_write_zlib(WriteWrap *ww, const char *buf, size_t buf_len)
+
+static size_t ww_write_zstd(WriteWrap *ww, const char *buf, size_t buf_len)
{
- return gzwrite(FILE_HANDLE(ww), buf, buf_len);
+ if (ww->zstd.write_error) {
+ return 0;
+ }
+
+ ZstdWriteBlockTask *task = MEM_mallocN(sizeof(ZstdWriteBlockTask), __func__);
+ task->data = MEM_mallocN(buf_len, __func__);
+ memcpy(task->data, buf, buf_len);
+ task->size = buf_len;
+ task->frame_number = ww->zstd.num_frames++;
+ task->ww = ww;
+
+ BLI_mutex_lock(&ww->zstd.mutex);
+ BLI_addtail(&ww->zstd.tasks, task);
+
+ /* If there's a free worker thread, just push the block into that thread.
+ * Otherwise, we wait for the earliest thread to finish.
+ * We look up the earliest thread while holding the mutex, but release it
+ * before joining the thread to prevent a deadlock. */
+ ZstdWriteBlockTask *first_task = ww->zstd.tasks.first;
+ BLI_mutex_unlock(&ww->zstd.mutex);
+ if (!BLI_available_threads(&ww->zstd.threadpool)) {
+ BLI_threadpool_remove(&ww->zstd.threadpool, first_task);
+
+ /* If the task list was empty before we pushed our task, there should
+ * always be a free thread. */
+ BLI_assert(first_task != task);
+ BLI_remlink(&ww->zstd.tasks, first_task);
+ MEM_freeN(first_task);
+ }
+ BLI_threadpool_insert(&ww->zstd.threadpool, task);
+
+ return buf_len;
}
-#undef FILE_HANDLE
/* --- end compression types --- */
@@ -226,11 +384,11 @@ static void ww_handle_init(eWriteWrapType ww_type, WriteWrap *r_ww)
memset(r_ww, 0, sizeof(*r_ww));
switch (ww_type) {
- case WW_WRAP_ZLIB: {
- r_ww->open = ww_open_zlib;
- r_ww->close = ww_close_zlib;
- r_ww->write = ww_write_zlib;
- r_ww->use_buf = false;
+ case WW_WRAP_ZSTD: {
+ r_ww->open = ww_open_zstd;
+ r_ww->close = ww_close_zstd;
+ r_ww->write = ww_write_zstd;
+ r_ww->use_buf = true;
break;
}
default: {
@@ -252,10 +410,17 @@ static void ww_handle_init(eWriteWrapType ww_type, WriteWrap *r_ww)
typedef struct {
const struct SDNA *sdna;
- /** Use for file and memory writing (fixed size of #MYWRITE_BUFFER_SIZE). */
- uchar *buf;
- /** Number of bytes used in #WriteData.buf (flushed when exceeded). */
- size_t buf_used_len;
+ struct {
+ /** Use for file and memory writing (size stored in max_size). */
+ uchar *buf;
+ /** Number of bytes used in #WriteData.buf (flushed when exceeded). */
+ size_t used_len;
+
+ /** Maximum size of the buffer. */
+ size_t max_size;
+ /** Threshold above which writes get their own chunk. */
+ size_t chunk_size;
+ } buffer;
#ifdef USE_WRITE_DATA_LEN
/** Total number of bytes written. */
@@ -271,7 +436,7 @@ typedef struct {
bool use_memfile;
/**
- * Wrap writing, so we can use zlib or
+ * Wrap writing, so we can use zstd or
* other compression types later, see: G_FILE_COMPRESS
* Will be NULL for UNDO.
*/
@@ -291,7 +456,15 @@ static WriteData *writedata_new(WriteWrap *ww)
wd->ww = ww;
if ((ww == NULL) || (ww->use_buf)) {
- wd->buf = MEM_mallocN(MYWRITE_BUFFER_SIZE, "wd->buf");
+ if (ww == NULL) {
+ wd->buffer.max_size = MEM_BUFFER_SIZE;
+ wd->buffer.chunk_size = MEM_CHUNK_SIZE;
+ }
+ else {
+ wd->buffer.max_size = ZSTD_BUFFER_SIZE;
+ wd->buffer.chunk_size = ZSTD_CHUNK_SIZE;
+ }
+ wd->buffer.buf = MEM_mallocN(wd->buffer.max_size, "wd->buffer.buf");
}
return wd;
@@ -325,8 +498,8 @@ static void writedata_do_write(WriteData *wd, const void *mem, size_t memlen)
static void writedata_free(WriteData *wd)
{
- if (wd->buf) {
- MEM_freeN(wd->buf);
+ if (wd->buffer.buf) {
+ MEM_freeN(wd->buffer.buf);
}
MEM_freeN(wd);
}
@@ -343,9 +516,9 @@ static void writedata_free(WriteData *wd)
*/
static void mywrite_flush(WriteData *wd)
{
- if (wd->buf_used_len != 0) {
- writedata_do_write(wd, wd->buf, wd->buf_used_len);
- wd->buf_used_len = 0;
+ if (wd->buffer.used_len != 0) {
+ writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
+ wd->buffer.used_len = 0;
}
}
@@ -369,20 +542,20 @@ static void mywrite(WriteData *wd, const void *adr, size_t len)
wd->write_len += len;
#endif
- if (wd->buf == NULL) {
+ if (wd->buffer.buf == NULL) {
writedata_do_write(wd, adr, len);
}
else {
/* if we have a single big chunk, write existing data in
* buffer and write out big chunk in smaller pieces */
- if (len > MYWRITE_MAX_CHUNK) {
- if (wd->buf_used_len != 0) {
- writedata_do_write(wd, wd->buf, wd->buf_used_len);
- wd->buf_used_len = 0;
+ if (len > wd->buffer.chunk_size) {
+ if (wd->buffer.used_len != 0) {
+ writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
+ wd->buffer.used_len = 0;
}
do {
- size_t writelen = MIN2(len, MYWRITE_MAX_CHUNK);
+ size_t writelen = MIN2(len, wd->buffer.chunk_size);
writedata_do_write(wd, adr, writelen);
adr = (const char *)adr + writelen;
len -= writelen;
@@ -392,14 +565,14 @@ static void mywrite(WriteData *wd, const void *adr, size_t len)
}
/* if data would overflow buffer, write out the buffer */
- if (len + wd->buf_used_len > MYWRITE_BUFFER_SIZE - 1) {
- writedata_do_write(wd, wd->buf, wd->buf_used_len);
- wd->buf_used_len = 0;
+ if (len + wd->buffer.used_len > wd->buffer.max_size - 1) {
+ writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
+ wd->buffer.used_len = 0;
}
/* append data at end of buffer */
- memcpy(&wd->buf[wd->buf_used_len], adr, len);
- wd->buf_used_len += len;
+ memcpy(&wd->buffer.buf[wd->buffer.used_len], adr, len);
+ wd->buffer.used_len += len;
}
}
@@ -430,9 +603,9 @@ static WriteData *mywrite_begin(WriteWrap *ww, MemFile *compare, MemFile *curren
*/
static bool mywrite_end(WriteData *wd)
{
- if (wd->buf_used_len != 0) {
- writedata_do_write(wd, wd->buf, wd->buf_used_len);
- wd->buf_used_len = 0;
+ if (wd->buffer.used_len != 0) {
+ writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
+ wd->buffer.used_len = 0;
}
if (wd->use_memfile) {
@@ -1150,7 +1323,6 @@ bool BLO_write_file(Main *mainvar,
ReportList *reports)
{
char tempname[FILE_MAX + 1];
- eWriteWrapType ww_type;
WriteWrap ww;
eBLO_WritePathRemap remap_mode = params->remap_mode;
@@ -1172,14 +1344,7 @@ bool BLO_write_file(Main *mainvar,
/* open temporary file, so we preserve the original in case we crash */
BLI_snprintf(tempname, sizeof(tempname), "%s@", filepath);
- if (write_flags & G_FILE_COMPRESS) {
- ww_type = WW_WRAP_ZLIB;
- }
- else {
- ww_type = WW_WRAP_NONE;
- }
-
- ww_handle_init(ww_type, &ww);
+ ww_handle_init((write_flags & G_FILE_COMPRESS) ? WW_WRAP_ZSTD : WW_WRAP_NONE, &ww);
if (ww.open(&ww, tempname) == false) {
BKE_reportf(