
git.blender.org/blender.git
author    Thomas Dinges <dingto> 2022-09-22 18:13:07 +0300
committer Clément Foucault <foucault.clem@gmail.com> 2022-09-22 18:16:22 +0300
commit    bb63b98d1ff5acfd24dff9b5e72175f82f5bca26 (patch)
tree      031cf8537bbcda460cb72f4fabb62ed991ebbf24 /source/blender/gpu
parent    dd5131bd700c7eae9772b54de1640ef15edff28f (diff)
Metal: MTLVertBuf implementation and support for texture creation from vertex buffers.

Authored by Apple: Michael Parkin-White
Reviewed By: fclem
Maniphest Tasks: T96261
Differential Revision: https://developer.blender.org/D15452
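For context, the patch builds on Metal's buffer-backed textures: -[MTLBuffer newTextureWithDescriptor:offset:bytesPerRow:] creates a texture view directly over buffer memory, so sampling vertex data requires no copy. A minimal standalone sketch of that mechanism follows (the `device` and `vertex_buffer` variables are assumed to exist; the format and texel size are illustrative, not taken from the patch):

    /* Sketch: a 1-D texture view over an existing MTLBuffer. */
    MTLPixelFormat fmt = MTLPixelFormatRGBA32Float;
    NSUInteger texel_size = 16; /* 4 floats x 4 bytes per texel. */
    NSUInteger width = [vertex_buffer length] / texel_size;

    MTLTextureDescriptor *desc = [[MTLTextureDescriptor alloc] init];
    desc.textureType = MTLTextureTypeTextureBuffer;
    desc.pixelFormat = fmt;
    desc.width = width;
    desc.height = 1;
    desc.storageMode = [vertex_buffer storageMode];

    /* bytesPerRow must satisfy the device's linear-texture alignment. */
    NSUInteger align = [device minimumLinearTextureAlignmentForPixelFormat:fmt];
    NSUInteger bytes_per_row = ((width * texel_size) + align - 1) / align * align;

    id<MTLTexture> tex = [vertex_buffer newTextureWithDescriptor:desc
                                                          offset:0
                                                     bytesPerRow:bytes_per_row];
    [desc release];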
Diffstat (limited to 'source/blender/gpu')
 source/blender/gpu/CMakeLists.txt             |   2
 source/blender/gpu/metal/mtl_backend.mm       |   4
 source/blender/gpu/metal/mtl_texture.hh       |  15
 source/blender/gpu/metal/mtl_texture.mm       | 125
 source/blender/gpu/metal/mtl_vertex_buffer.hh |  75
 source/blender/gpu/metal/mtl_vertex_buffer.mm | 368
 6 files changed, 564 insertions(+), 25 deletions(-)
diff --git a/source/blender/gpu/CMakeLists.txt b/source/blender/gpu/CMakeLists.txt
index 8b38c22ae28..18da5169620 100644
--- a/source/blender/gpu/CMakeLists.txt
+++ b/source/blender/gpu/CMakeLists.txt
@@ -202,6 +202,7 @@ set(METAL_SRC
metal/mtl_texture.mm
metal/mtl_texture_util.mm
metal/mtl_uniform_buffer.mm
+ metal/mtl_vertex_buffer.mm
metal/mtl_backend.hh
metal/mtl_capabilities.hh
@@ -222,6 +223,7 @@ set(METAL_SRC
metal/mtl_state.hh
metal/mtl_texture.hh
metal/mtl_uniform_buffer.hh
+ metal/mtl_vertex_buffer.hh
)
# Select Backend source based on availability
diff --git a/source/blender/gpu/metal/mtl_backend.mm b/source/blender/gpu/metal/mtl_backend.mm
index 3cd7794f6c9..ec9e8ab4d15 100644
--- a/source/blender/gpu/metal/mtl_backend.mm
+++ b/source/blender/gpu/metal/mtl_backend.mm
@@ -14,6 +14,7 @@
#include "mtl_query.hh"
#include "mtl_shader.hh"
#include "mtl_uniform_buffer.hh"
+#include "mtl_vertex_buffer.hh"
#include "gpu_capabilities_private.hh"
#include "gpu_platform_private.hh"
@@ -94,8 +95,7 @@ StorageBuf *MTLBackend::storagebuf_alloc(int size, GPUUsageType usage, const cha
VertBuf *MTLBackend::vertbuf_alloc()
{
- /* TODO(Metal): Implement MTLVertBuf. */
- return nullptr;
+ return new MTLVertBuf();
}
void MTLBackend::render_begin()
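With vertbuf_alloc() now returning a concrete MTLVertBuf, the platform-agnostic GPU_vertbuf_* API works end-to-end on Metal. A hedged sketch of a typical call path (the functions are Blender's public GPU API; the triangle data is illustrative):

    /* Sketch: generic usage that now reaches MTLVertBuf through the backend. */
    GPUVertFormat format = {0};
    uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format); /* MTLBackend::vertbuf_alloc() */
    GPU_vertbuf_data_alloc(vbo, 3);

    const float tri[3][3] = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}};
    for (int v = 0; v < 3; v++) {
      GPU_vertbuf_attr_set(vbo, pos, v, tri[v]);
    }
    /* Upload is deferred: it happens on first use, inside MTLVertBuf::bind(). */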
diff --git a/source/blender/gpu/metal/mtl_texture.hh b/source/blender/gpu/metal/mtl_texture.hh
index 766f01d9018..88d09e4e133 100644
--- a/source/blender/gpu/metal/mtl_texture.hh
+++ b/source/blender/gpu/metal/mtl_texture.hh
@@ -200,7 +200,7 @@ class MTLTexture : public Texture {
TEXTURE_VIEW_SWIZZLE_DIRTY = (1 << 0),
TEXTURE_VIEW_MIP_DIRTY = (1 << 1)
};
- id<MTLTexture> mip_swizzle_view_;
+ id<MTLTexture> mip_swizzle_view_ = nil;
char tex_swizzle_mask_[4];
MTLTextureSwizzleChannels mtl_swizzle_mask_;
bool mip_range_dirty_ = false;
@@ -216,7 +216,6 @@ class MTLTexture : public Texture {
/* VBO. */
MTLVertBuf *vert_buffer_;
id<MTLBuffer> vert_buffer_mtl_;
- int vert_buffer_offset_;
/* Core parameters and sub-resources. */
eGPUTextureUsage gpu_image_usage_flags_;
@@ -256,6 +255,14 @@ class MTLTexture : public Texture {
return name_;
}
+ id<MTLBuffer> get_vertex_buffer() const
+ {
+ if (resource_mode_ == MTL_TEXTURE_MODE_VBO) {
+ return vert_buffer_mtl_;
+ }
+ return nil;
+ }
+
protected:
bool init_internal() override;
bool init_internal(GPUVertBuf *vbo) override;
@@ -324,8 +331,6 @@ class MTLTexture : public Texture {
int height);
GPUFrameBuffer *get_blit_framebuffer(uint dst_slice, uint dst_mip);
- MEM_CXX_CLASS_ALLOC_FUNCS("gpu::MTLTexture")
-
/* Texture Update function Utilities. */
/* Metal texture updating does not provide the same range of functionality for type conversion
* and format compatibility as are available in OpenGL. To achieve the same level of
@@ -415,6 +420,8 @@ class MTLTexture : public Texture {
/* fullscreen blit utilities. */
GPUShader *fullscreen_blit_sh_get();
+
+ MEM_CXX_CLASS_ALLOC_FUNCS("MTLTexture")
};
/* Utility */
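The new get_vertex_buffer() accessor exists so callers can detect a stale VBO-backed texture: the texture caches the id<MTLBuffer> it was created from, and comparing that cache against the VBO's current handle reveals a re-allocation. A small sketch of the check (variable names are illustrative):

    /* Sketch: staleness check enabled by get_vertex_buffer(). */
    id<MTLBuffer> current = mtl_vbo->get_metal_buffer();
    id<MTLBuffer> cached = mtl_texture->get_vertex_buffer(); /* nil unless VBO-backed. */
    if (cached != nil && cached != current) {
      /* The vertex buffer was re-allocated; the buffer-backed texture must be
       * re-created against the new MTLBuffer before the next bind. */
    }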
diff --git a/source/blender/gpu/metal/mtl_texture.mm b/source/blender/gpu/metal/mtl_texture.mm
index 2b7c2333bff..4af46c13751 100644
--- a/source/blender/gpu/metal/mtl_texture.mm
+++ b/source/blender/gpu/metal/mtl_texture.mm
@@ -20,6 +20,7 @@
#include "mtl_context.hh"
#include "mtl_debug.hh"
#include "mtl_texture.hh"
+#include "mtl_vertex_buffer.hh"
#include "GHOST_C-api.h"
@@ -50,7 +51,6 @@ void gpu::MTLTexture::mtl_texture_init()
/* VBO. */
vert_buffer_ = nullptr;
vert_buffer_mtl_ = nil;
- vert_buffer_offset_ = -1;
/* Default Swizzle. */
tex_swizzle_mask_[0] = 'r';
@@ -169,26 +169,39 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
id<MTLTexture> gpu::MTLTexture::get_metal_handle()
{
- /* ensure up to date and baked. */
- this->ensure_baked();
-
/* Verify VBO texture shares same buffer. */
if (resource_mode_ == MTL_TEXTURE_MODE_VBO) {
- int r_offset = -1;
+ id<MTLBuffer> buf = vert_buffer_->get_metal_buffer();
+
+ /* Source vertex buffer has been re-generated; requires re-initialisation. */
+ if (buf != vert_buffer_mtl_) {
+ MTL_LOG_INFO(
+ "MTLTexture '%p' using MTL_TEXTURE_MODE_VBO requires re-generation due to updated "
+ "Vertex-Buffer.\n",
+ this);
+ /* Clear state. */
+ this->reset();
+
+ /* Re-initialise. */
+ this->init_internal(wrap(vert_buffer_));
+
+ /* Update for assertion check below. */
+ buf = vert_buffer_->get_metal_buffer();
+ }
- /* TODO(Metal): Fetch buffer from MTLVertBuf when implemented. */
- id<MTLBuffer> buf = nil; /*vert_buffer_->get_metal_buffer(&r_offset);*/
+ /* Ensure buffer is valid.
+ * Fetch vert buffer handle directly in case it changed above. */
BLI_assert(vert_buffer_mtl_ != nil);
- BLI_assert(buf == vert_buffer_mtl_ && r_offset == vert_buffer_offset_);
-
- UNUSED_VARS(buf);
- UNUSED_VARS_NDEBUG(r_offset);
+ BLI_assert(vert_buffer_->get_metal_buffer() == vert_buffer_mtl_);
}
+ /* ensure up to date and baked. */
+ this->ensure_baked();
+
if (is_baked_) {
/* For explicit texture views, ensure we always return the texture view. */
if (resource_mode_ == MTL_TEXTURE_MODE_TEXTURE_VIEW) {
- BLI_assert(mip_swizzle_view_ && "Texture view should always have a valid handle.");
+ BLI_assert_msg(mip_swizzle_view_, "Texture view should always have a valid handle.");
}
if (mip_swizzle_view_ != nil || texture_view_dirty_flags_) {
@@ -208,7 +221,7 @@ id<MTLTexture> gpu::MTLTexture::get_metal_handle_base()
/* For explicit texture views, always return the texture view. */
if (resource_mode_ == MTL_TEXTURE_MODE_TEXTURE_VIEW) {
- BLI_assert(mip_swizzle_view_ && "Texture view should always have a valid handle.");
+ BLI_assert_msg(mip_swizzle_view_, "Texture view should always have a valid handle.");
if (mip_swizzle_view_ != nil || texture_view_dirty_flags_) {
bake_mip_swizzle_view();
}
@@ -915,7 +928,7 @@ void gpu::MTLTexture::generate_mipmap()
/* Ensure texture is baked. */
this->ensure_baked();
- BLI_assert(is_baked_ && texture_ && "MTLTexture is not valid");
+ BLI_assert_msg(is_baked_ && texture_, "MTLTexture is not valid");
if (mipmaps_ == 1 || mtl_max_mips_ == 1) {
MTL_LOG_WARNING("Call to generate mipmaps on texture with 'mipmaps_=1\n'");
@@ -1231,7 +1244,7 @@ void gpu::MTLTexture::read_internal(int mip,
depth_format_mode = 4;
break;
default:
- BLI_assert(false && "Unhandled depth read format case");
+ BLI_assert_msg(false, "Unhandled depth read format case");
break;
}
}
@@ -1445,11 +1458,85 @@ bool gpu::MTLTexture::init_internal()
bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
{
- /* Zero initialize. */
- this->prepare_internal();
+ /* GPU_DEPTH24_STENCIL8 is not a valid vertex buffer format; verify the
+ * texture has not been created as such, as this format is not supported on
+ * Apple Silicon. */
+ BLI_assert_msg(this->format_ != GPU_DEPTH24_STENCIL8,
+ "Apple Silicon does not support GPU_DEPTH24_S8");
- /* TODO(Metal): Add implementation for GPU Vert buf. */
- return false;
+ MTLPixelFormat mtl_format = gpu_texture_format_to_metal(this->format_);
+ mtl_max_mips_ = 1;
+ mipmaps_ = 0;
+ this->mip_range_set(0, 0);
+
+ /* Create texture from GPUVertBuf's buffer. */
+ MTLVertBuf *mtl_vbo = static_cast<MTLVertBuf *>(unwrap(vbo));
+ mtl_vbo->bind();
+ mtl_vbo->flag_used();
+
+ /* Get Metal Buffer. */
+ id<MTLBuffer> source_buffer = mtl_vbo->get_metal_buffer();
+ BLI_assert(source_buffer);
+
+ /* Verify size. */
+ if (w_ <= 0) {
+ MTL_LOG_WARNING("Allocating texture buffer of width 0!\n");
+ w_ = 1;
+ }
+
+ /* Verify Texture and vertex buffer alignment. */
+ int bytes_per_pixel = get_mtl_format_bytesize(mtl_format);
+ int bytes_per_row = bytes_per_pixel * w_;
+
+ MTLContext *mtl_ctx = MTLContext::get();
+ uint align_requirement = static_cast<uint>(
+ [mtl_ctx->device minimumLinearTextureAlignmentForPixelFormat:mtl_format]);
+
+ /* Verify per-vertex size aligns with texture size. */
+ const GPUVertFormat *format = GPU_vertbuf_get_format(vbo);
+ BLI_assert_msg(bytes_per_pixel == format->stride,
+ "Pixel format stride MUST match the texture format stride -- These being different "
+ "is likely caused by Metal's VBO padding to a minimum of 4-bytes per-vertex");
+ UNUSED_VARS_NDEBUG(format);
+
+ /* Create texture descriptor. */
+ BLI_assert(type_ == GPU_TEXTURE_BUFFER);
+ texture_descriptor_ = [[MTLTextureDescriptor alloc] init];
+ texture_descriptor_.pixelFormat = mtl_format;
+ texture_descriptor_.textureType = MTLTextureTypeTextureBuffer;
+ texture_descriptor_.width = w_;
+ texture_descriptor_.height = 1;
+ texture_descriptor_.depth = 1;
+ texture_descriptor_.arrayLength = 1;
+ texture_descriptor_.mipmapLevelCount = mtl_max_mips_;
+ texture_descriptor_.usage =
+ MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite |
+ MTLTextureUsagePixelFormatView; /* TODO(Metal): Optimize usage flags. */
+ texture_descriptor_.storageMode = [source_buffer storageMode];
+ texture_descriptor_.sampleCount = 1;
+ texture_descriptor_.cpuCacheMode = [source_buffer cpuCacheMode];
+ texture_descriptor_.hazardTrackingMode = [source_buffer hazardTrackingMode];
+
+ texture_ = [source_buffer
+ newTextureWithDescriptor:texture_descriptor_
+ offset:0
+ bytesPerRow:ceil_to_multiple_u(bytes_per_row, align_requirement)];
+ aligned_w_ = bytes_per_row / bytes_per_pixel;
+
+ BLI_assert(texture_);
+ texture_.label = [NSString stringWithUTF8String:this->get_name()];
+ is_baked_ = true;
+ is_dirty_ = false;
+ resource_mode_ = MTL_TEXTURE_MODE_VBO;
+
+ /* Track Status. */
+ vert_buffer_ = mtl_vbo;
+ vert_buffer_mtl_ = source_buffer;
+
+ /* Cleanup. */
+ [texture_descriptor_ release];
+ texture_descriptor_ = nullptr;
+
+ return true;
}
bool gpu::MTLTexture::init_internal(const GPUTexture *src, int mip_offset, int layer_offset)
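init_internal(GPUVertBuf *) rounds bytesPerRow up with ceil_to_multiple_u before calling newTextureWithDescriptor:offset:bytesPerRow:. A worked sketch of that arithmetic under assumed values (a 4-byte-per-texel format and a 16-byte alignment requirement; real values come from the device query):

    /* Sketch: bytesPerRow alignment, values illustrative. */
    uint bytes_per_pixel = 4;     /* e.g. a 32-bit single-channel float format */
    uint w = 1001;                /* texel count */
    uint align_requirement = 16;  /* minimumLinearTextureAlignmentForPixelFormat */

    uint bytes_per_row = bytes_per_pixel * w; /* 4004 */
    /* What ceil_to_multiple_u(bytes_per_row, align_requirement) computes: */
    uint aligned = ((bytes_per_row + align_requirement - 1) / align_requirement) *
                   align_requirement;         /* 4016 */
    /* The texture is created with the aligned row size, so the last row may
     * carry a few bytes of padding beyond the final texel. */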
diff --git a/source/blender/gpu/metal/mtl_vertex_buffer.hh b/source/blender/gpu/metal/mtl_vertex_buffer.hh
new file mode 100644
index 00000000000..6918298d643
--- /dev/null
+++ b/source/blender/gpu/metal/mtl_vertex_buffer.hh
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/** \file
+ * \ingroup gpu
+ */
+
+#pragma once
+
+#include <Cocoa/Cocoa.h>
+#include <Metal/Metal.h>
+#include <QuartzCore/QuartzCore.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "GPU_vertex_buffer.h"
+#include "gpu_vertex_buffer_private.hh"
+#include "mtl_context.hh"
+
+namespace blender::gpu {
+
+class MTLVertBuf : public VertBuf {
+ friend class gpu::MTLTexture; /* For buffer texture. */
+ friend class MTLShader; /* For transform feedback. */
+ friend class MTLBatch;
+ friend class MTLContext; /* For transform feedback. */
+
+ private:
+ /** Metal buffer allocation. */
+ gpu::MTLBuffer *vbo_ = nullptr;
+ /** Texture used if the buffer is bound as buffer texture. Init on first use. */
+ struct ::GPUTexture *buffer_texture_ = nullptr;
+ /** Defines whether the buffer handle is wrapped by this MTLVertBuf, i.e. we do not own it and
+ * should not free it. */
+ bool is_wrapper_ = false;
+ /** Requested allocation size for the Metal buffer.
+ * Can differ from the raw buffer size, as it excludes alignment padding. */
+ uint64_t alloc_size_ = 0;
+ /** Whether existing allocation has been submitted for use by the GPU. */
+ bool contents_in_flight_ = false;
+
+ /* Fetch Metal buffer and offset into allocation if necessary.
+ * Access limited to friend classes. */
+ id<MTLBuffer> get_metal_buffer()
+ {
+ vbo_->debug_ensure_used();
+ return vbo_->get_metal_buffer();
+ }
+
+ public:
+ MTLVertBuf();
+ ~MTLVertBuf();
+
+ void bind();
+ void flag_used();
+
+ void update_sub(uint start, uint len, const void *data) override;
+
+ const void *read() const override;
+ void *unmap(const void *mapped_data) const override;
+
+ void wrap_handle(uint64_t handle) override;
+
+ protected:
+ void acquire_data() override;
+ void resize_data() override;
+ void release_data() override;
+ void upload_data() override;
+ void duplicate_data(VertBuf *dst) override;
+ void bind_as_ssbo(uint binding) override;
+ void bind_as_texture(uint binding) override;
+
+ MEM_CXX_CLASS_ALLOC_FUNCS("MTLVertBuf");
+};
+
+} // namespace blender::gpu
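Two of the new members deserve a note: is_wrapper_ supports adopting an externally owned MTLBuffer via wrap_handle(), and contents_in_flight_ drives the reallocation policy in bind(). A hedged sketch of the wrapping path (GPU_vertbuf_wrap_handle() is Blender's public entry point; `external_buf` and `format` are assumed to be provided by the caller):

    /* Sketch: wrapping an externally owned MTLBuffer. */
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_wrap_handle(vbo, (uint64_t)(uintptr_t)external_buf);
    /* MTLVertBuf sets is_wrapper_ = true and flag = GPU_VERTBUF_DATA_UPLOADED:
     * the contents are assumed to be device-resident already, so no host
     * allocation or upload takes place, and the buffer is never freed here. */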
diff --git a/source/blender/gpu/metal/mtl_vertex_buffer.mm b/source/blender/gpu/metal/mtl_vertex_buffer.mm
new file mode 100644
index 00000000000..05f9c500832
--- /dev/null
+++ b/source/blender/gpu/metal/mtl_vertex_buffer.mm
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/** \file
+ * \ingroup gpu
+ */
+#include "mtl_vertex_buffer.hh"
+#include "mtl_debug.hh"
+
+namespace blender::gpu {
+
+MTLVertBuf::MTLVertBuf() : VertBuf()
+{
+}
+
+MTLVertBuf::~MTLVertBuf()
+{
+ this->release_data();
+}
+
+void MTLVertBuf::acquire_data()
+{
+ /* Discard previous data, if any. */
+ MEM_SAFE_FREE(data);
+ if (usage_ == GPU_USAGE_DEVICE_ONLY) {
+ data = nullptr;
+ }
+ else {
+ data = (uchar *)MEM_mallocN(sizeof(uchar) * this->size_alloc_get(), __func__);
+ }
+}
+
+void MTLVertBuf::resize_data()
+{
+ if (usage_ == GPU_USAGE_DEVICE_ONLY) {
+ data = nullptr;
+ }
+ else {
+ data = (uchar *)MEM_reallocN(data, sizeof(uchar) * this->size_alloc_get());
+ }
+}
+
+void MTLVertBuf::release_data()
+{
+ if (vbo_ != nullptr) {
+ vbo_->free();
+ vbo_ = nullptr;
+ is_wrapper_ = false;
+ }
+
+ GPU_TEXTURE_FREE_SAFE(buffer_texture_);
+
+ MEM_SAFE_FREE(data);
+}
+
+void MTLVertBuf::duplicate_data(VertBuf *dst_)
+{
+ BLI_assert(MTLContext::get() != nullptr);
+ MTLVertBuf *src = this;
+ MTLVertBuf *dst = static_cast<MTLVertBuf *>(dst_);
+
+ /* Ensure buffer has been initialised. */
+ src->bind();
+
+ if (src->vbo_) {
+
+ /* Fetch active context. */
+ MTLContext *ctx = MTLContext::get();
+ BLI_assert(ctx);
+
+ /* Ensure destination does not have an active VBO. */
+ BLI_assert(dst->vbo_ == nullptr);
+
+ /* Allocate VBO for destination vertbuf. */
+ uint length = src->vbo_->get_size();
+ dst->vbo_ = MTLContext::get_global_memory_manager().allocate(
+ length, (dst->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
+ dst->alloc_size_ = length;
+
+ /* Fetch Metal buffer handles. */
+ id<MTLBuffer> src_buffer = src->vbo_->get_metal_buffer();
+ id<MTLBuffer> dest_buffer = dst->vbo_->get_metal_buffer();
+
+ /* Use blit encoder to copy data to duplicate buffer allocation. */
+ id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
+ if (G.debug & G_DEBUG_GPU) {
+ [enc insertDebugSignpost:@"VertexBufferDuplicate"];
+ }
+ [enc copyFromBuffer:src_buffer
+ sourceOffset:0
+ toBuffer:dest_buffer
+ destinationOffset:0
+ size:length];
+
+ /* Flush results back to host buffer, if one exists. */
+ if (dest_buffer.storageMode == MTLStorageModeManaged) {
+ [enc synchronizeResource:dest_buffer];
+ }
+
+ if (G.debug & G_DEBUG_GPU) {
+ [enc insertDebugSignpost:@"VertexBufferDuplicateEnd"];
+ }
+
+ /* Mark as in-use, as contents are updated via GPU command. */
+ src->flag_used();
+ }
+
+ /* Copy raw CPU data. */
+ if (data != nullptr) {
+ dst->data = (uchar *)MEM_dupallocN(src->data);
+ }
+}
+
+void MTLVertBuf::upload_data()
+{
+ this->bind();
+}
+
+void MTLVertBuf::bind()
+{
+ /* Determine allocation size. Set the minimum allocation size to the
+ * maximum size of a single attribute, to avoid validation and
+ * correctness errors. */
+ uint64_t required_size_raw = sizeof(uchar) * this->size_used_get();
+ uint64_t required_size = max_ulul(required_size_raw, 128);
+
+ if (required_size_raw == 0) {
+ MTL_LOG_WARNING("Warning: Vertex buffer required_size = 0\n");
+ }
+
+ /* If the vertex buffer has already been allocated, but new data is ready,
+ * or the usage size has changed, we release the existing buffer and
+ * allocate a new buffer to ensure we do not overwrite in-use GPU resources.
+ *
+ * NOTE: We only need to free the existing allocation if contents have been
+ * submitted to the GPU. Otherwise we can simply upload new data to the
+ * existing buffer, if it will fit.
+ *
+ * NOTE: If a buffer is re-sized, but no new data is provided, the previous
+ * contents are copied into the newly allocated buffer. */
+ bool requires_reallocation = (vbo_ != nullptr) && (alloc_size_ != required_size);
+ bool new_data_ready = (this->flag & GPU_VERTBUF_DATA_DIRTY) && this->data;
+
+ gpu::MTLBuffer *prev_vbo = nullptr;
+ GPUVertBufStatus prev_flag = this->flag;
+
+ if (vbo_ != nullptr) {
+ if (requires_reallocation || (new_data_ready && contents_in_flight_)) {
+ /* Track previous VBO to copy data from. */
+ prev_vbo = vbo_;
+
+ /* Reset current allocation status. */
+ vbo_ = nullptr;
+ is_wrapper_ = false;
+ alloc_size_ = 0;
+
+ /* Flag as requiring data upload. */
+ if (requires_reallocation) {
+ this->flag &= ~GPU_VERTBUF_DATA_UPLOADED;
+ }
+ }
+ }
+
+ /* Create MTLBuffer of requested size. */
+ if (vbo_ == nullptr) {
+ vbo_ = MTLContext::get_global_memory_manager().allocate(
+ required_size, (this->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
+ vbo_->set_label(@"Vertex Buffer");
+ BLI_assert(vbo_ != nullptr);
+ BLI_assert(vbo_->get_metal_buffer() != nil);
+
+ is_wrapper_ = false;
+ alloc_size_ = required_size;
+ contents_in_flight_ = false;
+ }
+
+ /* Upload new data, if provided. */
+ if (new_data_ready) {
+
+ /* Only upload data if usage size is greater than zero.
+ * Do not upload data for device-only buffers. */
+ if (required_size_raw > 0 && usage_ != GPU_USAGE_DEVICE_ONLY) {
+
+ /* Debug: Verify allocation is large enough. */
+ BLI_assert(vbo_->get_size() >= required_size_raw);
+
+ /* Fetch mapped buffer host ptr and upload data. */
+ void *dst_data = vbo_->get_host_ptr();
+ memcpy((uint8_t *)dst_data, this->data, required_size_raw);
+ vbo_->flush_range(0, required_size_raw);
+ }
+
+ /* If static usage, free host-side data. */
+ if (usage_ == GPU_USAGE_STATIC) {
+ MEM_SAFE_FREE(data);
+ }
+
+ /* Flag data as having been uploaded. */
+ this->flag &= ~GPU_VERTBUF_DATA_DIRTY;
+ this->flag |= GPU_VERTBUF_DATA_UPLOADED;
+ }
+ else if (requires_reallocation) {
+
+ /* If buffer has been re-sized, copy existing data if host
+ * data had been previously uploaded. */
+ BLI_assert(prev_vbo != nullptr);
+
+ if (prev_flag & GPU_VERTBUF_DATA_UPLOADED) {
+
+ /* Fetch active context. */
+ MTLContext *ctx = MTLContext::get();
+ BLI_assert(ctx);
+
+ id<MTLBuffer> copy_prev_buffer = prev_vbo->get_metal_buffer();
+ id<MTLBuffer> copy_new_buffer = vbo_->get_metal_buffer();
+ BLI_assert(copy_prev_buffer != nil);
+ BLI_assert(copy_new_buffer != nil);
+
+ /* Ensure a blit command encoder is active for buffer copy operation. */
+ id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
+ [enc copyFromBuffer:copy_prev_buffer
+ sourceOffset:0
+ toBuffer:copy_new_buffer
+ destinationOffset:0
+ size:min_ulul([copy_new_buffer length], [copy_prev_buffer length])];
+
+ /* Flush newly copied data back to host-side buffer, if one exists.
+ * Ensures data and cache coherency for managed MTLBuffers. */
+ if (copy_new_buffer.storageMode == MTLStorageModeManaged) {
+ [enc synchronizeResource:copy_new_buffer];
+ }
+
+ /* For VBOs flagged as static, release host data as it will no longer be needed. */
+ if (usage_ == GPU_USAGE_STATIC) {
+ MEM_SAFE_FREE(data);
+ }
+
+ /* Flag data as uploaded. */
+ this->flag |= GPU_VERTBUF_DATA_UPLOADED;
+
+ /* Flag as in-use, as contents have been updated via GPU commands. */
+ this->flag_used();
+ }
+ }
+
+ /* Release previous buffer if re-allocated. */
+ if (prev_vbo != nullptr) {
+ prev_vbo->free();
+ }
+
+ /* Ensure buffer has been created. */
+ BLI_assert(vbo_ != nullptr);
+}
+
+/* Update sub-range of data. Currently only used by hair. */
+void MTLVertBuf::update_sub(uint start, uint len, const void *data)
+{
+ /* Fetch and verify active context. */
+ MTLContext *ctx = reinterpret_cast<MTLContext *>(unwrap(GPU_context_active_get()));
+ BLI_assert(ctx);
+ BLI_assert(ctx->device);
+
+ /* Ensure vertbuf has been created. */
+ this->bind();
+ BLI_assert(start + len <= alloc_size_);
+
+ /* Create temporary scratch buffer allocation for sub-range of data. */
+ MTLTemporaryBuffer scratch_allocation =
+ ctx->get_scratchbuffer_manager().scratch_buffer_allocate_range_aligned(len, 256);
+ memcpy(scratch_allocation.data, data, len);
+ [scratch_allocation.metal_buffer
+ didModifyRange:NSMakeRange(scratch_allocation.buffer_offset, len)];
+ id<MTLBuffer> data_buffer = scratch_allocation.metal_buffer;
+ uint data_buffer_offset = scratch_allocation.buffer_offset;
+
+ BLI_assert(vbo_ != nullptr && data != nullptr);
+ BLI_assert((start + len) <= vbo_->get_size());
+
+ /* Fetch destination buffer. */
+ id<MTLBuffer> dst_buffer = vbo_->get_metal_buffer();
+
+ /* Ensure blit command encoder for copying data. */
+ id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
+ [enc copyFromBuffer:data_buffer
+ sourceOffset:data_buffer_offset
+ toBuffer:dst_buffer
+ destinationOffset:start
+ size:len];
+
+ /* Flush modified buffer back to host buffer, if one exists. */
+ if (dst_buffer.storageMode == MTLStorageModeManaged) {
+ [enc synchronizeResource:dst_buffer];
+ }
+}
+
+void MTLVertBuf::bind_as_ssbo(uint binding)
+{
+ /* TODO(Metal): Support binding of buffers as SSBOs.
+ * Pending overall compute support for Metal backend. */
+ MTL_LOG_WARNING("MTLVertBuf::bind_as_ssbo not yet implemented!\n");
+ this->flag_used();
+}
+
+void MTLVertBuf::bind_as_texture(uint binding)
+{
+ /* Ensure allocations are ready, and data uploaded. */
+ this->bind();
+ BLI_assert(vbo_ != nullptr);
+
+ /* If vertex buffer updated, release existing texture and re-create. */
+ id<MTLBuffer> buf = this->get_metal_buffer();
+ if (buffer_texture_ != nullptr) {
+ gpu::MTLTexture *mtl_buffer_tex = static_cast<gpu::MTLTexture *>(
+ unwrap(this->buffer_texture_));
+ id<MTLBuffer> tex_buf = mtl_buffer_tex->get_vertex_buffer();
+ if (tex_buf != buf) {
+ GPU_TEXTURE_FREE_SAFE(buffer_texture_);
+ buffer_texture_ = nullptr;
+ }
+ }
+
+ /* Create texture from vertex buffer. */
+ if (buffer_texture_ == nullptr) {
+ buffer_texture_ = GPU_texture_create_from_vertbuf("vertbuf_as_texture", wrap(this));
+ }
+
+ /* Verify successful creation and bind. */
+ BLI_assert(buffer_texture_ != nullptr);
+ GPU_texture_bind(buffer_texture_, binding);
+}
+
+const void *MTLVertBuf::read() const
+{
+ BLI_assert(vbo_ != nullptr);
+ BLI_assert(usage_ != GPU_USAGE_DEVICE_ONLY);
+ void *return_ptr = vbo_->get_host_ptr();
+ BLI_assert(return_ptr != nullptr);
+
+ return return_ptr;
+}
+
+void *MTLVertBuf::unmap(const void *mapped_data) const
+{
+ void *result = MEM_mallocN(alloc_size_, __func__);
+ memcpy(result, mapped_data, alloc_size_);
+ return result;
+}
+
+void MTLVertBuf::wrap_handle(uint64_t handle)
+{
+ BLI_assert(vbo_ == nullptr);
+
+ /* Attempt to cast to Metal buffer handle. */
+ BLI_assert(handle != 0);
+ id<MTLBuffer> buffer = reinterpret_cast<id<MTLBuffer>>((void *)handle);
+
+ is_wrapper_ = true;
+ vbo_ = new gpu::MTLBuffer(buffer);
+
+ /* We assume the data is already on the device, so no need to allocate or send it. */
+ flag = GPU_VERTBUF_DATA_UPLOADED;
+}
+
+void MTLVertBuf::flag_used()
+{
+ contents_in_flight_ = true;
+}
+
+} // namespace blender::gpu
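The reallocation logic in bind() is worth restating: a buffer whose contents may still be read by in-flight GPU work is never rewritten in place; it is orphaned and replaced by a fresh allocation. A condensed sketch of the decision (names follow the patch; bodies elided):

    /* Sketch: condensed decision logic from MTLVertBuf::bind(). */
    bool requires_reallocation = (vbo_ != nullptr) && (alloc_size_ != required_size);
    bool new_data_ready = (flag & GPU_VERTBUF_DATA_DIRTY) && data;

    if (requires_reallocation || (new_data_ready && contents_in_flight_)) {
      /* Orphan: keep the old buffer alive for pending GPU reads, allocate a new
       * one, blit the old contents across if no new data is provided, then free
       * the old allocation (the memory manager defers reuse until safe). */
    }
    else if (new_data_ready) {
      /* In-place upload: contents were never submitted, so writing is safe. */
    }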