From 4430e8a00810ca8df2fa20029c4cb8078e8cdbe6 Mon Sep 17 00:00:00 2001
From: Germano Cavalcante
Date: Wed, 17 Feb 2021 10:48:08 -0300
Subject: Python: gpu module: add new submodules and types

This commit extends the gpu Python API with:
```
gpu.types.Buffer #"__init__", "to_list"
gpu.types.GPUTexture #"__init__", "clear", "read", "format"
gpu.types.GPUFrameBuffer #"__init__", "bind", "clear", "is_bound", "viewport", ("__enter__", "__exit__" with "GPUFrameBufferStackContext")
gpu.types.GPUUniformBuf #"__init__", "update"
gpu.state #"blend_set", "blend_get", "depth_test_set", "depth_test_get", "depth_mask_set", "depth_mask_get", "viewport_set", "viewport_get", "line_width_set", "line_width_get", "point_size_set", "color_mask_set", "face_culling_set", "front_facing_set", "program_point_size_set"
```

Add these methods to existing objects:
```
gpu.types.GPUShader #"uniform_sampler", "uniform_buffer"
```

Maniphest Tasks: T80481

Differential Revision: https://developer.blender.org/D8826
---
 source/blender/python/generic/py_capi_utils.c    |  11 +
 source/blender/python/generic/py_capi_utils.h    |   2 +
 source/blender/python/gpu/CMakeLists.txt         |  12 +
 source/blender/python/gpu/gpu_py.c               |  45 ++
 source/blender/python/gpu/gpu_py.h               |  23 +
 source/blender/python/gpu/gpu_py_api.c           |   4 +
 source/blender/python/gpu/gpu_py_api.h           |   4 +
 source/blender/python/gpu/gpu_py_buffer.c        | 669 +++++++++++++++++++++++
 source/blender/python/gpu/gpu_py_buffer.h        |  53 ++
 source/blender/python/gpu/gpu_py_framebuffer.c   | 546 ++++++++++++++++++
 source/blender/python/gpu/gpu_py_framebuffer.h   |  33 ++
 source/blender/python/gpu/gpu_py_offscreen.c     | 260 +++++----
 source/blender/python/gpu/gpu_py_offscreen.h     |   1 -
 source/blender/python/gpu/gpu_py_shader.c        |  70 +++
 source/blender/python/gpu/gpu_py_state.c         | 423 ++++++++++++++
 source/blender/python/gpu/gpu_py_state.h         |  23 +
 source/blender/python/gpu/gpu_py_texture.c       | 559 +++++++++++++++++++
 source/blender/python/gpu/gpu_py_texture.h       |  34 ++
 source/blender/python/gpu/gpu_py_types.c         |  16 +
 source/blender/python/gpu/gpu_py_types.h         |   5 +
 source/blender/python/gpu/gpu_py_uniformbuffer.c | 195 +++++++
 source/blender/python/gpu/gpu_py_uniformbuffer.h |  33 ++
 22 files changed, 2921 insertions(+), 100 deletions(-)
 create mode 100644 source/blender/python/gpu/gpu_py.c
 create mode 100644 source/blender/python/gpu/gpu_py.h
 create mode 100644 source/blender/python/gpu/gpu_py_buffer.c
 create mode 100644 source/blender/python/gpu/gpu_py_buffer.h
 create mode 100644 source/blender/python/gpu/gpu_py_framebuffer.c
 create mode 100644 source/blender/python/gpu/gpu_py_framebuffer.h
 create mode 100644 source/blender/python/gpu/gpu_py_state.c
 create mode 100644 source/blender/python/gpu/gpu_py_state.h
 create mode 100644 source/blender/python/gpu/gpu_py_texture.c
 create mode 100644 source/blender/python/gpu/gpu_py_texture.h
 create mode 100644 source/blender/python/gpu/gpu_py_uniformbuffer.c
 create mode 100644 source/blender/python/gpu/gpu_py_uniformbuffer.h
(limited to 'source/blender/python')

diff --git a/source/blender/python/generic/py_capi_utils.c b/source/blender/python/generic/py_capi_utils.c
index c7ce264f2f9..351ba884d49 100644
--- a/source/blender/python/generic/py_capi_utils.c
+++ b/source/blender/python/generic/py_capi_utils.c
@@ -282,6 +282,17 @@ int PyC_ParseStringEnum(PyObject *o, void *p)
   return 0;
 }
 
+const char *PyC_StringEnum_FindIDFromValue(const struct PyC_StringEnumItems *items,
+                                           const int value)
+{
+  for (int i = 0; items[i].id; i++) {
+    if (items[i].value == value) {
+      return items[i].id;
+    }
+  }
+
return NULL; +} + /* silly function, we dont use arg. just check its compatible with __deepcopy__ */ int PyC_CheckArgs_DeepCopy(PyObject *args) { diff --git a/source/blender/python/generic/py_capi_utils.h b/source/blender/python/generic/py_capi_utils.h index aacc5dd7bea..842e1482c06 100644 --- a/source/blender/python/generic/py_capi_utils.h +++ b/source/blender/python/generic/py_capi_utils.h @@ -140,6 +140,8 @@ struct PyC_StringEnum { }; int PyC_ParseStringEnum(PyObject *o, void *p); +const char *PyC_StringEnum_FindIDFromValue(const struct PyC_StringEnumItems *items, + const int value); int PyC_CheckArgs_DeepCopy(PyObject *args); diff --git a/source/blender/python/gpu/CMakeLists.txt b/source/blender/python/gpu/CMakeLists.txt index 7f6fd9eefab..fe5c559fcc0 100644 --- a/source/blender/python/gpu/CMakeLists.txt +++ b/source/blender/python/gpu/CMakeLists.txt @@ -33,25 +33,37 @@ set(INC_SYS ) set(SRC + gpu_py.c gpu_py_api.c gpu_py_batch.c + gpu_py_buffer.c gpu_py_element.c + gpu_py_framebuffer.c gpu_py_matrix.c gpu_py_offscreen.c gpu_py_select.c gpu_py_shader.c + gpu_py_state.c + gpu_py_texture.c gpu_py_types.c + gpu_py_uniformbuffer.c gpu_py_vertex_buffer.c gpu_py_vertex_format.c + gpu_py.h gpu_py_api.h gpu_py_batch.h + gpu_py_buffer.h gpu_py_element.h + gpu_py_framebuffer.h gpu_py_matrix.h gpu_py_offscreen.h gpu_py_select.h gpu_py_shader.h + gpu_py_state.h + gpu_py_texture.h gpu_py_types.h + gpu_py_uniformbuffer.h gpu_py_vertex_buffer.h gpu_py_vertex_format.h ) diff --git a/source/blender/python/gpu/gpu_py.c b/source/blender/python/gpu/gpu_py.c new file mode 100644 index 00000000000..522cd89c5c0 --- /dev/null +++ b/source/blender/python/gpu/gpu_py.c @@ -0,0 +1,45 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + * + * - Use ``bpygpu_`` for local API. + * - Use ``BPyGPU`` for public API. + */ + +#include + +#include "GPU_texture.h" + +#include "../generic/py_capi_utils.h" + +#include "gpu_py.h" /* own include */ + +/* -------------------------------------------------------------------- */ +/** \name GPU Module + * \{ */ + +struct PyC_StringEnumItems bpygpu_dataformat_items[] = { + {GPU_DATA_FLOAT, "FLOAT"}, + {GPU_DATA_INT, "INT"}, + {GPU_DATA_UNSIGNED_INT, "UINT"}, + {GPU_DATA_UNSIGNED_BYTE, "UBYTE"}, + {GPU_DATA_UNSIGNED_INT_24_8, "UINT_24_8"}, + {GPU_DATA_10_11_11_REV, "10_11_11_REV"}, + {0, NULL}, +}; +/** \} */ diff --git a/source/blender/python/gpu/gpu_py.h b/source/blender/python/gpu/gpu_py.h new file mode 100644 index 00000000000..8a96391664f --- /dev/null +++ b/source/blender/python/gpu/gpu_py.h @@ -0,0 +1,23 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + */ + +#pragma once + +extern struct PyC_StringEnumItems bpygpu_dataformat_items[]; diff --git a/source/blender/python/gpu/gpu_py_api.c b/source/blender/python/gpu/gpu_py_api.c index dcc8484319e..38e9b61e147 100644 --- a/source/blender/python/gpu/gpu_py_api.c +++ b/source/blender/python/gpu/gpu_py_api.c @@ -35,6 +35,7 @@ #include "gpu_py_matrix.h" #include "gpu_py_select.h" +#include "gpu_py_state.h" #include "gpu_py_types.h" #include "gpu_py_api.h" /* own include */ @@ -134,6 +135,9 @@ PyObject *BPyInit_gpu(void) PyModule_AddObject(mod, "shader", (submodule = bpygpu_shader_init())); PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule); + PyModule_AddObject(mod, "state", (submodule = bpygpu_state_init())); + PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule); + return mod; } diff --git a/source/blender/python/gpu/gpu_py_api.h b/source/blender/python/gpu/gpu_py_api.h index b8f0cde129f..fe645d8cd3a 100644 --- a/source/blender/python/gpu/gpu_py_api.h +++ b/source/blender/python/gpu/gpu_py_api.h @@ -20,6 +20,10 @@ #pragma once +/* Each type object could have a method for free GPU resources. + * However, it is currently of little use. */ +// #define BPYGPU_USE_GPUOBJ_FREE_METHOD + int bpygpu_ParsePrimType(PyObject *o, void *p); PyObject *BPyInit_gpu(void); diff --git a/source/blender/python/gpu/gpu_py_buffer.c b/source/blender/python/gpu/gpu_py_buffer.c new file mode 100644 index 00000000000..aef819aec39 --- /dev/null +++ b/source/blender/python/gpu/gpu_py_buffer.c @@ -0,0 +1,669 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + * + * This file defines the gpu.state API. + * + * - Use ``bpygpu_`` for local API. + * - Use ``BPyGPU`` for public API. 
+ */ + +#include + +#include "BLI_utildefines.h" + +#include "MEM_guardedalloc.h" + +#include "GPU_texture.h" + +#include "../generic/py_capi_utils.h" + +#include "gpu_py.h" + +#include "gpu_py_buffer.h" + +// #define PYGPU_BUFFER_PROTOCOL + +/* -------------------------------------------------------------------- */ +/** \name Utility Functions + * \{ */ + +static bool pygpu_buffer_dimensions_compare(int ndim, + const Py_ssize_t *shape_a, + const Py_ssize_t *shape_b) +{ + return (bool)memcmp(shape_a, shape_b, ndim * sizeof(Py_ssize_t)); +} + +static const char *pygpu_buffer_formatstr(eGPUDataFormat data_format) +{ + switch (data_format) { + case GPU_DATA_FLOAT: + return "f"; + case GPU_DATA_INT: + return "i"; + case GPU_DATA_UNSIGNED_INT: + return "I"; + case GPU_DATA_UNSIGNED_BYTE: + return "B"; + case GPU_DATA_UNSIGNED_INT_24_8: + case GPU_DATA_10_11_11_REV: + return "I"; + default: + break; + } + return NULL; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name BPyGPUBuffer API + * \{ */ + +static BPyGPUBuffer *pygpu_buffer_make_from_data(PyObject *parent, + const eGPUDataFormat format, + const int shape_len, + const Py_ssize_t *shape, + void *buf) +{ + BPyGPUBuffer *buffer = (BPyGPUBuffer *)_PyObject_GC_New(&BPyGPU_BufferType); + + buffer->parent = NULL; + buffer->format = format; + buffer->shape_len = shape_len; + buffer->shape = MEM_mallocN(shape_len * sizeof(*buffer->shape), "BPyGPUBuffer shape"); + memcpy(buffer->shape, shape, shape_len * sizeof(*buffer->shape)); + buffer->buf.as_void = buf; + + if (parent) { + Py_INCREF(parent); + buffer->parent = parent; + PyObject_GC_Track(buffer); + } + return buffer; +} + +static PyObject *pygpu_buffer__sq_item(BPyGPUBuffer *self, int i) +{ + if (i >= self->shape[0] || i < 0) { + PyErr_SetString(PyExc_IndexError, "array index out of range"); + return NULL; + } + + const char *formatstr = pygpu_buffer_formatstr(self->format); + + if (self->shape_len == 1) { + switch (self->format) { + case GPU_DATA_FLOAT: + return Py_BuildValue(formatstr, self->buf.as_float[i]); + case GPU_DATA_INT: + return Py_BuildValue(formatstr, self->buf.as_int[i]); + case GPU_DATA_UNSIGNED_BYTE: + return Py_BuildValue(formatstr, self->buf.as_byte[i]); + case GPU_DATA_UNSIGNED_INT: + case GPU_DATA_UNSIGNED_INT_24_8: + case GPU_DATA_10_11_11_REV: + return Py_BuildValue(formatstr, self->buf.as_uint[i]); + } + } + else { + int offset = i * GPU_texture_dataformat_size(self->format); + for (int j = 1; j < self->shape_len; j++) { + offset *= self->shape[j]; + } + + return (PyObject *)pygpu_buffer_make_from_data((PyObject *)self, + self->format, + self->shape_len - 1, + self->shape + 1, + self->buf.as_byte + offset); + } + + return NULL; +} + +static PyObject *pygpu_buffer_to_list(BPyGPUBuffer *self) +{ + int i, len = self->shape[0]; + PyObject *list = PyList_New(len); + + for (i = 0; i < len; i++) { + PyList_SET_ITEM(list, i, pygpu_buffer__sq_item(self, i)); + } + + return list; +} + +static PyObject *pygpu_buffer_to_list_recursive(BPyGPUBuffer *self) +{ + PyObject *list; + + if (self->shape_len > 1) { + int i, len = self->shape[0]; + list = PyList_New(len); + + for (i = 0; i < len; i++) { + /* "BPyGPUBuffer *sub_tmp" is a temporary object created just to be read for nested lists. + * That is why it is decremented/freed soon after. + * TODO: For efficiency, avoid creating #BPyGPUBuffer when creating nested lists. 
*/ + BPyGPUBuffer *sub_tmp = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i); + PyList_SET_ITEM(list, i, pygpu_buffer_to_list_recursive(sub_tmp)); + Py_DECREF(sub_tmp); + } + } + else { + list = pygpu_buffer_to_list(self); + } + + return list; +} + +static PyObject *pygpu_buffer_dimensions(BPyGPUBuffer *self, void *UNUSED(arg)) +{ + PyObject *list = PyList_New(self->shape_len); + int i; + + for (i = 0; i < self->shape_len; i++) { + PyList_SET_ITEM(list, i, PyLong_FromLong(self->shape[i])); + } + + return list; +} + +static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg) +{ + Py_VISIT(self->parent); + return 0; +} + +static int pygpu_buffer__tp_clear(BPyGPUBuffer *self) +{ + Py_CLEAR(self->parent); + return 0; +} + +static void pygpu_buffer__tp_dealloc(BPyGPUBuffer *self) +{ + if (self->parent) { + PyObject_GC_UnTrack(self); + pygpu_buffer__tp_clear(self); + Py_XDECREF(self->parent); + } + else { + MEM_freeN(self->buf.as_void); + } + + MEM_freeN(self->shape); + + PyObject_GC_Del(self); +} + +static PyObject *pygpu_buffer__tp_repr(BPyGPUBuffer *self) +{ + PyObject *repr; + + PyObject *list = pygpu_buffer_to_list_recursive(self); + const char *typestr = PyC_StringEnum_FindIDFromValue(bpygpu_dataformat_items, self->format); + + repr = PyUnicode_FromFormat("Buffer(%s, %R)", typestr, list); + Py_DECREF(list); + + return repr; +} + +static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v); + +static int pygpu_buffer_ass_slice(BPyGPUBuffer *self, + Py_ssize_t begin, + Py_ssize_t end, + PyObject *seq) +{ + PyObject *item; + int count, err = 0; + + if (begin < 0) { + begin = 0; + } + if (end > self->shape[0]) { + end = self->shape[0]; + } + if (begin > end) { + begin = end; + } + + if (!PySequence_Check(seq)) { + PyErr_Format(PyExc_TypeError, + "buffer[:] = value, invalid assignment. " + "Expected a sequence, not an %.200s type", + Py_TYPE(seq)->tp_name); + return -1; + } + + /* re-use count var */ + if ((count = PySequence_Size(seq)) != (end - begin)) { + PyErr_Format(PyExc_TypeError, + "buffer[:] = value, size mismatch in assignment. 
" + "Expected: %d (given: %d)", + count, + end - begin); + return -1; + } + + for (count = begin; count < end; count++) { + item = PySequence_GetItem(seq, count - begin); + if (item) { + err = pygpu_buffer__sq_ass_item(self, count, item); + Py_DECREF(item); + } + else { + err = -1; + } + if (err) { + break; + } + } + return err; +} + +#define MAX_DIMENSIONS 64 +static PyObject *pygpu_buffer__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds) +{ + PyObject *length_ob, *init = NULL; + BPyGPUBuffer *buffer = NULL; + Py_ssize_t shape[MAX_DIMENSIONS]; + + Py_ssize_t i, shape_len = 0; + + if (kwds && PyDict_Size(kwds)) { + PyErr_SetString(PyExc_TypeError, "Buffer(): takes no keyword args"); + return NULL; + } + + const struct PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items, GPU_DATA_FLOAT}; + if (!PyArg_ParseTuple( + args, "O&O|O: Buffer", PyC_ParseStringEnum, &pygpu_dataformat, &length_ob, &init)) { + return NULL; + } + + if (PyLong_Check(length_ob)) { + shape_len = 1; + if (((shape[0] = PyLong_AsLong(length_ob)) < 1)) { + PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1"); + return NULL; + } + } + else if (PySequence_Check(length_ob)) { + shape_len = PySequence_Size(length_ob); + if (shape_len > MAX_DIMENSIONS) { + PyErr_SetString(PyExc_AttributeError, + "too many dimensions, max is " STRINGIFY(MAX_DIMENSIONS)); + return NULL; + } + if (shape_len < 1) { + PyErr_SetString(PyExc_AttributeError, "sequence must have at least one dimension"); + return NULL; + } + + for (i = 0; i < shape_len; i++) { + PyObject *ob = PySequence_GetItem(length_ob, i); + if (!PyLong_Check(ob)) { + PyErr_Format(PyExc_TypeError, + "invalid dimension %i, expected an int, not a %.200s", + i, + Py_TYPE(ob)->tp_name); + Py_DECREF(ob); + return NULL; + } + else { + shape[i] = PyLong_AsLong(ob); + } + Py_DECREF(ob); + + if (shape[i] < 1) { + PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1"); + return NULL; + } + } + } + else { + PyErr_Format(PyExc_TypeError, + "invalid second argument argument expected a sequence " + "or an int, not a %.200s", + Py_TYPE(length_ob)->tp_name); + return NULL; + } + + if (init && PyObject_CheckBuffer(init)) { + Py_buffer pybuffer; + + if (PyObject_GetBuffer(init, &pybuffer, PyBUF_ND | PyBUF_FORMAT) == -1) { + /* PyObject_GetBuffer raise a PyExc_BufferError */ + return NULL; + } + + if (shape_len != pybuffer.ndim || + !pygpu_buffer_dimensions_compare(shape_len, shape, pybuffer.shape)) { + PyErr_Format(PyExc_TypeError, "array size does not match"); + } + else { + buffer = pygpu_buffer_make_from_data( + init, pygpu_dataformat.value_found, pybuffer.ndim, shape, pybuffer.buf); + } + + PyBuffer_Release(&pybuffer); + } + else { + buffer = BPyGPU_Buffer_CreatePyObject(pygpu_dataformat.value_found, shape_len, shape, NULL); + if (init && pygpu_buffer_ass_slice(buffer, 0, shape[0], init)) { + Py_DECREF(buffer); + return NULL; + } + } + + return (PyObject *)buffer; +} + +/* BPyGPUBuffer sequence methods */ + +static int pygpu_buffer__sq_length(BPyGPUBuffer *self) +{ + return self->shape[0]; +} + +static PyObject *pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end) +{ + PyObject *list; + Py_ssize_t count; + + if (begin < 0) { + begin = 0; + } + if (end > self->shape[0]) { + end = self->shape[0]; + } + if (begin > end) { + begin = end; + } + + list = PyList_New(end - begin); + + for (count = begin; count < end; count++) { + PyList_SET_ITEM(list, count - begin, pygpu_buffer__sq_item(self, 
count)); + } + return list; +} + +static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v) +{ + if (i >= self->shape[0] || i < 0) { + PyErr_SetString(PyExc_IndexError, "array assignment index out of range"); + return -1; + } + + if (self->shape_len != 1) { + BPyGPUBuffer *row = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i); + + if (row) { + const int ret = pygpu_buffer_ass_slice(row, 0, self->shape[1], v); + Py_DECREF(row); + return ret; + } + + return -1; + } + + switch (self->format) { + case GPU_DATA_FLOAT: + return PyArg_Parse(v, "f:Expected floats", &self->buf.as_float[i]) ? 0 : -1; + case GPU_DATA_INT: + return PyArg_Parse(v, "i:Expected ints", &self->buf.as_int[i]) ? 0 : -1; + case GPU_DATA_UNSIGNED_BYTE: + return PyArg_Parse(v, "b:Expected ints", &self->buf.as_byte[i]) ? 0 : -1; + case GPU_DATA_UNSIGNED_INT: + case GPU_DATA_UNSIGNED_INT_24_8: + case GPU_DATA_10_11_11_REV: + return PyArg_Parse(v, "b:Expected ints", &self->buf.as_uint[i]) ? 0 : -1; + default: + return 0; /* should never happen */ + } +} + +static PyObject *pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item) +{ + if (PyIndex_Check(item)) { + Py_ssize_t i; + i = PyNumber_AsSsize_t(item, PyExc_IndexError); + if (i == -1 && PyErr_Occurred()) { + return NULL; + } + if (i < 0) { + i += self->shape[0]; + } + return pygpu_buffer__sq_item(self, i); + } + if (PySlice_Check(item)) { + Py_ssize_t start, stop, step, slicelength; + + if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) { + return NULL; + } + + if (slicelength <= 0) { + return PyTuple_New(0); + } + if (step == 1) { + return pygpu_buffer_slice(self, start, stop); + } + + PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors"); + return NULL; + } + + PyErr_Format( + PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name); + return NULL; +} + +static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value) +{ + if (PyIndex_Check(item)) { + Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError); + if (i == -1 && PyErr_Occurred()) { + return -1; + } + if (i < 0) { + i += self->shape[0]; + } + return pygpu_buffer__sq_ass_item(self, i, value); + } + if (PySlice_Check(item)) { + Py_ssize_t start, stop, step, slicelength; + + if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) { + return -1; + } + + if (step == 1) { + return pygpu_buffer_ass_slice(self, start, stop, value); + } + + PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors"); + return -1; + } + + PyErr_Format( + PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name); + return -1; +} + +static PyMethodDef pygpu_buffer__tp_methods[] = { + {"to_list", + (PyCFunction)pygpu_buffer_to_list_recursive, + METH_NOARGS, + "return the buffer as a list"}, + {NULL, NULL, 0, NULL}, +}; + +static PyGetSetDef pygpu_buffer_getseters[] = { + {"dimensions", (getter)pygpu_buffer_dimensions, NULL, NULL, NULL}, + {NULL, NULL, NULL, NULL, NULL}, +}; + +static PySequenceMethods pygpu_buffer__tp_as_sequence = { + (lenfunc)pygpu_buffer__sq_length, /*sq_length */ + (binaryfunc)NULL, /*sq_concat */ + (ssizeargfunc)NULL, /*sq_repeat */ + (ssizeargfunc)pygpu_buffer__sq_item, /*sq_item */ + (ssizessizeargfunc)NULL, /*sq_slice, deprecated, handled in pygpu_buffer__sq_item */ + (ssizeobjargproc)pygpu_buffer__sq_ass_item, /*sq_ass_item */ + (ssizessizeobjargproc)NULL, /* sq_ass_slice, deprecated handled in 
pygpu_buffer__sq_ass_item */ + (objobjproc)NULL, /* sq_contains */ + (binaryfunc)NULL, /* sq_inplace_concat */ + (ssizeargfunc)NULL, /* sq_inplace_repeat */ +}; + +static PyMappingMethods pygpu_buffer__tp_as_mapping = { + (lenfunc)pygpu_buffer__sq_length, + (binaryfunc)pygpu_buffer__mp_subscript, + (objobjargproc)pygpu_buffer__mp_ass_subscript, +}; + +#ifdef PYGPU_BUFFER_PROTOCOL +static void pygpu_buffer_strides_calc(const eGPUDataFormat format, + const int shape_len, + const Py_ssize_t *shape, + Py_ssize_t *r_strides) +{ + r_strides[0] = GPU_texture_dataformat_size(format); + for (int i = 1; i < shape_len; i++) { + r_strides[i] = r_strides[i - 1] * shape[i - 1]; + } +} + +/* Here is the buffer interface function */ +static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags) +{ + if (view == NULL) { + PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); + return -1; + } + + view->obj = (PyObject *)self; + view->buf = (void *)self->buf.as_void; + view->len = bpygpu_Buffer_size(self); + view->readonly = 0; + view->itemsize = GPU_texture_dataformat_size(self->format); + view->format = pygpu_buffer_formatstr(self->format); + view->ndim = self->shape_len; + view->shape = self->shape; + view->strides = MEM_mallocN(view->ndim * sizeof(*view->strides), "BPyGPUBuffer strides"); + pygpu_buffer_strides_calc(self->format, view->ndim, view->shape, view->strides); + view->suboffsets = NULL; + view->internal = NULL; + + Py_INCREF(self); + return 0; +} + +static void pygpu_buffer__bf_releasebuffer(PyObject *UNUSED(exporter), Py_buffer *view) +{ + MEM_SAFE_FREE(view->strides); +} + +static PyBufferProcs pygpu_buffer__tp_as_buffer = { + (getbufferproc)pygpu_buffer__bf_getbuffer, + (releasebufferproc)pygpu_buffer__bf_releasebuffer, +}; +#endif + +PyDoc_STRVAR(pygpu_buffer__tp_doc, + ".. class:: Buffer(format, dimensions, data)\n" + "\n" + " For Python access to GPU functions requiring a pointer.\n" + "\n" + " :arg format: One of these primitive types: {\n" + " `FLOAT`,\n" + " `INT`,\n" + " `UINT`,\n" + " `UBYTE`,\n" + " `UINT_24_8`,\n" + " `10_11_11_REV`,\n" + " :type type: `str`\n" + " :arg dimensions: Array describing the dimensions.\n" + " :type dimensions: `int`\n" + " :arg data: Optional data array.\n" + " :type data: `array`\n"); +PyTypeObject BPyGPU_BufferType = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "Buffer", + .tp_basicsize = sizeof(BPyGPUBuffer), + .tp_dealloc = (destructor)pygpu_buffer__tp_dealloc, + .tp_repr = (reprfunc)pygpu_buffer__tp_repr, + .tp_as_sequence = &pygpu_buffer__tp_as_sequence, + .tp_as_mapping = &pygpu_buffer__tp_as_mapping, +#ifdef PYGPU_BUFFER_PROTOCOL + .tp_as_buffer = &pygpu_buffer__tp_as_buffer, +#endif + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, + .tp_doc = pygpu_buffer__tp_doc, + .tp_traverse = (traverseproc)pygpu_buffer__tp_traverse, + .tp_clear = (inquiry)pygpu_buffer__tp_clear, + .tp_methods = pygpu_buffer__tp_methods, + .tp_getset = pygpu_buffer_getseters, + .tp_new = pygpu_buffer__tp_new, +}; + +static size_t pygpu_buffer_calc_size(const int format, + const int shape_len, + const Py_ssize_t *shape) +{ + size_t r_size = GPU_texture_dataformat_size(format); + + for (int i = 0; i < shape_len; i++) { + r_size *= shape[i]; + } + + return r_size; +} + +size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer) +{ + return pygpu_buffer_calc_size(buffer->format, buffer->shape_len, buffer->shape); +} + +/** + * Create a buffer object + * + * \param dimensions: An array of ndimensions integers representing the size of each dimension. 
+ * \param initbuffer: When not NULL holds a contiguous buffer + * with the correct format from which the buffer will be initialized + */ +BPyGPUBuffer *BPyGPU_Buffer_CreatePyObject(const int format, + const int shape_len, + const Py_ssize_t *shape, + void *buffer) +{ + if (buffer == NULL) { + size_t size = pygpu_buffer_calc_size(format, shape_len, shape); + buffer = MEM_callocN(size, "BPyGPUBuffer buffer"); + } + + return pygpu_buffer_make_from_data(NULL, format, shape_len, shape, buffer); +} + +/** \} */ diff --git a/source/blender/python/gpu/gpu_py_buffer.h b/source/blender/python/gpu/gpu_py_buffer.h new file mode 100644 index 00000000000..003f1a52078 --- /dev/null +++ b/source/blender/python/gpu/gpu_py_buffer.h @@ -0,0 +1,53 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + */ + +#pragma once + +extern PyTypeObject BPyGPU_BufferType; + +#define BPyGPU_Buffer_Check(v) (Py_TYPE(v) == &BPyGPU_BufferType) + +/** + * Buffer Object + * + * For Python access to GPU functions requiring a pointer. + */ +typedef struct BPyGPUBuffer { + PyObject_VAR_HEAD PyObject *parent; + + int format; + int shape_len; + Py_ssize_t *shape; + + union { + char *as_byte; + int *as_int; + uint *as_uint; + float *as_float; + + void *as_void; + } buf; +} BPyGPUBuffer; + +size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer); +BPyGPUBuffer *BPyGPU_Buffer_CreatePyObject(const int format, + const int shape_len, + const Py_ssize_t *shape, + void *buffer); diff --git a/source/blender/python/gpu/gpu_py_framebuffer.c b/source/blender/python/gpu/gpu_py_framebuffer.c new file mode 100644 index 00000000000..487f0f984cb --- /dev/null +++ b/source/blender/python/gpu/gpu_py_framebuffer.c @@ -0,0 +1,546 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + * + * This file defines the framebuffer functionalities of the 'gpu' module + * used for off-screen OpenGL rendering. + * + * - Use ``bpygpu_`` for local API. + * - Use ``BPyGPU`` for public API. 
+ */ + +#include + +#include "GPU_context.h" +#include "GPU_framebuffer.h" +#include "GPU_init_exit.h" + +#include "../generic/py_capi_utils.h" +#include "../generic/python_utildefines.h" +#include "../mathutils/mathutils.h" + +#include "gpu_py_api.h" +#include "gpu_py_texture.h" + +#include "gpu_py_framebuffer.h" /* own include */ + +/* -------------------------------------------------------------------- */ +/** \name GPUFrameBuffer Common Utilities + * \{ */ + +static int pygpu_framebuffer_valid_check(BPyGPUFrameBuffer *bpygpu_fb) +{ + if (UNLIKELY(bpygpu_fb->fb == NULL)) { + PyErr_SetString(PyExc_ReferenceError, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD + "GPU framebuffer was freed, no further access is valid" +#else + "GPU framebuffer: internal error" +#endif + ); + return -1; + } + return 0; +} + +#define PYGPU_FRAMEBUFFER_CHECK_OBJ(bpygpu) \ + { \ + if (UNLIKELY(pygpu_framebuffer_valid_check(bpygpu) == -1)) { \ + return NULL; \ + } \ + } \ + ((void)0) + +static void pygpu_framebuffer_free_if_possible(GPUFrameBuffer *fb) +{ + if (!fb) { + return; + } + + if (GPU_is_init()) { + GPU_framebuffer_free(fb); + } + else { + printf("PyFramebuffer freed after the context has been destroyed.\n"); + } +} + +/* Keep less than or equal to #FRAMEBUFFER_STACK_DEPTH */ +#define GPU_PY_FRAMEBUFFER_STACK_LEN 16 + +static bool pygpu_framebuffer_stack_push_and_bind_or_error(GPUFrameBuffer *fb) +{ + if (GPU_framebuffer_stack_level_get() >= GPU_PY_FRAMEBUFFER_STACK_LEN) { + PyErr_SetString( + PyExc_RuntimeError, + "Maximum framebuffer stack depth " STRINGIFY(GPU_PY_FRAMEBUFFER_STACK_LEN) " reached"); + return false; + } + GPU_framebuffer_push(GPU_framebuffer_active_get()); + GPU_framebuffer_bind(fb); + return true; +} + +static bool pygpu_framebuffer_stack_pop_and_restore_or_error(GPUFrameBuffer *fb) +{ + if (GPU_framebuffer_stack_level_get() == 0) { + PyErr_SetString(PyExc_RuntimeError, "Minimum framebuffer stack depth reached"); + return false; + } + + if (fb && !GPU_framebuffer_bound(fb)) { + PyErr_SetString(PyExc_RuntimeError, "Framebuffer is not bound"); + return false; + } + + GPUFrameBuffer *fb_prev = GPU_framebuffer_pop(); + GPU_framebuffer_bind(fb_prev); + return true; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Stack (Context Manager) + * + * Safer alternative to ensure balanced push/pop calls. 
+ * + * \{ */ + +typedef struct { + PyObject_HEAD /* required python macro */ + BPyGPUFrameBuffer *py_fb; + int level; +} PyFrameBufferStackContext; + +static void pygpu_framebuffer_stack_context__tp_dealloc(PyFrameBufferStackContext *self) +{ + Py_DECREF(self->py_fb); + PyObject_DEL(self); +} + +static PyObject *pygpu_framebuffer_stack_context_enter(PyFrameBufferStackContext *self) +{ + PYGPU_FRAMEBUFFER_CHECK_OBJ(self->py_fb); + + /* sanity - should never happen */ + if (self->level != -1) { + PyErr_SetString(PyExc_RuntimeError, "Already in use"); + return NULL; + } + + if (!pygpu_framebuffer_stack_push_and_bind_or_error(self->py_fb->fb)) { + return NULL; + } + + self->level = GPU_framebuffer_stack_level_get(); + Py_RETURN_NONE; +} + +static PyObject *pygpu_framebuffer_stack_context_exit(PyFrameBufferStackContext *self, + PyObject *UNUSED(args)) +{ + PYGPU_FRAMEBUFFER_CHECK_OBJ(self->py_fb); + + /* sanity - should never happen */ + if (self->level == -1) { + fprintf(stderr, "Not yet in use\n"); + return NULL; + } + + const int level = GPU_framebuffer_stack_level_get(); + if (level != self->level) { + fprintf(stderr, "Level of bind mismatch, expected %d, got %d\n", self->level, level); + } + + if (!pygpu_framebuffer_stack_pop_and_restore_or_error(self->py_fb->fb)) { + return NULL; + } + Py_RETURN_NONE; +} + +static PyMethodDef pygpu_framebuffer_stack_context__tp_methods[] = { + {"__enter__", (PyCFunction)pygpu_framebuffer_stack_context_enter, METH_NOARGS}, + {"__exit__", (PyCFunction)pygpu_framebuffer_stack_context_exit, METH_VARARGS}, + {NULL}, +}; + +static PyTypeObject FramebufferStackContext_Type = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBufferStackContext", + .tp_basicsize = sizeof(PyFrameBufferStackContext), + .tp_dealloc = (destructor)pygpu_framebuffer_stack_context__tp_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_methods = pygpu_framebuffer_stack_context__tp_methods, +}; + +PyDoc_STRVAR(pygpu_framebuffer_bind_doc, + ".. function:: bind()\n" + "\n" + " Context manager to ensure balanced bind calls, even in the case of an error.\n"); +static PyObject *pygpu_framebuffer_bind(BPyGPUFrameBuffer *self) +{ + PyFrameBufferStackContext *ret = PyObject_New(PyFrameBufferStackContext, + &FramebufferStackContext_Type); + ret->py_fb = self; + ret->level = -1; + Py_INCREF(self); + return (PyObject *)ret; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name GPUFramebuffer Type + * \{ */ + +/* Fill in the GPUAttachment according to the PyObject parameter. + * PyObject *o can be NULL, Py_None, BPyGPUTexture or a dictionary containing the keyword "texture" + * and the optional keywords "layer" and "mip". + * Returns false on error. In this case, a python message will be raised and GPUAttachment will not + * be touched. */ +static bool pygpu_framebuffer_new_parse_arg(PyObject *o, GPUAttachment *r_attach) +{ + GPUAttachment tmp_attach = GPU_ATTACHMENT_NONE; + + if (!o || o == Py_None) { + /* Pass. */; + } + else if (BPyGPUTexture_Check(o)) { + if (!bpygpu_ParseTexture(o, &tmp_attach.tex)) { + return false; + } + } + else { + const char *c_texture = "texture"; + const char *c_layer = "layer"; + const char *c_mip = "mip"; + PyObject *key, *value; + Py_ssize_t pos = 0; + while (PyDict_Next(o, &pos, &key, &value)) { + if (!PyUnicode_Check(key)) { + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); + return false; + } + + if (c_texture && _PyUnicode_EqualToASCIIString(key, c_texture)) { + /* Compare only once. 
*/ + c_texture = NULL; + if (!bpygpu_ParseTexture(value, &tmp_attach.tex)) { + return false; + } + } + else if (c_layer && _PyUnicode_EqualToASCIIString(key, c_layer)) { + /* Compare only once. */ + c_layer = NULL; + tmp_attach.layer = PyLong_AsLong(value); + if (tmp_attach.layer == -1 && PyErr_Occurred()) { + return false; + } + } + else if (c_mip && _PyUnicode_EqualToASCIIString(key, c_mip)) { + /* Compare only once. */ + c_mip = NULL; + tmp_attach.mip = PyLong_AsLong(value); + if (tmp_attach.mip == -1 && PyErr_Occurred()) { + return false; + } + } + else { + PyErr_Format( + PyExc_TypeError, "'%U' is an invalid keyword argument for this attribute", key); + return false; + } + } + } + + *r_attach = tmp_attach; + return true; +} + +static PyObject *pygpu_framebuffer__tp_new(PyTypeObject *UNUSED(self), + PyObject *args, + PyObject *kwds) +{ + BPYGPU_IS_INIT_OR_ERROR_OBJ; + if (!GPU_context_active_get()) { + PyErr_SetString(PyExc_RuntimeError, "No active GPU context found"); + return NULL; + } + + PyObject *depth_attachment = NULL; + PyObject *color_attachements = NULL; + static const char *_keywords[] = {"depth_slot", "color_slots", NULL}; + static _PyArg_Parser _parser = {"|$OO:GPUFrameBuffer.__new__", _keywords, 0}; + if (!_PyArg_ParseTupleAndKeywordsFast( + args, kwds, &_parser, &depth_attachment, &color_attachements)) { + return NULL; + } + + /* Keep in sync with #GPU_FB_MAX_COLOR_ATTACHMENT. + * TODO: share the define. */ +#define BPYGPU_FB_MAX_COLOR_ATTACHMENT 6 + + GPUAttachment config[BPYGPU_FB_MAX_COLOR_ATTACHMENT + 1]; + + if (!pygpu_framebuffer_new_parse_arg(depth_attachment, &config[0])) { + return NULL; + } + else if (config[0].tex && !GPU_texture_depth(config[0].tex)) { + PyErr_SetString(PyExc_ValueError, "Depth texture with incompatible format"); + return NULL; + } + + int color_attachements_len = 0; + if (color_attachements && color_attachements != Py_None) { + if (PySequence_Check(color_attachements)) { + color_attachements_len = PySequence_Size(color_attachements); + if (color_attachements_len > BPYGPU_FB_MAX_COLOR_ATTACHMENT) { + PyErr_SetString( + PyExc_AttributeError, + "too many attachements, max is " STRINGIFY(BPYGPU_FB_MAX_COLOR_ATTACHMENT)); + return NULL; + } + + for (int i = 1; i <= color_attachements_len; i++) { + PyObject *o = PySequence_GetItem(color_attachements, i); + bool ok = pygpu_framebuffer_new_parse_arg(o, &config[i]); + Py_DECREF(o); + if (!ok) { + return NULL; + } + } + } + else { + if (!pygpu_framebuffer_new_parse_arg(color_attachements, &config[1])) { + return NULL; + } + color_attachements_len = 1; + } + } + + GPUFrameBuffer *fb_python = GPU_framebuffer_create("fb_python"); + GPU_framebuffer_config_array(fb_python, config, color_attachements_len + 1); + + return BPyGPUFrameBuffer_CreatePyObject(fb_python); +} + +PyDoc_STRVAR(pygpu_framebuffer_is_bound_doc, + "Checks if this is the active framebuffer in the context."); +static PyObject *pygpu_framebuffer_is_bound(BPyGPUFrameBuffer *self, void *UNUSED(type)) +{ + PYGPU_FRAMEBUFFER_CHECK_OBJ(self); + return PyBool_FromLong(GPU_framebuffer_bound(self->fb)); +} + +PyDoc_STRVAR(pygpu_framebuffer_clear_doc, + ".. 
method:: clear(color=None, depth=None, stencil=None)\n" + "\n" + " Fill color, depth and stencil textures with specific value.\n" + " Common values: color=(0.0, 0.0, 0.0, 1.0), depth=1.0, stencil=0.\n" + "\n" + " :arg color: float sequence each representing ``(r, g, b, a)``.\n" + " :type color: sequence of 3 or 4 floats\n" + " :arg depth: depth value.\n" + " :type depth: `float`\n" + " :arg stencil: stencil value.\n" + " :type stencil: `int`\n"); +static PyObject *pygpu_framebuffer_clear(BPyGPUFrameBuffer *self, PyObject *args, PyObject *kwds) +{ + PYGPU_FRAMEBUFFER_CHECK_OBJ(self); + + if (!GPU_framebuffer_bound(self->fb)) { + return NULL; + } + + PyObject *py_col = NULL; + PyObject *py_depth = NULL; + PyObject *py_stencil = NULL; + + static const char *_keywords[] = {"color", "depth", "stencil", NULL}; + static _PyArg_Parser _parser = {"|$OOO:clear", _keywords, 0}; + if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &py_col, &py_depth, &py_stencil)) { + return NULL; + } + + eGPUFrameBufferBits buffers = 0; + float col[4] = {0.0f, 0.0f, 0.0f, 1.0f}; + float depth = 1.0f; + uint stencil = 0; + + if (py_col && py_col != Py_None) { + if (mathutils_array_parse(col, 3, 4, py_col, "GPUFrameBuffer.clear(), invalid 'color' arg") == + -1) { + return NULL; + } + buffers |= GPU_COLOR_BIT; + } + + if (py_depth && py_depth != Py_None) { + depth = PyFloat_AsDouble(py_depth); + if (PyErr_Occurred()) { + return NULL; + } + buffers |= GPU_DEPTH_BIT; + } + + if (py_stencil && py_stencil != Py_None) { + if ((stencil = PyC_Long_AsU32(py_stencil)) == (uint)-1) { + return NULL; + } + buffers |= GPU_STENCIL_BIT; + } + + GPU_framebuffer_clear(self->fb, buffers, col, depth, stencil); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(pygpu_framebuffer_viewport_set_doc, + ".. function:: viewport_set(x, y, xsize, ysize)\n" + "\n" + " Set the viewport for this framebuffer object.\n" + " Note: The viewport state is not saved upon framebuffer rebind.\n" + "\n" + " :param x, y: lower left corner of the viewport_set rectangle, in pixels.\n" + " :param xsize, ysize: width and height of the viewport_set.\n" + " :type x, y, xsize, ysize: `int`\n"); +static PyObject *pygpu_framebuffer_viewport_set(BPyGPUFrameBuffer *self, + PyObject *args, + void *UNUSED(type)) +{ + int x, y, xsize, ysize; + if (!PyArg_ParseTuple(args, "iiii:viewport_set", &x, &y, &xsize, &ysize)) { + return NULL; + } + + GPU_framebuffer_viewport_set(self->fb, x, y, xsize, ysize); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(pygpu_framebuffer_viewport_get_doc, + ".. function:: viewport_get()\n" + "\n" + " Returns position and dimension to current viewport.\n"); +static PyObject *pygpu_framebuffer_viewport_get(BPyGPUFrameBuffer *self, void *UNUSED(type)) +{ + PYGPU_FRAMEBUFFER_CHECK_OBJ(self); + int viewport[4]; + GPU_framebuffer_viewport_get(self->fb, viewport); + + PyObject *ret = PyTuple_New(4); + PyTuple_SET_ITEMS(ret, + PyLong_FromLong(viewport[0]), + PyLong_FromLong(viewport[1]), + PyLong_FromLong(viewport[2]), + PyLong_FromLong(viewport[3])); + return ret; +} + +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD +PyDoc_STRVAR(pygpu_framebuffer_free_doc, + ".. 
method:: free()\n" + "\n" + " Free the framebuffer object.\n" + " The framebuffer will no longer be accessible.\n"); +static PyObject *pygpu_framebuffer_free(BPyGPUFrameBuffer *self) +{ + PYGPU_FRAMEBUFFER_CHECK_OBJ(self); + pygpu_framebuffer_free_if_possible(self->fb); + self->fb = NULL; + Py_RETURN_NONE; +} +#endif + +static void BPyGPUFrameBuffer__tp_dealloc(BPyGPUFrameBuffer *self) +{ + pygpu_framebuffer_free_if_possible(self->fb); + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static PyGetSetDef pygpu_framebuffer__tp_getseters[] = { + {"is_bound", + (getter)pygpu_framebuffer_is_bound, + (setter)NULL, + pygpu_framebuffer_is_bound_doc, + NULL}, + {NULL, NULL, NULL, NULL, NULL} /* Sentinel */ +}; + +static struct PyMethodDef pygpu_framebuffer__tp_methods[] = { + {"bind", (PyCFunction)pygpu_framebuffer_bind, METH_NOARGS, pygpu_framebuffer_bind_doc}, + {"clear", + (PyCFunction)pygpu_framebuffer_clear, + METH_VARARGS | METH_KEYWORDS, + pygpu_framebuffer_clear_doc}, + {"viewport_set", + (PyCFunction)pygpu_framebuffer_viewport_set, + METH_NOARGS, + pygpu_framebuffer_viewport_set_doc}, + {"viewport_get", + (PyCFunction)pygpu_framebuffer_viewport_get, + METH_VARARGS, + pygpu_framebuffer_viewport_get_doc}, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD + {"free", (PyCFunction)pygpu_framebuffer_free, METH_NOARGS, pygpu_framebuffer_free_doc}, +#endif + {NULL, NULL, 0, NULL}, +}; + +PyDoc_STRVAR(pygpu_framebuffer__tp_doc, + ".. class:: GPUFrameBuffer(depth_slot=None, color_slots=None)\n" + "\n" + " This object gives access to framebuffer functionallities.\n" + " When a 'layer' is specified in a argument, a single layer of a 3D or array " + "texture is attached to the frame-buffer.\n" + " For cube map textures, layer is translated into a cube map face.\n" + "\n" + " :arg depth_slot: GPUTexture to attach or a `dict` containing keywords: " + "'texture', 'layer' and 'mip'.\n" + " :type depth_slot: :class:`gpu.types.GPUTexture`, `dict` or `Nonetype`\n" + " :arg color_slots: Tuple where each item can be a GPUTexture or a `dict` " + "containing keywords: 'texture', 'layer' and 'mip'.\n" + " :type color_slots: `tuple` or `Nonetype`\n"); +PyTypeObject BPyGPUFrameBuffer_Type = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBuffer", + .tp_basicsize = sizeof(BPyGPUFrameBuffer), + .tp_dealloc = (destructor)BPyGPUFrameBuffer__tp_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_doc = pygpu_framebuffer__tp_doc, + .tp_methods = pygpu_framebuffer__tp_methods, + .tp_getset = pygpu_framebuffer__tp_getseters, + .tp_new = pygpu_framebuffer__tp_new, +}; + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Public API + * \{ */ + +PyObject *BPyGPUFrameBuffer_CreatePyObject(GPUFrameBuffer *fb) +{ + BPyGPUFrameBuffer *self; + + self = PyObject_New(BPyGPUFrameBuffer, &BPyGPUFrameBuffer_Type); + self->fb = fb; + + return (PyObject *)self; +} + +/** \} */ + +#undef PYGPU_FRAMEBUFFER_CHECK_OBJ diff --git a/source/blender/python/gpu/gpu_py_framebuffer.h b/source/blender/python/gpu/gpu_py_framebuffer.h new file mode 100644 index 00000000000..7113e7c35aa --- /dev/null +++ b/source/blender/python/gpu/gpu_py_framebuffer.h @@ -0,0 +1,33 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + */ + +#pragma once + +#include "BLI_compiler_attrs.h" + +extern PyTypeObject BPyGPUFrameBuffer_Type; + +#define BPyGPUFrameBuffer_Check(v) (Py_TYPE(v) == &BPyGPUFrameBuffer_Type) + +typedef struct BPyGPUFrameBuffer { + PyObject_HEAD struct GPUFrameBuffer *fb; +} BPyGPUFrameBuffer; + +PyObject *BPyGPUFrameBuffer_CreatePyObject(struct GPUFrameBuffer *fb) ATTR_NONNULL(1); diff --git a/source/blender/python/gpu/gpu_py_offscreen.c b/source/blender/python/gpu/gpu_py_offscreen.c index 2431a1fca5b..a98d9649f6f 100644 --- a/source/blender/python/gpu/gpu_py_offscreen.c +++ b/source/blender/python/gpu/gpu_py_offscreen.c @@ -30,6 +30,7 @@ #include "MEM_guardedalloc.h" +#include "BLI_string.h" #include "BLI_utildefines.h" #include "BKE_global.h" @@ -54,14 +55,23 @@ #include "gpu_py_api.h" #include "gpu_py_offscreen.h" /* own include */ +/* Define the free method to avoid breakage. */ +#define BPYGPU_USE_GPUOBJ_FREE_METHOD + /* -------------------------------------------------------------------- */ /** \name GPUOffScreen Common Utilities * \{ */ -static int pygpu_offscreen_valid_check(BPyGPUOffScreen *pygpu_ofs) +static int pygpu_offscreen_valid_check(BPyGPUOffScreen *py_ofs) { - if (UNLIKELY(pygpu_ofs->ofs == NULL)) { - PyErr_SetString(PyExc_ReferenceError, "GPU offscreen was freed, no further access is valid"); + if (UNLIKELY(py_ofs->ofs == NULL)) { + PyErr_SetString(PyExc_ReferenceError, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD + "GPU offscreen was freed, no further access is valid" +#else + "GPU offscreen: internal error" +#endif + ); return -1; } return 0; @@ -77,11 +87,130 @@ static int pygpu_offscreen_valid_check(BPyGPUOffScreen *pygpu_ofs) /** \} */ +/* -------------------------------------------------------------------- */ +/** \name Stack (Context Manager) + * + * Safer alternative to ensure balanced push/pop calls. + * + * \{ */ + +typedef struct { + PyObject_HEAD /* required python macro */ + BPyGPUOffScreen *py_offs; + int level; + bool is_explicitly_bound; /* Bound by "bind" method. 
*/ +} OffScreenStackContext; + +static void pygpu_offscreen_stack_context__tp_dealloc(OffScreenStackContext *self) +{ + Py_DECREF(self->py_offs); + PyObject_DEL(self); +} + +static PyObject *pygpu_offscreen_stack_context_enter(OffScreenStackContext *self) +{ + BPY_GPU_OFFSCREEN_CHECK_OBJ(self->py_offs); + + if (!self->is_explicitly_bound) { + if (self->level != -1) { + PyErr_SetString(PyExc_RuntimeError, "Already in use"); + return NULL; + } + + GPU_offscreen_bind(self->py_offs->ofs, true); + self->level = GPU_framebuffer_stack_level_get(); + } + + Py_RETURN_NONE; +} + +static PyObject *pygpu_offscreen_stack_context_exit(OffScreenStackContext *self, + PyObject *UNUSED(args)) +{ + BPY_GPU_OFFSCREEN_CHECK_OBJ(self->py_offs); + + if (self->level == -1) { + PyErr_SetString(PyExc_RuntimeError, "Not yet in use\n"); + return NULL; + } + + const int level = GPU_framebuffer_stack_level_get(); + if (level != self->level) { + PyErr_Format( + PyExc_RuntimeError, "Level of bind mismatch, expected %d, got %d\n", self->level, level); + } + + GPU_offscreen_unbind(self->py_offs->ofs, true); + Py_RETURN_NONE; +} + +static PyMethodDef pygpu_offscreen_stack_context__tp_methods[] = { + {"__enter__", (PyCFunction)pygpu_offscreen_stack_context_enter, METH_NOARGS}, + {"__exit__", (PyCFunction)pygpu_offscreen_stack_context_exit, METH_VARARGS}, + {NULL}, +}; + +static PyTypeObject PyGPUOffscreenStackContext_Type = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBufferStackContext", + .tp_basicsize = sizeof(OffScreenStackContext), + .tp_dealloc = (destructor)pygpu_offscreen_stack_context__tp_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_methods = pygpu_offscreen_stack_context__tp_methods, +}; + +PyDoc_STRVAR(pygpu_offscreen_bind_doc, + ".. function:: bind()\n" + "\n" + " Context manager to ensure balanced bind calls, even in the case of an error.\n"); +static PyObject *pygpu_offscreen_bind(BPyGPUOffScreen *self) +{ + OffScreenStackContext *ret = PyObject_New(OffScreenStackContext, + &PyGPUOffscreenStackContext_Type); + ret->py_offs = self; + ret->level = -1; + ret->is_explicitly_bound = false; + Py_INCREF(self); + + pygpu_offscreen_stack_context_enter(ret); + ret->is_explicitly_bound = true; + + return (PyObject *)ret; +} + +PyDoc_STRVAR(pygpu_offscreen_unbind_doc, + ".. 
method:: unbind(restore=True)\n" + "\n" + " Unbind the offscreen object.\n" + "\n" + " :arg restore: Restore the OpenGL state, can only be used when the state has been " + "saved before.\n" + " :type restore: `bool`\n"); +static PyObject *pygpu_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds) +{ + bool restore = true; + + BPY_GPU_OFFSCREEN_CHECK_OBJ(self); + + static const char *_keywords[] = {"restore", NULL}; + static _PyArg_Parser _parser = {"|O&:unbind", _keywords, 0}; + if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &restore)) { + return NULL; + } + + GPU_offscreen_unbind(self->ofs, restore); + GPU_apply_state(); + Py_RETURN_NONE; +} + +/** \} */ + /* -------------------------------------------------------------------- */ /** \name GPUOffscreen Type * \{ */ -static PyObject *pygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds) +static PyObject *pygpu_offscreen__tp_new(PyTypeObject *UNUSED(self), + PyObject *args, + PyObject *kwds) { BPYGPU_IS_INIT_OR_ERROR_OBJ; @@ -90,7 +219,7 @@ static PyObject *pygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args, char err_out[256]; static const char *_keywords[] = {"width", "height", NULL}; - static _PyArg_Parser _parser = {"ii|i:GPUOffScreen.__new__", _keywords, 0}; + static _PyArg_Parser _parser = {"ii:GPUOffScreen.__new__", _keywords, 0}; if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &width, &height)) { return NULL; } @@ -99,7 +228,7 @@ static PyObject *pygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args, ofs = GPU_offscreen_create(width, height, true, false, err_out); } else { - strncpy(err_out, "No active GPU context found", 256); + STRNCPY(err_out, "No active GPU context found"); } if (ofs == NULL) { @@ -135,61 +264,6 @@ static PyObject *pygpu_offscreen_color_texture_get(BPyGPUOffScreen *self, void * return PyLong_FromLong(GPU_texture_opengl_bindcode(texture)); } -PyDoc_STRVAR( - pygpu_offscreen_bind_doc, - ".. method:: bind(save=True)\n" - "\n" - " Bind the offscreen object.\n" - " To make sure that the offscreen gets unbind whether an exception occurs or not,\n" - " pack it into a `with` statement.\n" - "\n" - " :arg save: Save the current OpenGL state, so that it can be restored when unbinding.\n" - " :type save: `bool`\n"); -static PyObject *pygpu_offscreen_bind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds) -{ - BPY_GPU_OFFSCREEN_CHECK_OBJ(self); - bool save = true; - - static const char *_keywords[] = {"save", NULL}; - static _PyArg_Parser _parser = {"|O&:bind", _keywords, 0}; - if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &save)) { - return NULL; - } - - GPU_offscreen_bind(self->ofs, save); - GPU_apply_state(); - - self->is_saved = save; - Py_INCREF(self); - - return (PyObject *)self; -} - -PyDoc_STRVAR(pygpu_offscreen_unbind_doc, - ".. 
method:: unbind(restore=True)\n" - "\n" - " Unbind the offscreen object.\n" - "\n" - " :arg restore: Restore the OpenGL state, can only be used when the state has been " - "saved before.\n" - " :type restore: `bool`\n"); -static PyObject *pygpu_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds) -{ - bool restore = true; - - BPY_GPU_OFFSCREEN_CHECK_OBJ(self); - - static const char *_keywords[] = {"restore", NULL}; - static _PyArg_Parser _parser = {"|O&:unbind", _keywords, 0}; - if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &restore)) { - return NULL; - } - - GPU_offscreen_unbind(self->ofs, restore); - GPU_apply_state(); - Py_RETURN_NONE; -} - PyDoc_STRVAR( pygpu_offscreen_draw_view3d_doc, ".. method:: draw_view3d(scene, view_layer, view3d, region, view_matrix, projection_matrix)\n" @@ -210,8 +284,8 @@ PyDoc_STRVAR( " :type projection_matrix: :class:`mathutils.Matrix`\n"); static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds) { - MatrixObject *pygpu_mat_view, *pygpu_mat_projection; - PyObject *pygpu_scene, *pygpu_view_layer, *pygpu_region, *pygpu_view3d; + MatrixObject *py_mat_view, *py_mat_projection; + PyObject *py_scene, *py_view_layer, *py_region, *py_view3d; struct Depsgraph *depsgraph; struct Scene *scene; @@ -228,18 +302,18 @@ static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *ar if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, - &pygpu_scene, - &pygpu_view_layer, - &pygpu_view3d, - &pygpu_region, + &py_scene, + &py_view_layer, + &py_view3d, + &py_region, Matrix_Parse4x4, - &pygpu_mat_view, + &py_mat_view, Matrix_Parse4x4, - &pygpu_mat_projection) || - (!(scene = PyC_RNA_AsPointer(pygpu_scene, "Scene")) || - !(view_layer = PyC_RNA_AsPointer(pygpu_view_layer, "ViewLayer")) || - !(v3d = PyC_RNA_AsPointer(pygpu_view3d, "SpaceView3D")) || - !(region = PyC_RNA_AsPointer(pygpu_region, "Region")))) { + &py_mat_projection) || + (!(scene = PyC_RNA_AsPointer(py_scene, "Scene")) || + !(view_layer = PyC_RNA_AsPointer(py_view_layer, "ViewLayer")) || + !(v3d = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) || + !(region = PyC_RNA_AsPointer(py_region, "Region")))) { return NULL; } @@ -262,8 +336,8 @@ static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *ar region, GPU_offscreen_width(self->ofs), GPU_offscreen_height(self->ofs), - (float(*)[4])pygpu_mat_view->matrix, - (float(*)[4])pygpu_mat_projection->matrix, + (float(*)[4])py_mat_view->matrix, + (float(*)[4])py_mat_projection->matrix, true, true, "", @@ -281,6 +355,7 @@ static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *ar Py_RETURN_NONE; } +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD PyDoc_STRVAR(pygpu_offscreen_free_doc, ".. 
method:: free()\n" "\n" @@ -294,17 +369,7 @@ static PyObject *pygpu_offscreen_free(BPyGPUOffScreen *self) self->ofs = NULL; Py_RETURN_NONE; } - -static PyObject *pygpu_offscreen_bind_context_enter(BPyGPUOffScreen *UNUSED(self)) -{ - Py_RETURN_NONE; -} - -static PyObject *pygpu_offscreen_bind_context_exit(BPyGPUOffScreen *self, PyObject *UNUSED(args)) -{ - GPU_offscreen_unbind(self->ofs, self->is_saved); - Py_RETURN_NONE; -} +#endif static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self) { @@ -314,7 +379,7 @@ static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self) Py_TYPE(self)->tp_free((PyObject *)self); } -static PyGetSetDef pygpu_offscreen_getseters[] = { +static PyGetSetDef pygpu_offscreen__tp_getseters[] = { {"color_texture", (getter)pygpu_offscreen_color_texture_get, (setter)NULL, @@ -325,11 +390,8 @@ static PyGetSetDef pygpu_offscreen_getseters[] = { {NULL, NULL, NULL, NULL, NULL} /* Sentinel */ }; -static struct PyMethodDef pygpu_offscreen_methods[] = { - {"bind", - (PyCFunction)pygpu_offscreen_bind, - METH_VARARGS | METH_KEYWORDS, - pygpu_offscreen_bind_doc}, +static struct PyMethodDef pygpu_offscreen__tp_methods[] = { + {"bind", (PyCFunction)pygpu_offscreen_bind, METH_NOARGS, pygpu_offscreen_bind_doc}, {"unbind", (PyCFunction)pygpu_offscreen_unbind, METH_VARARGS | METH_KEYWORDS, @@ -338,13 +400,13 @@ static struct PyMethodDef pygpu_offscreen_methods[] = { (PyCFunction)pygpu_offscreen_draw_view3d, METH_VARARGS | METH_KEYWORDS, pygpu_offscreen_draw_view3d_doc}, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD {"free", (PyCFunction)pygpu_offscreen_free, METH_NOARGS, pygpu_offscreen_free_doc}, - {"__enter__", (PyCFunction)pygpu_offscreen_bind_context_enter, METH_NOARGS}, - {"__exit__", (PyCFunction)pygpu_offscreen_bind_context_exit, METH_VARARGS}, +#endif {NULL, NULL, 0, NULL}, }; -PyDoc_STRVAR(pygpu_offscreen_doc, +PyDoc_STRVAR(pygpu_offscreen__tp_doc, ".. 
class:: GPUOffScreen(width, height)\n" "\n" " This object gives access to off screen buffers.\n" @@ -358,10 +420,10 @@ PyTypeObject BPyGPUOffScreen_Type = { .tp_basicsize = sizeof(BPyGPUOffScreen), .tp_dealloc = (destructor)BPyGPUOffScreen__tp_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_doc = pygpu_offscreen_doc, - .tp_methods = pygpu_offscreen_methods, - .tp_getset = pygpu_offscreen_getseters, - .tp_new = pygpu_offscreen_new, + .tp_doc = pygpu_offscreen__tp_doc, + .tp_methods = pygpu_offscreen__tp_methods, + .tp_getset = pygpu_offscreen__tp_getseters, + .tp_new = pygpu_offscreen__tp_new, }; /** \} */ diff --git a/source/blender/python/gpu/gpu_py_offscreen.h b/source/blender/python/gpu/gpu_py_offscreen.h index efe5b57b22e..f551730cf54 100644 --- a/source/blender/python/gpu/gpu_py_offscreen.h +++ b/source/blender/python/gpu/gpu_py_offscreen.h @@ -28,7 +28,6 @@ extern PyTypeObject BPyGPUOffScreen_Type; typedef struct BPyGPUOffScreen { PyObject_HEAD struct GPUOffScreen *ofs; - bool is_saved; } BPyGPUOffScreen; PyObject *BPyGPUOffScreen_CreatePyObject(struct GPUOffScreen *ofs) ATTR_NONNULL(1); diff --git a/source/blender/python/gpu/gpu_py_shader.c b/source/blender/python/gpu/gpu_py_shader.c index ddc3ccd9827..538bed7df6d 100644 --- a/source/blender/python/gpu/gpu_py_shader.c +++ b/source/blender/python/gpu/gpu_py_shader.c @@ -26,12 +26,16 @@ #include "BLI_utildefines.h" #include "GPU_shader.h" +#include "GPU_texture.h" +#include "GPU_uniform_buffer.h" #include "../generic/py_capi_utils.h" #include "../generic/python_utildefines.h" #include "../mathutils/mathutils.h" #include "gpu_py_api.h" +#include "gpu_py_texture.h" +#include "gpu_py_uniformbuffer.h" #include "gpu_py_vertex_format.h" #include "gpu_py_shader.h" /* own include */ @@ -464,6 +468,64 @@ static PyObject *pygpu_shader_uniform_int(BPyGPUShader *self, PyObject *args) Py_RETURN_NONE; } +PyDoc_STRVAR(pygpu_shader_uniform_sampler_doc, + ".. method:: uniform_sampler(name, texture)\n" + "\n" + " Specify the value of a texture uniform variable for the current GPUShader.\n" + "\n" + " :param name: name of the uniform variable whose texture is to be specified.\n" + " :type name: str\n" + " :param texture: Texture to attach.\n" + " :type texture: :class:`gpu.types.GPUTexture`\n"); +static PyObject *pygpu_shader_uniform_sampler(BPyGPUShader *self, PyObject *args) +{ + const char *name; + BPyGPUTexture *py_texture; + if (!PyArg_ParseTuple( + args, "sO!:GPUShader.uniform_sampler", &name, &BPyGPUTexture_Type, &py_texture)) { + return NULL; + } + + int slot = GPU_shader_get_texture_binding(self->shader, name); + GPU_texture_bind(py_texture->tex, slot); + GPU_shader_uniform_1i(self->shader, name, slot); + + Py_RETURN_NONE; +} + +PyDoc_STRVAR( + pygpu_shader_uniform_buffer_doc, + ".. 
method:: uniform_buffer(name, ubo)\n"
+ "\n"
+ " Specify the value of a uniform buffer object variable for the current GPUShader.\n"
+ "\n"
+ " :param name: name of the uniform variable whose UBO is to be specified.\n"
+ " :type name: str\n"
+ " :param ubo: Uniform Buffer to attach.\n"
+ " :type ubo: :class:`gpu.types.GPUUniformBuf`\n");
+static PyObject *pygpu_shader_uniform_buffer(BPyGPUShader *self, PyObject *args)
+{
+ const char *name;
+ BPyGPUUniformBuf *py_ubo;
+ if (!PyArg_ParseTuple(
+ args, "sO!:GPUShader.uniform_buffer", &name, &BPyGPUUniformBuf_Type, &py_ubo)) {
+ return NULL;
+ }
+
+ int slot = GPU_shader_get_uniform_block(self->shader, name);
+ if (slot == -1) {
+ PyErr_SetString(
+ PyExc_BufferError,
+ "GPUShader.uniform_buffer: uniform block not found, make sure the name is correct");
+ return NULL;
+ }
+
+ GPU_uniformbuf_bind(py_ubo->ubo, slot);
+ GPU_shader_uniform_1i(self->shader, name, slot);
+
+ Py_RETURN_NONE;
+}
+
 PyDoc_STRVAR(
 pygpu_shader_attr_from_name_doc,
 ".. method:: attr_from_name(name)\n"
@@ -535,6 +597,14 @@ static struct PyMethodDef pygpu_shader__tp_methods[] = {
 (PyCFunction)pygpu_shader_uniform_int,
 METH_VARARGS,
 pygpu_shader_uniform_int_doc},
+ {"uniform_sampler",
+ (PyCFunction)pygpu_shader_uniform_sampler,
+ METH_VARARGS,
+ pygpu_shader_uniform_sampler_doc},
+ {"uniform_buffer",
+ (PyCFunction)pygpu_shader_uniform_buffer,
+ METH_VARARGS,
+ pygpu_shader_uniform_buffer_doc},
 {"attr_from_name",
 (PyCFunction)pygpu_shader_attr_from_name,
 METH_O,
diff --git a/source/blender/python/gpu/gpu_py_state.c b/source/blender/python/gpu/gpu_py_state.c
new file mode 100644
index 00000000000..d1b3a01e2e4
--- /dev/null
+++ b/source/blender/python/gpu/gpu_py_state.c
@@ -0,0 +1,423 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bpygpu
+ *
+ * This file defines the gpu.state API.
+ *
+ * - Use ``bpygpu_`` for local API.
+ * - Use ``BPyGPU`` for public API.
+ */
+
+#include <Python.h>
+
+#include "GPU_state.h"
+
+#include "../generic/py_capi_utils.h"
+#include "../generic/python_utildefines.h"
+
+#include "gpu_py_state.h" /* own include */
+
+/* -------------------------------------------------------------------- */
+/** \name Helper Functions
+ * \{ */
+
+static const struct PyC_StringEnumItems pygpu_state_blend_items[] = {
+ {GPU_BLEND_NONE, "NONE"},
+ {GPU_BLEND_ALPHA, "ALPHA"},
+ {GPU_BLEND_ALPHA_PREMULT, "ALPHA_PREMULT"},
+ {GPU_BLEND_ADDITIVE, "ADDITIVE"},
+ {GPU_BLEND_ADDITIVE_PREMULT, "ADDITIVE_PREMULT"},
+ {GPU_BLEND_MULTIPLY, "MULTIPLY"},
+ {GPU_BLEND_SUBTRACT, "SUBTRACT"},
+ {GPU_BLEND_INVERT, "INVERT"},
+ /**
+ * These are quite special cases used inside the draw manager.
+ * {GPU_BLEND_OIT, "OIT"}, + * {GPU_BLEND_BACKGROUND, "BACKGROUND"}, + * {GPU_BLEND_CUSTOM, "CUSTOM"}, + */ + {0, NULL}, +}; + +static const struct PyC_StringEnumItems pygpu_state_depthtest_items[] = { + {GPU_DEPTH_NONE, "NONE"}, + {GPU_DEPTH_ALWAYS, "ALWAYS"}, + {GPU_DEPTH_LESS, "LESS"}, + {GPU_DEPTH_LESS_EQUAL, "LESS_EQUAL"}, + {GPU_DEPTH_EQUAL, "EQUAL"}, + {GPU_DEPTH_GREATER, "GREATER"}, + {GPU_DEPTH_GREATER_EQUAL, "GREATER_EQUAL"}, + {0, NULL}, +}; + +static const struct PyC_StringEnumItems pygpu_state_faceculling_items[] = { + {GPU_CULL_NONE, "NONE"}, + {GPU_CULL_FRONT, "FRONT"}, + {GPU_CULL_BACK, "BACK"}, + {0, NULL}, +}; + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Manage Stack + * \{ */ + +PyDoc_STRVAR(pygpu_state_blend_set_doc, + ".. function:: blend_set(mode)\n" + "\n" + " Defines the fixed pipeline blending equation.\n" + "\n" + " :param mode: One of these modes: {\n" + " `NONE`,\n" + " `ALPHA`,\n" + " `ALPHA_PREMULT`,\n" + " `ADDITIVE`,\n" + " `ADDITIVE_PREMULT`,\n" + " `MULTIPLY`,\n" + " `SUBTRACT`,\n" + " `INVERT`,\n" + //" `OIT`,\n" + //" `BACKGROUND`,\n" + //" `CUSTOM`,\n" + " :type mode: `str`\n"); +static PyObject *pygpu_state_blend_set(PyObject *UNUSED(self), PyObject *value) +{ + struct PyC_StringEnum pygpu_blend = {pygpu_state_blend_items}; + if (!PyC_ParseStringEnum(value, &pygpu_blend)) { + return NULL; + } + GPU_blend(pygpu_blend.value_found); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(pygpu_state_blend_get_doc, + ".. function:: blend_get()\n" + "\n" + " Current blending equation.\n" + "\n"); +static PyObject *pygpu_state_blend_get(PyObject *UNUSED(self)) +{ + eGPUBlend blend = GPU_blend_get(); + return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_state_blend_items, blend)); +} + +PyDoc_STRVAR(pygpu_state_depth_test_set_doc, + ".. function:: depth_test_set(mode)\n" + "\n" + " Defines the depth_test equation.\n" + "\n" + " :param mode: One of these modes: {\n" + " `NONE`,\n" + " `ALWAYS`,\n" + " `LESS`,\n" + " `LESS_EQUAL`,\n" + " `EQUAL`,\n" + " `GREATER`,\n" + " `GREATER_EQUAL`,\n" + " :type mode: `str`\n"); +static PyObject *pygpu_state_depth_test_set(PyObject *UNUSED(self), PyObject *value) +{ + struct PyC_StringEnum pygpu_depth_test = {pygpu_state_depthtest_items}; + if (!PyC_ParseStringEnum(value, &pygpu_depth_test)) { + return NULL; + } + GPU_depth_test(pygpu_depth_test.value_found); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(pygpu_state_depth_test_get_doc, + ".. function:: blend_depth_test_get()\n" + "\n" + " Current depth_test equation.\n" + "\n"); +static PyObject *pygpu_state_depth_test_get(PyObject *UNUSED(self)) +{ + eGPUDepthTest test = GPU_depth_test_get(); + return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_state_depthtest_items, test)); +} + +PyDoc_STRVAR(pygpu_state_depth_mask_set_doc, + ".. function:: depth_mask_set(value)\n" + "\n" + " Write to depth component.\n" + "\n" + " :param value: True for writing to the depth component.\n" + " :type near: `bool`\n"); +static PyObject *pygpu_state_depth_mask_set(PyObject *UNUSED(self), PyObject *value) +{ + bool write_to_depth; + if (!PyC_ParseBool(value, &write_to_depth)) { + return NULL; + } + GPU_depth_mask(write_to_depth); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(pygpu_state_depth_mask_get_doc, + ".. 
function:: depth_mask_get()\n"
+ "\n"
+ " Writing status in the depth component.\n");
+static PyObject *pygpu_state_depth_mask_get(PyObject *UNUSED(self))
+{
+ return PyBool_FromLong(GPU_depth_mask_get());
+}
+
+PyDoc_STRVAR(pygpu_state_viewport_set_doc,
+ ".. function:: viewport_set(x, y, xsize, ysize)\n"
+ "\n"
+ " Specifies the viewport of the active framebuffer.\n"
+ " Note: The viewport state is not saved upon framebuffer rebind.\n"
+ "\n"
+ " :param x, y: lower left corner of the viewport rectangle, in pixels.\n"
+ " :param xsize, ysize: width and height of the viewport.\n"
+ " :type x, y, xsize, ysize: `int`\n");
+static PyObject *pygpu_state_viewport_set(PyObject *UNUSED(self), PyObject *args)
+{
+ int x, y, xsize, ysize;
+ if (!PyArg_ParseTuple(args, "iiii:viewport_set", &x, &y, &xsize, &ysize)) {
+ return NULL;
+ }
+
+ GPU_viewport(x, y, xsize, ysize);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(pygpu_state_viewport_get_doc,
+ ".. function:: viewport_get()\n"
+ "\n"
+ " Viewport of the active framebuffer.\n");
+static PyObject *pygpu_state_viewport_get(PyObject *UNUSED(self), PyObject *UNUSED(args))
+{
+ int viewport[4];
+ GPU_viewport_size_get_i(viewport);
+
+ PyObject *ret = PyTuple_New(4);
+ PyTuple_SET_ITEMS(ret,
+ PyLong_FromLong(viewport[0]),
+ PyLong_FromLong(viewport[1]),
+ PyLong_FromLong(viewport[2]),
+ PyLong_FromLong(viewport[3]));
+ return ret;
+}
+
+PyDoc_STRVAR(pygpu_state_line_width_set_doc,
+ ".. function:: line_width_set(width)\n"
+ "\n"
+ " Specify the width of rasterized lines.\n"
+ "\n"
+ " :param width: New width.\n"
+ " :type width: `float`\n");
+static PyObject *pygpu_state_line_width_set(PyObject *UNUSED(self), PyObject *value)
+{
+ float width = (float)PyFloat_AsDouble(value);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
+
+ GPU_line_width(width);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(pygpu_state_line_width_get_doc,
+ ".. function:: line_width_get()\n"
+ "\n"
+ " Current width of rasterized lines.\n");
+static PyObject *pygpu_state_line_width_get(PyObject *UNUSED(self))
+{
+ float width = GPU_line_width_get();
+ return PyFloat_FromDouble((double)width);
+}
+
+PyDoc_STRVAR(pygpu_state_point_size_set_doc,
+ ".. function:: point_size_set(size)\n"
+ "\n"
+ " Specify the diameter of rasterized points.\n"
+ "\n"
+ " :param size: New diameter.\n"
+ " :type size: `float`\n");
+static PyObject *pygpu_state_point_size_set(PyObject *UNUSED(self), PyObject *value)
+{
+ float size = (float)PyFloat_AsDouble(value);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
+
+ GPU_point_size(size);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(pygpu_state_color_mask_set_doc,
+ ".. function:: color_mask_set(r, g, b, a)\n"
+ "\n"
+ " Enable or disable writing of frame buffer color components.\n"
+ "\n"
+ " :param r, g, b, a: components red, green, blue, and alpha.\n"
+ " :type r, g, b, a: `bool`\n");
+static PyObject *pygpu_state_color_mask_set(PyObject *UNUSED(self), PyObject *args)
+{
+ int r, g, b, a;
+ if (!PyArg_ParseTuple(args, "pppp:color_mask_set", &r, &g, &b, &a)) {
+ return NULL;
+ }
+
+ GPU_color_mask((bool)r, (bool)g, (bool)b, (bool)a);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(pygpu_state_face_culling_set_doc,
+ ".. function:: face_culling_set(culling)\n"
+ "\n"
+ " Specify whether none, front-facing or back-facing facets can be culled.\n"
+ "\n"
+ " :param culling: One of these modes: {\n"
+ " `NONE`,\n"
+ " `FRONT`,\n"
+ " `BACK`,\n"
+ " :type culling: `str`\n");
+static PyObject *pygpu_state_face_culling_set(PyObject *UNUSED(self), PyObject *value)
+{
+ struct PyC_StringEnum pygpu_faceculling = {pygpu_state_faceculling_items};
+ if (!PyC_ParseStringEnum(value, &pygpu_faceculling)) {
+ return NULL;
+ }
+
+ GPU_face_culling(pygpu_faceculling.value_found);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(pygpu_state_front_facing_set_doc,
+ ".. function:: front_facing_set(invert)\n"
+ "\n"
+ " Specifies the orientation of front-facing polygons.\n"
+ "\n"
+ " :param invert: True for clockwise polygons as front-facing.\n"
+ " :type invert: `bool`\n");
+static PyObject *pygpu_state_front_facing_set(PyObject *UNUSED(self), PyObject *value)
+{
+ bool invert;
+ if (!PyC_ParseBool(value, &invert)) {
+ return NULL;
+ }
+
+ GPU_front_facing(invert);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(pygpu_state_program_point_size_set_doc,
+ ".. function:: program_point_size_set(enable)\n"
+ "\n"
+ " If enabled, the derived point size is taken from the (potentially clipped) "
+ "shader builtin gl_PointSize.\n"
+ "\n"
+ " :param enable: True for shader builtin gl_PointSize.\n"
+ " :type enable: `bool`\n");
+static PyObject *pygpu_state_program_point_size_set(PyObject *UNUSED(self), PyObject *value)
+{
+ bool enable;
+ if (!PyC_ParseBool(value, &enable)) {
+ return NULL;
+ }
+
+ GPU_program_point_size(enable);
+ Py_RETURN_NONE;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Module
+ * \{ */
+
+static struct PyMethodDef pygpu_state__tp_methods[] = {
+ /* Manage Stack */
+ {"blend_set", (PyCFunction)pygpu_state_blend_set, METH_O, pygpu_state_blend_set_doc},
+ {"blend_get", (PyCFunction)pygpu_state_blend_get, METH_NOARGS, pygpu_state_blend_get_doc},
+ {"depth_test_set",
+ (PyCFunction)pygpu_state_depth_test_set,
+ METH_O,
+ pygpu_state_depth_test_set_doc},
+ {"depth_test_get",
+ (PyCFunction)pygpu_state_depth_test_get,
+ METH_NOARGS,
+ pygpu_state_depth_test_get_doc},
+ {"depth_mask_set",
+ (PyCFunction)pygpu_state_depth_mask_set,
+ METH_O,
+ pygpu_state_depth_mask_set_doc},
+ {"depth_mask_get",
+ (PyCFunction)pygpu_state_depth_mask_get,
+ METH_NOARGS,
+ pygpu_state_depth_mask_get_doc},
+ {"viewport_set",
+ (PyCFunction)pygpu_state_viewport_set,
+ METH_VARARGS,
+ pygpu_state_viewport_set_doc},
+ {"viewport_get",
+ (PyCFunction)pygpu_state_viewport_get,
+ METH_NOARGS,
+ pygpu_state_viewport_get_doc},
+ {"line_width_set",
+ (PyCFunction)pygpu_state_line_width_set,
+ METH_O,
+ pygpu_state_line_width_set_doc},
+ {"line_width_get",
+ (PyCFunction)pygpu_state_line_width_get,
+ METH_NOARGS,
+ pygpu_state_line_width_get_doc},
+ {"point_size_set",
+ (PyCFunction)pygpu_state_point_size_set,
+ METH_O,
+ pygpu_state_point_size_set_doc},
+ {"color_mask_set",
+ (PyCFunction)pygpu_state_color_mask_set,
+ METH_VARARGS,
+ pygpu_state_color_mask_set_doc},
+ {"face_culling_set",
+ (PyCFunction)pygpu_state_face_culling_set,
+ METH_O,
+ pygpu_state_face_culling_set_doc},
+ {"front_facing_set",
+ (PyCFunction)pygpu_state_front_facing_set,
+ METH_O,
+ pygpu_state_front_facing_set_doc},
+ {"program_point_size_set",
+ (PyCFunction)pygpu_state_program_point_size_set,
+ METH_O,
+ pygpu_state_program_point_size_set_doc},
+ {NULL, NULL, 0, NULL},
+};
+
+PyDoc_STRVAR(pygpu_state__tp_doc, "This module provides access to the 
gpu state."); +static PyModuleDef pygpu_state_module_def = { + PyModuleDef_HEAD_INIT, + .m_name = "gpu.state", + .m_doc = pygpu_state__tp_doc, + .m_methods = pygpu_state__tp_methods, +}; + +PyObject *bpygpu_state_init(void) +{ + PyObject *submodule; + + submodule = PyModule_Create(&pygpu_state_module_def); + + return submodule; +} + +/** \} */ diff --git a/source/blender/python/gpu/gpu_py_state.h b/source/blender/python/gpu/gpu_py_state.h new file mode 100644 index 00000000000..415c5ede822 --- /dev/null +++ b/source/blender/python/gpu/gpu_py_state.h @@ -0,0 +1,23 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + */ + +#pragma once + +PyObject *bpygpu_state_init(void); diff --git a/source/blender/python/gpu/gpu_py_texture.c b/source/blender/python/gpu/gpu_py_texture.c new file mode 100644 index 00000000000..97dc99f5d58 --- /dev/null +++ b/source/blender/python/gpu/gpu_py_texture.c @@ -0,0 +1,559 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + * + * This file defines the texture functionalities of the 'gpu' module + * + * - Use ``bpygpu_`` for local API. + * - Use ``BPyGPU`` for public API. 
+ */ + +#include + +#include "BLI_string.h" + +#include "GPU_context.h" +#include "GPU_texture.h" + +#include "../generic/py_capi_utils.h" + +#include "gpu_py.h" +#include "gpu_py_api.h" +#include "gpu_py_buffer.h" + +#include "gpu_py_texture.h" /* own include */ + +/* -------------------------------------------------------------------- */ +/** \name GPUTexture Common Utilities + * \{ */ + +static const struct PyC_StringEnumItems pygpu_textureformat_items[] = { + {GPU_RGBA8UI, "RGBA8UI"}, + {GPU_RGBA8I, "RGBA8I"}, + {GPU_RGBA8, "RGBA8"}, + {GPU_RGBA32UI, "RGBA32UI"}, + {GPU_RGBA32I, "RGBA32I"}, + {GPU_RGBA32F, "RGBA32F"}, + {GPU_RGBA16UI, "RGBA16UI"}, + {GPU_RGBA16I, "RGBA16I"}, + {GPU_RGBA16F, "RGBA16F"}, + {GPU_RGBA16, "RGBA16"}, + {GPU_RG8UI, "RG8UI"}, + {GPU_RG8I, "RG8I"}, + {GPU_RG8, "RG8"}, + {GPU_RG32UI, "RG32UI"}, + {GPU_RG32I, "RG32I"}, + {GPU_RG32F, "RG32F"}, + {GPU_RG16UI, "RG16UI"}, + {GPU_RG16I, "RG16I"}, + {GPU_RG16F, "RG16F"}, + {GPU_RG16, "RG16"}, + {GPU_R8UI, "R8UI"}, + {GPU_R8I, "R8I"}, + {GPU_R8, "R8"}, + {GPU_R32UI, "R32UI"}, + {GPU_R32I, "R32I"}, + {GPU_R32F, "R32F"}, + {GPU_R16UI, "R16UI"}, + {GPU_R16I, "R16I"}, + {GPU_R16F, "R16F"}, + {GPU_R16, "R16"}, + {GPU_R11F_G11F_B10F, "R11F_G11F_B10F"}, + {GPU_DEPTH32F_STENCIL8, "DEPTH32F_STENCIL8"}, + {GPU_DEPTH24_STENCIL8, "DEPTH24_STENCIL8"}, + {GPU_SRGB8_A8, "SRGB8_A8"}, + {GPU_RGB16F, "RGB16F"}, + {GPU_SRGB8_A8_DXT1, "SRGB8_A8_DXT1"}, + {GPU_SRGB8_A8_DXT3, "SRGB8_A8_DXT3"}, + {GPU_SRGB8_A8_DXT5, "SRGB8_A8_DXT5"}, + {GPU_RGBA8_DXT1, "RGBA8_DXT1"}, + {GPU_RGBA8_DXT3, "RGBA8_DXT3"}, + {GPU_RGBA8_DXT5, "RGBA8_DXT5"}, + {GPU_DEPTH_COMPONENT32F, "DEPTH_COMPONENT32F"}, + {GPU_DEPTH_COMPONENT24, "DEPTH_COMPONENT24"}, + {GPU_DEPTH_COMPONENT16, "DEPTH_COMPONENT16"}, + {0, NULL}, +}; + +static int pygpu_texture_valid_check(BPyGPUTexture *bpygpu_tex) +{ + if (UNLIKELY(bpygpu_tex->tex == NULL)) { + PyErr_SetString(PyExc_ReferenceError, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD + "GPU texture was freed, no further access is valid" +#else + "GPU texture: internal error" +#endif + ); + + return -1; + } + return 0; +} + +#define BPYGPU_TEXTURE_CHECK_OBJ(bpygpu) \ + { \ + if (UNLIKELY(pygpu_texture_valid_check(bpygpu) == -1)) { \ + return NULL; \ + } \ + } \ + ((void)0) + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name GPUTexture Type + * \{ */ + +static PyObject *pygpu_texture__tp_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds) +{ + BPYGPU_IS_INIT_OR_ERROR_OBJ; + + PyObject *py_size; + int size[3] = {1, 1, 1}; + int layers = 0; + int is_cubemap = false; + struct PyC_StringEnum pygpu_textureformat = {pygpu_textureformat_items, GPU_RGBA8}; + BPyGPUBuffer *pybuffer_obj = NULL; + char err_out[256] = "unknown error. 
See console"; + + static const char *_keywords[] = {"size", "layers", "is_cubemap", "format", "data", NULL}; + static _PyArg_Parser _parser = {"O|$ipO&O!:GPUTexture.__new__", _keywords, 0}; + if (!_PyArg_ParseTupleAndKeywordsFast(args, + kwds, + &_parser, + &py_size, + &layers, + &is_cubemap, + PyC_ParseStringEnum, + &pygpu_textureformat, + &BPyGPU_BufferType, + &pybuffer_obj)) { + return NULL; + } + + int len = 1; + if (PySequence_Check(py_size)) { + len = PySequence_Size(py_size); + if (PyC_AsArray(size, py_size, len, &PyLong_Type, false, "GPUTexture.__new__") == -1) { + return NULL; + } + } + else if (PyLong_Check(py_size)) { + size[0] = PyLong_AsLong(py_size); + } + else { + PyErr_SetString(PyExc_ValueError, "GPUTexture.__new__: Expected an int or tuple as first arg"); + return NULL; + } + + void *data = NULL; + if (pybuffer_obj) { + if (pybuffer_obj->format != GPU_DATA_FLOAT) { + PyErr_SetString(PyExc_ValueError, + "GPUTexture.__new__: Only Buffer of format `FLOAT` is currently supported"); + return NULL; + } + + int component_len = GPU_texture_component_len(pygpu_textureformat.value_found); + int component_size_expected = sizeof(float); + size_t data_space_expected = (size_t)size[0] * size[1] * size[2] * max_ii(1, layers) * + component_len * component_size_expected; + if (is_cubemap) { + data_space_expected *= 6 * size[0]; + } + + if (bpygpu_Buffer_size(pybuffer_obj) < data_space_expected) { + PyErr_SetString(PyExc_ValueError, "GPUTexture.__new__: Buffer size smaller than requested"); + return NULL; + } + data = pybuffer_obj->buf.as_void; + } + + GPUTexture *tex = NULL; + if (is_cubemap && len != 1) { + STRNCPY(err_out, + "In cubemaps the same dimension represents height, width and depth. No tuple needed"); + } + else if (size[0] < 1 || size[1] < 1 || size[2] < 1) { + STRNCPY(err_out, "Values less than 1 are not allowed in dimensions"); + } + else if (layers && len == 3) { + STRNCPY(err_out, "3D textures have no layers"); + } + else if (!GPU_context_active_get()) { + STRNCPY(err_out, "No active GPU context found"); + } + else { + const char *name = "python_texture"; + if (is_cubemap) { + if (layers) { + tex = GPU_texture_create_cube_array( + name, size[0], layers, 1, pygpu_textureformat.value_found, data); + } + else { + tex = GPU_texture_create_cube(name, size[0], 1, pygpu_textureformat.value_found, data); + } + } + else if (layers) { + if (len == 2) { + tex = GPU_texture_create_2d_array( + name, size[0], size[1], layers, 1, pygpu_textureformat.value_found, data); + } + else { + tex = GPU_texture_create_1d_array( + name, size[0], layers, 1, pygpu_textureformat.value_found, data); + } + } + else if (len == 3) { + tex = GPU_texture_create_3d(name, + size[0], + size[1], + size[2], + 1, + pygpu_textureformat.value_found, + GPU_DATA_FLOAT, + NULL); + } + else if (len == 2) { + tex = GPU_texture_create_2d( + name, size[0], size[1], 1, pygpu_textureformat.value_found, data); + } + else { + tex = GPU_texture_create_1d(name, size[0], 1, pygpu_textureformat.value_found, data); + } + } + + if (tex == NULL) { + PyErr_Format(PyExc_RuntimeError, "gpu.texture.new(...) 
failed with '%s'", err_out); + return NULL; + } + + return BPyGPUTexture_CreatePyObject(tex); +} + +PyDoc_STRVAR(pygpu_texture_width_doc, "Width of the texture.\n\n:type: `int`"); +static PyObject *pygpu_texture_width_get(BPyGPUTexture *self, void *UNUSED(type)) +{ + BPYGPU_TEXTURE_CHECK_OBJ(self); + return PyLong_FromLong(GPU_texture_width(self->tex)); +} + +PyDoc_STRVAR(pygpu_texture_height_doc, "Height of the texture.\n\n:type: `int`"); +static PyObject *pygpu_texture_height_get(BPyGPUTexture *self, void *UNUSED(type)) +{ + BPYGPU_TEXTURE_CHECK_OBJ(self); + return PyLong_FromLong(GPU_texture_height(self->tex)); +} + +PyDoc_STRVAR(pygpu_texture_format_doc, "Format of the texture.\n\n:type: `str`"); +static PyObject *pygpu_texture_format_get(BPyGPUTexture *self, void *UNUSED(type)) +{ + BPYGPU_TEXTURE_CHECK_OBJ(self); + eGPUTextureFormat format = GPU_texture_format(self->tex); + return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_textureformat_items, format)); +} + +PyDoc_STRVAR(pygpu_texture_clear_doc, + ".. method:: clear(format='FLOAT', value=(0.0, 0.0, 0.0, 1.0))\n" + "\n" + " Fill texture with specific value.\n" + "\n" + " :param format: One of these primitive types: {\n" + " `FLOAT`,\n" + " `INT`,\n" + " `UINT`,\n" + " `UBYTE`,\n" + " `UINT_24_8`,\n" + " `10_11_11_REV`,\n" + " :type type: `str`\n" + " :arg value: sequence each representing the value to fill.\n" + " :type value: sequence of 1, 2, 3 or 4 values\n"); +static PyObject *pygpu_texture_clear(BPyGPUTexture *self, PyObject *args, PyObject *kwds) +{ + BPYGPU_TEXTURE_CHECK_OBJ(self); + struct PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items}; + union { + int i[4]; + float f[4]; + char c[4]; + } values; + + PyObject *py_values; + + static const char *_keywords[] = {"format", "value", NULL}; + static _PyArg_Parser _parser = {"$O&O:clear", _keywords, 0}; + if (!_PyArg_ParseTupleAndKeywordsFast( + args, kwds, &_parser, PyC_ParseStringEnum, &pygpu_dataformat, &py_values)) { + return NULL; + } + + int shape = PySequence_Size(py_values); + if (shape == -1) { + return NULL; + } + + if (shape > 4) { + PyErr_SetString(PyExc_AttributeError, "too many dimensions, max is 4"); + return NULL; + } + + if (shape != 1 && + ELEM(pygpu_dataformat.value_found, GPU_DATA_UNSIGNED_INT_24_8, GPU_DATA_10_11_11_REV)) { + PyErr_SetString(PyExc_AttributeError, + "`UINT_24_8` and `10_11_11_REV` only support single values"); + return NULL; + } + + memset(&values, 0, sizeof(values)); + if (PyC_AsArray(&values, + py_values, + shape, + pygpu_dataformat.value_found == GPU_DATA_FLOAT ? &PyFloat_Type : &PyLong_Type, + false, + "clear") == -1) { + return NULL; + } + + if (pygpu_dataformat.value_found == GPU_DATA_UNSIGNED_BYTE) { + /* Convert to byte. */ + values.c[0] = values.i[0]; + values.c[1] = values.i[1]; + values.c[2] = values.i[2]; + values.c[3] = values.i[3]; + } + + GPU_texture_clear(self->tex, pygpu_dataformat.value_found, &values); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(pygpu_texture_read_doc, + ".. method:: read()\n" + "\n" + " Creates a buffer with the value of all pixels.\n" + "\n"); +static PyObject *pygpu_texture_read(BPyGPUTexture *self) +{ + BPYGPU_TEXTURE_CHECK_OBJ(self); + + /* #GPU_texture_read is restricted in combining 'data_format' with 'tex_format'. + * So choose data_format here. 
*/ + eGPUDataFormat best_data_format; + switch (GPU_texture_format(self->tex)) { + case GPU_DEPTH_COMPONENT24: + case GPU_DEPTH_COMPONENT16: + case GPU_DEPTH_COMPONENT32F: + best_data_format = GPU_DATA_FLOAT; + break; + case GPU_DEPTH24_STENCIL8: + case GPU_DEPTH32F_STENCIL8: + best_data_format = GPU_DATA_UNSIGNED_INT_24_8; + break; + case GPU_R8UI: + case GPU_R16UI: + case GPU_RG16UI: + case GPU_R32UI: + best_data_format = GPU_DATA_UNSIGNED_INT; + break; + case GPU_RG16I: + case GPU_R16I: + best_data_format = GPU_DATA_INT; + break; + case GPU_R8: + case GPU_RG8: + case GPU_RGBA8: + case GPU_RGBA8UI: + case GPU_SRGB8_A8: + best_data_format = GPU_DATA_UNSIGNED_BYTE; + break; + case GPU_R11F_G11F_B10F: + best_data_format = GPU_DATA_10_11_11_REV; + break; + default: + best_data_format = GPU_DATA_FLOAT; + break; + } + + void *buf = GPU_texture_read(self->tex, best_data_format, 0); + return (PyObject *)BPyGPU_Buffer_CreatePyObject( + best_data_format, + 2, + (Py_ssize_t[2]){GPU_texture_height(self->tex), GPU_texture_width(self->tex)}, + buf); +} + +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD +PyDoc_STRVAR(pygpu_texture_free_doc, + ".. method:: free()\n" + "\n" + " Free the texture object.\n" + " The texture object will no longer be accessible.\n"); +static PyObject *pygpu_texture_free(BPyGPUTexture *self) +{ + BPYGPU_TEXTURE_CHECK_OBJ(self); + + GPU_texture_free(self->tex); + self->tex = NULL; + Py_RETURN_NONE; +} +#endif + +static void BPyGPUTexture__tp_dealloc(BPyGPUTexture *self) +{ + if (self->tex) { + GPU_texture_free(self->tex); + } + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static PyGetSetDef pygpu_texture__tp_getseters[] = { + {"width", (getter)pygpu_texture_width_get, (setter)NULL, pygpu_texture_width_doc, NULL}, + {"height", (getter)pygpu_texture_height_get, (setter)NULL, pygpu_texture_height_doc, NULL}, + {"format", (getter)pygpu_texture_format_get, (setter)NULL, pygpu_texture_format_doc, NULL}, + {NULL, NULL, NULL, NULL, NULL} /* Sentinel */ +}; + +static struct PyMethodDef pygpu_texture__tp_methods[] = { + {"clear", + (PyCFunction)pygpu_texture_clear, + METH_VARARGS | METH_KEYWORDS, + pygpu_texture_clear_doc}, + {"read", (PyCFunction)pygpu_texture_read, METH_NOARGS, pygpu_texture_read_doc}, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD + {"free", (PyCFunction)pygpu_texture_free, METH_NOARGS, pygpu_texture_free_doc}, +#endif + {NULL, NULL, 0, NULL}, +}; + +PyDoc_STRVAR( + pygpu_texture__tp_doc, + ".. 
class:: GPUTexture(size, layers=0, is_cubemap=False, format='RGBA8', data=None)\n" + "\n" + " This object gives access to off GPU textures.\n" + "\n" + " :arg size: Dimensions of the texture 1D, 2D, 3D or cubemap.\n" + " :type size: `tuple` or `int`\n" + " :arg layers: Number of layers in texture array or number of cubemaps in cubemap array\n" + " :type layers: `int`\n" + " :arg is_cubemap: Indicates the creation of a cubemap texture.\n" + " :type is_cubemap: `int`\n" + " :arg format: One of these primitive types: {\n" + " `RGBA8UI`,\n" + " `RGBA8I`,\n" + " `RGBA8`,\n" + " `RGBA32UI`,\n" + " `RGBA32I`,\n" + " `RGBA32F`,\n" + " `RGBA16UI`,\n" + " `RGBA16I`,\n" + " `RGBA16F`,\n" + " `RGBA16`,\n" + " `RG8UI`,\n" + " `RG8I`,\n" + " `RG8`,\n" + " `RG32UI`,\n" + " `RG32I`,\n" + " `RG32F`,\n" + " `RG16UI`,\n" + " `RG16I`,\n" + " `RG16F`,\n" + " `RG16`,\n" + " `R8UI`,\n" + " `R8I`,\n" + " `R8`,\n" + " `R32UI`,\n" + " `R32I`,\n" + " `R32F`,\n" + " `R16UI`,\n" + " `R16I`,\n" + " `R16F`,\n" + " `R16`,\n" + " `R11F_G11F_B10F`,\n" + " `DEPTH32F_STENCIL8`,\n" + " `DEPTH24_STENCIL8`,\n" + " `SRGB8_A8`,\n" + " `RGB16F`,\n" + " `SRGB8_A8_DXT1`,\n" + " `SRGB8_A8_DXT3`,\n" + " `SRGB8_A8_DXT5`,\n" + " `RGBA8_DXT1`,\n" + " `RGBA8_DXT3`,\n" + " `RGBA8_DXT5`,\n" + " `DEPTH_COMPONENT32F`,\n" + " `DEPTH_COMPONENT24`,\n" + " `DEPTH_COMPONENT16`,\n" + " :type format: `str`\n" + " :arg data: Buffer object to fill the texture.\n" + " :type data: `Buffer`\n"); +PyTypeObject BPyGPUTexture_Type = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUTexture", + .tp_basicsize = sizeof(BPyGPUTexture), + .tp_dealloc = (destructor)BPyGPUTexture__tp_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_doc = pygpu_texture__tp_doc, + .tp_methods = pygpu_texture__tp_methods, + .tp_getset = pygpu_texture__tp_getseters, + .tp_new = pygpu_texture__tp_new, +}; + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Local API + * \{ */ + +int bpygpu_ParseTexture(PyObject *o, void *p) +{ + if (o == Py_None) { + *(GPUTexture **)p = NULL; + return 1; + } + + if (!BPyGPUTexture_Check(o)) { + PyErr_Format( + PyExc_ValueError, "expected a texture or None object, got %s", Py_TYPE(o)->tp_name); + return 0; + } + + if (UNLIKELY(pygpu_texture_valid_check((BPyGPUTexture *)o) == -1)) { + return 0; + } + + *(GPUTexture **)p = ((BPyGPUTexture *)o)->tex; + return 1; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Public API + * \{ */ + +PyObject *BPyGPUTexture_CreatePyObject(GPUTexture *tex) +{ + BPyGPUTexture *self; + + self = PyObject_New(BPyGPUTexture, &BPyGPUTexture_Type); + self->tex = tex; + + return (PyObject *)self; +} + +/** \} */ + +#undef BPYGPU_TEXTURE_CHECK_OBJ diff --git a/source/blender/python/gpu/gpu_py_texture.h b/source/blender/python/gpu/gpu_py_texture.h new file mode 100644 index 00000000000..be7348b2bd4 --- /dev/null +++ b/source/blender/python/gpu/gpu_py_texture.h @@ -0,0 +1,34 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + */ + +#pragma once + +#include "BLI_compiler_attrs.h" + +extern PyTypeObject BPyGPUTexture_Type; + +#define BPyGPUTexture_Check(v) (Py_TYPE(v) == &BPyGPUTexture_Type) + +typedef struct BPyGPUTexture { + PyObject_HEAD struct GPUTexture *tex; +} BPyGPUTexture; + +int bpygpu_ParseTexture(PyObject *o, void *p); +PyObject *BPyGPUTexture_CreatePyObject(struct GPUTexture *tex) ATTR_NONNULL(1); diff --git a/source/blender/python/gpu/gpu_py_types.c b/source/blender/python/gpu/gpu_py_types.c index 5829cd8c088..fdd589d788e 100644 --- a/source/blender/python/gpu/gpu_py_types.c +++ b/source/blender/python/gpu/gpu_py_types.c @@ -43,6 +43,9 @@ PyObject *bpygpu_types_init(void) submodule = PyModule_Create(&pygpu_types_module_def); + if (PyType_Ready(&BPyGPU_BufferType) < 0) { + return NULL; + } if (PyType_Ready(&BPyGPUVertFormat_Type) < 0) { return NULL; } @@ -61,13 +64,26 @@ PyObject *bpygpu_types_init(void) if (PyType_Ready(&BPyGPUShader_Type) < 0) { return NULL; } + if (PyType_Ready(&BPyGPUTexture_Type) < 0) { + return NULL; + } + if (PyType_Ready(&BPyGPUFrameBuffer_Type) < 0) { + return NULL; + } + if (PyType_Ready(&BPyGPUUniformBuf_Type) < 0) { + return NULL; + } + PyModule_AddType(submodule, &BPyGPU_BufferType); PyModule_AddType(submodule, &BPyGPUVertFormat_Type); PyModule_AddType(submodule, &BPyGPUVertBuf_Type); PyModule_AddType(submodule, &BPyGPUIndexBuf_Type); PyModule_AddType(submodule, &BPyGPUBatch_Type); PyModule_AddType(submodule, &BPyGPUOffScreen_Type); PyModule_AddType(submodule, &BPyGPUShader_Type); + PyModule_AddType(submodule, &BPyGPUTexture_Type); + PyModule_AddType(submodule, &BPyGPUFrameBuffer_Type); + PyModule_AddType(submodule, &BPyGPUUniformBuf_Type); return submodule; } diff --git a/source/blender/python/gpu/gpu_py_types.h b/source/blender/python/gpu/gpu_py_types.h index cf8d6d694e6..eb72c04d53e 100644 --- a/source/blender/python/gpu/gpu_py_types.h +++ b/source/blender/python/gpu/gpu_py_types.h @@ -20,10 +20,15 @@ #pragma once +#include "gpu_py_buffer.h" + #include "gpu_py_batch.h" #include "gpu_py_element.h" +#include "gpu_py_framebuffer.h" #include "gpu_py_offscreen.h" #include "gpu_py_shader.h" +#include "gpu_py_texture.h" +#include "gpu_py_uniformbuffer.h" #include "gpu_py_vertex_buffer.h" #include "gpu_py_vertex_format.h" diff --git a/source/blender/python/gpu/gpu_py_uniformbuffer.c b/source/blender/python/gpu/gpu_py_uniformbuffer.c new file mode 100644 index 00000000000..d1b86455918 --- /dev/null +++ b/source/blender/python/gpu/gpu_py_uniformbuffer.c @@ -0,0 +1,195 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** \file + * \ingroup bpygpu + * + * This file defines the uniform buffer functionalities of the 'gpu' module + * + * - Use ``bpygpu_`` for local API. + * - Use ``BPyGPU`` for public API. + */ + +#include + +#include "BLI_string.h" + +#include "GPU_context.h" +#include "GPU_texture.h" +#include "GPU_uniform_buffer.h" + +#include "../generic/py_capi_utils.h" + +#include "gpu_py.h" +#include "gpu_py_api.h" +#include "gpu_py_buffer.h" + +#include "gpu_py_uniformbuffer.h" /* own include */ + +/* -------------------------------------------------------------------- */ +/** \name GPUUniformBuf Common Utilities + * \{ */ + +static int pygpu_uniformbuffer_valid_check(BPyGPUUniformBuf *bpygpu_ub) +{ + if (UNLIKELY(bpygpu_ub->ubo == NULL)) { + PyErr_SetString(PyExc_ReferenceError, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD + "GPU uniform buffer was freed, no further access is valid"); +#else + + "GPU uniform buffer: internal error"); +#endif + return -1; + } + return 0; +} + +#define BPYGPU_UNIFORMBUF_CHECK_OBJ(bpygpu) \ + { \ + if (UNLIKELY(pygpu_uniformbuffer_valid_check(bpygpu) == -1)) { \ + return NULL; \ + } \ + } \ + ((void)0) + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name GPUUniformBuf Type + * \{ */ + +static PyObject *pygpu_uniformbuffer__tp_new(PyTypeObject *UNUSED(self), + PyObject *args, + PyObject *kwds) +{ + BPYGPU_IS_INIT_OR_ERROR_OBJ; + + GPUUniformBuf *ubo = NULL; + BPyGPUBuffer *pybuffer_obj; + char err_out[256] = "unknown error. See console"; + + static const char *_keywords[] = {"data", NULL}; + static _PyArg_Parser _parser = {"O!:GPUUniformBuf.__new__", _keywords, 0}; + if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &BPyGPU_BufferType, &pybuffer_obj)) { + return NULL; + } + + if (GPU_context_active_get()) { + ubo = GPU_uniformbuf_create_ex( + bpygpu_Buffer_size(pybuffer_obj), pybuffer_obj->buf.as_void, "python_uniformbuffer"); + } + else { + STRNCPY(err_out, "No active GPU context found"); + } + + if (ubo == NULL) { + PyErr_Format(PyExc_RuntimeError, "GPUUniformBuf.__new__(...) failed with '%s'", err_out); + return NULL; + } + + return BPyGPUUniformBuf_CreatePyObject(ubo); +} + +PyDoc_STRVAR(pygpu_uniformbuffer_update_doc, + ".. method::update(data)\n" + "\n" + " Update the data of the uniform buffer object.\n"); +static PyObject *pygpu_uniformbuffer_update(BPyGPUUniformBuf *self, PyObject *obj) +{ + BPYGPU_UNIFORMBUF_CHECK_OBJ(self); + + if (!BPyGPU_Buffer_Check(obj)) { + return NULL; + } + + GPU_uniformbuf_update(self->ubo, ((BPyGPUBuffer *)obj)->buf.as_void); + Py_RETURN_NONE; +} + +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD +PyDoc_STRVAR(pygpu_uniformbuffer_free_doc, + ".. 
method::free()\n" + "\n" + " Free the uniform buffer object.\n" + " The uniform buffer object will no longer be accessible.\n"); +static PyObject *pygpu_uniformbuffer_free(BPyGPUUniformBuf *self) +{ + BPYGPU_UNIFORMBUF_CHECK_OBJ(self); + + GPU_uniformbuf_free(self->ubo); + self->ubo = NULL; + Py_RETURN_NONE; +} +#endif + +static void BPyGPUUniformBuf__tp_dealloc(BPyGPUUniformBuf *self) +{ + if (self->ubo) { + GPU_uniformbuf_free(self->ubo); + } + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static PyGetSetDef pygpu_uniformbuffer__tp_getseters[] = { + {NULL, NULL, NULL, NULL, NULL} /* Sentinel */ +}; + +static struct PyMethodDef pygpu_uniformbuffer__tp_methods[] = { + {"update", (PyCFunction)pygpu_uniformbuffer_update, METH_O, pygpu_uniformbuffer_update_doc}, +#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD + {"free", (PyCFunction)pygpu_uniformbuffer_free, METH_NOARGS, pygpu_uniformbuffer_free_doc}, +#endif + {NULL, NULL, 0, NULL}, +}; + +PyDoc_STRVAR(pygpu_uniformbuffer__tp_doc, + ".. class:: GPUUniformBuf(data)\n" + "\n" + " This object gives access to off uniform buffers.\n" + "\n" + " :arg data: Buffer object.\n" + " :type data: `Buffer`\n"); +PyTypeObject BPyGPUUniformBuf_Type = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUUniformBuf", + .tp_basicsize = sizeof(BPyGPUUniformBuf), + .tp_dealloc = (destructor)BPyGPUUniformBuf__tp_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_doc = pygpu_uniformbuffer__tp_doc, + .tp_methods = pygpu_uniformbuffer__tp_methods, + .tp_getset = pygpu_uniformbuffer__tp_getseters, + .tp_new = pygpu_uniformbuffer__tp_new, +}; + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Public API + * \{ */ + +PyObject *BPyGPUUniformBuf_CreatePyObject(GPUUniformBuf *ubo) +{ + BPyGPUUniformBuf *self; + + self = PyObject_New(BPyGPUUniformBuf, &BPyGPUUniformBuf_Type); + self->ubo = ubo; + + return (PyObject *)self; +} + +/** \} */ + +#undef BPYGPU_UNIFORMBUF_CHECK_OBJ diff --git a/source/blender/python/gpu/gpu_py_uniformbuffer.h b/source/blender/python/gpu/gpu_py_uniformbuffer.h new file mode 100644 index 00000000000..a13c33ae78a --- /dev/null +++ b/source/blender/python/gpu/gpu_py_uniformbuffer.h @@ -0,0 +1,33 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** \file + * \ingroup bpygpu + */ + +#pragma once + +#include "BLI_compiler_attrs.h" + +extern PyTypeObject BPyGPUUniformBuf_Type; + +#define BPyGPUUniformBuf_Check(v) (Py_TYPE(v) == &BPyGPUUniformBuf_Type) + +typedef struct BPyGPUUniformBuf { + PyObject_HEAD struct GPUUniformBuf *ubo; +} BPyGPUUniformBuf; + +PyObject *BPyGPUUniformBuf_CreatePyObject(struct GPUUniformBuf *ubo) ATTR_NONNULL(1); -- cgit v1.2.3
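
Example usage (not part of the patch): a minimal, untested sketch of the new `gpu.state` functions added above. It assumes a Blender build with this patch applied and that the code runs while a GPU context is active, for example inside a draw callback registered with `bpy.types.SpaceView3D.draw_handler_add`; the specific values are arbitrary.

```python
import gpu

def draw():
    # Runs as a draw callback, so a GPU context is active.
    gpu.state.blend_set('ALPHA')
    gpu.state.depth_test_set('LESS_EQUAL')
    gpu.state.depth_mask_set(True)
    gpu.state.line_width_set(2.0)

    print(gpu.state.blend_get())     # 'ALPHA'
    print(gpu.state.viewport_get())  # (x, y, width, height) of the active framebuffer

    # ... issue draw calls here ...

    # Reset the state so other draw callbacks are not affected.
    gpu.state.blend_set('NONE')
    gpu.state.depth_test_set('NONE')
    gpu.state.depth_mask_set(False)
```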
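A similar sketch for `gpu.types.GPUTexture`: create a 2D texture, clear it, query its properties and read the pixels back into a `gpu.types.Buffer` (also added by this patch). Size, format and clear color are arbitrary, and an active GPU context is again assumed.

```python
import gpu

# 2D float texture; `format` is one of the names accepted by GPUTexture.__new__.
tex = gpu.types.GPUTexture((128, 128), format='RGBA32F')

# Fill the whole texture with opaque red.
tex.clear(format='FLOAT', value=(1.0, 0.0, 0.0, 1.0))
print(tex.width, tex.height, tex.format)  # 128 128 RGBA32F

# Read the pixels back; the result is a gpu.types.Buffer.
pixels = tex.read()
print(pixels.to_list()[0])  # first row of the returned buffer
```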
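Finally, a sketch of the new `GPUShader.uniform_buffer` method together with `gpu.types.GPUUniformBuf`. The GLSL sources, the `ub_color` block name and the values are invented for illustration, and the `Buffer(format, dimensions, data)` constructor signature is assumed from the Buffer type added elsewhere in this patch; treat this as pseudo-usage rather than verified code.

```python
import gpu

vert_src = """
    in vec2 pos;
    void main() { gl_Position = vec4(pos, 0.0, 1.0); }
"""
frag_src = """
    layout(std140) uniform ub_color { vec4 color; };
    out vec4 fragColor;
    void main() { fragColor = color; }
"""
shader = gpu.types.GPUShader(vert_src, frag_src)

# One vec4 (16 bytes) packed into a Buffer, matching the std140 block above.
data = gpu.types.Buffer('FLOAT', 4, [0.2, 0.4, 0.8, 1.0])
ubo = gpu.types.GPUUniformBuf(data)

shader.bind()
shader.uniform_buffer("ub_color", ubo)  # bind the UBO to the `ub_color` block
# shader.uniform_sampler("image", some_texture) would bind a GPUTexture the same way.
```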