git.blender.org/blender.git
Diffstat (limited to 'source/gameengine/VideoTexture')
-rw-r--r--  source/gameengine/VideoTexture/CMakeLists.txt    |   13
-rw-r--r--  source/gameengine/VideoTexture/Common.h          |    3
-rw-r--r--  source/gameengine/VideoTexture/DeckLink.cpp      |  813
-rw-r--r--  source/gameengine/VideoTexture/DeckLink.h        |   86
-rw-r--r--  source/gameengine/VideoTexture/Exception.cpp     |   15
-rw-r--r--  source/gameengine/VideoTexture/Exception.h       |   18
-rw-r--r--  source/gameengine/VideoTexture/FilterBase.h      |    7
-rw-r--r--  source/gameengine/VideoTexture/FilterSource.h    |   24
-rw-r--r--  source/gameengine/VideoTexture/ImageBase.cpp     |   99
-rw-r--r--  source/gameengine/VideoTexture/ImageBase.h       |   10
-rw-r--r--  source/gameengine/VideoTexture/ImageMix.cpp      |    2
-rw-r--r--  source/gameengine/VideoTexture/ImageRender.cpp   |  200
-rw-r--r--  source/gameengine/VideoTexture/ImageRender.h     |   28
-rw-r--r--  source/gameengine/VideoTexture/ImageViewport.cpp |   99
-rw-r--r--  source/gameengine/VideoTexture/ImageViewport.h   |   11
-rw-r--r--  source/gameengine/VideoTexture/Texture.cpp       |    7
-rw-r--r--  source/gameengine/VideoTexture/VideoBase.cpp     |   47
-rw-r--r--  source/gameengine/VideoTexture/VideoBase.h       |    2
-rw-r--r--  source/gameengine/VideoTexture/VideoDeckLink.cpp | 1228
-rw-r--r--  source/gameengine/VideoTexture/VideoDeckLink.h   |  256
-rw-r--r--  source/gameengine/VideoTexture/VideoFFmpeg.cpp   |    4
-rw-r--r--  source/gameengine/VideoTexture/blendVideoTex.cpp |   15
22 files changed, 2908 insertions, 79 deletions
diff --git a/source/gameengine/VideoTexture/CMakeLists.txt b/source/gameengine/VideoTexture/CMakeLists.txt
index 4be9a9abe5c..1eb09b02e05 100644
--- a/source/gameengine/VideoTexture/CMakeLists.txt
+++ b/source/gameengine/VideoTexture/CMakeLists.txt
@@ -45,6 +45,9 @@ set(INC
../../../intern/glew-mx
../../../intern/guardedalloc
../../../intern/string
+ ../../../intern/decklink
+ ../../../intern/gpudirect
+ ../../../intern/atomic
)
set(INC_SYS
@@ -68,8 +71,10 @@ set(SRC
ImageViewport.cpp
PyTypeList.cpp
Texture.cpp
+ DeckLink.cpp
VideoBase.cpp
VideoFFmpeg.cpp
+ VideoDeckLink.cpp
blendVideoTex.cpp
BlendType.h
@@ -87,8 +92,10 @@ set(SRC
ImageViewport.h
PyTypeList.h
Texture.h
+ DeckLink.h
VideoBase.h
VideoFFmpeg.h
+ VideoDeckLink.h
)
if(WITH_CODEC_FFMPEG)
@@ -100,7 +107,13 @@ if(WITH_CODEC_FFMPEG)
remove_strict_flags_file(
VideoFFmpeg.cpp
+ VideoDeckLink.cpp
+ DeckLink.cpp
)
endif()
+if(WITH_GAMEENGINE_DECKLINK)
+ add_definitions(-DWITH_GAMEENGINE_DECKLINK)
+endif()
+
blender_add_lib(ge_videotex "${SRC}" "${INC}" "${INC_SYS}")
diff --git a/source/gameengine/VideoTexture/Common.h b/source/gameengine/VideoTexture/Common.h
index 90f7e66452a..22ea177addc 100644
--- a/source/gameengine/VideoTexture/Common.h
+++ b/source/gameengine/VideoTexture/Common.h
@@ -36,7 +36,8 @@
#define NULL 0
#endif
-#ifndef HRESULT
+#ifndef _HRESULT_DEFINED
+#define _HRESULT_DEFINED
#define HRESULT long
#endif
diff --git a/source/gameengine/VideoTexture/DeckLink.cpp b/source/gameengine/VideoTexture/DeckLink.cpp
new file mode 100644
index 00000000000..0506756ef2d
--- /dev/null
+++ b/source/gameengine/VideoTexture/DeckLink.cpp
@@ -0,0 +1,813 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+* The Original Code is Copyright (C) 2015, Blender Foundation
+* All rights reserved.
+*
+* The Original Code is: all of this file.
+*
+* Contributor(s): Blender Foundation.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file gameengine/VideoTexture/DeckLink.cpp
+ * \ingroup bgevideotex
+ */
+
+#ifdef WITH_GAMEENGINE_DECKLINK
+
+// implementation
+
+// FFmpeg defines its own version of stdint.h on Windows.
+// DeckLink needs FFmpeg, so it uses FFmpeg's version of stdint.h;
+// this is necessary for the INT64_C macro
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+// this is necessary for UINTPTR_MAX (used by atomic-ops)
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+
+#include "atomic_ops.h"
+
+#include "EXP_PyObjectPlus.h"
+#include "KX_KetsjiEngine.h"
+#include "KX_PythonInit.h"
+#include "DeckLink.h"
+
+#include <memory.h>
+
+// macro for exception handling and logging
+#define CATCH_EXCP catch (Exception & exp) \
+{ exp.report(); return NULL; }
+
+static struct
+{
+ const char *name;
+ BMDDisplayMode mode;
+} sModeStringTab[] = {
+ { "NTSC", bmdModeNTSC },
+ { "NTSC2398", bmdModeNTSC2398 },
+ { "PAL", bmdModePAL },
+ { "NTSCp", bmdModeNTSCp },
+ { "PALp", bmdModePALp },
+
+ /* HD 1080 Modes */
+
+ { "HD1080p2398", bmdModeHD1080p2398 },
+ { "HD1080p24", bmdModeHD1080p24 },
+ { "HD1080p25", bmdModeHD1080p25 },
+ { "HD1080p2997", bmdModeHD1080p2997 },
+ { "HD1080p30", bmdModeHD1080p30 },
+ { "HD1080i50", bmdModeHD1080i50 },
+ { "HD1080i5994", bmdModeHD1080i5994 },
+ { "HD1080i6000", bmdModeHD1080i6000 },
+ { "HD1080p50", bmdModeHD1080p50 },
+ { "HD1080p5994", bmdModeHD1080p5994 },
+ { "HD1080p6000", bmdModeHD1080p6000 },
+
+ /* HD 720 Modes */
+
+ { "HD720p50", bmdModeHD720p50 },
+ { "HD720p5994", bmdModeHD720p5994 },
+ { "HD720p60", bmdModeHD720p60 },
+
+ /* 2k Modes */
+
+ { "2k2398", bmdMode2k2398 },
+ { "2k24", bmdMode2k24 },
+ { "2k25", bmdMode2k25 },
+
+ /* DCI Modes (output only) */
+
+ { "2kDCI2398", bmdMode2kDCI2398 },
+ { "2kDCI24", bmdMode2kDCI24 },
+ { "2kDCI25", bmdMode2kDCI25 },
+
+ /* 4k Modes */
+
+ { "4K2160p2398", bmdMode4K2160p2398 },
+ { "4K2160p24", bmdMode4K2160p24 },
+ { "4K2160p25", bmdMode4K2160p25 },
+ { "4K2160p2997", bmdMode4K2160p2997 },
+ { "4K2160p30", bmdMode4K2160p30 },
+ { "4K2160p50", bmdMode4K2160p50 },
+ { "4K2160p5994", bmdMode4K2160p5994 },
+ { "4K2160p60", bmdMode4K2160p60 },
+ // sentinel
+ { NULL }
+};
+
+static struct
+{
+ const char *name;
+ BMDPixelFormat format;
+} sFormatStringTab[] = {
+ { "8BitYUV", bmdFormat8BitYUV },
+ { "10BitYUV", bmdFormat10BitYUV },
+ { "8BitARGB", bmdFormat8BitARGB },
+ { "8BitBGRA", bmdFormat8BitBGRA },
+ { "10BitRGB", bmdFormat10BitRGB },
+ { "12BitRGB", bmdFormat12BitRGB },
+ { "12BitRGBLE", bmdFormat12BitRGBLE },
+ { "10BitRGBXLE", bmdFormat10BitRGBXLE },
+ { "10BitRGBX", bmdFormat10BitRGBX },
+ // sentinel
+ { NULL }
+};
+
+ExceptionID DeckLinkBadDisplayMode, DeckLinkBadPixelFormat;
+ExpDesc DeckLinkBadDisplayModeDesc(DeckLinkBadDisplayMode, "Invalid or unsupported display mode");
+ExpDesc DeckLinkBadPixelFormatDesc(DeckLinkBadPixelFormat, "Invalid or unsupported pixel format");
+
+HRESULT decklink_ReadDisplayMode(const char *format, size_t len, BMDDisplayMode *displayMode)
+{
+ int i;
+
+ if (len == 0)
+ len = strlen(format);
+ for (i = 0; sModeStringTab[i].name != NULL; i++) {
+ if (strlen(sModeStringTab[i].name) == len &&
+ !strncmp(sModeStringTab[i].name, format, len))
+ {
+ *displayMode = sModeStringTab[i].mode;
+ return S_OK;
+ }
+ }
+ if (len != 4)
+ THRWEXCP(DeckLinkBadDisplayMode, S_OK);
+ // assume the user entered the mode value directly as a 4-character string
+ *displayMode = (BMDDisplayMode)((((uint32_t)format[0]) << 24) + (((uint32_t)format[1]) << 16) + (((uint32_t)format[2]) << 8) + ((uint32_t)format[3]));
+ return S_OK;
+}
+
+HRESULT decklink_ReadPixelFormat(const char *format, size_t len, BMDPixelFormat *pixelFormat)
+{
+ int i;
+
+ if (!len)
+ len = strlen(format);
+ for (i = 0; sFormatStringTab[i].name != NULL; i++) {
+ if (strlen(sFormatStringTab[i].name) == len &&
+ !strncmp(sFormatStringTab[i].name, format, len))
+ {
+ *pixelFormat = sFormatStringTab[i].format;
+ return S_OK;
+ }
+ }
+ if (len != 4)
+ THRWEXCP(DeckLinkBadPixelFormat, S_OK);
+ // assume the user entered the pixel format value directly as a 4-character string
+ *pixelFormat = (BMDPixelFormat)((((uint32_t)format[0]) << 24) + (((uint32_t)format[1]) << 16) + (((uint32_t)format[2]) << 8) + ((uint32_t)format[3]));
+ return S_OK;
+}
+
+class DeckLink3DFrameWrapper : public IDeckLinkVideoFrame, IDeckLinkVideoFrame3DExtensions
+{
+public:
+ // IUnknown
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv)
+ {
+ if (!memcmp(&iid, &IID_IDeckLinkVideoFrame3DExtensions, sizeof(iid))) {
+ if (mpRightEye) {
+ *ppv = (IDeckLinkVideoFrame3DExtensions*)this;
+ return S_OK;
+ }
+ }
+ return E_NOTIMPL;
+ }
+ virtual ULONG STDMETHODCALLTYPE AddRef(void) { return 1U; }
+ virtual ULONG STDMETHODCALLTYPE Release(void) { return 1U; }
+ // IDeckLinkVideoFrame
+ virtual long STDMETHODCALLTYPE GetWidth(void) { return mpLeftEye->GetWidth(); }
+ virtual long STDMETHODCALLTYPE GetHeight(void) { return mpLeftEye->GetHeight(); }
+ virtual long STDMETHODCALLTYPE GetRowBytes(void) { return mpLeftEye->GetRowBytes(); }
+ virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat(void) { return mpLeftEye->GetPixelFormat(); }
+ virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags(void) { return mpLeftEye->GetFlags(); }
+ virtual HRESULT STDMETHODCALLTYPE GetBytes(void **buffer) { return mpLeftEye->GetBytes(buffer); }
+ virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format,IDeckLinkTimecode **timecode)
+ { return mpLeftEye->GetTimecode(format, timecode); }
+ virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary)
+ { return mpLeftEye->GetAncillaryData(ancillary); }
+ // IDeckLinkVideoFrame3DExtensions
+ virtual BMDVideo3DPackingFormat STDMETHODCALLTYPE Get3DPackingFormat(void)
+ {
+ return bmdVideo3DPackingLeftOnly;
+ }
+ virtual HRESULT STDMETHODCALLTYPE GetFrameForRightEye(
+ /* [out] */ IDeckLinkVideoFrame **rightEyeFrame)
+ {
+ mpRightEye->AddRef();
+ *rightEyeFrame = mpRightEye;
+ return S_OK;
+ }
+ // Constructor
+ DeckLink3DFrameWrapper(IDeckLinkVideoFrame *leftEye, IDeckLinkVideoFrame *rightEye)
+ {
+ mpLeftEye = leftEye;
+ mpRightEye = rightEye;
+ }
+ // no need for a destructor, it's just a wrapper
+private:
+ IDeckLinkVideoFrame *mpLeftEye;
+ IDeckLinkVideoFrame *mpRightEye;
+};
+
+static void decklink_Reset(DeckLink *self)
+{
+ self->m_lastClock = 0.0;
+ self->mDLOutput = NULL;
+ self->mUse3D = false;
+ self->mDisplayMode = bmdModeUnknown;
+ self->mKeyingSupported = false;
+ self->mHDKeyingSupported = false;
+ self->mSize[0] = 0;
+ self->mSize[1] = 0;
+ self->mFrameSize = 0;
+ self->mLeftFrame = NULL;
+ self->mRightFrame = NULL;
+ self->mKeyer = NULL;
+ self->mUseKeying = false;
+ self->mKeyingLevel = 255;
+ self->mUseExtend = false;
+}
+
+#ifdef __BIG_ENDIAN__
+#define CONV_PIXEL(i) ((((i)>>16)&0xFF00)+(((i)&0xFF00)<<16)+((i)&0xFF00FF))
+#else
+#define CONV_PIXEL(i) ((((i)&0xFF)<<16)+(((i)>>16)&0xFF)+((i)&0xFF00FF00))
+#endif
+
+// adapt the pixel format and picture size from VideoTexture (RGBA) to DeckLink (BGRA)
+static void decklink_ConvImage(uint32_t *dest, const short *destSize, const uint32_t *source, const short *srcSize, bool extend)
+{
+ short w, h, x, y;
+ const uint32_t *s;
+ uint32_t *d, p;
+ bool sameSize = (destSize[0] == srcSize[0] && destSize[1] == srcSize[1]);
+
+ if (sameSize || !extend) {
+ // here we convert pixel by pixel
+ w = (destSize[0] < srcSize[0]) ? destSize[0] : srcSize[0];
+ h = (destSize[1] < srcSize[1]) ? destSize[1] : srcSize[1];
+ for (y = 0; y < h; ++y) {
+ s = source + y*srcSize[0];
+ d = dest + y*destSize[0];
+ for (x = 0; x < w; ++x, ++s, ++d) {
+ *d = CONV_PIXEL(*s);
+ }
+ }
+ }
+ else {
+ // here we scale
+ // interpolation accumulator
+ int accHeight = srcSize[1] >> 1;
+ d = dest;
+ s = source;
+ // process image rows
+ for (y = 0; y < srcSize[1]; ++y) {
+ // increase height accum
+ accHeight += destSize[1];
+ // if pixel row has to be drawn
+ if (accHeight >= srcSize[1]) {
+ // decrease accum
+ accHeight -= srcSize[1];
+ // width accum
+ int accWidth = srcSize[0] >> 1;
+ // process row
+ for (x = 0; x < srcSize[0]; ++x, ++s) {
+ // increase width accum
+ accWidth += destSize[0];
+ // convert pixel
+ p = CONV_PIXEL(*s);
+ // if pixel has to be drawn one or more times
+ while (accWidth >= srcSize[0]) {
+ // decrease accum
+ accWidth -= srcSize[0];
+ *d++ = p;
+ }
+ }
+ // if there should be more identical lines
+ while (accHeight >= srcSize[1]) {
+ accHeight -= srcSize[1];
+ // copy previous line
+ memcpy(d, d - destSize[0], 4 * destSize[0]);
+ d += destSize[0];
+ }
+ }
+ else {
+ // if we skip a source line
+ s += srcSize[0];
+ }
+ }
+ }
+}
+
+// DeckLink object allocation
+static PyObject *DeckLink_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ // allocate object
+ DeckLink * self = reinterpret_cast<DeckLink*>(type->tp_alloc(type, 0));
+ // initialize object structure
+ decklink_Reset(self);
+ // m_leftEye is a python object, it's handled by python
+ self->m_leftEye = NULL;
+ self->m_rightEye = NULL;
+ // return allocated object
+ return reinterpret_cast<PyObject*>(self);
+}
+
+
+// forward declaration
+PyObject *DeckLink_close(DeckLink *self);
+int DeckLink_setSource(DeckLink *self, PyObject *value, void *closure);
+
+
+// DeckLink object deallocation
+static void DeckLink_dealloc(DeckLink *self)
+{
+ // release renderer
+ Py_XDECREF(self->m_leftEye);
+ // close decklink
+ PyObject *ret = DeckLink_close(self);
+ Py_DECREF(ret);
+ // release object
+ Py_TYPE((PyObject *)self)->tp_free((PyObject *)self);
+}
+
+
+ExceptionID AutoDetectionNotAvail, DeckLinkOpenCard, DeckLinkBadFormat, DeckLinkInternalError;
+ExpDesc AutoDetectionNotAvailDesc(AutoDetectionNotAvail, "Auto detection not yet available");
+ExpDesc DeckLinkOpenCardDesc(DeckLinkOpenCard, "Cannot open card for output");
+ExpDesc DeckLinkBadFormatDesc(DeckLinkBadFormat, "Invalid or unsupported output format, use <mode>[/3D]");
+ExpDesc DeckLinkInternalErrorDesc(DeckLinkInternalError, "DeckLink API internal error, please report");
+
+// DeckLink object initialization
+static int DeckLink_init(DeckLink *self, PyObject *args, PyObject *kwds)
+{
+ IDeckLinkIterator* pIterator;
+ IDeckLinkAttributes* pAttributes;
+ IDeckLinkDisplayModeIterator* pDisplayModeIterator;
+ IDeckLinkDisplayMode* pDisplayMode;
+ IDeckLink* pDL;
+ char* p3D;
+ BOOL flag;
+ size_t len;
+ int i;
+ uint32_t displayFlags;
+ BMDVideoOutputFlags outputFlags;
+ BMDDisplayModeSupport support;
+ uint32_t* bytes;
+
+
+ // material ID
+ short cardIdx = 0;
+ // texture ID
+ char *format = NULL;
+
+ static const char *kwlist[] = {"cardIdx", "format", NULL};
+ // get parameters
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|hs",
+ const_cast<char**>(kwlist), &cardIdx, &format))
+ return -1;
+
+ try {
+ if (format == NULL) {
+ THRWEXCP(AutoDetectionNotAvail, S_OK);
+ }
+
+ if ((p3D = strchr(format, '/')) != NULL && strcmp(p3D, "/3D"))
+ THRWEXCP(DeckLinkBadFormat, S_OK);
+ self->mUse3D = (p3D) ? true : false;
+ // read the mode
+ len = (p3D) ? (size_t)(p3D - format) : strlen(format);
+ // throws if bad mode
+ decklink_ReadDisplayMode(format, len, &self->mDisplayMode);
+
+ pIterator = BMD_CreateDeckLinkIterator();
+ pDL = NULL;
+ if (pIterator) {
+ i = 0;
+ while (pIterator->Next(&pDL) == S_OK) {
+ if (i == cardIdx) {
+ break;
+ }
+ i++;
+ pDL->Release();
+ pDL = NULL;
+ }
+ pIterator->Release();
+ }
+
+ if (!pDL) {
+ THRWEXCP(DeckLinkOpenCard, S_OK);
+ }
+ // detect the capabilities
+ if (pDL->QueryInterface(IID_IDeckLinkAttributes, (void**)&pAttributes) == S_OK) {
+ if (pAttributes->GetFlag(BMDDeckLinkSupportsInternalKeying, &flag) == S_OK && flag) {
+ self->mKeyingSupported = true;
+ if (pAttributes->GetFlag(BMDDeckLinkSupportsHDKeying, &flag) == S_OK && flag) {
+ self->mHDKeyingSupported = true;
+ }
+ }
+ pAttributes->Release();
+ }
+
+ if (pDL->QueryInterface(IID_IDeckLinkOutput, (void**)&self->mDLOutput) != S_OK) {
+ self->mDLOutput = NULL;
+ }
+ if (self->mKeyingSupported) {
+ pDL->QueryInterface(IID_IDeckLinkKeyer, (void **)&self->mKeyer);
+ }
+ // we don't need the device anymore, release to avoid leaking
+ pDL->Release();
+
+ if (!self->mDLOutput)
+ THRWEXCP(DeckLinkOpenCard, S_OK);
+
+ if (self->mDLOutput->GetDisplayModeIterator(&pDisplayModeIterator) != S_OK)
+ THRWEXCP(DeckLinkInternalError, S_OK);
+
+ displayFlags = (self->mUse3D) ? bmdDisplayModeSupports3D : 0;
+ outputFlags = (self->mUse3D) ? bmdVideoOutputDualStream3D : bmdVideoOutputFlagDefault;
+ pDisplayMode = NULL;
+ i = 0;
+ while (pDisplayModeIterator->Next(&pDisplayMode) == S_OK) {
+ if (pDisplayMode->GetDisplayMode() == self->mDisplayMode
+ && (pDisplayMode->GetFlags() & displayFlags) == displayFlags) {
+ if (self->mDLOutput->DoesSupportVideoMode(self->mDisplayMode, bmdFormat8BitBGRA, outputFlags, &support, NULL) != S_OK ||
+ support == bmdDisplayModeNotSupported)
+ {
+ printf("Warning: DeckLink card %d reports no BGRA support, proceed anyway\n", cardIdx);
+ }
+ break;
+ }
+ pDisplayMode->Release();
+ pDisplayMode = NULL;
+ i++;
+ }
+ pDisplayModeIterator->Release();
+
+ if (!pDisplayMode)
+ THRWEXCP(DeckLinkBadFormat, S_OK);
+ self->mSize[0] = pDisplayMode->GetWidth();
+ self->mSize[1] = pDisplayMode->GetHeight();
+ self->mFrameSize = 4*self->mSize[0]*self->mSize[1];
+ pDisplayMode->Release();
+ if (self->mDLOutput->EnableVideoOutput(self->mDisplayMode, outputFlags) != S_OK)
+ // this shouldn't fail
+ THRWEXCP(DeckLinkOpenCard, S_OK);
+
+ if (self->mDLOutput->CreateVideoFrame(self->mSize[0], self->mSize[1], self->mSize[0] * 4, bmdFormat8BitBGRA, bmdFrameFlagFlipVertical, &self->mLeftFrame) != S_OK)
+ THRWEXCP(DeckLinkInternalError, S_OK);
+ // clear alpha channel in the frame buffer
+ self->mLeftFrame->GetBytes((void **)&bytes);
+ memset(bytes, 0, self->mFrameSize);
+ if (self->mUse3D) {
+ if (self->mDLOutput->CreateVideoFrame(self->mSize[0], self->mSize[1], self->mSize[0] * 4, bmdFormat8BitBGRA, bmdFrameFlagFlipVertical, &self->mRightFrame) != S_OK)
+ THRWEXCP(DeckLinkInternalError, S_OK);
+ // clear alpha channel in the frame buffer
+ self->mRightFrame->GetBytes((void **)&bytes);
+ memset(bytes, 0, self->mFrameSize);
+ }
+ }
+ catch (Exception & exp)
+ {
+ printf("DeckLink: exception when opening card %d: %s\n", cardIdx, exp.what());
+ exp.report();
+ // normally, the object should be deallocated
+ return -1;
+ }
+ // initialization succeeded
+ return 0;
+}
+
+
+// close added decklink
+PyObject *DeckLink_close(DeckLink * self)
+{
+ if (self->mLeftFrame)
+ self->mLeftFrame->Release();
+ if (self->mRightFrame)
+ self->mRightFrame->Release();
+ if (self->mKeyer)
+ self->mKeyer->Release();
+ if (self->mDLOutput)
+ self->mDLOutput->Release();
+ decklink_Reset(self);
+ Py_RETURN_NONE;
+}
+
+
+// refresh decklink key frame
+static PyObject *DeckLink_refresh(DeckLink *self, PyObject *args)
+{
+ // get parameter - refresh source
+ PyObject *param;
+ double ts = -1.0;
+
+ if (!PyArg_ParseTuple(args, "O|d:refresh", &param, &ts) || !PyBool_Check(param)) {
+ // report error
+ PyErr_SetString(PyExc_TypeError, "The value must be a bool");
+ return NULL;
+ }
+ // some trick here: we are in the business of loading a key frame in DeckLink,
+ // there is no point doing it if we are still in the same rendering frame.
+ // We detect this by looking at the engine's current clock time
+ KX_KetsjiEngine* engine = KX_GetActiveEngine();
+ if (engine->GetClockTime() != self->m_lastClock)
+ {
+ self->m_lastClock = engine->GetClockTime();
+ // set source refresh
+ bool refreshSource = (param == Py_True);
+ uint32_t *leftEye = NULL;
+ uint32_t *rightEye = NULL;
+ // try to process key frame from source
+ try {
+ // check if optimization is possible
+ if (self->m_leftEye != NULL) {
+ ImageBase *leftImage = self->m_leftEye->m_image;
+ short * srcSize = leftImage->getSize();
+ self->mLeftFrame->GetBytes((void **)&leftEye);
+ if (srcSize[0] == self->mSize[0] && srcSize[1] == self->mSize[1])
+ {
+ // buffer has same size, can load directly
+ if (!leftImage->loadImage(leftEye, self->mFrameSize, GL_BGRA, ts))
+ leftEye = NULL;
+ }
+ else {
+ // scaling is required, go the hard way
+ unsigned int *src = leftImage->getImage(0, ts);
+ if (src != NULL)
+ decklink_ConvImage(leftEye, self->mSize, src, srcSize, self->mUseExtend);
+ else
+ leftEye = NULL;
+ }
+ }
+ if (leftEye) {
+ if (self->mUse3D && self->m_rightEye != NULL) {
+ ImageBase *rightImage = self->m_rightEye->m_image;
+ short * srcSize = rightImage->getSize();
+ self->mRightFrame->GetBytes((void **)&rightEye);
+ if (srcSize[0] == self->mSize[0] && srcSize[1] == self->mSize[1])
+ {
+ // buffer has same size, can load directly
+ rightImage->loadImage(rightEye, self->mFrameSize, GL_BGRA, ts);
+ }
+ else {
+ // scaling is required, go the hard way
+ unsigned int *src = rightImage->getImage(0, ts);
+ if (src != NULL)
+ decklink_ConvImage(rightEye, self->mSize, src, srcSize, self->mUseExtend);
+ }
+ }
+ if (self->mUse3D) {
+ DeckLink3DFrameWrapper frame3D(
+ (IDeckLinkVideoFrame*)self->mLeftFrame,
+ (IDeckLinkVideoFrame*)self->mRightFrame);
+ self->mDLOutput->DisplayVideoFrameSync(&frame3D);
+ }
+ else {
+ self->mDLOutput->DisplayVideoFrameSync((IDeckLinkVideoFrame*)self->mLeftFrame);
+ }
+ }
+ // refresh texture source, if required
+ if (refreshSource) {
+ if (self->m_leftEye)
+ self->m_leftEye->m_image->refresh();
+ if (self->m_rightEye)
+ self->m_rightEye->m_image->refresh();
+ }
+ }
+ CATCH_EXCP;
+ }
+ Py_RETURN_NONE;
+}
+
+// get source object
+static PyObject *DeckLink_getSource(DeckLink *self, PyObject *value, void *closure)
+{
+ // if source exists
+ if (self->m_leftEye != NULL) {
+ Py_INCREF(self->m_leftEye);
+ return reinterpret_cast<PyObject*>(self->m_leftEye);
+ }
+ // otherwise return None
+ Py_RETURN_NONE;
+}
+
+
+// set source object
+int DeckLink_setSource(DeckLink *self, PyObject *value, void *closure)
+{
+ // check new value
+ if (value == NULL || !pyImageTypes.in(Py_TYPE(value))) {
+ // report value error
+ PyErr_SetString(PyExc_TypeError, "Invalid type of value");
+ return -1;
+ }
+ // increase ref count for new value
+ Py_INCREF(value);
+ // release previous
+ Py_XDECREF(self->m_leftEye);
+ // set new value
+ self->m_leftEye = reinterpret_cast<PyImage*>(value);
+ // return success
+ return 0;
+}
+
+// get source object
+static PyObject *DeckLink_getRight(DeckLink *self, PyObject *value, void *closure)
+{
+ // if source exists
+ if (self->m_rightEye != NULL)
+ {
+ Py_INCREF(self->m_rightEye);
+ return reinterpret_cast<PyObject*>(self->m_rightEye);
+ }
+ // otherwise return None
+ Py_RETURN_NONE;
+}
+
+
+// set source object
+static int DeckLink_setRight(DeckLink *self, PyObject *value, void *closure)
+{
+ // check new value
+ if (value == NULL || !pyImageTypes.in(Py_TYPE(value)))
+ {
+ // report value error
+ PyErr_SetString(PyExc_TypeError, "Invalid type of value");
+ return -1;
+ }
+ // increase ref count for new value
+ Py_INCREF(value);
+ // release previous
+ Py_XDECREF(self->m_rightEye);
+ // set new value
+ self->m_rightEye = reinterpret_cast<PyImage*>(value);
+ // return success
+ return 0;
+}
+
+
+static PyObject *DeckLink_getKeying(DeckLink *self, PyObject *value, void *closure)
+{
+ if (self->mUseKeying) Py_RETURN_TRUE;
+ else Py_RETURN_FALSE;
+}
+
+static int DeckLink_setKeying(DeckLink *self, PyObject *value, void *closure)
+{
+ if (value == NULL || !PyBool_Check(value))
+ {
+ PyErr_SetString(PyExc_TypeError, "The value must be a bool");
+ return -1;
+ }
+ if (self->mKeyer != NULL)
+ {
+ if (value == Py_True)
+ {
+ if (self->mKeyer->Enable(false) != S_OK)
+ {
+ PyErr_SetString(PyExc_RuntimeError, "Error enabling keyer");
+ return -1;
+ }
+ self->mUseKeying = true;
+ self->mKeyer->SetLevel(self->mKeyingLevel);
+ }
+ else
+ {
+ self->mKeyer->Disable();
+ self->mUseKeying = false;
+ }
+ }
+ // success
+ return 0;
+}
+
+static PyObject *DeckLink_getLevel(DeckLink *self, PyObject *value, void *closure)
+{
+ return Py_BuildValue("h", self->mKeyingLevel);
+}
+
+static int DeckLink_setLevel(DeckLink *self, PyObject *value, void *closure)
+{
+ long level;
+ if (value == NULL || !PyLong_Check(value)) {
+ PyErr_SetString(PyExc_TypeError, "The value must be an integer from 0 to 255");
+ return -1;
+ }
+ level = PyLong_AsLong(value);
+ if (level > 255)
+ level = 255;
+ else if (level < 0)
+ level = 0;
+ self->mKeyingLevel = (uint8_t)level;
+ if (self->mUseKeying) {
+ if (self->mKeyer->SetLevel(self->mKeyingLevel) != S_OK) {
+ PyErr_SetString(PyExc_RuntimeError, "Error changing level of keyer");
+ return -1;
+ }
+ }
+ // success
+ return 0;
+}
+
+static PyObject *DeckLink_getExtend(DeckLink *self, PyObject *value, void *closure)
+{
+ if (self->mUseExtend) Py_RETURN_TRUE;
+ else Py_RETURN_FALSE;
+}
+
+static int DeckLink_setExtend(DeckLink *self, PyObject *value, void *closure)
+{
+ if (value == NULL || !PyBool_Check(value))
+ {
+ PyErr_SetString(PyExc_TypeError, "The value must be a bool");
+ return -1;
+ }
+ self->mUseExtend = (value == Py_True);
+ return 0;
+}
+
+// class DeckLink methods
+static PyMethodDef decklinkMethods[] =
+{
+ { "close", (PyCFunction)DeckLink_close, METH_NOARGS, "Close the DeckLink card output"},
+ { "refresh", (PyCFunction)DeckLink_refresh, METH_VARARGS, "Refresh decklink from source"},
+ {NULL} /* Sentinel */
+};
+
+// class DeckLink attributes
+static PyGetSetDef decklinkGetSets[] =
+{
+ { (char*)"source", (getter)DeckLink_getSource, (setter)DeckLink_setSource, (char*)"source of decklink (left eye)", NULL},
+ { (char*)"right", (getter)DeckLink_getRight, (setter)DeckLink_setRight, (char*)"source of decklink (right eye)", NULL },
+ { (char*)"keying", (getter)DeckLink_getKeying, (setter)DeckLink_setKeying, (char*)"whether keying is enabled (frame is alpha-composited with passthrough output)", NULL },
+ { (char*)"level", (getter)DeckLink_getLevel, (setter)DeckLink_setLevel, (char*)"change the level of keying (overall alpha level of key frame, 0 to 255)", NULL },
+ { (char*)"extend", (getter)DeckLink_getExtend, (setter)DeckLink_setExtend, (char*)"whether the image should be stretched to fit the frame", NULL },
+ { NULL }
+};
+
+
+// class DeckLink declaration
+PyTypeObject DeckLinkType =
+{
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "VideoTexture.DeckLink", /*tp_name*/
+ sizeof(DeckLink), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor)DeckLink_dealloc,/*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &imageBufferProcs, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT, /*tp_flags*/
+ "DeckLink objects", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ decklinkMethods, /* tp_methods */
+ 0, /* tp_members */
+ decklinkGetSets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)DeckLink_init, /* tp_init */
+ 0, /* tp_alloc */
+ DeckLink_new, /* tp_new */
+};
+
+#endif /* WITH_GAMEENGINE_DECKLINK */
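
The new DeckLink type added above is a Python object exposed by the VideoTexture module (it is registered in blendVideoTex.cpp further down). A minimal usage sketch, assuming the module is reached as bge.texture, card 0 accepts the HD1080p24 mode listed in sModeStringTab, and the logic-brick callbacks and the "dl" property name are placeholders:

    from bge import texture

    def init(cont):
        # open card 0 for output; append "/3D" to the mode string for dual-stream 3D
        dl = texture.DeckLink(cardIdx=0, format="HD1080p24")
        dl.source = texture.ImageViewport()   # left eye / main key frame source
        dl.extend = True     # stretch the source image to the output frame
        dl.keying = True     # alpha-composite the key frame over the passthrough output
        dl.level = 255       # overall alpha level of the key frame (0-255)
        cont.owner["dl"] = dl

    def frame(cont):
        # send a new key frame to the card; True also refreshes the image source
        cont.owner["dl"].refresh(True)
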
diff --git a/source/gameengine/VideoTexture/DeckLink.h b/source/gameengine/VideoTexture/DeckLink.h
new file mode 100644
index 00000000000..1c96af7b4bc
--- /dev/null
+++ b/source/gameengine/VideoTexture/DeckLink.h
@@ -0,0 +1,86 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+* The Original Code is Copyright (C) 2015, Blender Foundation
+* All rights reserved.
+*
+* The Original Code is: all of this file.
+*
+* Contributor(s): Blender Foundation.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file VideoTexture/DeckLink.h
+ * \ingroup bgevideotex
+ */
+
+#ifndef __DECKLINK_H__
+#define __DECKLINK_H__
+
+#ifdef WITH_GAMEENGINE_DECKLINK
+
+#include "EXP_PyObjectPlus.h"
+#include <structmember.h>
+
+#include "DNA_image_types.h"
+
+#include "DeckLinkAPI.h"
+
+#include "ImageBase.h"
+#include "BlendType.h"
+#include "Exception.h"
+
+
+// type DeckLink declaration
+struct DeckLink
+{
+ PyObject_HEAD
+
+ // last refresh
+ double m_lastClock;
+ // decklink card to which we output
+ IDeckLinkOutput * mDLOutput;
+ IDeckLinkKeyer * mKeyer;
+ IDeckLinkMutableVideoFrame *mLeftFrame;
+ IDeckLinkMutableVideoFrame *mRightFrame;
+ bool mUse3D;
+ bool mUseKeying;
+ bool mUseExtend;
+ bool mKeyingSupported;
+ bool mHDKeyingSupported;
+ uint8_t mKeyingLevel;
+ BMDDisplayMode mDisplayMode;
+ short mSize[2];
+ uint32_t mFrameSize;
+
+ // image source
+ PyImage * m_leftEye;
+ PyImage * m_rightEye;
+};
+
+
+// DeckLink type description
+extern PyTypeObject DeckLinkType;
+
+// helper function
+HRESULT decklink_ReadDisplayMode(const char *format, size_t len, BMDDisplayMode *displayMode);
+HRESULT decklink_ReadPixelFormat(const char *format, size_t len, BMDPixelFormat *displayMode);
+
+#endif /* WITH_GAMEENGINE_DECKLINK */
+
+#endif /* __DECKLINK_H__ */
diff --git a/source/gameengine/VideoTexture/Exception.cpp b/source/gameengine/VideoTexture/Exception.cpp
index 08616e0c41c..9f82987ea62 100644
--- a/source/gameengine/VideoTexture/Exception.cpp
+++ b/source/gameengine/VideoTexture/Exception.cpp
@@ -213,6 +213,7 @@ void registerAllExceptions(void)
ImageSizesNotMatchDesc.registerDesc();
ImageHasExportsDesc.registerDesc();
InvalidColorChannelDesc.registerDesc();
+ InvalidImageModeDesc.registerDesc();
SceneInvalidDesc.registerDesc();
CameraInvalidDesc.registerDesc();
ObserverInvalidDesc.registerDesc();
@@ -223,4 +224,18 @@ void registerAllExceptions(void)
MirrorTooSmallDesc.registerDesc();
SourceVideoEmptyDesc.registerDesc();
SourceVideoCreationDesc.registerDesc();
+ OffScreenInvalidDesc.registerDesc();
+#ifdef WITH_GAMEENGINE_DECKLINK
+ AutoDetectionNotAvailDesc.registerDesc();
+ DeckLinkBadDisplayModeDesc.registerDesc();
+ DeckLinkBadPixelFormatDesc.registerDesc();
+ DeckLinkOpenCardDesc.registerDesc();
+ DeckLinkBadFormatDesc.registerDesc();
+ DeckLinkInternalErrorDesc.registerDesc();
+ SourceVideoOnlyCaptureDesc.registerDesc();
+ VideoDeckLinkBadFormatDesc.registerDesc();
+ VideoDeckLinkOpenCardDesc.registerDesc();
+ VideoDeckLinkDvpInternalErrorDesc.registerDesc();
+ VideoDeckLinkPinMemoryErrorDesc.registerDesc();
+#endif
}
diff --git a/source/gameengine/VideoTexture/Exception.h b/source/gameengine/VideoTexture/Exception.h
index c3c27abe019..c4de85ff34d 100644
--- a/source/gameengine/VideoTexture/Exception.h
+++ b/source/gameengine/VideoTexture/Exception.h
@@ -46,7 +46,7 @@
throw Exception (err, macroHRslt, __FILE__, __LINE__); \
}
-#define THRWEXCP(err,hRslt) throw Exception (err, hRslt, __FILE__, __LINE__);
+#define THRWEXCP(err,hRslt) throw Exception (err, hRslt, __FILE__, __LINE__)
#if defined WIN32
@@ -209,9 +209,11 @@ extern ExpDesc MaterialNotAvailDesc;
extern ExpDesc ImageSizesNotMatchDesc;
extern ExpDesc ImageHasExportsDesc;
extern ExpDesc InvalidColorChannelDesc;
+extern ExpDesc InvalidImageModeDesc;
extern ExpDesc SceneInvalidDesc;
extern ExpDesc CameraInvalidDesc;
extern ExpDesc ObserverInvalidDesc;
+extern ExpDesc OffScreenInvalidDesc;
extern ExpDesc MirrorInvalidDesc;
extern ExpDesc MirrorSizeInvalidDesc;
extern ExpDesc MirrorNormalInvalidDesc;
@@ -219,7 +221,19 @@ extern ExpDesc MirrorHorizontalDesc;
extern ExpDesc MirrorTooSmallDesc;
extern ExpDesc SourceVideoEmptyDesc;
extern ExpDesc SourceVideoCreationDesc;
-
+extern ExpDesc DeckLinkBadDisplayModeDesc;
+extern ExpDesc DeckLinkBadPixelFormatDesc;
+extern ExpDesc AutoDetectionNotAvailDesc;
+extern ExpDesc DeckLinkOpenCardDesc;
+extern ExpDesc DeckLinkBadFormatDesc;
+extern ExpDesc DeckLinkInternalErrorDesc;
+extern ExpDesc SourceVideoOnlyCaptureDesc;
+extern ExpDesc VideoDeckLinkBadFormatDesc;
+extern ExpDesc VideoDeckLinkOpenCardDesc;
+extern ExpDesc VideoDeckLinkDvpInternalErrorDesc;
+extern ExpDesc VideoDeckLinkPinMemoryErrorDesc;
+
+extern ExceptionID InvalidImageMode;
void registerAllExceptions(void);
#endif
diff --git a/source/gameengine/VideoTexture/FilterBase.h b/source/gameengine/VideoTexture/FilterBase.h
index 498917e2375..db688d551d0 100644
--- a/source/gameengine/VideoTexture/FilterBase.h
+++ b/source/gameengine/VideoTexture/FilterBase.h
@@ -44,6 +44,13 @@
#define VT_A(v) ((unsigned char*)&v)[3]
#define VT_RGBA(v,r,g,b,a) VT_R(v)=(unsigned char)r, VT_G(v)=(unsigned char)g, VT_B(v)=(unsigned char)b, VT_A(v)=(unsigned char)a
+#ifdef __BIG_ENDIAN__
+# define VT_SWAPBR(i) ((((i) >> 16) & 0xFF00) + (((i) & 0xFF00) << 16) + ((i) & 0xFF00FF))
+#else
+# define VT_SWAPBR(i) ((((i) & 0xFF) << 16) + (((i) >> 16) & 0xFF) + ((i) & 0xFF00FF00))
+#endif
+
+
// forward declaration
class FilterBase;
diff --git a/source/gameengine/VideoTexture/FilterSource.h b/source/gameengine/VideoTexture/FilterSource.h
index bc80b2b36cc..820576dfff9 100644
--- a/source/gameengine/VideoTexture/FilterSource.h
+++ b/source/gameengine/VideoTexture/FilterSource.h
@@ -81,6 +81,30 @@ protected:
}
};
+/// class for BGRA32 conversion
+class FilterBGRA32 : public FilterBase
+{
+public:
+ /// constructor
+ FilterBGRA32 (void) {}
+ /// destructor
+ virtual ~FilterBGRA32 (void) {}
+
+ /// get source pixel size
+ virtual unsigned int getPixelSize (void) { return 4; }
+
+protected:
+ /// filter pixel, source byte buffer
+ virtual unsigned int filter(
+ unsigned char *src, short x, short y,
+ short * size, unsigned int pixSize, unsigned int val)
+ {
+ VT_RGBA(val,src[2],src[1],src[0],src[3]);
+ return val;
+ }
+};
+
+
/// class for BGR24 conversion
class FilterBGR24 : public FilterBase
{
diff --git a/source/gameengine/VideoTexture/ImageBase.cpp b/source/gameengine/VideoTexture/ImageBase.cpp
index 8be152c7b8e..0db1fa293da 100644
--- a/source/gameengine/VideoTexture/ImageBase.cpp
+++ b/source/gameengine/VideoTexture/ImageBase.cpp
@@ -32,7 +32,6 @@
extern "C" {
#include "bgl.h"
}
-#include "glew-mx.h"
#include <vector>
#include <string.h>
@@ -50,6 +49,14 @@ extern "C" {
// ImageBase class implementation
+ExceptionID ImageHasExports;
+ExceptionID InvalidColorChannel;
+ExceptionID InvalidImageMode;
+
+ExpDesc ImageHasExportsDesc(ImageHasExports, "Image has exported buffers, cannot resize");
+ExpDesc InvalidColorChannelDesc(InvalidColorChannel, "Invalid or too many color channels specified. At most 4 values within R, G, B, A, 0, 1");
+ExpDesc InvalidImageModeDesc(InvalidImageMode, "Invalid image mode, only RGBA and BGRA are supported");
+
// constructor
ImageBase::ImageBase (bool staticSrc) : m_image(NULL), m_imgSize(0),
m_avail(false), m_scale(false), m_scaleChange(false), m_flip(false),
@@ -111,6 +118,28 @@ unsigned int * ImageBase::getImage (unsigned int texId, double ts)
return m_avail ? m_image : NULL;
}
+bool ImageBase::loadImage(unsigned int *buffer, unsigned int size, unsigned int format, double ts)
+{
+ unsigned int *d, *s, v, len;
+ if (getImage(0, ts) != NULL && size >= getBuffSize()) {
+ switch (format) {
+ case GL_RGBA:
+ memcpy(buffer, m_image, getBuffSize());
+ break;
+ case GL_BGRA:
+ len = (unsigned int)m_size[0] * m_size[1];
+ for (s=m_image, d=buffer; len; len--) {
+ v = *s++;
+ *d++ = VT_SWAPBR(v);
+ }
+ break;
+ default:
+ THRWEXCP(InvalidImageMode,S_OK);
+ }
+ return true;
+ }
+ return false;
+}
// refresh image source
void ImageBase::refresh (void)
@@ -179,11 +208,18 @@ void ImageBase::setFilter (PyFilter * filt)
m_pyfilter = filt;
}
-ExceptionID ImageHasExports;
-ExceptionID InvalidColorChannel;
+void ImageBase::swapImageBR()
+{
+ unsigned int size, v, *s;
-ExpDesc ImageHasExportsDesc(ImageHasExports, "Image has exported buffers, cannot resize");
-ExpDesc InvalidColorChannelDesc(InvalidColorChannel, "Invalid or too many color channels specified. At most 4 values within R, G, B, A, 0, 1");
+ if (m_avail) {
+ size = 1 * m_size[0] * m_size[1];
+ for (s=m_image; size; size--) {
+ v = *s;
+ *s++ = VT_SWAPBR(v);
+ }
+ }
+}
// initialize image data
void ImageBase::init (short width, short height)
@@ -500,10 +536,57 @@ PyObject *Image_getSize (PyImage *self, void *closure)
}
// refresh image
-PyObject *Image_refresh (PyImage *self)
-{
+PyObject *Image_refresh (PyImage *self, PyObject *args)
+{
+ Py_buffer buffer;
+ bool done = true;
+ char *mode = NULL;
+ double ts = -1.0;
+ unsigned int format;
+
+ memset(&buffer, 0, sizeof(buffer));
+ if (PyArg_ParseTuple(args, "|s*sd:refresh", &buffer, &mode, &ts)) {
+ if (buffer.buf) {
+ // a target buffer is provided, verify its format
+ if (buffer.readonly) {
+ PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be writable");
+ }
+ else if (!PyBuffer_IsContiguous(&buffer, 'C')) {
+ PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be contiguous in memory");
+ }
+ else if (((intptr_t)buffer.buf & 3) != 0) {
+ PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be aligned to 4 bytes boundary");
+ }
+ else {
+ // ready to get the image into our buffer
+ try {
+ if (mode == NULL || !strcmp(mode, "RGBA"))
+ format = GL_RGBA;
+ else if (!strcmp(mode, "BGRA"))
+ format = GL_BGRA;
+ else
+ THRWEXCP(InvalidImageMode,S_OK);
+
+ done = self->m_image->loadImage((unsigned int *)buffer.buf, buffer.len, format, ts);
+ }
+ catch (Exception & exp) {
+ exp.report();
+ }
+ }
+ PyBuffer_Release(&buffer);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
+ }
+ }
+ else {
+ return NULL;
+ }
+
self->m_image->refresh();
- Py_RETURN_NONE;
+ if (done)
+ Py_RETURN_TRUE;
+ Py_RETURN_FALSE;
}
// get scale
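
Image_refresh() now optionally copies the current image into a caller-supplied buffer before invalidating it. A minimal sketch of the extended Python call, assuming an ImageViewport source; the buffer must be writable, contiguous, 4-byte aligned and at least width*height*4 bytes (a bytearray of that size satisfies this):

    from bge import texture

    img = texture.ImageViewport()
    w, h = img.size
    buf = bytearray(w * h * 4)        # writable, contiguous, 4-byte aligned
    done = img.refresh(buf, "BGRA")   # fill buf in BGRA order, then invalidate the image
    if not done:
        print("source image was not available this frame")
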
diff --git a/source/gameengine/VideoTexture/ImageBase.h b/source/gameengine/VideoTexture/ImageBase.h
index f646d145365..4c9fc5a58fb 100644
--- a/source/gameengine/VideoTexture/ImageBase.h
+++ b/source/gameengine/VideoTexture/ImageBase.h
@@ -40,6 +40,7 @@
#include "FilterBase.h"
+#include "glew-mx.h"
// forward declarations
struct PyImage;
@@ -104,6 +105,13 @@ public:
/// calculate size(nearest power of 2)
static short calcSize(short size);
+ /// calculate image from sources and send it to a target buffer instead of a texture
+ /// format is GL_RGBA or GL_BGRA
+ virtual bool loadImage(unsigned int *buffer, unsigned int size, unsigned int format, double ts);
+
+ /// swap the B and R channel in-place in the image buffer
+ void swapImageBR();
+
/// number of buffer pointing to m_image, public because not handled by this class
int m_exports;
@@ -348,7 +356,7 @@ PyObject *Image_getImage(PyImage *self, char *mode);
// get image size
PyObject *Image_getSize(PyImage *self, void *closure);
// refresh image - invalidate current content
-PyObject *Image_refresh(PyImage *self);
+PyObject *Image_refresh(PyImage *self, PyObject *args);
// get scale
PyObject *Image_getScale(PyImage *self, void *closure);
diff --git a/source/gameengine/VideoTexture/ImageMix.cpp b/source/gameengine/VideoTexture/ImageMix.cpp
index 973be52e0fc..2de00f5ba05 100644
--- a/source/gameengine/VideoTexture/ImageMix.cpp
+++ b/source/gameengine/VideoTexture/ImageMix.cpp
@@ -156,7 +156,7 @@ static PyMethodDef imageMixMethods[] = {
{"getWeight", (PyCFunction)getWeight, METH_VARARGS, "get image source weight"},
{"setWeight", (PyCFunction)setWeight, METH_VARARGS, "set image source weight"},
// methods from ImageBase class
- {"refresh", (PyCFunction)Image_refresh, METH_NOARGS, "Refresh image - invalidate its current content"},
+ {"refresh", (PyCFunction)Image_refresh, METH_VARARGS, "Refresh image - invalidate its current content"},
{NULL}
};
// attributes structure
diff --git a/source/gameengine/VideoTexture/ImageRender.cpp b/source/gameengine/VideoTexture/ImageRender.cpp
index a374fbba2df..9991bf42a9f 100644
--- a/source/gameengine/VideoTexture/ImageRender.cpp
+++ b/source/gameengine/VideoTexture/ImageRender.cpp
@@ -43,6 +43,8 @@
#include "RAS_CameraData.h"
#include "RAS_MeshObject.h"
#include "RAS_Polygon.h"
+#include "RAS_IOffScreen.h"
+#include "RAS_ISync.h"
#include "BLI_math.h"
#include "ImageRender.h"
@@ -51,11 +53,12 @@
#include "Exception.h"
#include "Texture.h"
-ExceptionID SceneInvalid, CameraInvalid, ObserverInvalid;
+ExceptionID SceneInvalid, CameraInvalid, ObserverInvalid, OffScreenInvalid;
ExceptionID MirrorInvalid, MirrorSizeInvalid, MirrorNormalInvalid, MirrorHorizontal, MirrorTooSmall;
ExpDesc SceneInvalidDesc(SceneInvalid, "Scene object is invalid");
ExpDesc CameraInvalidDesc(CameraInvalid, "Camera object is invalid");
ExpDesc ObserverInvalidDesc(ObserverInvalid, "Observer object is invalid");
+ExpDesc OffScreenInvalidDesc(OffScreenInvalid, "Offscreen object is invalid");
ExpDesc MirrorInvalidDesc(MirrorInvalid, "Mirror object is invalid");
ExpDesc MirrorSizeInvalidDesc(MirrorSizeInvalid, "Mirror has no vertex or no size");
ExpDesc MirrorNormalInvalidDesc(MirrorNormalInvalid, "Cannot determine mirror plane");
@@ -63,12 +66,15 @@ ExpDesc MirrorHorizontalDesc(MirrorHorizontal, "Mirror is horizontal in local sp
ExpDesc MirrorTooSmallDesc(MirrorTooSmall, "Mirror is too small");
// constructor
-ImageRender::ImageRender (KX_Scene *scene, KX_Camera * camera) :
- ImageViewport(),
+ImageRender::ImageRender (KX_Scene *scene, KX_Camera * camera, PyRASOffScreen * offscreen) :
+ ImageViewport(offscreen),
m_render(true),
+ m_done(false),
m_scene(scene),
m_camera(camera),
m_owncamera(false),
+ m_offscreen(offscreen),
+ m_sync(NULL),
m_observer(NULL),
m_mirror(NULL),
m_clip(100.f),
@@ -81,6 +87,10 @@ ImageRender::ImageRender (KX_Scene *scene, KX_Camera * camera) :
m_engine = KX_GetActiveEngine();
m_rasterizer = m_engine->GetRasterizer();
m_canvas = m_engine->GetCanvas();
+ // keep a reference to the offscreen buffer
+ if (m_offscreen) {
+ Py_INCREF(m_offscreen);
+ }
}
// destructor
@@ -88,6 +98,9 @@ ImageRender::~ImageRender (void)
{
if (m_owncamera)
m_camera->Release();
+ if (m_sync)
+ delete m_sync;
+ Py_XDECREF(m_offscreen);
}
// get background color
@@ -121,30 +134,41 @@ void ImageRender::setBackgroundFromScene (KX_Scene *scene)
// capture image from viewport
-void ImageRender::calcImage (unsigned int texId, double ts)
+void ImageRender::calcViewport (unsigned int texId, double ts, unsigned int format)
{
- if (m_rasterizer->GetDrawingMode() != RAS_IRasterizer::KX_TEXTURED || // no need for texture
- m_camera->GetViewport() || // camera must be inactive
- m_camera == m_scene->GetActiveCamera())
- {
- // no need to compute texture in non texture rendering
- m_avail = false;
- return;
- }
// render the scene from the camera
- Render();
- // get image from viewport
- ImageViewport::calcImage(texId, ts);
- // restore OpenGL state
- m_canvas->EndFrame();
+ if (!m_done) {
+ if (!Render()) {
+ return;
+ }
+ }
+ else if (m_offscreen) {
+ m_offscreen->ofs->Bind(RAS_IOffScreen::RAS_OFS_BIND_READ);
+ }
+ // wait until all render operations are completed
+ WaitSync();
+ // get image from viewport (or FBO)
+ ImageViewport::calcViewport(texId, ts, format);
+ if (m_offscreen) {
+ m_offscreen->ofs->Unbind();
+ }
}
-void ImageRender::Render()
+bool ImageRender::Render()
{
RAS_FrameFrustum frustum;
- if (!m_render)
- return;
+ if (!m_render ||
+ m_rasterizer->GetDrawingMode() != RAS_IRasterizer::KX_TEXTURED || // no need for texture
+ m_camera->GetViewport() || // camera must be inactive
+ m_camera == m_scene->GetActiveCamera())
+ {
+ // no need to compute texture in non texture rendering
+ return false;
+ }
+
+ if (!m_scene->IsShadowDone())
+ m_engine->RenderShadowBuffers(m_scene);
if (m_mirror)
{
@@ -164,7 +188,7 @@ void ImageRender::Render()
MT_Scalar observerDistance = mirrorPlaneDTerm - observerWorldPos.dot(mirrorWorldZ);
// if distance < 0.01 => observer is on wrong side of mirror, don't render
if (observerDistance < 0.01)
- return;
+ return false;
// set camera world position = observerPos + normal * 2 * distance
MT_Point3 cameraWorldPos = observerWorldPos + (MT_Scalar(2.0)*observerDistance)*mirrorWorldZ;
m_camera->GetSGNode()->SetLocalPosition(cameraWorldPos);
@@ -215,7 +239,15 @@ void ImageRender::Render()
RAS_Rect area = m_canvas->GetWindowArea();
// The screen area that ImageViewport will copy is also the rendering zone
- m_canvas->SetViewPort(m_position[0], m_position[1], m_position[0]+m_capSize[0]-1, m_position[1]+m_capSize[1]-1);
+ if (m_offscreen) {
+ // bind the fbo and set the viewport to full size
+ m_offscreen->ofs->Bind(RAS_IOffScreen::RAS_OFS_BIND_RENDER);
+ // this is needed to stop crashing in canvas check
+ m_canvas->UpdateViewPort(0, 0, m_offscreen->ofs->GetWidth(), m_offscreen->ofs->GetHeight());
+ }
+ else {
+ m_canvas->SetViewPort(m_position[0], m_position[1], m_position[0]+m_capSize[0]-1, m_position[1]+m_capSize[1]-1);
+ }
m_canvas->ClearColor(m_background[0], m_background[1], m_background[2], m_background[3]);
m_canvas->ClearBuffer(RAS_ICanvas::COLOR_BUFFER|RAS_ICanvas::DEPTH_BUFFER);
m_rasterizer->BeginFrame(m_engine->GetClockTime());
@@ -292,17 +324,18 @@ void ImageRender::Render()
MT_Transform camtrans(m_camera->GetWorldToCamera());
MT_Matrix4x4 viewmat(camtrans);
- m_rasterizer->SetViewMatrix(viewmat, m_camera->NodeGetWorldOrientation(), m_camera->NodeGetWorldPosition(), m_camera->GetCameraData()->m_perspective);
+ m_rasterizer->SetViewMatrix(viewmat, m_camera->NodeGetWorldOrientation(), m_camera->NodeGetWorldPosition(), m_camera->NodeGetLocalScaling(), m_camera->GetCameraData()->m_perspective);
m_camera->SetModelviewMatrix(viewmat);
// restore the stereo mode now that the matrix is computed
m_rasterizer->SetStereoMode(stereomode);
- if (stereomode == RAS_IRasterizer::RAS_STEREO_QUADBUFFERED) {
- // In QUAD buffer stereo mode, the GE render pass ends with the right eye on the right buffer
- // but we need to draw on the left buffer to capture the render
- // TODO: implement an explicit function in rasterizer to restore the left buffer.
- m_rasterizer->SetEye(RAS_IRasterizer::RAS_STEREO_LEFTEYE);
- }
+ if (m_rasterizer->Stereo()) {
+ // stereo mode change render settings that disturb this render, cancel them all
+ // stereo mode changes render settings that disturb this render, cancel them all
+ glDrawBuffer(GL_BACK_LEFT);
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glDisable(GL_POLYGON_STIPPLE);
+ }
m_scene->CalculateVisibleMeshes(m_rasterizer,m_camera);
@@ -314,8 +347,48 @@ void ImageRender::Render()
// restore the canvas area now that the render is completed
m_canvas->GetWindowArea() = area;
+ m_canvas->EndFrame();
+
+ // In case multisample is active, blit the FBO
+ if (m_offscreen)
+ m_offscreen->ofs->Blit();
+ // end of all render operations, let's create a sync object just in case
+ if (m_sync) {
+ // a sync from a previous render, should not happen
+ delete m_sync;
+ m_sync = NULL;
+ }
+ m_sync = m_rasterizer->CreateSync(RAS_ISync::RAS_SYNC_TYPE_FENCE);
+ // remember that we have done render
+ m_done = true;
+ // the image is not available at this stage
+ m_avail = false;
+ return true;
+}
+
+void ImageRender::Unbind()
+{
+ if (m_offscreen)
+ {
+ m_offscreen->ofs->Unbind();
+ }
}
+void ImageRender::WaitSync()
+{
+ if (m_sync) {
+ m_sync->Wait();
+ // done with it, delete it
+ delete m_sync;
+ m_sync = NULL;
+ }
+ if (m_offscreen) {
+ // this is needed to finalize the image if the target is a texture
+ m_offscreen->ofs->MipMap();
+ }
+ // all render operations done and complete, invalidate render for next time
+ m_done = false;
+}
// cast Image pointer to ImageRender
inline ImageRender * getImageRender (PyImage *self)
@@ -337,11 +410,13 @@ static int ImageRender_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
PyObject *scene;
// camera object
PyObject *camera;
+ // offscreen buffer object
+ PyRASOffScreen *offscreen = NULL;
// parameter keywords
- static const char *kwlist[] = {"sceneObj", "cameraObj", NULL};
+ static const char *kwlist[] = {"sceneObj", "cameraObj", "ofsObj", NULL};
// get parameters
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO",
- const_cast<char**>(kwlist), &scene, &camera))
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O",
+ const_cast<char**>(kwlist), &scene, &camera, &offscreen))
return -1;
try
{
@@ -357,11 +432,16 @@ static int ImageRender_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
// throw exception if camera is not available
if (cameraPtr == NULL) THRWEXCP(CameraInvalid, S_OK);
+ if (offscreen) {
+ if (Py_TYPE(offscreen) != &PyRASOffScreen_Type) {
+ THRWEXCP(OffScreenInvalid, S_OK);
+ }
+ }
// get pointer to image structure
PyImage *self = reinterpret_cast<PyImage*>(pySelf);
// create source object
if (self->m_image != NULL) delete self->m_image;
- self->m_image = new ImageRender(scenePtr, cameraPtr);
+ self->m_image = new ImageRender(scenePtr, cameraPtr, offscreen);
}
catch (Exception & exp)
{
@@ -372,6 +452,55 @@ static int ImageRender_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
return 0;
}
+static PyObject *ImageRender_refresh(PyImage *self, PyObject *args)
+{
+ ImageRender *imageRender = getImageRender(self);
+
+ if (!imageRender) {
+ PyErr_SetString(PyExc_TypeError, "Incomplete ImageRender() object");
+ return NULL;
+ }
+ if (PyArg_ParseTuple(args, "")) {
+ // refresh called with no argument.
+ // For other image objects it simply invalidates the image buffer
+ // For ImageRender it triggers a render+sync
+ // Note that this only makes sense when doing offscreen render on texture
+ if (!imageRender->isDone()) {
+ if (!imageRender->Render()) {
+ Py_RETURN_FALSE;
+ }
+ // as we are not trying to read the pixels, just unbind
+ imageRender->Unbind();
+ }
+ // wait until all render operations are completed
+ // this will also finalize the texture
+ imageRender->WaitSync();
+ Py_RETURN_TRUE;
+ }
+ else {
+ // fallback on standard processing
+ PyErr_Clear();
+ return Image_refresh(self, args);
+ }
+}
+
+// refresh image
+static PyObject *ImageRender_render(PyImage *self)
+{
+ ImageRender *imageRender = getImageRender(self);
+
+ if (!imageRender) {
+ PyErr_SetString(PyExc_TypeError, "Incomplete ImageRender() object");
+ return NULL;
+ }
+ if (!imageRender->Render()) {
+ Py_RETURN_FALSE;
+ }
+ // we are not reading the pixels now, unbind
+ imageRender->Unbind();
+ Py_RETURN_TRUE;
+}
+
// get background color
static PyObject *getBackground (PyImage *self, void *closure)
@@ -410,7 +539,8 @@ static int setBackground(PyImage *self, PyObject *value, void *closure)
// methods structure
static PyMethodDef imageRenderMethods[] =
{ // methods from ImageBase class
- {"refresh", (PyCFunction)Image_refresh, METH_NOARGS, "Refresh image - invalidate its current content"},
+ {"refresh", (PyCFunction)ImageRender_refresh, METH_VARARGS, "Refresh image - invalidate its current content after optionally transferring its content to a target buffer"},
+ {"render", (PyCFunction)ImageRender_render, METH_NOARGS, "Render scene - run before refresh() to perform an asynchronous render"},
{NULL}
};
// attributes structure
@@ -601,7 +731,9 @@ static PyGetSetDef imageMirrorGetSets[] =
ImageRender::ImageRender (KX_Scene *scene, KX_GameObject *observer, KX_GameObject *mirror, RAS_IPolyMaterial *mat) :
ImageViewport(),
m_render(false),
+ m_done(false),
m_scene(scene),
+ m_offscreen(NULL),
m_observer(observer),
m_mirror(mirror),
m_clip(100.f)
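
With these changes ImageRender can target an offscreen buffer and decouple the render from the read-back. A rough sketch of the asynchronous flow, assuming the PyRASOffScreen object is created with bge.render.offScreenCreate() (a helper added outside this directory) and "RenderCam" is a placeholder camera name:

    from bge import logic, render, texture

    scene = logic.getCurrentScene()
    cam = scene.objects["RenderCam"]          # must not be the active camera
    ofs = render.offScreenCreate(1920, 1080)  # assumed helper, see note above
    ir = texture.ImageRender(scene, cam, ofs)

    def frame(cont):
        if ir.render():    # start the render; returns False if it was skipped
            # ... other per-frame work can run while the GPU finishes ...
            ir.refresh()   # wait on the fence and finalize the offscreen texture
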
diff --git a/source/gameengine/VideoTexture/ImageRender.h b/source/gameengine/VideoTexture/ImageRender.h
index ef55e4dea84..d062db44348 100644
--- a/source/gameengine/VideoTexture/ImageRender.h
+++ b/source/gameengine/VideoTexture/ImageRender.h
@@ -39,6 +39,8 @@
#include "DNA_screen_types.h"
#include "RAS_ICanvas.h"
#include "RAS_IRasterizer.h"
+#include "RAS_IOffScreen.h"
+#include "RAS_ISync.h"
#include "ImageViewport.h"
@@ -48,7 +50,7 @@ class ImageRender : public ImageViewport
{
public:
/// constructor
- ImageRender(KX_Scene *scene, KX_Camera *camera);
+ ImageRender(KX_Scene *scene, KX_Camera *camera, PyRASOffScreen *offscreen);
ImageRender(KX_Scene *scene, KX_GameObject *observer, KX_GameObject *mirror, RAS_IPolyMaterial * mat);
/// destructor
@@ -63,16 +65,30 @@ public:
float getClip (void) { return m_clip; }
/// set whole buffer use
void setClip (float clip) { m_clip = clip; }
+ /// render status
+ bool isDone() { return m_done; }
+ /// render frame (public so that it is accessible from python)
+ bool Render();
+ /// in case fbo is used, method to unbind
+ void Unbind();
+ /// wait for render to complete
+ void WaitSync();
protected:
/// true if ready to render
bool m_render;
+ /// is render done already?
+ bool m_done;
/// rendered scene
KX_Scene * m_scene;
/// camera for render
KX_Camera * m_camera;
/// do we own the camera?
bool m_owncamera;
+ /// if offscreen render
+ /// offscreen render target, if any
+ /// object to synchronize render even if no buffer transfer
+ RAS_ISync *m_sync;
/// for mirror operation
KX_GameObject * m_observer;
KX_GameObject * m_mirror;
@@ -91,15 +107,15 @@ protected:
KX_KetsjiEngine* m_engine;
/// background color
- float m_background[4];
+ float m_background[4];
/// render 3d scene to image
- virtual void calcImage (unsigned int texId, double ts);
+ virtual void calcImage (unsigned int texId, double ts) { calcViewport(texId, ts, GL_RGBA); }
+
+ /// render 3d scene to image
+ virtual void calcViewport (unsigned int texId, double ts, unsigned int format);
- void Render();
- void SetupRenderFrame(KX_Scene *scene, KX_Camera* cam);
- void RenderFrame(KX_Scene* scene, KX_Camera* cam);
void setBackgroundFromScene(KX_Scene *scene);
void SetWorldSettings(KX_WorldInfo* wi);
};
diff --git a/source/gameengine/VideoTexture/ImageViewport.cpp b/source/gameengine/VideoTexture/ImageViewport.cpp
index 820a019832e..8852c190053 100644
--- a/source/gameengine/VideoTexture/ImageViewport.cpp
+++ b/source/gameengine/VideoTexture/ImageViewport.cpp
@@ -45,14 +45,22 @@
// constructor
-ImageViewport::ImageViewport (void) : m_alpha(false), m_texInit(false)
+ImageViewport::ImageViewport (PyRASOffScreen *offscreen) : m_alpha(false), m_texInit(false)
{
// get viewport rectangle
- RAS_Rect rect = KX_GetActiveEngine()->GetCanvas()->GetWindowArea();
- m_viewport[0] = rect.GetLeft();
- m_viewport[1] = rect.GetBottom();
- m_viewport[2] = rect.GetWidth();
- m_viewport[3] = rect.GetHeight();
+ if (offscreen) {
+ m_viewport[0] = 0;
+ m_viewport[1] = 0;
+ m_viewport[2] = offscreen->ofs->GetWidth();
+ m_viewport[3] = offscreen->ofs->GetHeight();
+ }
+ else {
+ RAS_Rect rect = KX_GetActiveEngine()->GetCanvas()->GetWindowArea();
+ m_viewport[0] = rect.GetLeft();
+ m_viewport[1] = rect.GetBottom();
+ m_viewport[2] = rect.GetWidth();
+ m_viewport[3] = rect.GetHeight();
+ }
//glGetIntegerv(GL_VIEWPORT, m_viewport);
// create buffer for viewport image
@@ -60,7 +68,7 @@ ImageViewport::ImageViewport (void) : m_alpha(false), m_texInit(false)
// float (1 float = 4 bytes per pixel)
m_viewportImage = new BYTE [4 * getViewportSize()[0] * getViewportSize()[1]];
// set attributes
- setWhole(false);
+ setWhole((offscreen) ? true : false);
}
// destructor
@@ -126,25 +134,26 @@ void ImageViewport::setPosition (GLint pos[2])
// capture image from viewport
-void ImageViewport::calcImage (unsigned int texId, double ts)
+void ImageViewport::calcViewport (unsigned int texId, double ts, unsigned int format)
{
// if scale was changed
if (m_scaleChange)
// reset image
init(m_capSize[0], m_capSize[1]);
// if texture wasn't initialized
- if (!m_texInit) {
+ if (!m_texInit && texId != 0) {
// initialize it
loadTexture(texId, m_image, m_size);
m_texInit = true;
}
// if texture can be directly created
- if (texId != 0 && m_pyfilter == NULL && m_capSize[0] == calcSize(m_capSize[0])
- && m_capSize[1] == calcSize(m_capSize[1]) && !m_flip && !m_zbuff && !m_depth)
+ if (texId != 0 && m_pyfilter == NULL && m_size[0] == m_capSize[0] &&
+ m_size[1] == m_capSize[1] && !m_flip && !m_zbuff && !m_depth)
{
// just copy current viewport to texture
glBindTexture(GL_TEXTURE_2D, texId);
glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_upLeft[0], m_upLeft[1], (GLsizei)m_capSize[0], (GLsizei)m_capSize[1]);
+ glBindTexture(GL_TEXTURE_2D, 0);
// image is not available
m_avail = false;
}
@@ -176,11 +185,33 @@ void ImageViewport::calcImage (unsigned int texId, double ts)
// get frame buffer data
if (m_alpha) {
- glReadPixels(m_upLeft[0], m_upLeft[1], (GLsizei)m_capSize[0], (GLsizei)m_capSize[1], GL_RGBA,
- GL_UNSIGNED_BYTE, m_viewportImage);
- // filter loaded data
- FilterRGBA32 filt;
- filterImage(filt, m_viewportImage, m_capSize);
+			// as we are reading the pixels in the native format, we can read directly into the image buffer
+			// provided that no processing is needed on the image
+ if (m_size[0] == m_capSize[0] &&
+ m_size[1] == m_capSize[1] &&
+ !m_flip &&
+ !m_pyfilter)
+ {
+ glReadPixels(m_upLeft[0], m_upLeft[1], (GLsizei)m_capSize[0], (GLsizei)m_capSize[1], format,
+ GL_UNSIGNED_BYTE, m_image);
+ m_avail = true;
+ }
+ else if (!m_pyfilter) {
+ glReadPixels(m_upLeft[0], m_upLeft[1], (GLsizei)m_capSize[0], (GLsizei)m_capSize[1], format,
+ GL_UNSIGNED_BYTE, m_viewportImage);
+ FilterRGBA32 filt;
+ filterImage(filt, m_viewportImage, m_capSize);
+ }
+ else {
+ glReadPixels(m_upLeft[0], m_upLeft[1], (GLsizei)m_capSize[0], (GLsizei)m_capSize[1], GL_RGBA,
+ GL_UNSIGNED_BYTE, m_viewportImage);
+ FilterRGBA32 filt;
+ filterImage(filt, m_viewportImage, m_capSize);
+ if (format == GL_BGRA) {
+ // in place byte swapping
+ swapImageBR();
+ }
+ }
}
else {
glReadPixels(m_upLeft[0], m_upLeft[1], (GLsizei)m_capSize[0], (GLsizei)m_capSize[1], GL_RGB,
@@ -188,12 +219,46 @@ void ImageViewport::calcImage (unsigned int texId, double ts)
// filter loaded data
FilterRGB24 filt;
filterImage(filt, m_viewportImage, m_capSize);
+ if (format == GL_BGRA) {
+ // in place byte swapping
+ swapImageBR();
+ }
}
}
}
}
}
+bool ImageViewport::loadImage(unsigned int *buffer, unsigned int size, unsigned int format, double ts)
+{
+ unsigned int *tmp_image;
+ bool ret;
+
+ // if scale was changed
+ if (m_scaleChange) {
+ // reset image
+ init(m_capSize[0], m_capSize[1]);
+ }
+
+	// the target buffer must be at least as large as the image
+ if (size < getBuffSize())
+ return false;
+
+ if (m_avail) {
+ // just copy
+ return ImageBase::loadImage(buffer, size, format, ts);
+ }
+ else {
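+		// temporarily point m_image at the caller's buffer so that calcViewport()
+		// writes the viewport pixels straight into it, then restore the original pointer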
+ tmp_image = m_image;
+ m_image = buffer;
+ calcViewport(0, ts, format);
+ ret = m_avail;
+ m_image = tmp_image;
+ // since the image was not loaded to our buffer, it's not valid
+ m_avail = false;
+ }
+ return ret;
+}
// cast Image pointer to ImageViewport
@@ -336,7 +401,7 @@ int ImageViewport_setCaptureSize(PyImage *self, PyObject *value, void *closure)
// methods structure
static PyMethodDef imageViewportMethods[] =
{ // methods from ImageBase class
- {"refresh", (PyCFunction)Image_refresh, METH_NOARGS, "Refresh image - invalidate its current content"},
+ {"refresh", (PyCFunction)Image_refresh, METH_VARARGS, "Refresh image - invalidate its current content"},
{NULL}
};
// attributes structure
diff --git a/source/gameengine/VideoTexture/ImageViewport.h b/source/gameengine/VideoTexture/ImageViewport.h
index 10d894a9fb8..8a7e9cfd2ba 100644
--- a/source/gameengine/VideoTexture/ImageViewport.h
+++ b/source/gameengine/VideoTexture/ImageViewport.h
@@ -35,6 +35,7 @@
#include "Common.h"
#include "ImageBase.h"
+#include "RAS_IOffScreen.h"
/// class for viewport access
@@ -42,7 +43,7 @@ class ImageViewport : public ImageBase
{
public:
/// constructor
- ImageViewport (void);
+ ImageViewport (PyRASOffScreen *offscreen=NULL);
/// destructor
virtual ~ImageViewport (void);
@@ -67,6 +68,9 @@ public:
/// set position in viewport
void setPosition (GLint pos[2] = NULL);
+ /// capture image from viewport to user buffer
+ virtual bool loadImage(unsigned int *buffer, unsigned int size, unsigned int format, double ts);
+
protected:
/// frame buffer rectangle
GLint m_viewport[4];
@@ -89,7 +93,10 @@ protected:
bool m_texInit;
/// capture image from viewport
- virtual void calcImage (unsigned int texId, double ts);
+ virtual void calcImage (unsigned int texId, double ts) { calcViewport(texId, ts, GL_RGBA); }
+
+ /// capture image from viewport
+ virtual void calcViewport (unsigned int texId, double ts, unsigned int format);
/// get viewport size
GLint * getViewportSize (void) { return m_viewport + 2; }
diff --git a/source/gameengine/VideoTexture/Texture.cpp b/source/gameengine/VideoTexture/Texture.cpp
index f1c7bc303ee..bb995747360 100644
--- a/source/gameengine/VideoTexture/Texture.cpp
+++ b/source/gameengine/VideoTexture/Texture.cpp
@@ -393,9 +393,10 @@ static PyObject *Texture_refresh(Texture *self, PyObject *args)
}
// load texture for rendering
loadTexture(self->m_actTex, texture, size, self->m_mipmap);
-
- // refresh texture source, if required
- if (refreshSource) self->m_source->m_image->refresh();
+ }
+ // refresh texture source, if required
+ if (refreshSource) {
+ self->m_source->m_image->refresh();
}
}
}
diff --git a/source/gameengine/VideoTexture/VideoBase.cpp b/source/gameengine/VideoTexture/VideoBase.cpp
index 9c8df0ca8c4..d373055b5df 100644
--- a/source/gameengine/VideoTexture/VideoBase.cpp
+++ b/source/gameengine/VideoTexture/VideoBase.cpp
@@ -137,8 +137,53 @@ PyObject *Video_getStatus(PyImage *self, void *closure)
}
// refresh video
-PyObject *Video_refresh(PyImage *self)
+PyObject *Video_refresh(PyImage *self, PyObject *args)
{
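+	// Hypothetical Python-side usage of the new optional arguments (illustrative only, names are placeholders):
+	//   buf = bytearray(source.size[0] * source.size[1] * 4)
+	//   source.refresh(buf, "BGRA")   # copy the current image into buf, swapped to BGRA byte order
+	//   source.refresh()              # legacy call: only invalidates/advances the source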
+ Py_buffer buffer;
+ char *mode = NULL;
+ unsigned int format;
+ double ts = -1.0;
+
+ memset(&buffer, 0, sizeof(buffer));
+ if (PyArg_ParseTuple(args, "|s*sd:refresh", &buffer, &mode, &ts)) {
+ if (buffer.buf) {
+ // a target buffer is provided, verify its format
+ if (buffer.readonly) {
+ PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be writable");
+ }
+ else if (!PyBuffer_IsContiguous(&buffer, 'C')) {
+ PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be contiguous in memory");
+ }
+ else if (((intptr_t)buffer.buf & 3) != 0) {
+ PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be aligned to 4 bytes boundary");
+ }
+ else {
+ // ready to get the image into our buffer
+ try {
+ if (mode == NULL || !strcmp(mode, "RGBA"))
+ format = GL_RGBA;
+ else if (!strcmp(mode, "BGRA"))
+ format = GL_BGRA;
+ else
+ THRWEXCP(InvalidImageMode,S_OK);
+
+ if (!self->m_image->loadImage((unsigned int *)buffer.buf, buffer.len, format, ts)) {
+ PyErr_SetString(PyExc_TypeError, "Could not load the buffer, perhaps size is not compatible");
+ }
+ }
+ catch (Exception & exp) {
+ exp.report();
+ }
+ }
+ PyBuffer_Release(&buffer);
+ if (PyErr_Occurred())
+ return NULL;
+ }
+ }
+ else
+ {
+ return NULL;
+ }
getVideo(self)->refresh();
return Video_getStatus(self, NULL);
}
diff --git a/source/gameengine/VideoTexture/VideoBase.h b/source/gameengine/VideoTexture/VideoBase.h
index 6f35c474300..77f46fdccd8 100644
--- a/source/gameengine/VideoTexture/VideoBase.h
+++ b/source/gameengine/VideoTexture/VideoBase.h
@@ -190,7 +190,7 @@ void Video_open(VideoBase *self, char *file, short captureID);
PyObject *Video_play(PyImage *self);
PyObject *Video_pause(PyImage *self);
PyObject *Video_stop(PyImage *self);
-PyObject *Video_refresh(PyImage *self);
+PyObject *Video_refresh(PyImage *self, PyObject *args);
PyObject *Video_getStatus(PyImage *self, void *closure);
PyObject *Video_getRange(PyImage *self, void *closure);
int Video_setRange(PyImage *self, PyObject *value, void *closure);
diff --git a/source/gameengine/VideoTexture/VideoDeckLink.cpp b/source/gameengine/VideoTexture/VideoDeckLink.cpp
new file mode 100644
index 00000000000..c8d3c28c551
--- /dev/null
+++ b/source/gameengine/VideoTexture/VideoDeckLink.cpp
@@ -0,0 +1,1228 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2015, Blender Foundation
+ * All rights reserved.
+ *
+ * The Original Code is: all of this file.
+ *
+ * Contributor(s): Blender Foundation.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file gameengine/VideoTexture/VideoDeckLink.cpp
+ * \ingroup bgevideotex
+ */
+
+#ifdef WITH_GAMEENGINE_DECKLINK
+
+// FFmpeg defines its own version of stdint.h on Windows.
+// DeckLink needs FFmpeg, so it uses its version of stdint.h.
+// This is necessary for the INT64_C macro.
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+// this is necessary for UINTPTR_MAX (used by atomic-ops)
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#ifdef __STDC_LIMIT_MACROS /* else it may be unused */
+#endif
+#endif
+#include <stdint.h>
+#include <string.h>
+#ifndef WIN32
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#endif
+
+#include "atomic_ops.h"
+
+#include "MEM_guardedalloc.h"
+#include "PIL_time.h"
+#include "VideoDeckLink.h"
+#include "DeckLink.h"
+#include "Exception.h"
+#include "KX_KetsjiEngine.h"
+#include "KX_PythonInit.h"
+
+extern ExceptionID DeckLinkInternalError;
+ExceptionID SourceVideoOnlyCapture, VideoDeckLinkBadFormat, VideoDeckLinkOpenCard, VideoDeckLinkDvpInternalError, VideoDeckLinkPinMemoryError;
+ExpDesc SourceVideoOnlyCaptureDesc(SourceVideoOnlyCapture, "This video source only allows live capture");
+ExpDesc VideoDeckLinkBadFormatDesc(VideoDeckLinkBadFormat, "Invalid or unsupported capture format, should be <mode>/<pixel>[/3D]");
+ExpDesc VideoDeckLinkOpenCardDesc(VideoDeckLinkOpenCard, "Cannot open capture card, check if driver installed");
+ExpDesc VideoDeckLinkDvpInternalErrorDesc(VideoDeckLinkDvpInternalError, "DVP API internal error, please report");
+ExpDesc VideoDeckLinkPinMemoryErrorDesc(VideoDeckLinkPinMemoryError, "Error pinning memory");
+
+
+#ifdef WIN32
+////////////////////////////////////////////
+// SyncInfo
+//
+// Sets up a semaphore which is shared between the GPU and CPU and used to
+// synchronise access to DVP buffers.
+#define DVP_CHECK(cmd) if ((cmd) != DVP_STATUS_OK) THRWEXCP(VideoDeckLinkDvpInternalError, S_OK)
+
+struct SyncInfo
+{
+ SyncInfo(uint32_t semaphoreAllocSize, uint32_t semaphoreAddrAlignment)
+ {
+ mSemUnaligned = (uint32_t*)malloc(semaphoreAllocSize + semaphoreAddrAlignment - 1);
+
+ // Apply alignment constraints
+ uint64_t val = (uint64_t)mSemUnaligned;
+ val += semaphoreAddrAlignment - 1;
+ val &= ~((uint64_t)semaphoreAddrAlignment - 1);
+ mSem = (uint32_t*)val;
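+		// e.g. with semaphoreAddrAlignment == 16 and an unaligned pointer 0x1001:
+		// (0x1001 + 15) & ~15 == 0x1010, the first 16-byte aligned address inside the
+		// over-allocated block (this assumes the alignment is a power of two)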
+
+ // Initialise
+ mSem[0] = 0;
+ mReleaseValue = 0;
+ mAcquireValue = 0;
+
+ // Setup DVP sync object and import it
+ DVPSyncObjectDesc syncObjectDesc;
+ syncObjectDesc.externalClientWaitFunc = NULL;
+ syncObjectDesc.sem = (uint32_t*)mSem;
+
+ DVP_CHECK(dvpImportSyncObject(&syncObjectDesc, &mDvpSync));
+
+ }
+ ~SyncInfo()
+ {
+ dvpFreeSyncObject(mDvpSync);
+ free((void*)mSemUnaligned);
+ }
+
+ volatile uint32_t* mSem;
+ volatile uint32_t* mSemUnaligned;
+ volatile uint32_t mReleaseValue;
+ volatile uint32_t mAcquireValue;
+ DVPSyncObjectHandle mDvpSync;
+};
+
+////////////////////////////////////////////
+// TextureTransferDvp: transfer with GPUDirect
+////////////////////////////////////////////
+
+class TextureTransferDvp : public TextureTransfer
+{
+public:
+ TextureTransferDvp(DVPBufferHandle dvpTextureHandle, TextureDesc *pDesc, void *address, uint32_t allocatedSize)
+ {
+ DVPSysmemBufferDesc sysMemBuffersDesc;
+
+ mExtSync = NULL;
+ mGpuSync = NULL;
+ mDvpSysMemHandle = 0;
+ mDvpTextureHandle = 0;
+ mTextureHeight = 0;
+ mAllocatedSize = 0;
+ mBuffer = NULL;
+
+ if (!_PinBuffer(address, allocatedSize))
+ THRWEXCP(VideoDeckLinkPinMemoryError, S_OK);
+ mAllocatedSize = allocatedSize;
+ mBuffer = address;
+
+ try {
+ if (!mBufferAddrAlignment) {
+ DVP_CHECK(dvpGetRequiredConstantsGLCtx(&mBufferAddrAlignment, &mBufferGpuStrideAlignment,
+ &mSemaphoreAddrAlignment, &mSemaphoreAllocSize,
+ &mSemaphorePayloadOffset, &mSemaphorePayloadSize));
+ }
+ mExtSync = new SyncInfo(mSemaphoreAllocSize, mSemaphoreAddrAlignment);
+ mGpuSync = new SyncInfo(mSemaphoreAllocSize, mSemaphoreAddrAlignment);
+ sysMemBuffersDesc.width = pDesc->width;
+ sysMemBuffersDesc.height = pDesc->height;
+ sysMemBuffersDesc.stride = pDesc->stride;
+ switch (pDesc->format) {
+ case GL_RED_INTEGER:
+ sysMemBuffersDesc.format = DVP_RED_INTEGER;
+ break;
+ default:
+ sysMemBuffersDesc.format = DVP_BGRA;
+ break;
+ }
+ switch (pDesc->type) {
+ case GL_UNSIGNED_BYTE:
+ sysMemBuffersDesc.type = DVP_UNSIGNED_BYTE;
+ break;
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ sysMemBuffersDesc.type = DVP_UNSIGNED_INT_2_10_10_10_REV;
+ break;
+ case GL_UNSIGNED_INT_8_8_8_8:
+ sysMemBuffersDesc.type = DVP_UNSIGNED_INT_8_8_8_8;
+ break;
+ case GL_UNSIGNED_INT_10_10_10_2:
+ sysMemBuffersDesc.type = DVP_UNSIGNED_INT_10_10_10_2;
+ break;
+ default:
+ sysMemBuffersDesc.type = DVP_UNSIGNED_INT;
+ break;
+ }
+ sysMemBuffersDesc.size = pDesc->width * pDesc->height * 4;
+ sysMemBuffersDesc.bufAddr = mBuffer;
+ DVP_CHECK(dvpCreateBuffer(&sysMemBuffersDesc, &mDvpSysMemHandle));
+ DVP_CHECK(dvpBindToGLCtx(mDvpSysMemHandle));
+ mDvpTextureHandle = dvpTextureHandle;
+ mTextureHeight = pDesc->height;
+ }
+ catch (Exception &) {
+ clean();
+ throw;
+ }
+ }
+ ~TextureTransferDvp()
+ {
+ clean();
+ }
+
+ virtual void PerformTransfer()
+ {
+ // perform the transfer
+ // tell DVP that the old texture buffer will no longer be used
+ dvpMapBufferEndAPI(mDvpTextureHandle);
+ // do we need this?
+ mGpuSync->mReleaseValue++;
+ dvpBegin();
+ // Copy from system memory to GPU texture
+ dvpMapBufferWaitDVP(mDvpTextureHandle);
+ dvpMemcpyLined(mDvpSysMemHandle, mExtSync->mDvpSync, mExtSync->mAcquireValue, DVP_TIMEOUT_IGNORED,
+ mDvpTextureHandle, mGpuSync->mDvpSync, mGpuSync->mReleaseValue, 0, mTextureHeight);
+ dvpMapBufferEndDVP(mDvpTextureHandle);
+ dvpEnd();
+ dvpMapBufferWaitAPI(mDvpTextureHandle);
+ // the transfer is now complete and the texture is ready for use
+ }
+
+private:
+ static uint32_t mBufferAddrAlignment;
+ static uint32_t mBufferGpuStrideAlignment;
+ static uint32_t mSemaphoreAddrAlignment;
+ static uint32_t mSemaphoreAllocSize;
+ static uint32_t mSemaphorePayloadOffset;
+ static uint32_t mSemaphorePayloadSize;
+
+ void clean()
+ {
+ if (mDvpSysMemHandle) {
+ dvpUnbindFromGLCtx(mDvpSysMemHandle);
+ dvpDestroyBuffer(mDvpSysMemHandle);
+ }
+ if (mExtSync)
+ delete mExtSync;
+ if (mGpuSync)
+ delete mGpuSync;
+ if (mBuffer)
+ _UnpinBuffer(mBuffer, mAllocatedSize);
+ }
+ SyncInfo* mExtSync;
+ SyncInfo* mGpuSync;
+ DVPBufferHandle mDvpSysMemHandle;
+ DVPBufferHandle mDvpTextureHandle;
+ uint32_t mTextureHeight;
+ uint32_t mAllocatedSize;
+ void* mBuffer;
+};
+
+uint32_t TextureTransferDvp::mBufferAddrAlignment;
+uint32_t TextureTransferDvp::mBufferGpuStrideAlignment;
+uint32_t TextureTransferDvp::mSemaphoreAddrAlignment;
+uint32_t TextureTransferDvp::mSemaphoreAllocSize;
+uint32_t TextureTransferDvp::mSemaphorePayloadOffset;
+uint32_t TextureTransferDvp::mSemaphorePayloadSize;
+
+#endif
+
+////////////////////////////////////////////
+// TextureTransferOGL: transfer using standard OGL buffers
+////////////////////////////////////////////
+
+class TextureTransferOGL : public TextureTransfer
+{
+public:
+ TextureTransferOGL(GLuint texId, TextureDesc *pDesc, void *address)
+ {
+ memcpy(&mDesc, pDesc, sizeof(mDesc));
+ mTexId = texId;
+ mBuffer = address;
+
+		// as we cache the transfer object, we create one buffer object to hold the data
+ glGenBuffers(1, &mUnpinnedTextureBuffer);
+ // create a storage for it
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mUnpinnedTextureBuffer);
+ glBufferData(GL_PIXEL_UNPACK_BUFFER, pDesc->size, NULL, GL_DYNAMIC_DRAW);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ }
+ ~TextureTransferOGL()
+ {
+ glDeleteBuffers(1, &mUnpinnedTextureBuffer);
+ }
+
+ virtual void PerformTransfer()
+ {
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mUnpinnedTextureBuffer);
+ glBufferSubData(GL_PIXEL_UNPACK_BUFFER, 0, mDesc.size, mBuffer);
+ glBindTexture(GL_TEXTURE_2D, mTexId);
+ // NULL for last arg indicates use current GL_PIXEL_UNPACK_BUFFER target as texture data
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, mDesc.width, mDesc.height, mDesc.format, mDesc.type, NULL);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ }
+private:
+ // intermediate texture to receive the buffer
+ GLuint mUnpinnedTextureBuffer;
+ // target texture to receive the image
+ GLuint mTexId;
+ // buffer
+ void *mBuffer;
+ // characteristic of the image
+ TextureDesc mDesc;
+};
+
+////////////////////////////////////////////
+// TextureTransferPMD: transfer using a pinned memory buffer
+////////////////////////////////////////////
+
+class TextureTransferPMD : public TextureTransfer
+{
+public:
+ TextureTransferPMD(GLuint texId, TextureDesc *pDesc, void *address, uint32_t allocatedSize)
+ {
+ memcpy(&mDesc, pDesc, sizeof(mDesc));
+ mTexId = texId;
+ mBuffer = address;
+ mAllocatedSize = allocatedSize;
+
+ _PinBuffer(address, allocatedSize);
+
+		// as we cache the transfer object, we create one buffer object backed by the pinned user memory
+ glGenBuffers(1, &mPinnedTextureBuffer);
+ // create a storage for it
+ glBindBuffer(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, mPinnedTextureBuffer);
+ glBufferData(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, pDesc->size, address, GL_STREAM_DRAW);
+ glBindBuffer(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, 0);
+ }
+ ~TextureTransferPMD()
+ {
+ glDeleteBuffers(1, &mPinnedTextureBuffer);
+ if (mBuffer)
+ _UnpinBuffer(mBuffer, mAllocatedSize);
+ }
+
+ virtual void PerformTransfer()
+ {
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mPinnedTextureBuffer);
+ glBindTexture(GL_TEXTURE_2D, mTexId);
+ // NULL for last arg indicates use current GL_PIXEL_UNPACK_BUFFER target as texture data
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, mDesc.width, mDesc.height, mDesc.format, mDesc.type, NULL);
+		// wait for the transfer to complete
+ GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+ glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, 40 * 1000 * 1000); // timeout in nanosec
+ glDeleteSync(fence);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ }
+private:
+ // intermediate texture to receive the buffer
+ GLuint mPinnedTextureBuffer;
+ // target texture to receive the image
+ GLuint mTexId;
+ // buffer
+ void *mBuffer;
+ // the allocated size
+ uint32_t mAllocatedSize;
+ // characteristic of the image
+ TextureDesc mDesc;
+};
+
+bool TextureTransfer::_PinBuffer(void *address, uint32_t size)
+{
+#ifdef WIN32
+ return VirtualLock(address, size);
+#elif defined(_POSIX_MEMLOCK_RANGE)
+	return !mlock(address, size);
+#else
+	// no way to pin memory on this platform
+	return false;
+#endif
+}
+
+void TextureTransfer::_UnpinBuffer(void* address, uint32_t size)
+{
+#ifdef WIN32
+ VirtualUnlock(address, size);
+#elif defined(_POSIX_MEMLOCK_RANGE)
+ munlock(address, size);
+#endif
+}
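+
+// Note: pinning only succeeds if the process is allowed to lock that much memory;
+// PinnedMemoryAllocator::ReserveMemory() below raises the working set size (Windows)
+// or RLIMIT_MEMLOCK (elsewhere) before buffers get pinned.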
+
+
+
+////////////////////////////////////////////
+// PinnedMemoryAllocator
+////////////////////////////////////////////
+
+
+// static members
+bool PinnedMemoryAllocator::mGPUDirectInitialized = false;
+bool PinnedMemoryAllocator::mHasDvp = false;
+bool PinnedMemoryAllocator::mHasAMDPinnedMemory = false;
+size_t PinnedMemoryAllocator::mReservedProcessMemory = 0;
+
+bool PinnedMemoryAllocator::ReserveMemory(size_t size)
+{
+#ifdef WIN32
+ // Increase the process working set size to allow pinning of memory.
+ if (size <= mReservedProcessMemory)
+ return true;
+ SIZE_T dwMin = 0, dwMax = 0;
+ HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_SET_QUOTA, FALSE, GetCurrentProcessId());
+ if (!hProcess)
+ return false;
+
+ // Retrieve the working set size of the process.
+ if (!dwMin && !GetProcessWorkingSetSize(hProcess, &dwMin, &dwMax))
+ return false;
+
+ BOOL res = SetProcessWorkingSetSize(hProcess, (size - mReservedProcessMemory) + dwMin, (size - mReservedProcessMemory) + dwMax);
+ if (!res)
+ return false;
+ mReservedProcessMemory = size;
+ CloseHandle(hProcess);
+ return true;
+#else
+ struct rlimit rlim;
+ if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
+ if (rlim.rlim_cur < size) {
+ if (rlim.rlim_max < size)
+ rlim.rlim_max = size;
+ rlim.rlim_cur = size;
+ return !setrlimit(RLIMIT_MEMLOCK, &rlim);
+ }
+ }
+ return false;
+#endif
+}
+
+PinnedMemoryAllocator::PinnedMemoryAllocator(unsigned cacheSize, size_t memSize) :
+mRefCount(1U),
+#ifdef WIN32
+mDvpCaptureTextureHandle(0),
+#endif
+mTexId(0),
+mBufferCacheSize(cacheSize)
+{
+ pthread_mutex_init(&mMutex, NULL);
+ // do it once
+ if (!mGPUDirectInitialized) {
+#ifdef WIN32
+		// On Windows, the AMD_pinned_memory extension is not available,
+		// we must use the special DVP API that is only available for Quadro cards
+ const char* renderer = (const char *)glGetString(GL_RENDERER);
+ mHasDvp = (strstr(renderer, "Quadro") != NULL);
+
+ if (mHasDvp) {
+ // In case the DLL is not in place, don't fail, just fallback on OpenGL
+ if (dvpInitGLContext(DVP_DEVICE_FLAGS_SHARE_APP_CONTEXT) != DVP_STATUS_OK) {
+ printf("Warning: Could not initialize DVP context, fallback on OpenGL transfer.\nInstall dvp.dll to take advantage of nVidia GPUDirect.\n");
+ mHasDvp = false;
+ }
+ }
+#endif
+ if (GLEW_AMD_pinned_memory)
+ mHasAMDPinnedMemory = true;
+
+ mGPUDirectInitialized = true;
+ }
+ if (mHasDvp || mHasAMDPinnedMemory) {
+ ReserveMemory(memSize);
+ }
+}
+
+PinnedMemoryAllocator::~PinnedMemoryAllocator()
+{
+ void *address;
+ // first clean the cache if not already done
+ while (!mBufferCache.empty()) {
+ address = mBufferCache.back();
+ mBufferCache.pop_back();
+ _ReleaseBuffer(address);
+ }
+ // clean preallocated buffers
+ while (!mAllocatedSize.empty()) {
+ address = mAllocatedSize.begin()->first;
+ _ReleaseBuffer(address);
+ }
+
+#ifdef WIN32
+ if (mDvpCaptureTextureHandle)
+ dvpDestroyBuffer(mDvpCaptureTextureHandle);
+#endif
+}
+
+void PinnedMemoryAllocator::TransferBuffer(void* address, TextureDesc* texDesc, GLuint texId)
+{
+ uint32_t allocatedSize = 0;
+ TextureTransfer *pTransfer = NULL;
+
+ Lock();
+ if (mAllocatedSize.count(address) > 0)
+ allocatedSize = mAllocatedSize[address];
+ Unlock();
+ if (!allocatedSize)
+ // internal error!!
+ return;
+ if (mTexId != texId)
+ {
+ // first time we try to send data to the GPU, allocate a buffer for the texture
+ glBindTexture(GL_TEXTURE_2D, texId);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
+ glTexImage2D(GL_TEXTURE_2D, 0, texDesc->internalFormat, texDesc->width, texDesc->height, 0, texDesc->format, texDesc->type, NULL);
+ glBindTexture(GL_TEXTURE_2D, 0);
+ mTexId = texId;
+ }
+#ifdef WIN32
+ if (mHasDvp)
+ {
+ if (!mDvpCaptureTextureHandle)
+ {
+ // bind DVP to the OGL texture
+ DVP_CHECK(dvpCreateGPUTextureGL(texId, &mDvpCaptureTextureHandle));
+ }
+ }
+#endif
+ Lock();
+ if (mPinnedBuffer.count(address) > 0)
+ {
+ pTransfer = mPinnedBuffer[address];
+ }
+ Unlock();
+ if (!pTransfer)
+ {
+#ifdef WIN32
+ if (mHasDvp)
+ pTransfer = new TextureTransferDvp(mDvpCaptureTextureHandle, texDesc, address, allocatedSize);
+ else
+#endif
+ if (mHasAMDPinnedMemory) {
+ pTransfer = new TextureTransferPMD(texId, texDesc, address, allocatedSize);
+ }
+ else {
+ pTransfer = new TextureTransferOGL(texId, texDesc, address);
+ }
+ if (pTransfer)
+ {
+ Lock();
+ mPinnedBuffer[address] = pTransfer;
+ Unlock();
+ }
+ }
+ if (pTransfer)
+ pTransfer->PerformTransfer();
+}
+
+// IUnknown methods
+HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::QueryInterface(REFIID /*iid*/, LPVOID* /*ppv*/)
+{
+ return E_NOTIMPL;
+}
+
+ULONG STDMETHODCALLTYPE PinnedMemoryAllocator::AddRef(void)
+{
+ return atomic_add_uint32(&mRefCount, 1U);
+}
+
+ULONG STDMETHODCALLTYPE PinnedMemoryAllocator::Release(void)
+{
+ uint32_t newCount = atomic_sub_uint32(&mRefCount, 1U);
+ if (newCount == 0)
+ delete this;
+ return (ULONG)newCount;
+}
+
+// IDeckLinkMemoryAllocator methods
+HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::AllocateBuffer(dl_size_t bufferSize, void* *allocatedBuffer)
+{
+ Lock();
+ if (mBufferCache.empty())
+ {
+ // Allocate memory on a page boundary
+		// Note: an aligned alloc exists in Blender but only for small alignments, so use direct allocation here.
+		// Note: the DeckLink API tries to allocate up to 65 buffers in advance; we limit the number of
+		// preallocated buffers to the cache size because we don't need that much buffering
+ if (mAllocatedSize.size() >= mBufferCacheSize)
+ *allocatedBuffer = NULL;
+ else {
+#ifdef WIN32
+ *allocatedBuffer = VirtualAlloc(NULL, bufferSize, MEM_COMMIT | MEM_RESERVE | MEM_WRITE_WATCH, PAGE_READWRITE);
+#else
+ if (posix_memalign(allocatedBuffer, 4096, bufferSize) != 0)
+ *allocatedBuffer = NULL;
+#endif
+ mAllocatedSize[*allocatedBuffer] = bufferSize;
+ }
+ }
+ else {
+ // Re-use most recently ReleaseBuffer'd address
+ *allocatedBuffer = mBufferCache.back();
+ mBufferCache.pop_back();
+ }
+ Unlock();
+ return (*allocatedBuffer) ? S_OK : E_OUTOFMEMORY;
+}
+
+HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::ReleaseBuffer(void* buffer)
+{
+ HRESULT result = S_OK;
+ Lock();
+ if (mBufferCache.size() < mBufferCacheSize) {
+ mBufferCache.push_back(buffer);
+ }
+ else {
+ result = _ReleaseBuffer(buffer);
+ }
+ Unlock();
+ return result;
+}
+
+
+HRESULT PinnedMemoryAllocator::_ReleaseBuffer(void* buffer)
+{
+ TextureTransfer *pTransfer;
+ if (mAllocatedSize.count(buffer) == 0) {
+ // Internal error!!
+ return S_OK;
+ }
+ else {
+ // No room left in cache, so un-pin (if it was pinned) and free this buffer
+ if (mPinnedBuffer.count(buffer) > 0) {
+ pTransfer = mPinnedBuffer[buffer];
+ mPinnedBuffer.erase(buffer);
+ delete pTransfer;
+ }
+#ifdef WIN32
+ VirtualFree(buffer, 0, MEM_RELEASE);
+#else
+ free(buffer);
+#endif
+ mAllocatedSize.erase(buffer);
+ }
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::Commit()
+{
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::Decommit()
+{
+ void *buffer;
+ Lock();
+ while (!mBufferCache.empty()) {
+ // Cleanup any frames allocated and pinned in AllocateBuffer() but not freed in ReleaseBuffer()
+ buffer = mBufferCache.back();
+ mBufferCache.pop_back();
+ _ReleaseBuffer(buffer);
+ }
+ Unlock();
+ return S_OK;
+}
+
+
+////////////////////////////////////////////
+// Capture Delegate Class
+////////////////////////////////////////////
+
+CaptureDelegate::CaptureDelegate(VideoDeckLink* pOwner) : mpOwner(pOwner)
+{
+}
+
+HRESULT CaptureDelegate::VideoInputFrameArrived(IDeckLinkVideoInputFrame* inputFrame, IDeckLinkAudioInputPacket* /*audioPacket*/)
+{
+ if (!inputFrame) {
+ // It's possible to receive a NULL inputFrame, but a valid audioPacket. Ignore audio-only frame.
+ return S_OK;
+ }
+ if ((inputFrame->GetFlags() & bmdFrameHasNoInputSource) == bmdFrameHasNoInputSource) {
+ // let's not bother transferring frames if there is no source
+ return S_OK;
+ }
+ mpOwner->VideoFrameArrived(inputFrame);
+ return S_OK;
+}
+
+HRESULT CaptureDelegate::VideoInputFormatChanged(BMDVideoInputFormatChangedEvents notificationEvents, IDeckLinkDisplayMode *newDisplayMode, BMDDetectedVideoInputFormatFlags detectedSignalFlags)
+{
+ return S_OK;
+}
+
+
+
+
+// macro for exception handling and logging
+#define CATCH_EXCP catch (Exception & exp) \
+{ exp.report(); m_status = SourceError; }
+
+// class VideoDeckLink
+
+
+// constructor
+VideoDeckLink::VideoDeckLink (HRESULT * hRslt) : VideoBase(),
+mDLInput(NULL),
+mUse3D(false),
+mFrameWidth(0),
+mFrameHeight(0),
+mpAllocator(NULL),
+mpCaptureDelegate(NULL),
+mpCacheFrame(NULL),
+mClosing(false)
+{
+ mDisplayMode = (BMDDisplayMode)0;
+ mPixelFormat = (BMDPixelFormat)0;
+ pthread_mutex_init(&mCacheMutex, NULL);
+}
+
+// destructor
+VideoDeckLink::~VideoDeckLink ()
+{
+ LockCache();
+ mClosing = true;
+ if (mpCacheFrame)
+ {
+ mpCacheFrame->Release();
+ mpCacheFrame = NULL;
+ }
+ UnlockCache();
+ if (mDLInput != NULL)
+ {
+ // Cleanup for Capture
+ mDLInput->StopStreams();
+ mDLInput->SetCallback(NULL);
+ mDLInput->DisableVideoInput();
+ mDLInput->DisableAudioInput();
+ mDLInput->FlushStreams();
+ if (mDLInput->Release() != 0) {
+ printf("Reference count not NULL on DeckLink device when closing it, please report!\n");
+ }
+ mDLInput = NULL;
+ }
+
+ if (mpAllocator)
+ {
+ // if the device was properly cleared, this should be 0
+ if (mpAllocator->Release() != 0) {
+ printf("Reference count not NULL on Allocator when closing it, please report!\n");
+ }
+ mpAllocator = NULL;
+ }
+ if (mpCaptureDelegate)
+ {
+ delete mpCaptureDelegate;
+ mpCaptureDelegate = NULL;
+ }
+}
+
+void VideoDeckLink::refresh(void)
+{
+ m_avail = false;
+}
+
+// release components
+bool VideoDeckLink::release()
+{
+ // release
+ return true;
+}
+
+// open video file
+void VideoDeckLink::openFile (char *filename)
+{
+ // only live capture on this device
+ THRWEXCP(SourceVideoOnlyCapture, S_OK);
+}
+
+
+// open video capture device
+void VideoDeckLink::openCam (char *format, short camIdx)
+{
+ IDeckLinkDisplayModeIterator* pDLDisplayModeIterator;
+ BMDDisplayModeSupport modeSupport;
+ IDeckLinkDisplayMode* pDLDisplayMode;
+ IDeckLinkIterator* pIterator;
+ BMDTimeValue frameDuration;
+ BMDTimeScale frameTimescale;
+ IDeckLink* pDL;
+ uint32_t displayFlags, inputFlags;
+ char *pPixel, *p3D, *pEnd, *pSize;
+ size_t len;
+ int i, modeIdx, cacheSize;
+
+ // format is constructed as <displayMode>/<pixelFormat>[/3D][:<cacheSize>]
+ // <displayMode> takes the form of BMDDisplayMode identifier minus the 'bmdMode' prefix.
+	// This implementation understands all the modes defined in SDK 10.3.1 but you can alternatively
+	// use the 4-character internal representation of the mode (e.g. 'HD1080p24' == '24ps')
+	// <pixelFormat> takes the form of BMDPixelFormat identifier minus the 'bmdFormat' prefix.
+	// This implementation understands all the formats defined in SDK 10.3.1 but you can alternatively
+	// use the 4-character internal representation of the format (e.g. '10BitRGB' == 'r210')
+ // Not all combinations of mode and pixel format are possible and it also depends on the card!
+ // Use /3D postfix if you are capturing a 3D stream with frame packing
+ // Example: To capture FullHD 1920x1080@24Hz with 3D packing and 4:4:4 10 bits RGB pixel format, use
+ // "HD1080p24/10BitRGB/3D" (same as "24ps/r210/3D")
+ // (this will be the normal capture format for FullHD on the DeckLink 4k extreme)
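+	// e.g. (hypothetical call) VideoTexture.VideoDeckLink("HD1080p24/10BitRGB/3D:4", 0) requests the
+	// same mode/format but keeps only 4 preallocated capture buffers instead of the default 8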
+
+ if ((pSize = strchr(format, ':')) != NULL) {
+ cacheSize = strtol(pSize+1, &pEnd, 10);
+ }
+ else {
+ cacheSize = 8;
+ pSize = format + strlen(format);
+ }
+ if ((pPixel = strchr(format, '/')) == NULL ||
+ ((p3D = strchr(pPixel + 1, '/')) != NULL && strncmp(p3D, "/3D", pSize-p3D)))
+ THRWEXCP(VideoDeckLinkBadFormat, S_OK);
+ mUse3D = (p3D) ? true : false;
+ // to simplify pixel format parsing
+ if (!p3D)
+ p3D = pSize;
+
+ // read the mode
+ len = (size_t)(pPixel - format);
+ // accept integer display mode
+
+ try {
+ // throws if bad mode
+ decklink_ReadDisplayMode(format, len, &mDisplayMode);
+ // found a valid mode, remember that we do not look for an index
+ modeIdx = -1;
+ }
+ catch (Exception &) {
+ // accept also purely numerical mode as a mode index
+ modeIdx = strtol(format, &pEnd, 10);
+ if (pEnd != pPixel || modeIdx < 0)
+ // not a pure number, give up
+ throw;
+ }
+
+ // skip /
+ pPixel++;
+ len = (size_t)(p3D - pPixel);
+ // throws if bad format
+ decklink_ReadPixelFormat(pPixel, len, &mPixelFormat);
+
+	// Caution: the DeckLink API is used from this point on, make sure entities are released before throwing
+ // open the card
+ pIterator = BMD_CreateDeckLinkIterator();
+ if (pIterator) {
+ i = 0;
+ while (pIterator->Next(&pDL) == S_OK) {
+ if (i == camIdx) {
+ if (pDL->QueryInterface(IID_IDeckLinkInput, (void**)&mDLInput) != S_OK)
+ mDLInput = NULL;
+ pDL->Release();
+ break;
+ }
+ i++;
+ pDL->Release();
+ }
+ pIterator->Release();
+ }
+ if (!mDLInput)
+ THRWEXCP(VideoDeckLinkOpenCard, S_OK);
+
+
+ // check if display mode and pixel format are supported
+ if (mDLInput->GetDisplayModeIterator(&pDLDisplayModeIterator) != S_OK)
+ THRWEXCP(DeckLinkInternalError, S_OK);
+
+ pDLDisplayMode = NULL;
+ displayFlags = (mUse3D) ? bmdDisplayModeSupports3D : 0;
+ inputFlags = (mUse3D) ? bmdVideoInputDualStream3D : bmdVideoInputFlagDefault;
+ while (pDLDisplayModeIterator->Next(&pDLDisplayMode) == S_OK)
+ {
+ if (modeIdx == 0 || pDLDisplayMode->GetDisplayMode() == mDisplayMode) {
+ // in case we get here because of modeIdx, make sure we have mDisplayMode set
+ mDisplayMode = pDLDisplayMode->GetDisplayMode();
+ if ((pDLDisplayMode->GetFlags() & displayFlags) == displayFlags &&
+ mDLInput->DoesSupportVideoMode(mDisplayMode, mPixelFormat, inputFlags, &modeSupport, NULL) == S_OK &&
+ modeSupport == bmdDisplayModeSupported)
+ {
+ break;
+ }
+ }
+ pDLDisplayMode->Release();
+ pDLDisplayMode = NULL;
+ if (modeIdx-- == 0) {
+			// reached the requested mode index but it does not support the pixel format, give up
+ break;
+ }
+ }
+ pDLDisplayModeIterator->Release();
+
+ if (pDLDisplayMode == NULL)
+ THRWEXCP(VideoDeckLinkBadFormat, S_OK);
+
+ mFrameWidth = pDLDisplayMode->GetWidth();
+ mFrameHeight = pDLDisplayMode->GetHeight();
+ mTextureDesc.height = (mUse3D) ? 2 * mFrameHeight : mFrameHeight;
+ pDLDisplayMode->GetFrameRate(&frameDuration, &frameTimescale);
+ pDLDisplayMode->Release();
+ // for information, in case the application wants to know
+ m_size[0] = mFrameWidth;
+ m_size[1] = mTextureDesc.height;
+ m_frameRate = (float)frameTimescale / (float)frameDuration;
+
+ switch (mPixelFormat)
+ {
+ case bmdFormat8BitYUV:
+ // 2 pixels per word
+ mTextureDesc.stride = mFrameWidth * 2;
+ mTextureDesc.width = mFrameWidth / 2;
+ mTextureDesc.internalFormat = GL_RGBA;
+ mTextureDesc.format = GL_BGRA;
+ mTextureDesc.type = GL_UNSIGNED_BYTE;
+ break;
+ case bmdFormat10BitYUV:
+ // 6 pixels in 4 words, rounded to 48 pixels
+ mTextureDesc.stride = ((mFrameWidth + 47) / 48) * 128;
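+		// e.g. a 1920 pixel row: ((1920 + 47) / 48) * 128 = 40 * 128 = 5120 bytes,
+		// i.e. 1280 4-byte texels, hence width = stride / 4 below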
+ mTextureDesc.width = mTextureDesc.stride/4;
+ mTextureDesc.internalFormat = GL_RGB10_A2;
+ mTextureDesc.format = GL_BGRA;
+ mTextureDesc.type = GL_UNSIGNED_INT_2_10_10_10_REV;
+ break;
+ case bmdFormat8BitARGB:
+ mTextureDesc.stride = mFrameWidth * 4;
+ mTextureDesc.width = mFrameWidth;
+ mTextureDesc.internalFormat = GL_RGBA;
+ mTextureDesc.format = GL_BGRA;
+ mTextureDesc.type = GL_UNSIGNED_INT_8_8_8_8;
+ break;
+ case bmdFormat8BitBGRA:
+ mTextureDesc.stride = mFrameWidth * 4;
+ mTextureDesc.width = mFrameWidth;
+ mTextureDesc.internalFormat = GL_RGBA;
+ mTextureDesc.format = GL_BGRA;
+ mTextureDesc.type = GL_UNSIGNED_BYTE;
+ break;
+ case bmdFormat10BitRGBXLE:
+ // 1 pixel per word, rounded to 64 pixels
+ mTextureDesc.stride = ((mFrameWidth + 63) / 64) * 256;
+ mTextureDesc.width = mTextureDesc.stride/4;
+ mTextureDesc.internalFormat = GL_RGB10_A2;
+ mTextureDesc.format = GL_RGBA;
+ mTextureDesc.type = GL_UNSIGNED_INT_10_10_10_2;
+ break;
+ case bmdFormat10BitRGBX:
+ case bmdFormat10BitRGB:
+ // 1 pixel per word, rounded to 64 pixels
+ mTextureDesc.stride = ((mFrameWidth + 63) / 64) * 256;
+ mTextureDesc.width = mTextureDesc.stride/4;
+ mTextureDesc.internalFormat = GL_R32UI;
+ mTextureDesc.format = GL_RED_INTEGER;
+ mTextureDesc.type = GL_UNSIGNED_INT;
+ break;
+ case bmdFormat12BitRGB:
+ case bmdFormat12BitRGBLE:
+		// 8 pixels in 9 words
+ mTextureDesc.stride = (mFrameWidth * 36) / 8;
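+		// 36 bits per pixel: 8 pixels pack into 9 32-bit words,
+		// e.g. a 1920 pixel row: 1920 * 36 / 8 = 8640 bytes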
+ mTextureDesc.width = mTextureDesc.stride/4;
+ mTextureDesc.internalFormat = GL_R32UI;
+ mTextureDesc.format = GL_RED_INTEGER;
+ mTextureDesc.type = GL_UNSIGNED_INT;
+ break;
+ default:
+ // for unknown pixel format, this will be resolved when a frame arrives
+ mTextureDesc.format = GL_RED_INTEGER;
+ mTextureDesc.type = GL_UNSIGNED_INT;
+ break;
+ }
+	// reserve memory for the cached frames + 1 to accommodate pixel formats that we don't know yet
+	// note: we can't use the stride as it is not yet known when the pixel format is unknown,
+	// use the frame width instead; in the worst case it's not much different (e.g. HD720/10BitYUV: 1296 pixels versus 1280)
+	// note: some pixel formats take more than 4 bytes per pixel, take that into account (9/8 versus 1)
+ mpAllocator = new PinnedMemoryAllocator(cacheSize, mFrameWidth*mTextureDesc.height * 4 * (1+cacheSize*9/8));
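+	// e.g. HD1080p (1920x1080, non-3D) with the default cache of 8:
+	// 1920 * 1080 * 4 * (1 + 8*9/8) = 82944000 bytes, i.e. roughly 80MB of lockable memory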
+
+ if (mDLInput->SetVideoInputFrameMemoryAllocator(mpAllocator) != S_OK)
+ THRWEXCP(DeckLinkInternalError, S_OK);
+
+ mpCaptureDelegate = new CaptureDelegate(this);
+ if (mDLInput->SetCallback(mpCaptureDelegate) != S_OK)
+ THRWEXCP(DeckLinkInternalError, S_OK);
+
+ if (mDLInput->EnableVideoInput(mDisplayMode, mPixelFormat, ((mUse3D) ? bmdVideoInputDualStream3D : bmdVideoInputFlagDefault)) != S_OK)
+		// this shouldn't fail, we tested above
+ THRWEXCP(DeckLinkInternalError, S_OK);
+
+	// just in case it is needed to capture from certain cards; we don't check for errors because we don't need audio
+ mDLInput->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, 2);
+
+ // open base class
+ VideoBase::openCam(format, camIdx);
+
+ // ready to capture, will start when application calls play()
+}
+
+// play video
+bool VideoDeckLink::play (void)
+{
+ try
+ {
+ // if object is able to play
+ if (VideoBase::play())
+ {
+ mDLInput->FlushStreams();
+ return (mDLInput->StartStreams() == S_OK);
+ }
+ }
+ CATCH_EXCP;
+ return false;
+}
+
+
+// pause video
+bool VideoDeckLink::pause (void)
+{
+ try
+ {
+ if (VideoBase::pause())
+ {
+ mDLInput->PauseStreams();
+ return true;
+ }
+ }
+ CATCH_EXCP;
+ return false;
+}
+
+// stop video
+bool VideoDeckLink::stop (void)
+{
+ try
+ {
+ VideoBase::stop();
+ mDLInput->StopStreams();
+ return true;
+ }
+ CATCH_EXCP;
+ return false;
+}
+
+
+// set video range
+void VideoDeckLink::setRange (double start, double stop)
+{
+}
+
+// set framerate
+void VideoDeckLink::setFrameRate (float rate)
+{
+}
+
+
+// image calculation
+// send cache frame directly to GPU
+void VideoDeckLink::calcImage (unsigned int texId, double ts)
+{
+ IDeckLinkVideoInputFrame* pFrame;
+ LockCache();
+ pFrame = mpCacheFrame;
+ mpCacheFrame = NULL;
+ UnlockCache();
+ if (pFrame) {
+ // BUG: the dvpBindToGLCtx function fails the first time it is used, don't know why.
+ // This causes an exception to be thrown.
+ // This should be fixed but in the meantime we will catch the exception because
+ // it is crucial that we release the frame to keep the reference count right on the DeckLink device
+ try {
+ uint32_t rowSize = pFrame->GetRowBytes();
+ uint32_t textureSize = rowSize * pFrame->GetHeight();
+ void* videoPixels = NULL;
+ void* rightEyePixels = NULL;
+ if (!mTextureDesc.stride) {
+ // we could not compute the texture size earlier (unknown pixel size)
+ // let's do it now
+ mTextureDesc.stride = rowSize;
+ mTextureDesc.width = mTextureDesc.stride / 4;
+ }
+ if (mTextureDesc.stride != rowSize) {
+ // unexpected frame size, ignore
+ // TBD: print a warning
+ }
+ else {
+ pFrame->GetBytes(&videoPixels);
+ if (mUse3D) {
+ IDeckLinkVideoFrame3DExtensions *if3DExtensions = NULL;
+ IDeckLinkVideoFrame *rightEyeFrame = NULL;
+ if (pFrame->QueryInterface(IID_IDeckLinkVideoFrame3DExtensions, (void **)&if3DExtensions) == S_OK &&
+ if3DExtensions->GetFrameForRightEye(&rightEyeFrame) == S_OK) {
+ rightEyeFrame->GetBytes(&rightEyePixels);
+ textureSize += ((uint64_t)rightEyePixels - (uint64_t)videoPixels);
+ }
+ if (rightEyeFrame)
+ rightEyeFrame->Release();
+ if (if3DExtensions)
+ if3DExtensions->Release();
+ }
+ mTextureDesc.size = mTextureDesc.width * mTextureDesc.height * 4;
+ if (mTextureDesc.size == textureSize) {
+ // this means that both left and right frame are contiguous and that there is no padding
+ // do the transfer
+ mpAllocator->TransferBuffer(videoPixels, &mTextureDesc, texId);
+ }
+ }
+ }
+ catch (Exception &) {
+ pFrame->Release();
+ throw;
+ }
+		// this will trigger PinnedMemoryAllocator::ReleaseBuffer
+ pFrame->Release();
+ }
+ // currently we don't pass the image to the application
+ m_avail = false;
+}
+
+// A frame is available from the board
+// Called from an internal thread, just pass the frame to the main thread
+void VideoDeckLink::VideoFrameArrived(IDeckLinkVideoInputFrame* inputFrame)
+{
+ IDeckLinkVideoInputFrame* pOldFrame = NULL;
+ LockCache();
+ if (!mClosing)
+ {
+ pOldFrame = mpCacheFrame;
+ mpCacheFrame = inputFrame;
+ inputFrame->AddRef();
+ }
+ UnlockCache();
+ // old frame no longer needed, just release it
+ if (pOldFrame)
+ pOldFrame->Release();
+}
+
+// python methods
+
+// object initialization
+static int VideoDeckLink_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
+{
+ static const char *kwlist[] = { "format", "capture", NULL };
+ PyImage *self = reinterpret_cast<PyImage*>(pySelf);
+ // see openCam for a description of format
+ char * format = NULL;
+ // capture device number, i.e. DeckLink card number, default first one
+ short capt = 0;
+
+ if (!GLEW_VERSION_1_5) {
+ PyErr_SetString(PyExc_RuntimeError, "VideoDeckLink requires at least OpenGL 1.5");
+ return -1;
+ }
+ // get parameters
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|h",
+ const_cast<char**>(kwlist), &format, &capt))
+ return -1;
+
+ try {
+ // create video object
+ Video_init<VideoDeckLink>(self);
+
+ // open video source, control comes back to VideoDeckLink::openCam
+ Video_open(getVideo(self), format, capt);
+ }
+ catch (Exception & exp) {
+ exp.report();
+ return -1;
+ }
+	// initialization succeeded
+ return 0;
+}
+
+// methods structure
+static PyMethodDef videoMethods[] =
+{ // methods from VideoBase class
+ {"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
+ {"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
+ {"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
+ {"refresh", (PyCFunction)Video_refresh, METH_VARARGS, "Refresh video - get its status"},
+ {NULL}
+};
+// attributes structure
+static PyGetSetDef videoGetSets[] =
+{ // methods from VideoBase class
+ {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
+ {(char*)"framerate", (getter)Video_getFrameRate, NULL, (char*)"frame rate", NULL},
+ // attributes from ImageBase class
+ {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
+ {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
+ {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
+ {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
+ {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
+ {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
+ {NULL}
+};
+
+// python type declaration
+PyTypeObject VideoDeckLinkType =
+{
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "VideoTexture.VideoDeckLink", /*tp_name*/
+ sizeof(PyImage), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor)Image_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &imageBufferProcs, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT, /*tp_flags*/
+ "DeckLink video source", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ videoMethods, /* tp_methods */
+ 0, /* tp_members */
+ videoGetSets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)VideoDeckLink_init, /* tp_init */
+ 0, /* tp_alloc */
+ Image_allocNew, /* tp_new */
+};
+
+
+
+////////////////////////////////////////////
+// DeckLink Capture Delegate Class
+////////////////////////////////////////////
+
+#endif // WITH_GAMEENGINE_DECKLINK
+
diff --git a/source/gameengine/VideoTexture/VideoDeckLink.h b/source/gameengine/VideoTexture/VideoDeckLink.h
new file mode 100644
index 00000000000..be81f63d93c
--- /dev/null
+++ b/source/gameengine/VideoTexture/VideoDeckLink.h
@@ -0,0 +1,256 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2015, Blender Foundation
+ * All rights reserved.
+ *
+ * The Original Code is: all of this file.
+ *
+ * Contributor(s): Blender Foundation.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file VideoDeckLink.h
+ * \ingroup bgevideotex
+ */
+
+#ifndef __VIDEODECKLINK_H__
+#define __VIDEODECKLINK_H__
+
+#ifdef WITH_GAMEENGINE_DECKLINK
+
+/* this needs to be parsed with __cplusplus defined before included through DeckLink_compat.h */
+#if defined(__FreeBSD__)
+# include <inttypes.h>
+#endif
+#include <map>
+#include <set>
+
+extern "C" {
+#include <pthread.h>
+#include "DNA_listBase.h"
+#include "BLI_threads.h"
+#include "BLI_blenlib.h"
+}
+#include "GL/glew.h"
+#ifdef WIN32
+#include "dvpapi.h"
+#endif
+#include "DeckLinkAPI.h"
+#include "VideoBase.h"
+
+class PinnedMemoryAllocator;
+
+struct TextureDesc
+{
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t size;
+ GLenum internalFormat;
+ GLenum format;
+ GLenum type;
+ TextureDesc()
+ {
+ width = 0;
+ height = 0;
+ stride = 0;
+ size = 0;
+ internalFormat = 0;
+ format = 0;
+ type = 0;
+ }
+};
+
+class CaptureDelegate;
+
+// type VideoDeckLink declaration
+class VideoDeckLink : public VideoBase
+{
+ friend class CaptureDelegate;
+public:
+ /// constructor
+ VideoDeckLink (HRESULT * hRslt);
+ /// destructor
+ virtual ~VideoDeckLink ();
+
+ /// open video/image file
+ virtual void openFile(char *file);
+ /// open video capture device
+ virtual void openCam(char *driver, short camIdx);
+
+ /// release video source
+ virtual bool release (void);
+ /// overwrite base refresh to handle fixed image
+ virtual void refresh(void);
+ /// play video
+ virtual bool play (void);
+ /// pause video
+ virtual bool pause (void);
+ /// stop video
+ virtual bool stop (void);
+ /// set play range
+ virtual void setRange (double start, double stop);
+ /// set frame rate
+ virtual void setFrameRate (float rate);
+
+protected:
+ // format and codec information
+ /// image calculation
+ virtual void calcImage (unsigned int texId, double ts);
+
+private:
+ void VideoFrameArrived(IDeckLinkVideoInputFrame* inputFrame);
+ void LockCache()
+ {
+ pthread_mutex_lock(&mCacheMutex);
+ }
+ void UnlockCache()
+ {
+ pthread_mutex_unlock(&mCacheMutex);
+ }
+
+ IDeckLinkInput* mDLInput;
+ BMDDisplayMode mDisplayMode;
+ BMDPixelFormat mPixelFormat;
+ bool mUse3D;
+ uint32_t mFrameWidth;
+ uint32_t mFrameHeight;
+ TextureDesc mTextureDesc;
+ PinnedMemoryAllocator* mpAllocator;
+ CaptureDelegate* mpCaptureDelegate;
+
+ // cache frame in transit between the callback thread and the main BGE thread
+ // keep only one frame in cache because we just want to keep up with real time
+ pthread_mutex_t mCacheMutex;
+ IDeckLinkVideoInputFrame* mpCacheFrame;
+ bool mClosing;
+
+};
+
+inline VideoDeckLink *getDeckLink(PyImage *self)
+{
+ return static_cast<VideoDeckLink*>(self->m_image);
+}
+
+////////////////////////////////////////////
+// TextureTransfer : Abstract class to perform a transfer to GPU memory using fast transfer if available
+////////////////////////////////////////////
+class TextureTransfer
+{
+public:
+ TextureTransfer() {}
+ virtual ~TextureTransfer() { }
+
+ virtual void PerformTransfer() = 0;
+protected:
+ static bool _PinBuffer(void *address, uint32_t size);
+ static void _UnpinBuffer(void* address, uint32_t size);
+};
+
+////////////////////////////////////////////
+// PinnedMemoryAllocator
+////////////////////////////////////////////
+
+// PinnedMemoryAllocator implements the IDeckLinkMemoryAllocator interface and can be used instead of the
+// built-in frame allocator, by setting with SetVideoInputFrameMemoryAllocator() or SetVideoOutputFrameMemoryAllocator().
+//
+// A custom frame memory allocator is used to ensure each address of frame memory is
+// aligned on a 4kB boundary, as required by the OpenGL pinned memory extension.
+// If the pinned memory extension is not available, this allocator is still used and
+// caches frame allocations for efficiency.
+//
+// The frame cache delays the releasing of buffers until the cache fills up, thereby avoiding an
+// allocate plus pin operation for every frame, followed by an unpin and deallocate on every frame.
+
+
+class PinnedMemoryAllocator : public IDeckLinkMemoryAllocator
+{
+public:
+ PinnedMemoryAllocator(unsigned cacheSize, size_t memSize);
+ virtual ~PinnedMemoryAllocator();
+
+ void TransferBuffer(void* address, TextureDesc* texDesc, GLuint texId);
+
+ // IUnknown methods
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv);
+ virtual ULONG STDMETHODCALLTYPE AddRef(void);
+ virtual ULONG STDMETHODCALLTYPE Release(void);
+
+ // IDeckLinkMemoryAllocator methods
+ virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(dl_size_t bufferSize, void* *allocatedBuffer);
+ virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void* buffer);
+ virtual HRESULT STDMETHODCALLTYPE Commit();
+ virtual HRESULT STDMETHODCALLTYPE Decommit();
+
+private:
+ static bool mGPUDirectInitialized;
+ static bool mHasDvp;
+ static bool mHasAMDPinnedMemory;
+ static size_t mReservedProcessMemory;
+ static bool ReserveMemory(size_t size);
+
+ void Lock()
+ {
+ pthread_mutex_lock(&mMutex);
+ }
+ void Unlock()
+ {
+ pthread_mutex_unlock(&mMutex);
+ }
+ HRESULT _ReleaseBuffer(void* buffer);
+
+ uint32_t mRefCount;
+ // protect the cache and the allocated map,
+ // not the pinnedBuffer map as it is only used from main thread
+ pthread_mutex_t mMutex;
+ std::map<void*, uint32_t> mAllocatedSize;
+ std::vector<void*> mBufferCache;
+ std::map<void *, TextureTransfer*> mPinnedBuffer;
+#ifdef WIN32
+ DVPBufferHandle mDvpCaptureTextureHandle;
+#endif
+ // target texture in GPU
+ GLuint mTexId;
+ uint32_t mBufferCacheSize;
+};
+
+////////////////////////////////////////////
+// Capture Delegate Class
+////////////////////////////////////////////
+
+class CaptureDelegate : public IDeckLinkInputCallback
+{
+ VideoDeckLink* mpOwner;
+
+public:
+ CaptureDelegate(VideoDeckLink* pOwner);
+
+ // IUnknown needs only a dummy implementation
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
+ virtual ULONG STDMETHODCALLTYPE AddRef() { return 1; }
+ virtual ULONG STDMETHODCALLTYPE Release() { return 1; }
+
+ virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioPacket);
+ virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents notificationEvents, IDeckLinkDisplayMode *newDisplayMode, BMDDetectedVideoInputFormatFlags detectedSignalFlags);
+};
+
+
+#endif /* WITH_GAMEENGINE_DECKLINK */
+
+#endif /* __VIDEODECKLINK_H__ */
diff --git a/source/gameengine/VideoTexture/VideoFFmpeg.cpp b/source/gameengine/VideoTexture/VideoFFmpeg.cpp
index 5fed1211d6c..083e9e28502 100644
--- a/source/gameengine/VideoTexture/VideoFFmpeg.cpp
+++ b/source/gameengine/VideoTexture/VideoFFmpeg.cpp
@@ -1203,7 +1203,7 @@ static PyMethodDef videoMethods[] =
{"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
{"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
{"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
- {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
+ {"refresh", (PyCFunction)Video_refresh, METH_VARARGS, "Refresh video - get its status"},
{NULL}
};
// attributes structure
@@ -1326,7 +1326,7 @@ static PyObject *Image_reload(PyImage *self, PyObject *args)
// methods structure
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
- {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
+ {"refresh", (PyCFunction)Video_refresh, METH_VARARGS, "Refresh image, i.e. load it"},
{"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
{NULL}
};
diff --git a/source/gameengine/VideoTexture/blendVideoTex.cpp b/source/gameengine/VideoTexture/blendVideoTex.cpp
index a62ffee3137..9b046d46412 100644
--- a/source/gameengine/VideoTexture/blendVideoTex.cpp
+++ b/source/gameengine/VideoTexture/blendVideoTex.cpp
@@ -128,6 +128,10 @@ static PyMethodDef moduleMethods[] =
extern PyTypeObject VideoFFmpegType;
extern PyTypeObject ImageFFmpegType;
#endif
+#ifdef WITH_GAMEENGINE_DECKLINK
+extern PyTypeObject VideoDeckLinkType;
+extern PyTypeObject DeckLinkType;
+#endif
extern PyTypeObject FilterBlueScreenType;
extern PyTypeObject FilterGrayType;
extern PyTypeObject FilterColorType;
@@ -145,6 +149,9 @@ static void registerAllTypes(void)
pyImageTypes.add(&VideoFFmpegType, "VideoFFmpeg");
pyImageTypes.add(&ImageFFmpegType, "ImageFFmpeg");
#endif
+#ifdef WITH_GAMEENGINE_DECKLINK
+ pyImageTypes.add(&VideoDeckLinkType, "VideoDeckLink");
+#endif
pyImageTypes.add(&ImageBuffType, "ImageBuff");
pyImageTypes.add(&ImageMixType, "ImageMix");
pyImageTypes.add(&ImageRenderType, "ImageRender");
@@ -194,6 +201,10 @@ PyMODINIT_FUNC initVideoTexturePythonBinding(void)
return NULL;
if (PyType_Ready(&TextureType) < 0)
return NULL;
+#ifdef WITH_GAMEENGINE_DECKLINK
+ if (PyType_Ready(&DeckLinkType) < 0)
+ return NULL;
+#endif
m = PyModule_Create(&VideoTexture_module_def);
PyDict_SetItemString(PySys_GetObject("modules"), VideoTexture_module_def.m_name, m);
@@ -207,6 +218,10 @@ PyMODINIT_FUNC initVideoTexturePythonBinding(void)
Py_INCREF(&TextureType);
PyModule_AddObject(m, "Texture", (PyObject *)&TextureType);
+#ifdef WITH_GAMEENGINE_DECKLINK
+ Py_INCREF(&DeckLinkType);
+ PyModule_AddObject(m, "DeckLink", (PyObject *)&DeckLinkType);
+#endif
PyModule_AddIntConstant(m, "SOURCE_ERROR", SourceError);
PyModule_AddIntConstant(m, "SOURCE_EMPTY", SourceEmpty);
PyModule_AddIntConstant(m, "SOURCE_READY", SourceReady);