git.blender.org/blender.git
Diffstat (limited to 'extern/glog/src/stacktrace_x86-inl.h')
-rw-r--r--  extern/glog/src/stacktrace_x86-inl.h | 139
1 file changed, 139 insertions(+), 0 deletions(-)
diff --git a/extern/glog/src/stacktrace_x86-inl.h b/extern/glog/src/stacktrace_x86-inl.h
new file mode 100644
index 00000000000..cfd31f783e3
--- /dev/null
+++ b/extern/glog/src/stacktrace_x86-inl.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2000 - 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Produce stack trace
+
+#include <stdint.h> // for uintptr_t
+
+#include "utilities.h" // for OS_* macros
+
+#if !defined(OS_WINDOWS)
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#include <stdio.h> // for NULL
+#include "stacktrace.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// Given a pointer to a stack frame, locate and return the calling stack
+// frame, or return NULL if no stack frame can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+  void **new_sp = (void **) *old_sp;
+
+  // Check that the transition from frame pointer old_sp to frame
+  // pointer new_sp isn't clearly bogus.
+  if (STRICT_UNWINDING) {
+    // With the stack growing downwards, the older stack frame must be
+    // at a greater address than the current one.
+    if (new_sp <= old_sp) return NULL;
+    // Assume stack frames larger than 100,000 bytes are bogus.
+    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return NULL;
+  } else {
+    // In non-strict mode, allow discontiguous stack frames
+    // (alternate signal stacks, for example).
+    if (new_sp == old_sp) return NULL;
+    // And allow frames up to about 1MB.
+    if ((new_sp > old_sp)
+        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return NULL;
+  }
+  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return NULL;
+#ifdef __i386__
+  // In 32-bit code on a 64-bit machine, the stack pointer can be very
+  // close to 0xffffffff, so we explicitly check for a pointer into the
+  // last two pages in the address space.
+  if ((uintptr_t)new_sp >= 0xffffe000) return NULL;
+#endif
+#if !defined(OS_WINDOWS)
+  if (!STRICT_UNWINDING) {
+    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
+    // on AMD-based machines with VDSO-enabled kernels.
+    // Make an extra sanity check to ensure new_sp is readable.
+    // Note: NextStackFrame<false>() is only called while the program
+    // is already on its last legs, so it's OK to be slow here.
+    static int page_size = getpagesize();
+    void *new_sp_aligned = (void *)((uintptr_t)new_sp & ~(page_size - 1));
+    if (msync(new_sp_aligned, page_size, MS_ASYNC) == -1)
+      return NULL;
+  }
+#endif
+  return new_sp;
+}
+
+// If you change this function, also change GetStackFrames below.
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+  void **sp;
+#ifdef __i386__
+  // Stack frame format:
+  //    sp[0]   pointer to previous frame
+  //    sp[1]   caller address
+  //    sp[2]   first argument
+  //    ...
+  sp = (void **)&result - 2;
+#endif
+
+#ifdef __x86_64__
+  // __builtin_frame_address(0) can return the wrong address on gcc-4.1.0-k8
+  unsigned long rbp;
+  // Move the value of the register %rbp into the local variable rbp.
+  // We need 'volatile' to prevent this instruction from being moved
+  // around during optimization to before the function prologue is done.
+  // An alternative way to achieve this
+  // would be (before this __asm__ instruction) to call Noop() defined as
+  //   static void Noop() __attribute__ ((noinline));  // prevent inlining
+  //   static void Noop() { asm(""); }  // prevent optimizing-away
+  __asm__ volatile ("mov %%rbp, %0" : "=r" (rbp));
+  // Arguments are passed in registers on x86-64, so we can't just
+  // offset from &result.
+  sp = (void **) rbp;
+#endif
+
+  int n = 0;
+  while (sp && n < max_depth) {
+    if (*(sp+1) == (void *)0) {
+      // In 64-bit code, we often see a frame that
+      // points to itself and has a return address of 0.
+      break;
+    }
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n++] = *(sp+1);
+    }
+    // Use strict unwinding rules.
+    sp = NextStackFrame<true>(sp);
+  }
+  return n;
+}
+
+_END_GOOGLE_NAMESPACE_
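
For reference, the function added by this patch is declared in glog's internal "stacktrace.h" and lives in the google namespace. The sketch below is an illustration of how a caller inside the glog source tree might capture and print a raw trace; the buffer size and skip count are arbitrary example values, not anything the patch prescribes.

// Hypothetical caller (not part of this patch). Builds only inside the
// glog source tree, where the internal "stacktrace.h" is available.
#include <cstdio>
#include "stacktrace.h"  // declares google::GetStackTrace()

int main() {
  void *frames[32];
  // Capture up to 32 return addresses; skip_count = 0 keeps every frame,
  // starting from the caller of GetStackTrace() itself.
  const int depth = google::GetStackTrace(frames, 32, 0);
  for (int i = 0; i < depth; ++i)
    std::printf("#%d %p\n", i, frames[i]);
  return 0;
}

The raw addresses can then be symbolized offline (for example with addr2line) or through glog's own symbolizer.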
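
The msync(2) probe in NextStackFrame<false>() deserves a note: asking the kernel to sync a page it may not have mapped is a cheap, side-effect-free way to test readability, because msync() fails with ENOMEM on unmapped ranges. A standalone, Linux-oriented sketch of the same idea follows; it is my illustration, not code from the patch.

// Probe whether the page containing an address is mapped, the way the
// non-strict unwinder above does before trusting a frame pointer.
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static bool AddressIsReadable(const void *addr) {
  static const uintptr_t page_size = (uintptr_t)sysconf(_SC_PAGESIZE);
  // Round down to the containing page; msync() requires page alignment.
  void *page = (void *)((uintptr_t)addr & ~(page_size - 1));
  return msync(page, page_size, MS_ASYNC) == 0;  // ENOMEM => not mapped
}

int main() {
  int on_stack = 42;
  printf("stack page mapped: %d\n", AddressIsReadable(&on_stack));  // expect 1
  printf("page zero mapped:  %d\n", AddressIsReadable((void *)0));  // almost always 0
  return 0;
}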