Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/facebook/luaffifb.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJames R. McKaskill <james@foobar.co.nz>2012-04-12 03:13:37 +0400
committerJames R. McKaskill <james@foobar.co.nz>2012-04-22 07:10:48 +0400
commitb23a23357c101355a958ca8ad52068746a54d2ac (patch)
tree89a3474aa4120be332cafdd73c9305ec5dbb77a9 /ctype.c
parent8042d117bbbd5208ed59fee6591abe267ac4ecbf (diff)
Overallocate bitfield structs to the next 8 byte boundary to stop valgrind.
Valgrind is rightly complaining as we always access bitfields in 8 byte chunks to simplify the get/set logic.
Diffstat (limited to 'ctype.c')
-rw-r--r--ctype.c17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/ctype.c b/ctype.c
index 1465043..da668c4 100644
--- a/ctype.c
+++ b/ctype.c
@@ -132,10 +132,27 @@ void* push_cdata(lua_State* L, int ct_usr, const struct ctype* ct)
size_t sz = ct->is_reference ? sizeof(void*) : ctype_size(L, ct);
ct_usr = lua_absindex(L, ct_usr);
+ /* This is to stop valgrind from complaining. Bitfields are accessed in 8
+ * byte chunks so that the code doesn't have to deal with different access
+ * patterns, but this means that occasionally it will read past the end of
+ * the struct. As its not setting the bits past the end (only reading and
+ * then writing the bits back) and the read is aligned its a non-issue,
+ * but valgrind complains nonetheless.
+ */
+ if (ct->has_bitfield) {
+ sz = ALIGN_UP(sz, 7);
+ }
+
cd = (struct cdata*) lua_newuserdata(L, sizeof(struct cdata) + sz);
*(struct ctype*) &cd->type = *ct;
memset(cd+1, 0, sz);
+ /* TODO: handle cases where lua_newuserdata returns a pointer that is not
+ * aligned */
+#if 0
+ assert((uintptr_t) (cd + 1) % 8 == 0);
+#endif
+
#if LUA_VERSION_NUM == 501
if (ct_usr && lua_isnil(L, ct_usr)) {
push_upval(L, &niluv_key);